diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index eb2acff8d2..9c7b97e803 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -12,17 +12,17 @@ jobs: - uses: actions/checkout@v4 - name: Set up go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 - name: Install helm-docs - run: go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.0 + run: go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.3 env: GOBIN: /usr/local/bin/ - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Run pre-commit uses: pre-commit/action@v3.0.0 @@ -35,6 +35,7 @@ jobs: matrix: python: - "3.11" + - "3.12" steps: - uses: actions/checkout@v4 @@ -54,22 +55,16 @@ jobs: with: fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v4 + - uses: azure/setup-helm@v3 with: - python-version: "3.11" - - - name: Install test dependencies - run: make init - - - name: Expand modified charts - run: expand-charts - - - name: Set up chart-testing - uses: helm/chart-testing-action@v2.4.0 + # Used to query GitHub for the latest Helm release. + token: ${{ secrets.GITHUB_TOKEN }} - - name: Run chart-testing (lint) - run: ct lint --all --config ct.yaml + - uses: lsst-sqre/run-tox@v1 + with: + python-version: "3.12" + tox-envs: phalanx-lint-change + cache-key-prefix: test # The minikube job always runs, but it quickly does nothing if no files that # would affect minikube were changed. This unfortunately requires a lot of @@ -93,12 +88,12 @@ jobs: filters: | minikube: - ".github/workflows/ci.yaml" - - "applications/*/Chart.yaml" - - "applications/*/templates/**" - - "applications/*/values.yaml" - - "applications/*/values-minikube.yaml" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/Chart.yaml" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/templates/**" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/values.yaml" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/values-minikube.yaml" - "environments/Chart.yaml" - - "environments/templates/**" + - "environments/templates/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}*" - "environments/values-minikube.yaml" - "installer/**" @@ -117,21 +112,20 @@ jobs: - name: Download installer dependencies if: steps.filter.outputs.minikube == 'true' run: | - curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.14.0/vault_1.14.0_linux_amd64.zip + curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.15.4/vault_1.15.4_linux_amd64.zip unzip /tmp/vault.zip sudo mv vault /usr/local/bin/vault sudo chmod +x /usr/local/bin/vault - sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.10/argocd-linux-amd64 + sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.8.6/argocd-linux-amd64 sudo chmod +x /usr/local/bin/argocd sudo apt-get install socat - sudo pip install -r installer/requirements.txt - name: Run installer timeout-minutes: 15 if: steps.filter.outputs.minikube == 'true' run: | cd installer - ./install.sh minikube ${{ secrets.MINIKUBE_VAULT_KEY }} + ./install.sh minikube "${{ secrets.MINIKUBE_VAULT_ROLE_ID }}" "${{ secrets.MINIKUBE_VAULT_SECRET_ID }}" - name: Get final list of resources if: 
steps.filter.outputs.minikube == 'true' diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml index edca727e70..49b52fbb6d 100644 --- a/.github/workflows/dependencies.yaml +++ b/.github/workflows/dependencies.yaml @@ -18,7 +18,7 @@ jobs: - name: Run neophile uses: lsst-sqre/run-neophile@v1 with: - python-version: "3.11" + python-version: "3.12" mode: pr types: python app-id: ${{ secrets.NEOPHILE_APP_ID }} diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index e524d4d984..8dc01ded18 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -14,8 +14,6 @@ name: Docs - "renovate/**" - "tickets/**" - "u/**" - tags: - - "*" workflow_dispatch: {} jobs: @@ -39,25 +37,28 @@ jobs: - "docs/**" - "applications/*/Chart.yaml" - "applications/*/values.yaml" + - "applications/argocd/values-*.yaml" - "applications/gafaelfawr/values-*.yaml" - "environments/values-*.yaml" + - "src/phalanx/**" + docsSpecific: + - "docs/**" - name: Install graphviz - if: steps.filter.outputs.docs == 'true' + if: steps.filter.outputs.docs == 'true' || github.event_name == 'workflow_dispatch' run: sudo apt-get install graphviz - name: Build docs - if: steps.filter.outputs.docs == 'true' + if: steps.filter.outputs.docs == 'true' || github.event_name == 'workflow_dispatch' uses: lsst-sqre/run-tox@v1 with: - python-version: "3.11" + python-version: "3.12" tox-envs: docs - # Only attempt documentation uploads for tagged releases and pull - # requests from ticket branches in the same repository. This avoids - # version clutter in the docs and failures when a PR doesn't have access - # to secrets. + # Upload docs: + # - on pushes to main if *any* documentation content might have changed + # - on all workflow dispatches + # - on pull requests from tickets/ branches if docs/ directory content changed - name: Upload to LSST the Docs uses: lsst-sqre/ltd-upload@v1 with: @@ -66,7 +67,6 @@ jobs: username: ${{ secrets.LTD_USERNAME }} password: ${{ secrets.LTD_PASSWORD }} if: >- - steps.filter.outputs.docs == 'true' - && github.event_name != 'merge_group' - && (github.event_name != 'pull_request' - || startsWith(github.head_ref, 'tickets/')) + (github.event_name == 'push' && github.ref_name == 'main' && steps.filter.outputs.docs == 'true') + || (github.event_name == 'workflow_dispatch') + || (github.event_name == 'pull_request' && startsWith(github.head_ref, 'tickets/') && steps.filter.outputs.docsSpecific == 'true') diff --git a/.github/workflows/linkcheck.yaml b/.github/workflows/linkcheck.yaml index 831f0d6436..458e5f558a 100644 --- a/.github/workflows/linkcheck.yaml +++ b/.github/workflows/linkcheck.yaml @@ -48,5 +48,5 @@ jobs: - name: Check links uses: lsst-sqre/run-tox@v1 with: - python-version: "3.11" + python-version: "3.12" tox-envs: docs-linkcheck diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fa06f3e758..6ebea0460f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,20 +1,20 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-merge-conflict - id: check-toml - id: trailing-whitespace - repo: https://github.com/adrienverge/yamllint - rev: v1.32.0 + rev: v1.33.0 hooks: - id: yamllint args: - -c=.yamllint.yml - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.26.3 + rev: 0.27.3 hooks: - id: check-jsonschema files: ^applications/.*/secrets(-[^./-]+)?\.yaml @@ -26,7 +26,7 @@ repos: files:
^docs/extras/schemas/.*\.json - repo: https://github.com/norwoodj/helm-docs - rev: v1.11.0 + rev: v1.11.3 hooks: - id: helm-docs args: @@ -46,15 +46,11 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.289 + rev: v0.1.8 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] - - - repo: https://github.com/psf/black - rev: 23.7.0 - hooks: - - id: black + - id: ruff-format - repo: https://github.com/adamchainz/blacken-docs rev: 1.16.0 diff --git a/Makefile b/Makefile index 5020c0ccf4..dd81ada03d 100644 --- a/Makefile +++ b/Makefile @@ -15,11 +15,11 @@ clean: .PHONY: init init: + pip install --upgrade pip pre-commit tox + pre-commit install pip install --editable . pip install --upgrade -r requirements/main.txt -r requirements/dev.txt rm -rf .tox - pip install --upgrade pre-commit tox - pre-commit install # This is defined as a Makefile target instead of only a tox command because # if the command fails we want to cat output.txt, which contains the diff --git a/applications/alert-stream-broker/Chart.yaml b/applications/alert-stream-broker/Chart.yaml index a6042337fe..cdeb3ac3e6 100644 --- a/applications/alert-stream-broker/Chart.yaml +++ b/applications/alert-stream-broker/Chart.yaml @@ -1,13 +1,13 @@ apiVersion: v2 name: alert-stream-broker -version: "3" +version: 1.0.0 description: Alert transmission to community brokers sources: - https://github.com/lsst-dm/alert_database_ingester - https://github.com/lsst-dm/alert-stream-simulator dependencies: - name: alert-stream-broker - version: 2.5.1 + version: 2.5.2 # The schema registry is bundled together in the same application as the # Kafka broker because Strimzi Registry Operator expects everything (the diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index d56b4589f3..5bfa1d8968 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -50,6 +50,8 @@ Alert transmission to community brokers | alert-database.storage.gcp.project | string | `""` | Name of a GCP project that has a bucket for database storage | | alert-database.storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data | | alert-stream-broker.cluster.name | string | `"alert-broker"` | Name used for the Kafka broker, and used by Strimzi for many annotations. | +| alert-stream-broker.clusterName | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. | +| alert-stream-broker.clusterPort | int | `9092` | Port to connect to on the Strimzi Kafka cluster. It should be an internal TLS listener. | | alert-stream-broker.fullnameOverride | string | `""` | Override for the full name used for Kubernetes resources; by default one will be created based on the chart name and helm release name. | | alert-stream-broker.kafka.config | object | `{"log.retention.bytes":"42949672960","log.retention.hours":168,"offsets.retention.minutes":1440}` | Configuration overrides for the Kafka server. | | alert-stream-broker.kafka.config."log.retention.bytes" | string | `"42949672960"` | Maximum retained number of bytes for a broker's data. This is a string to avoid YAML type conversion issues for large numbers. 
| @@ -76,14 +78,20 @@ Alert transmission to community brokers | alert-stream-broker.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | alert-stream-broker.kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | alert-stream-broker.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| alert-stream-broker.maxBytesRetained | string | `"24000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | +| alert-stream-broker.maxMillisecondsRetained | string | `"604800000"` | Maximum amount of time to save simulated alerts in the replay topic, in milliseconds. Default is 7 days. | | alert-stream-broker.nameOverride | string | `""` | | +| alert-stream-broker.schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | alert-stream-broker.strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | | alert-stream-broker.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | +| alert-stream-broker.testTopicName | string | `"alert-stream-test"` | Name of the topic which will be used to send test alerts. | +| alert-stream-broker.testTopicPartitions | int | `8` | | +| alert-stream-broker.testTopicReplicas | int | `2` | | | alert-stream-broker.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | alert-stream-broker.tls.subject.organization | string | `"Vera C. Rubin Observatory"` | Organization to use in the 'Subject' field of the broker's TLS certificate. | -| alert-stream-broker.users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | +| alert-stream-broker.users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated","alert-stream-test"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | | alert-stream-broker.users[0].groups | list | `["rubin-testing"]` | A list of string prefixes for groups that the user should get admin access to, allowing them to create, delete, describe, etc consumer groups. Note that these are prefix-matched, not just literal exact matches. | -| alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated"]` | A list of topics that the user should get read-only access to. | +| alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. 
| | alert-stream-broker.users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | | alert-stream-broker.vaultSecretsPath | string | `""` | Path to the secret resource in Vault | | alert-stream-broker.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml index 41df3cce85..ba9abab12b 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: alert-stream-broker -version: 2.5.1 +version: 2.5.2 description: Kafka broker cluster for distributing alerts maintainers: - name: bsmart diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md index 6fac6e2664..75f458e99e 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md @@ -7,6 +7,8 @@ Kafka broker cluster for distributing alerts | Key | Type | Default | Description | |-----|------|---------|-------------| | cluster.name | string | `"alert-broker"` | Name used for the Kafka broker, and used by Strimzi for many annotations. | +| clusterName | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. | +| clusterPort | int | `9092` | Port to connect to on the Strimzi Kafka cluster. It should be an internal TLS listener. | | fullnameOverride | string | `""` | Override for the full name used for Kubernetes resources; by default one will be created based on the chart name and helm release name. | | kafka.config | object | `{"log.retention.bytes":"42949672960","log.retention.hours":168,"offsets.retention.minutes":1440}` | Configuration overrides for the Kafka server. | | kafka.config."log.retention.bytes" | string | `"42949672960"` | Maximum retained number of bytes for a broker's data. This is a string to avoid YAML type conversion issues for large numbers. | @@ -33,14 +35,20 @@ Kafka broker cluster for distributing alerts | kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| maxBytesRetained | string | `"24000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | +| maxMillisecondsRetained | string | `"604800000"` | Maximum amount of time to save simulated alerts in the replay topic, in milliseconds. Default is 7 days. | | nameOverride | string | `""` | | +| schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. 
| +| testTopicName | string | `"alert-stream-test"` | Name of the topic which will be used to send test alerts. | +| testTopicPartitions | int | `8` | | +| testTopicReplicas | int | `2` | | | tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | tls.subject.organization | string | `"Vera C. Rubin Observatory"` | Organization to use in the 'Subject' field of the broker's TLS certificate. | -| users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | +| users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated","alert-stream-test"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | | users[0].groups | list | `["rubin-testing"]` | A list of string prefixes for groups that the user should get admin access to, allowing them to create, delete, describe, etc consumer groups. Note that these are prefix-matched, not just literal exact matches. | -| users[0].readonlyTopics | list | `["alert-stream","alerts-simulated"]` | A list of topics that the user should get read-only access to. | +| users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. | | users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | | vaultSecretsPath | string | `""` | Path to the secret resource in Vault | | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml new file mode 100644 index 0000000000..17049cb56d --- /dev/null +++ b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml @@ -0,0 +1,13 @@ +apiVersion: "kafka.strimzi.io/{{ .Values.strimziAPIVersion }}" +kind: KafkaTopic +metadata: + name: "{{ .Values.testTopicName }}" + labels: + strimzi.io/cluster: "{{ .Values.clusterName }}" +spec: + partitions: {{ .Values.testTopicPartitions }} + replicas: {{ .Values.testTopicReplicas }} + config: + cleanup.policy: "delete" + retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days + retention.bytes: {{ .Values.maxBytesRetained }} diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml index 9c48cd5ff6..d18993399b 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml @@ -114,7 +114,7 @@ users: - # -- The username for the user that should be created. username: rubin-testing # -- A list of topics that the user should get read-only access to. 
- readonlyTopics: ["alert-stream", "alerts-simulated"] + readonlyTopics: ["alert-stream", "alerts-simulated", "alert-stream-test"] # -- A list of string prefixes for groups that the user should get admin # access to, allowing them to create, delete, describe, etc consumer # groups. Note that these are prefix-matched, not just literal exact @@ -148,3 +148,29 @@ vaultSecretsPath: "" fullnameOverride: "" nameOverride: "" + +# -- Name of the topic which will be used to send test alerts. +testTopicName: alert-stream-test + +# -- Integer ID to use in the prefix of alert data packets. This should be a +# valid Confluent Schema Registry ID associated with the schema used. +schemaID: 1 + +# -- Name of a Strimzi Kafka cluster to connect to. +clusterName: alert-broker + +# -- Port to connect to on the Strimzi Kafka cluster. It should be an internal +# TLS listener. +clusterPort: 9092 + +# -- Maximum amount of time to save simulated alerts in the replay topic, in +# milliseconds. Default is 7 days. +maxMillisecondsRetained: "604800000" + +# -- Maximum number of bytes for the replay topic, per partition, per replica. +# Default is 100GB, but should be lower to not fill storage. +maxBytesRetained: "24000000000" + +testTopicPartitions: 8 + +testTopicReplicas: 2 diff --git a/applications/alert-stream-broker/secrets-idfint.yaml b/applications/alert-stream-broker/secrets-idfint.yaml new file mode 100644 index 0000000000..9522a0504a --- /dev/null +++ b/applications/alert-stream-broker/secrets-idfint.yaml @@ -0,0 +1,18 @@ +"alerce-idfint-password": + description: "?" +"ampel-idfint-password": + description: "?" +"antares-idfint-password": + description: "?" +"babamul-idfint-password": + description: "?" +"fink-idfint-password": + description: "?" +"lasair-idfint-password": + description: "?" +"pittgoogle-idfint-password": + description: "?" +"rubin-communitybroker-idfint-password": + description: "?" +"rubin-devel-idfint-password": + description: "?" diff --git a/applications/alert-stream-broker/secrets.yaml b/applications/alert-stream-broker/secrets.yaml new file mode 100644 index 0000000000..cab38f58c0 --- /dev/null +++ b/applications/alert-stream-broker/secrets.yaml @@ -0,0 +1,2 @@ +"kafka-admin-password": + description: "?" diff --git a/applications/alert-stream-broker/values-idfint.yaml b/applications/alert-stream-broker/values-idfint.yaml deleted file mode 100644 index 3b94677a96..0000000000 --- a/applications/alert-stream-broker/values-idfint.yaml +++ /dev/null @@ -1,134 +0,0 @@ -alert-stream-broker: - cluster: - name: "alert-broker" - - kafka: - # Addresses based on the state as of 2021-12-02; these were assigned by - # Google and now we're pinning them. 
- externalListener: - tls: - enabled: true - bootstrap: - ip: "35.224.176.103" - host: alert-stream-int.lsst.cloud - brokers: - - ip: "34.28.80.188" - host: alert-stream-int-broker-0.lsst.cloud - - ip: "35.188.136.140" - host: alert-stream-int-broker-1.lsst.cloud - - ip: "35.238.84.221" - host: alert-stream-int-broker-2.lsst.cloud - # - ip: "35.184.182.182" - # host: alert-stream-int-broker-3.lsst.cloud - # - ip: "35.232.191.72" - # host: alert-stream-int-broker-4.lsst.cloud - # - ip: "34.27.122.46" - # host: alert-stream-int-broker-5.lsst.cloud - - replicas: 3 - - storage: - size: 1500Gi - - nodePool: - affinities: - - key: kafka - value: ok - - tolerations: - - key: kafka - value: ok - effect: NoSchedule - vaultSecretsPath: "secret/k8s_operator/data-int.lsst.cloud/alert-stream-broker" - - users: - # A user for development purposes by the Rubin team, with access to all - # topics in readonly mode. - - username: "rubin-devel-idfint" - readonlyTopics: ["*"] - groups: ["rubin-devel-idfint"] - - # A user used by the Rubin team but with similar access to the community - # broker users. - - username: "rubin-communitybroker-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["rubin-communitybroker-idfint"] - - # The actual community broker users - - username: "alerce-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["alerce-idfint"] - - - username: "ampel-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["ampel-idfint"] - - - username: "antares-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["antares-idfint"] - - - username: "babamul-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["babamul-idfint"] - - - username: "fink-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["fink-idfint"] - - - username: "lasair-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["lasair-idfint"] - - - username: "pittgoogle-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["pittgoogle-idfint"] - -alert-stream-schema-registry: - hostname: "alert-schemas-int.lsst.cloud" - schemaTopic: "registry-schemas" - -alert-stream-simulator: - clusterPort: 9092 # internal TLS listener - replayTopicName: "alerts-simulated" - replayTopicPartitions: 300 - staticTopicName: "alerts-static" - image: - tag: v1.2.1 - -alert-database: - ingester: - image: - tag: v2.0.2 - - logLevel: verbose - - schemaRegistryURL: https://alert-schemas-int.lsst.cloud - - serviceAccountName: alert-database-writer - - kafka: - cluster: alert-broker - port: 9092 - topic: alerts-simulated - - gcp: - serviceAccountName: alertdb-writer - projectID: science-platform-int-dc5d - - server: - serviceAccountName: alert-database-reader - - gcp: - serviceAccountName: alertdb-reader - projectID: science-platform-int-dc5d - - ingress: - enabled: true - host: "data-int.lsst.cloud" - gafaelfawrAuthQuery: "scope=read:alertdb" - - storage: - gcp: - project: science-platform-int-dc5d - alertBucket: rubin-alertdb-int-us-central1-packets - schemaBucket: rubin-alertdb-int-us-central1-schemas diff --git a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml index 08c469616d..a178351831 100644 --- a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml +++ b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml @@ -9,7 +9,7 @@ alert-stream-broker: kafka: - version: 3.4.0 + version: 3.5.1 # -- Encoding version for messages, see # 
https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. logMessageFormatVersion: 3.4 @@ -110,6 +110,10 @@ alert-stream-broker: readonlyTopics: ["alerts-simulated"] groups: ["pittgoogle-idfint"] + testTopicName: "alert-stream-test" + testTopicPartitions: 20 + testTopicReplicas: 1 + alert-stream-schema-registry: hostname: "usdf-alert-schemas-dev.slac.stanford.edu" schemaTopic: "registry-schemas" diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index ce1ac7671f..23d1cb0757 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.33.3 + version: 0.40.7 repository: https://argoproj.github.io/argo-helm diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 15fff6ccad..993b83aaac 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.46.7 + version: 5.53.8 repository: https://argoproj.github.io/argo-helm diff --git a/applications/argocd/values-idfdev.yaml b/applications/argocd/values-idfdev.yaml index 24ff51f640..02c9448719 100644 --- a/applications/argocd/values-idfdev.yaml +++ b/applications/argocd/values-idfdev.yaml @@ -35,4 +35,5 @@ argo-cd: g, roby@lsst.cloud, role:admin g, kkoehler@lsst.cloud, role:admin g, fritzm@lsst.cloud, role:admin + g, dirving@lsst.cloud, role:admin scopes: "[email]" diff --git a/applications/argocd/values-idfint.yaml b/applications/argocd/values-idfint.yaml index c2745b744d..88aa16f9b9 100644 --- a/applications/argocd/values-idfint.yaml +++ b/applications/argocd/values-idfint.yaml @@ -37,4 +37,5 @@ argo-cd: g, fritzm@lsst.cloud, role:admin g, drbsmart@lsst.cloud, role:admin g, ecbellm@lsst.cloud, role:admin + g, dirving@lsst.cloud, role:admin scopes: "[email]" diff --git a/applications/argocd/values-usdf-tel-rsp.yaml b/applications/argocd/values-usdf-tel-rsp.yaml index 280ffe7033..8399177d91 100644 --- a/applications/argocd/values-usdf-tel-rsp.yaml +++ b/applications/argocd/values-usdf-tel-rsp.yaml @@ -30,8 +30,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-tel-rsp + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. diff --git a/applications/argocd/values-usdfdev-alert-stream-broker.yaml b/applications/argocd/values-usdfdev-alert-stream-broker.yaml index 61287b465c..120229e94d 100644 --- a/applications/argocd/values-usdfdev-alert-stream-broker.yaml +++ b/applications/argocd/values-usdfdev-alert-stream-broker.yaml @@ -32,8 +32,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-alert-stream-broker-dev + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. 
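Across these Argo CD values files the pattern is the same: `clientID` becomes a literal value instead of the old `$oidc.clientId` indirection, while the client secret is still resolved at runtime through the `$dex.clientSecret` reference. A minimal sketch of where that reference resolves, assuming Argo CD's standard convention that a bare `$key` in `oidc.config` names a key of the `argocd-secret` Secret (the placeholder value is illustrative; these environments populate the secret out of band, as the `configs.secret.createSecret: false` setting in the new usdfint values below suggests):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: argocd-secret
  namespace: argocd
type: Opaque
stringData:
  # Resolved by the $dex.clientSecret reference in the oidc.config blocks.
  dex.clientSecret: "<OIDC client secret issued by the SLAC Dex instance>"
```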
diff --git a/applications/argocd/values-usdfdev-prompt-processing.yaml b/applications/argocd/values-usdfdev-prompt-processing.yaml index 2be48eed63..6cbf7a6d7a 100644 --- a/applications/argocd/values-usdfdev-prompt-processing.yaml +++ b/applications/argocd/values-usdfdev-prompt-processing.yaml @@ -31,8 +31,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-prompt-processing-dev + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index fa44ff14a9..561b45be9b 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -30,8 +30,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: usdf-rsp-dev-argocd + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. @@ -56,6 +56,9 @@ argo-cd: g, rra@slac.stanford.edu, role:admin g, fritzm@slac.stanford.edu, role:admin g, cslater@slac.stanford.edu, role:admin + g, neilsen@slac.stanford.edu, role:admin + g, saranda@slac.stanford.edu, role:admin + g, ktl@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | diff --git a/applications/argocd/values-usdfint.yaml b/applications/argocd/values-usdfint.yaml new file mode 100644 index 0000000000..90d4f43990 --- /dev/null +++ b/applications/argocd/values-usdfint.yaml @@ -0,0 +1,86 @@ +argo-cd: + redis: + enabled: true + + server: + ingress: + enabled: true + hosts: + - "usdf-rsp-int.slac.stanford.edu" + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + paths: + - /argo-cd(/|$)(.*) + + extraArgs: + - "--basehref=/argo-cd" + - "--insecure=true" + + env: + - name: HTTP_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + config: + url: https://usdf-rsp-int.slac.stanford.edu/argo-cd + oidc.config: | + name: SLAC + issuer: https://dex.slac.stanford.edu + clientID: "vcluster--usdf-rsp-int" + clientSecret: $dex.clientSecret + # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] + requestedScopes: ["openid", "profile", "email", "groups"] + # Optional set of OIDC claims to request on the ID token. 
+ requestedIDTokenClaims: {"groups": {"essential": true}} + rbacConfig: + policy.csv: | + g, ytl@slac.stanford.edu, role:admin + g, ppascual@slac.stanford.edu, role:admin + g, pav@slac.stanford.edu, role:admin + g, dspeck@slac.stanford.edu, role:admin + g, afausti@slac.stanford.edu, role:admin + g, mfl@slac.stanford.edu, role:admin + g, cbanek@slac.stanford.edu, role:admin + g, frossie@slac.stanford.edu, role:admin + g, hchiang2@slac.stanford.edu, role:admin + g, athor@slac.stanford.edu, role:admin + g, jsick@slac.stanford.edu, role:admin + g, reinking@slac.stanford.edu, role:admin + g, smart@slac.stanford.edu, role:admin + g, omullan@slac.stanford.edu, role:admin + g, mreuter@slac.stanford.edu, role:admin + g, rra@slac.stanford.edu, role:admin + scopes: "[email]" + + helm.repositories: | + - url: https://lsst-sqre.github.io/charts/ + name: lsst-sqre + - url: https://charts.helm.sh/stable + name: stable + repoServer: + + env: + - name: HTTP_PROXY + value: http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + controller: + + env: + - name: HTTP_PROXY + value: http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + configs: + secret: + createSecret: false diff --git a/applications/argocd/values-usdfprod-prompt-processing.yaml b/applications/argocd/values-usdfprod-prompt-processing.yaml index 8aff78959e..a2267e17f8 100644 --- a/applications/argocd/values-usdfprod-prompt-processing.yaml +++ b/applications/argocd/values-usdfprod-prompt-processing.yaml @@ -31,8 +31,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-prompt-processing + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 86f3188b58..e7b190ceb2 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -30,8 +30,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: usdf-rsp-argocd + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. 
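The new auxtel application that follows assembles its deployment from many instances of a single shared `csc` chart, using Helm dependency aliases gated by `condition` flags. A minimal sketch of the mechanism for one CSC, reusing values that appear in the tucson-teststand file later in this diff (this is standard Helm behavior, not phalanx-specific):

```yaml
# Chart.yaml: the shared chart is declared once per CSC under an alias;
# Helm renders this instance only when atdome-sim.enabled is true.
dependencies:
  - name: csc
    alias: atdome-sim
    version: 1.0.0
    condition: atdome-sim.enabled
    repository: file://../../charts/csc

# values-<env>.yaml: values under the alias key configure that instance
# of the shared chart.
---
atdome-sim:
  enabled: true
  image:
    repository: ts-dockerhub.lsst.org/atdome
    pullPolicy: Always
  env:
    RUN_ARG: --simulate
```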
diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml new file mode 100644 index 0000000000..00b6f524f8 --- /dev/null +++ b/applications/auxtel/Chart.yaml @@ -0,0 +1,81 @@ +apiVersion: v2 +name: auxtel +version: 1.0.0 +description: Deployment for the Auxiliary Telescope CSCs +dependencies: +- name: csc_collector + version: 1.0.0 + repository: file://../../charts/csc_collector +- name: hexapod-sim + version: 1.0.0 + condition: hexapod-sim.enabled +- name: csc + alias: ataos + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: atdome + version: 1.0.0 + condition: atdome.enabled + repository: file://../../charts/csc +- name: csc + alias: atdome-sim + version: 1.0.0 + condition: atdome-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: atdometrajectory + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: atheaderservice + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: athexapod + version: 1.0.0 + condition: athexapod.enabled + repository: file://../../charts/csc +- name: csc + alias: athexapod-sim + version: 1.0.0 + condition: athexapod-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: atmcs + version: 1.0.0 + condition: atmcs.enabled + repository: file://../../charts/csc +- name: csc + alias: atmcs-sim + version: 1.0.0 + condition: atmcs-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: atoods + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: atpneumatics + version: 1.0.0 + condition: atpneumatics.enabled + repository: file://../../charts/csc +- name: csc + alias: atpneumatics-sim + version: 1.0.0 + condition: atpneumatics-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: atptg + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: atspectrograph + version: 1.0.0 + condition: atspectrograph.enabled + repository: file://../../charts/csc +- name: csc + alias: atspectrograph-sim + version: 1.0.0 + condition: atspectrograph-sim.enabled + repository: file://../../charts/csc diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md new file mode 100644 index 0000000000..eb239ab869 --- /dev/null +++ b/applications/auxtel/README.md @@ -0,0 +1,36 @@ +# auxtel + +Deployment for the Auxiliary Telescope CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.host | string | Set by Argo CD | 
Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| atdome-sim.enabled | bool | `false` | Enable the ATDome simulator CSC | +| atdome.enabled | bool | `false` | Enable the ATDome CSC | +| athexapod-sim.enabled | bool | `false` | Enable the ATHexapod simulator CSC | +| athexapod.enabled | bool | `false` | Enable the ATHexapod CSC | +| atmcs-sim.enabled | bool | `false` | Enable the ATMCS simulator CSC | +| atmcs.enabled | bool | `false` | Enable the ATMCS CSC | +| atpneumatics-sim.enabled | bool | `false` | Enable the ATPneumatics simulator CSC | +| atpneumatics.enabled | bool | `false` | Enable the ATPneumatics CSC | +| atspectrograph-sim.enabled | bool | `false` | Enable the ATSpectrograph simulator CSC | +| atspectrograph.enabled | bool | `false` | Enable the ATSpectrograph CSC | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| hexapod-sim.enabled | bool | `false` | Enable the hexapod controller simulator | +| hexapod-sim.image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | +| hexapod-sim.image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | +| hexapod-sim.image.repository | string | `"ts-dockerhub.lsst.org/hexapod_simulator"` | The Docker registry name of the container image | +| hexapod-sim.image.tag | string | `"latest"` | The tag of the container image | +| hexapod-sim.namespace | string | `"auxtel"` | This is the namespace in which the hexapod controller simulator will be placed | diff --git a/applications/auxtel/charts/hexapod-sim/Chart.yaml b/applications/auxtel/charts/hexapod-sim/Chart.yaml new file mode 100644 index 0000000000..5dd5941e44 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +description: Chart for the hexapod simulator that supports the ATHexapod +name: hexapod-sim +version: 1.0.0 diff --git a/applications/auxtel/charts/hexapod-sim/README.md b/applications/auxtel/charts/hexapod-sim/README.md new file mode 100644 index 0000000000..b5a01ae11b --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/README.md @@ -0,0 +1,13 @@ +# hexapod-sim + +Chart for the hexapod simulator that supports the ATHexapod + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | +| image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | +| image.repository | string | `"ts-dockerhub.lsst.org/hexapod_simulator"` | The Docker registry name of the container image | +| image.tag | string | `"latest"` | The tag of the container image | +| namespace | string | `"auxtel"` | This is the namespace in which the hexapod controller simulator will be placed | diff --git a/applications/auxtel/charts/hexapod-sim/templates/_helpers.tpl b/applications/auxtel/charts/hexapod-sim/templates/_helpers.tpl new file mode 100644 index 0000000000..b0b8517ad5 --- /dev/null +++
b/applications/auxtel/charts/hexapod-sim/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "hexapod-sim.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml b/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml new file mode 100644 index 0000000000..67ca984209 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml @@ -0,0 +1,27 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "hexapod-sim.name" . }} + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ include "hexapod-sim.name" . }} + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} + template: + metadata: + labels: + app: {{ include "hexapod-sim.name" . }} + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} + spec: + containers: + - name: {{ include "hexapod-sim.name" . }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + stdin: true + tty: true + imagePullSecrets: + - name: nexus3-docker diff --git a/applications/auxtel/charts/hexapod-sim/templates/service.yaml b/applications/auxtel/charts/hexapod-sim/templates/service.yaml new file mode 100644 index 0000000000..64bd7bf413 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/templates/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} + name: {{ include "hexapod-sim.name" . }} + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 50000 + targetPort: 50000 + selector: + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . 
}} + type: ClusterIP diff --git a/applications/auxtel/charts/hexapod-sim/values.yaml b/applications/auxtel/charts/hexapod-sim/values.yaml new file mode 100644 index 0000000000..e3daccc617 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/values.yaml @@ -0,0 +1,10 @@ +# -- This is the namespace in which the hexapod controller simulator will be placed +namespace: auxtel +# -- This section holds the configuration of the container image +image: + # -- The Docker registry name of the container image + repository: ts-dockerhub.lsst.org/hexapod_simulator + # -- The tag of the container image + tag: latest + # -- The policy to apply when pulling an image for deployment + pullPolicy: Always diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml new file mode 100644 index 0000000000..d961044765 --- /dev/null +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -0,0 +1,164 @@ +csc_collector: + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + - name: butler-secret + key: butler-secret + +ataos: + image: + repository: ts-dockerhub.lsst.org/ataos + pullPolicy: Always + +atdome-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atdome + pullPolicy: Always + env: + RUN_ARG: --simulate + +atdometrajectory: + image: + repository: ts-dockerhub.lsst.org/atdometrajectory + pullPolicy: Always + +atheaderservice: + image: + repository: ts-dockerhub.lsst.org/headerservice + pullPolicy: Always + env: + URL_SPEC: --lfa_mode s3 --s3instance tuc + TSTAND_HEADERSERVICE: TUCSON + CAMERA: at + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + +athexapod-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/athexapod + pullPolicy: Always + +atmcs-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atmcs_sim + pullPolicy: Always + +atoods: + image: + repository: ts-dockerhub.lsst.org/atoods + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + CTRL_OODS_CONFIG_FILE: /etc/atoods.yaml + butlerSecret: + containerPath: &bS-cP /home/saluser/.lsst + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *bS-cP + nfsMountpoint: + - name: auxtel-gen3-butler + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS + - name: auxtel-oods-data + containerPath: /data + readOnly: false + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel + configfile: + path: /etc + filename: atoods.yaml + content: | + defaultInterval: &interval + days: 0 + hours: 0 + minutes: 0 + seconds: 0 + + ingester: + imageStagingDirectory: /data/staging/auxtel/oods + butlers: + - butler: + instrument: lsst.obs.lsst.Latiss + class: + import : lsst.ctrl.oods.gen3ButlerIngester + name : Gen3ButlerIngester + stagingDirectory : /data/lsstdata/TTS/auxtel/oods/gen3butler/raw + badFileDirectory: /data/lsstdata/TTS/auxtel/oods/gen3butler/badfiles + repoDirectory : /repo/LATISS + collections: + - LATISS/raw/all + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 30 + batchSize: 20 + scanInterval: + <<: *interval + 
seconds: 2 + + cacheCleaner: + # ONLY clean out empty directories here, never files + clearEmptyDirectories: + - /data/lsstdata/TTS/auxtel/oods/gen3butler/raw + # clean out empty directories and old files from these directories + clearEmptyDirectoriesAndOldFiles: + - /data/lsstdata/TTS/auxtel/oods/gen3butler/badfiles + - /data/staging/auxtel/oods + - /data/staging/auxtel/forwarder + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 31 + directoriesEmptyForMoreThan: + <<: *interval + days: 2 + +atpneumatics-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/at_pneumatics_sim + pullPolicy: Always + +atptg: + image: + repository: ts-dockerhub.lsst.org/ptkernel + pullPolicy: Always + env: + TELESCOPE: AT + +atspectrograph-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atspec + pullPolicy: Always + env: + RUN_ARG: --simulate + +hexapod-sim: + enabled: true diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml new file mode 100644 index 0000000000..e4cd333773 --- /dev/null +++ b/applications/auxtel/values.yaml @@ -0,0 +1,99 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +hexapod-sim: + # -- Enable the hexapod controller simulator + enabled: false + +atdome: + # -- Enable the ATDome CSC + enabled: false + +atdome-sim: + # -- Enable the ATDome simulator CSC + enabled: false + +athexapod: + # -- Enable the ATHexapod CSC + enabled: false + +athexapod-sim: + # -- Enable the ATHexapod simulator CSC + enabled: false + +atmcs: + # -- Enable the ATMCS CSC + enabled: false + +atmcs-sim: + # -- Enable the ATMCS simulator CSC + enabled: false + +atpneumatics: + # -- Enable the ATPneumatics CSC + enabled: false + +atpneumatics-sim: + # -- Enable the ATPneumatics simulator CSC + enabled: false + +atspectrograph: + # -- Enable the ATSpectrograph CSC + enabled: false + +atspectrograph-sim: + # -- Enable the ATSpectrograph simulator CSC + enabled: false + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files.
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/onepassword-connect-dev/.helmignore b/applications/butler/.helmignore similarity index 100% rename from applications/onepassword-connect-dev/.helmignore rename to applications/butler/.helmignore diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml new file mode 100644 index 0000000000..817bcdfc83 --- /dev/null +++ b/applications/butler/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.0.2 +description: Server for Butler data abstraction service +name: butler +sources: +- https://github.com/lsst/daf_butler +type: application +version: 1.0.0 diff --git a/applications/butler/README.md b/applications/butler/README.md new file mode 100644 index 0000000000..46acdc866a --- /dev/null +++ b/applications/butler/README.md @@ -0,0 +1,32 @@ +# butler + +Server for Butler data abstraction service + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the butler deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of butler deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of butler deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | +| config.indexUri | string | `""` | URI to the DirectButler repository index file listing the configurations for each repository to be hosted by this server. | +| config.pathPrefix | string | `"/api/butler"` | The prefix of the path portion of the URL where the Butler service will be exposed. For example, if the service should be exposed at `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` | +| config.repositoryLabels | list | `[]` | List of Butler repository labels which will be hosted by this server, matching those from the index file. 
| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the butler image | +| image.repository | string | `"ghcr.io/lsst/daf_butler"` | Image to use in the butler deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the butler deployment pod | +| podAnnotations | object | `{}` | Annotations for the butler deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the butler deployment pod | +| tolerations | list | `[]` | Tolerations for the butler deployment pod | diff --git a/applications/butler/secrets.yaml b/applications/butler/secrets.yaml new file mode 100644 index 0000000000..1b2d88511e --- /dev/null +++ b/applications/butler/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/butler/templates/_helpers.tpl b/applications/butler/templates/_helpers.tpl new file mode 100644 index 0000000000..636ac425d9 --- /dev/null +++ b/applications/butler/templates/_helpers.tpl @@ -0,0 +1,44 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "butler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "butler.labels" -}} +helm.sh/chart: {{ include "butler.chart" . }} +{{ include "butler.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "butler.selectorLabels" -}} +app.kubernetes.io/name: "butler" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}}
+{{- define "butler.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml
new file mode 100644
index 0000000000..7bca29eb03
--- /dev/null
+++ b/applications/butler/templates/deployment.yaml
@@ -0,0 +1,115 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: "butler"
+  labels:
+    {{- include "butler.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "butler.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "butler.selectorLabels" . | nindent 8 }}
+    spec:
+      automountServiceAccountToken: false
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - "all"
+            readOnlyRootFilesystem: true
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: "http"
+              containerPort: 8080
+              protocol: "TCP"
+          readinessProbe:
+            httpGet:
+              path: "/"
+              port: "http"
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          env:
+            - name: AWS_SHARED_CREDENTIALS_FILE
+              value: "/opt/lsst/butler/secrets/aws-credentials.ini"
+            - name: PGPASSFILE
+              value: "/opt/lsst/butler/secrets/postgres-credentials.txt"
+            - name: GOOGLE_APPLICATION_CREDENTIALS
+              value: "/opt/lsst/butler/secrets/butler-gcs-creds.json"
+            - name: S3_ENDPOINT_URL
+              value: "https://storage.googleapis.com"
+            - name: DAF_BUTLER_REPOSITORY_INDEX
+              value: {{ .Values.config.indexUri | quote }}
+          volumeMounts:
+            - name: "butler-secrets"
+              mountPath: "/opt/lsst/butler/secrets"
+              readOnly: true
+      volumes:
+        # butler-secrets-raw pulls in the secrets from the vault as files.
+        # These files are owned by root and group/world readable.
+        # This volume is not used directly by the container running the actual
+        # Butler application.
+        - name: "butler-secrets-raw"
+          secret:
+            secretName: {{ include "butler.fullname" . }}
+        # Postgres will not use a pgpass file (postgres-credentials.txt in the
+        # vault) if it is group/world writeable or owned by a different user.
+        # So the initContainer below copies the files from butler-secrets-raw
+        # to butler-secrets, changing the owner and permissions.
+        # This volume is the one used by the container running the actual
+        # Butler application.
+        - name: "butler-secrets"
+          emptyDir: {}
+      initContainers:
+        # To deal with the Postgres file permission issue mentioned above,
+        # copy the secrets from butler-secrets-raw to butler-secrets.
+        # This initContainer definition is borrowed from obsloctap's
+        # deployment.yaml.
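The init container that follows implements exactly this copy-and-tighten step. As a minimal sketch of what it does at runtime (same commands and paths as in this chart; the trailing `ls` check is illustrative only):

```sh
# Mirror the init container: copy the root-owned, group/world-readable secret
# files into the emptyDir volume, then restrict them to owner-read-only so
# libpq will accept the pgpass file.
cp -RL /tmp/butler-secrets-raw/* /opt/lsst/butler/secrets/
chmod 0400 /opt/lsst/butler/secrets/*
ls -l /opt/lsst/butler/secrets/postgres-credentials.txt  # expect -r--------
```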
+        - name: fix-secret-permissions
+          image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          command:
+            - "/bin/sh"
+            - "-c"
+            - |
+              cp -RL /tmp/butler-secrets-raw/* /opt/lsst/butler/secrets/
+              chmod 0400 /opt/lsst/butler/secrets/*
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - "all"
+          volumeMounts:
+            - name: "butler-secrets"
+              mountPath: "/opt/lsst/butler/secrets"
+            - name: "butler-secrets-raw"
+              mountPath: "/tmp/butler-secrets-raw"
+              readOnly: true
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 1000
+        runAsGroup: 1000
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/applications/butler/templates/hpa.yaml b/applications/butler/templates/hpa.yaml
new file mode 100644
index 0000000000..9eab162305
--- /dev/null
+++ b/applications/butler/templates/hpa.yaml
@@ -0,0 +1,32 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: "butler"
+  labels:
+    {{- include "butler.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: "butler"
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: "cpu"
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: "memory"
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
diff --git a/applications/butler/templates/ingress-anonymous.yaml b/applications/butler/templates/ingress-anonymous.yaml
new file mode 100644
index 0000000000..ebfcf7264a
--- /dev/null
+++ b/applications/butler/templates/ingress-anonymous.yaml
@@ -0,0 +1,45 @@
+apiVersion: gafaelfawr.lsst.io/v1alpha1
+kind: GafaelfawrIngress
+metadata:
+  name: "butler-anonymous"
+  labels:
+    {{- include "butler.labels" . | nindent 4 }}
+config:
+  baseUrl: {{ .Values.global.baseUrl | quote }}
+  scopes:
+    anonymous: true
+template:
+  metadata:
+    name: "butler-anonymous"
+    {{- with .Values.ingress.annotations }}
+    annotations:
+      {{- toYaml . | nindent 6 }}
+    {{- end }}
+  spec:
+    rules:
+    - host: {{ required "global.host must be set" .Values.global.host | quote }}
+      http:
+        paths:
+        # For direct end-user use of the Butler client library, the
+        # Butler() convenience constructor must be able to load a
+        # configuration file via unauthenticated HTTP. This exists for
+        # compatibility with the way Butler instances were configured prior
+        # to the existence of the Butler server -- they are passed the URI
+        # for a repository root on the filesystem or HTTP, from which a
+        # configuration file is loaded.
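The two anonymous paths templated below exist so a client can bootstrap itself without a token. As a hedged sketch, assuming an environment where `global.host` is `data.lsst.cloud`, `config.pathPrefix` is the default `/api/butler`, and `dp02` is one of the configured repository labels:

```sh
# Fetch the repository configuration the Butler() constructor loads; no
# Gafaelfawr authentication is required for these two exact paths.
curl -s https://data.lsst.cloud/api/butler/repo/dp02/butler.yaml
curl -s https://data.lsst.cloud/api/butler/repo/dp02/butler.json
```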
+ {{- range $repositoryLabel := .Values.config.repositoryLabels }} + - path: "{{ $.Values.config.pathPrefix }}/repo/{{ $repositoryLabel }}/butler.yaml" + pathType: "Exact" + backend: + service: + name: "butler" + port: + number: 8080 + - path: "{{ $.Values.config.pathPrefix }}/repo/{{ $repositoryLabel }}/butler.json" + pathType: "Exact" + backend: + service: + name: "butler" + port: + number: 8080 + {{- end }} diff --git a/applications/butler/templates/ingress-authenticated.yaml b/applications/butler/templates/ingress-authenticated.yaml new file mode 100644 index 0000000000..d12b573eb0 --- /dev/null +++ b/applications/butler/templates/ingress-authenticated.yaml @@ -0,0 +1,40 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "butler" + labels: + {{- include "butler.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + loginRedirect: false + # Butler needs a delegated token so that we can query Gafaelfawr for the + # user's group membership + delegate: + internal: + service: "butler" + scopes: [] + +template: + metadata: + name: "butler" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + {{- range $repositoryLabel := .Values.config.repositoryLabels }} + - path: "{{ $.Values.config.pathPrefix }}/repo/{{ $repositoryLabel }}" + pathType: "Prefix" + backend: + service: + name: "butler" + port: + number: 8080 + {{- end }} diff --git a/applications/cachemachine/templates/networkpolicy.yaml b/applications/butler/templates/networkpolicy.yaml similarity index 79% rename from applications/cachemachine/templates/networkpolicy.yaml rename to applications/butler/templates/networkpolicy.yaml index 2741f62d58..ebe6c44067 100644 --- a/applications/cachemachine/templates/networkpolicy.yaml +++ b/applications/butler/templates/networkpolicy.yaml @@ -1,11 +1,11 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: {{ include "cachemachine.fullname" . }} + name: "butler" spec: podSelector: matchLabels: - {{- include "cachemachine.selectorLabels" . | nindent 6 }} + {{- include "butler.selectorLabels" . | nindent 6 }} policyTypes: - Ingress ingress: diff --git a/applications/butler/templates/service.yaml b/applications/butler/templates/service.yaml new file mode 100644 index 0000000000..4906d7b6d3 --- /dev/null +++ b/applications/butler/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "butler" + labels: + {{- include "butler.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "butler.selectorLabels" . | nindent 4 }} diff --git a/applications/butler/templates/vault-secrets.yaml b/applications/butler/templates/vault-secrets.yaml new file mode 100644 index 0000000000..38fd855560 --- /dev/null +++ b/applications/butler/templates/vault-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "butler.fullname" . }} + labels: + {{- include "butler.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/butler" + type: Opaque diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml new file mode 100644 index 0000000000..1d5d57e000 --- /dev/null +++ b/applications/butler/values-idfdev.yaml @@ -0,0 +1,7 @@ +image: + pullPolicy: Always + +config: + indexUri: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" + repositoryLabels: + - dp02 diff --git a/applications/butler/values-idfint.yaml b/applications/butler/values-idfint.yaml new file mode 100644 index 0000000000..2aa6066c53 --- /dev/null +++ b/applications/butler/values-idfint.yaml @@ -0,0 +1,4 @@ +config: + indexUri: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" + repositoryLabels: + - dp02 diff --git a/applications/butler/values-idfprod.yaml b/applications/butler/values-idfprod.yaml new file mode 100644 index 0000000000..3ae3da39fd --- /dev/null +++ b/applications/butler/values-idfprod.yaml @@ -0,0 +1,5 @@ +config: + indexUri: "s3://butler-us-central1-repo-locations/data-repos.yaml" + repositoryLabels: + - dp01 + - dp02 diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml new file mode 100644 index 0000000000..92d382b98b --- /dev/null +++ b/applications/butler/values.yaml @@ -0,0 +1,78 @@ +# Default values for butler. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the butler deployment + repository: "ghcr.io/lsst/daf_butler" + + # -- Pull policy for the butler image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of butler deployment + enabled: false + + # -- Minimum number of butler deployment pods + minReplicas: 1 + + # -- Maximum number of butler deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of butler deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the butler deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the butler deployment pod +resources: {} + +# -- Node selection rules for the butler deployment pod +nodeSelector: {} + +# -- Tolerations for the butler deployment pod +tolerations: [] + +# -- Affinity rules for the butler deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + +config: + # -- URI to the DirectButler repository index file listing the configurations + # for each repository to be hosted by this server. + indexUri: "" + + # -- List of Butler repository labels which will be hosted by this server, + # matching those from the index file. + repositoryLabels: [] + + # -- The prefix of the path portion of the URL where the Butler service will + # be exposed. 
For example, if the service should be exposed at + # `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` + pathPrefix: "/api/butler" diff --git a/applications/cachemachine/Chart.yaml b/applications/cachemachine/Chart.yaml deleted file mode 100644 index fd8100af9e..0000000000 --- a/applications/cachemachine/Chart.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v2 -name: cachemachine -version: 1.0.0 -description: JupyterLab image prepuller -sources: - - https://github.com/lsst-sqre/cachemachine -appVersion: 1.2.2 diff --git a/applications/cachemachine/README.md b/applications/cachemachine/README.md deleted file mode 100644 index 1ed392e993..0000000000 --- a/applications/cachemachine/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# cachemachine - -JupyterLab image prepuller - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the cachemachine frontend pod | -| autostart | object | `{}` | Autostart configuration. Each key is the name of a class of images to pull, and the value is the JSON specification for which and how many images to pull. | -| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the cachemachine image | -| image.repository | string | `"lsstsqre/cachemachine"` | cachemachine image to use | -| image.tag | string | The appVersion of the chart | Tag of cachemachine image to use | -| ingress.annotations | object | `{}` | Additional annotations to add for endpoints that are authenticated | -| ingress.anonymousAnnotations | object | `{}` | Additional annotations to add for endpoints that allow anonymous access, such as `/*/available` | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the cachemachine frontend pod | -| podAnnotations | object | `{}` | Annotations for the cachemachine frontend pod | -| resources | object | `{}` | Resource limits and requests for the cachemachine frontend pod | -| serviceAccount | object | `{"annotations":{},"name":""}` | Secret names to use for all Docker pulls | -| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | -| serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod | diff --git a/applications/cachemachine/templates/_helpers.tpl b/applications/cachemachine/templates/_helpers.tpl deleted file mode 100644 index 6599ed07b6..0000000000 --- a/applications/cachemachine/templates/_helpers.tpl +++ /dev/null @@ -1,60 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "cachemachine.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "cachemachine.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cachemachine.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "cachemachine.labels" -}} -app.kubernetes.io/name: {{ include "cachemachine.name" . }} -helm.sh/chart: {{ include "cachemachine.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "cachemachine.selectorLabels" -}} -app.kubernetes.io/name: {{ include "cachemachine.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "cachemachine.serviceAccountName" -}} -{{ default (include "cachemachine.fullname" .) .Values.serviceAccount.name }} -{{- end -}} diff --git a/applications/cachemachine/templates/configmap.yaml b/applications/cachemachine/templates/configmap.yaml deleted file mode 100644 index 013ff04860..0000000000 --- a/applications/cachemachine/templates/configmap.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "cachemachine.fullname" . }}-autostart - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -data: - {{- toYaml .Values.autostart | nindent 2 }} diff --git a/applications/cachemachine/templates/deployment.yaml b/applications/cachemachine/templates/deployment.yaml deleted file mode 100644 index b8105098c2..0000000000 --- a/applications/cachemachine/templates/deployment.yaml +++ /dev/null @@ -1,96 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "cachemachine.fullname" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - replicas: 1 - selector: - matchLabels: - {{- include "cachemachine.selectorLabels" . | nindent 6 }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "cachemachine.selectorLabels" . | nindent 8 }} - spec: - imagePullSecrets: - - name: "pull-secret" - serviceAccountName: {{ template "cachemachine.serviceAccountName" . 
}} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 - containers: - - name: {{ .Chart.Name }} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "all" - readOnlyRootFilesystem: true - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - env: - - name: DOCKER_SECRET_NAME - value: "pull-secret" - ports: - - name: "http" - containerPort: 8080 - protocol: "TCP" - readinessProbe: - httpGet: - path: "/" - port: "http" - {{- with .Values.resources }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: "docker-creds" - mountPath: "/etc/secrets" - readOnly: true - - name: autostart - mountPath: "/etc/cachemachine" - readOnly: true - - name: podinfo - mountPath: /etc/podinfo - volumes: - - name: docker-creds - secret: - secretName: pull-secret - - name: autostart - configMap: - name: {{ include "cachemachine.fullname" . }}-autostart - - name: podinfo - downwardAPI: - items: - - path: "annotations" - fieldRef: - fieldPath: metadata.annotations - - path: "labels" - fieldRef: - fieldPath: metadata.labels - - path: "name" - fieldRef: - fieldPath: metadata.name - - path: "uid" - fieldRef: - fieldPath: metadata.uid - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/applications/cachemachine/templates/ingress.yaml b/applications/cachemachine/templates/ingress.yaml deleted file mode 100644 index 0fe53f9cee..0000000000 --- a/applications/cachemachine/templates/ingress.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: gafaelfawr.lsst.io/v1alpha1 -kind: GafaelfawrIngress -metadata: - name: {{ template "cachemachine.fullname" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -config: - baseUrl: {{ .Values.global.baseUrl | quote }} - scopes: - all: - - "exec:admin" - loginRedirect: true -template: - metadata: - name: {{ template "cachemachine.fullname" . }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 6 }} - {{- end }} - spec: - rules: - - host: {{ required "global.host must be set" .Values.global.host | quote }} - http: - paths: - - path: "/cachemachine" - pathType: "Prefix" - backend: - service: - name: {{ template "cachemachine.fullname" . }} - port: - number: 80 diff --git a/applications/cachemachine/templates/networkpolicy-pull.yaml b/applications/cachemachine/templates/networkpolicy-pull.yaml deleted file mode 100644 index de3104385d..0000000000 --- a/applications/cachemachine/templates/networkpolicy-pull.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ template "cachemachine.fullname" . }}-pull - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - # Reject all inbound and outbound connections to the pods that exist solely - # to pull Docker images. 
- podSelector: - matchLabels: - cachemachine: "pull" - policyTypes: - - Ingress - - Egress diff --git a/applications/cachemachine/templates/service.yaml b/applications/cachemachine/templates/service.yaml deleted file mode 100644 index 63ccbc2ed1..0000000000 --- a/applications/cachemachine/templates/service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "cachemachine.fullname" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: "http" - protocol: "TCP" - selector: - {{- include "cachemachine.selectorLabels" . | nindent 4 }} diff --git a/applications/cachemachine/templates/serviceaccount.yaml b/applications/cachemachine/templates/serviceaccount.yaml deleted file mode 100644 index 81a80ff760..0000000000 --- a/applications/cachemachine/templates/serviceaccount.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["list"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ template "cachemachine.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ template "cachemachine.serviceAccountName" . }} - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -rules: - - apiGroups: ["apps"] - resources: ["daemonsets"] - verbs: ["create", "delete"] - - apiGroups: ["apps"] - resources: ["daemonsets/status"] - verbs: ["get"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ template "cachemachine.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: {{ template "cachemachine.serviceAccountName" . }} - apiGroup: rbac.authorization.k8s.io diff --git a/applications/cachemachine/templates/tests/test-connection.yaml b/applications/cachemachine/templates/tests/test-connection.yaml deleted file mode 100644 index 35c987cdcc..0000000000 --- a/applications/cachemachine/templates/tests/test-connection.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: {{ include "cachemachine.fullname" . }}-test-connection - annotations: - "helm.sh/hook": "test-success" - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - containers: - - name: "wget" - image: "busybox" - command: ['wget'] - args: - - '{{ include "cachemachine.fullname" . 
}}:8080' - restartPolicy: Never diff --git a/applications/cachemachine/templates/vault-secrets.yaml b/applications/cachemachine/templates/vault-secrets.yaml deleted file mode 100644 index 6f813c9b7d..0000000000 --- a/applications/cachemachine/templates/vault-secrets.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: pull-secret - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" - type: kubernetes.io/dockerconfigjson diff --git a/applications/cachemachine/values-base.yaml b/applications/cachemachine/values-base.yaml deleted file mode 100644 index 2a5640986d..0000000000 --- a/applications/cachemachine/values-base.yaml +++ /dev/null @@ -1,25 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": { - "jupyterlab": "ok" - }, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "ts-dockerhub.lsst.org", - "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0032", - "num_releases": 0, - "num_weeklies": 3, - "num_dailies": 2, - "cycle": 32, - "alias_tags": [ - "latest", - "latest_daily", - "latest_weekly" - ] - } - ] - } diff --git a/applications/cachemachine/values-ccin2p3.yaml b/applications/cachemachine/values-ccin2p3.yaml deleted file mode 100644 index a5b8e8aef5..0000000000 --- a/applications/cachemachine/values-ccin2p3.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/cachemachine/values-idfint.yaml b/applications/cachemachine/values-idfint.yaml deleted file mode 100644 index 0e80940198..0000000000 --- a/applications/cachemachine/values-idfint.yaml +++ /dev/null @@ -1,37 +0,0 @@ -image: - tag: "1.2.3" - -serviceAccount: - annotations: { - iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-int-dc5d.iam.gserviceaccount.com - } - -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoGar", - "registry_url": "us-central1-docker.pkg.dev", - "gar_repository": "sciplat", - "gar_image": "sciplat-lab", - "project_id": "rubin-shared-services-71ec", - "location": "us-central1", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - }, - { - "type": "SimpleRepoMan", - "images": [ - { - "image_url": "us-central1-docker.pkg.dev/rubin-shared-services-71ec/sciplat/sciplat-lab:w_2023_07", - "name": "Weekly 2023_07" - } - ] - } - ] - } diff --git a/applications/cachemachine/values-minikube.yaml b/applications/cachemachine/values-minikube.yaml deleted file mode 100644 index 4369a6be97..0000000000 --- a/applications/cachemachine/values-minikube.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 0, - "num_weeklies": 0, - "num_dailies": 0 - } - ] - } diff --git a/applications/cachemachine/values-roe.yaml b/applications/cachemachine/values-roe.yaml deleted file mode 100644 index a5b8e8aef5..0000000000 --- a/applications/cachemachine/values-roe.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - 
"name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/cachemachine/values-summit.yaml b/applications/cachemachine/values-summit.yaml deleted file mode 100644 index 215f2b1988..0000000000 --- a/applications/cachemachine/values-summit.yaml +++ /dev/null @@ -1,23 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "ts-dockerhub.lsst.org", - "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0032", - "num_releases": 0, - "num_weeklies": 3, - "num_dailies": 2, - "cycle": 32, - "alias_tags": [ - "latest", - "latest_daily", - "latest_weekly" - ] - } - ] - } diff --git a/applications/cachemachine/values-tucson-teststand.yaml b/applications/cachemachine/values-tucson-teststand.yaml deleted file mode 100644 index f88f37ba79..0000000000 --- a/applications/cachemachine/values-tucson-teststand.yaml +++ /dev/null @@ -1,23 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "ts-dockerhub.lsst.org", - "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0032", - "num_releases": 1, - "num_weeklies": 3, - "num_dailies": 2, - "cycle": 32, - "alias_tags": [ - "latest", - "latest_daily", - "latest_weekly" - ] - } - ] - } diff --git a/applications/cachemachine/values-usdfdev.yaml b/applications/cachemachine/values-usdfdev.yaml deleted file mode 100644 index 155360e916..0000000000 --- a/applications/cachemachine/values-usdfdev.yaml +++ /dev/null @@ -1,20 +0,0 @@ -image: - tag: "1.2.3" - -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "docker-registry.slac.stanford.edu", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/cachemachine/values-usdfprod.yaml b/applications/cachemachine/values-usdfprod.yaml deleted file mode 100644 index d9693daab3..0000000000 --- a/applications/cachemachine/values-usdfprod.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "docker-registry.slac.stanford.edu", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/cachemachine/values.yaml b/applications/cachemachine/values.yaml deleted file mode 100644 index f6c7d38961..0000000000 --- a/applications/cachemachine/values.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Default values for cachemachine. 
- -# -- Override the base name for resources -nameOverride: "" - -# -- Override the full name for resources (includes the release name) -fullnameOverride: "" - -image: - # -- cachemachine image to use - repository: lsstsqre/cachemachine - - # -- Pull policy for the cachemachine image - pullPolicy: IfNotPresent - - # -- Tag of cachemachine image to use - # @default -- The appVersion of the chart - tag: "" - -# -- Secret names to use for all Docker pulls -serviceAccount: - # -- Name of the service account to use - # @default -- Name based on the fullname template - name: "" - - # -- Annotations to add to the service account - annotations: {} - -ingress: - # -- Additional annotations to add for endpoints that are authenticated - annotations: {} - - # -- Additional annotations to add for endpoints that allow anonymous - # access, such as `/*/available` - anonymousAnnotations: {} - -# -- Resource limits and requests for the cachemachine frontend pod -resources: {} - -# -- Annotations for the cachemachine frontend pod -podAnnotations: {} - -# -- Node selector rules for the cachemachine frontend pod -nodeSelector: {} - -# -- Tolerations for the cachemachine frontend pod -tolerations: [] - -# -- Affinity rules for the cachemachine frontend pod -affinity: {} - -# -- Autostart configuration. Each key is the name of a class of images to -# pull, and the value is the JSON specification for which and how many images -# to pull. -autostart: {} - -# The following will be set by parameters injected by Argo CD and should not -# be set in the individual environment values files. -global: - # -- Base URL for the environment - # @default -- Set by Argo CD - baseUrl: "" - - # -- Host name for ingress - # @default -- Set by Argo CD - host: "" - - # -- Base path for Vault secrets - # @default -- Set by Argo CD - vaultSecretsPath: "" diff --git a/applications/calsys/Chart.yaml b/applications/calsys/Chart.yaml new file mode 100644 index 0000000000..74d89a62b7 --- /dev/null +++ b/applications/calsys/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +name: calsys +version: 1.0.0 +description: Deployment for the Calibration System CSCs +dependencies: +- name: csc_collector + version: 1.0.0 + repository: file://../../charts/csc_collector +- name: csc + alias: gcheaderservice1 + version: 1.0.0 + condition: gcheaderservice1.enabled + repository: file://../../charts/csc +- name: csc + alias: simulation-gencam + version: 1.0.0 + condition: simulation-gencam.enabled + repository: file://../../charts/csc diff --git a/applications/calsys/README.md b/applications/calsys/README.md new file mode 100644 index 0000000000..056ed61adc --- /dev/null +++ b/applications/calsys/README.md @@ -0,0 +1,22 @@ +# calsys + +Deployment for the Calibration System CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
|
+| gcheaderservice1.enabled | bool | `false` | Enable the GCHeaderService:1 CSC |
+| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
+| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment |
+| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment |
+| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment |
+| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics |
+| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment |
+| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment |
+| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment |
+| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment |
+| global.host | string | Set by Argo CD | Host name for ingress |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| simulation-gencam.enabled | bool | `false` | Enable the GenericCamera:1 CSC |
diff --git a/applications/calsys/values-tucson-teststand.yaml b/applications/calsys/values-tucson-teststand.yaml
new file mode 100644
index 0000000000..c4101cde0d
--- /dev/null
+++ b/applications/calsys/values-tucson-teststand.yaml
@@ -0,0 +1,58 @@
+csc_collector:
+  secrets:
+  - name: nexus3-docker
+    key: pull-secret
+    type: kubernetes.io/dockerconfigjson
+  - name: ts-salkafka
+    key: ts/software/ts-salkafka
+  - name: lfa
+    key: ts/software/lfa
+
+gcheaderservice1:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/headerservice
+    pullPolicy: Always
+  env:
+    CAMERA: gc1
+    TSTAND_HEADERSERVICE: TUCSON
+    URL_SPEC: --lfa_mode s3 --s3instance tuc
+  envSecrets:
+  - name: AWS_ACCESS_KEY_ID
+    secretName: lfa
+    secretKey: aws-access-key-id
+  - name: AWS_SECRET_ACCESS_KEY
+    secretName: lfa
+    secretKey: aws-secret-access-key
+  - name: MYS3_ACCESS_KEY
+    secretName: lfa
+    secretKey: aws-access-key-id
+  - name: MYS3_SECRET_KEY
+    secretName: lfa
+    secretKey: aws-secret-access-key
+
+simulation-gencam:
+  enabled: true
+  classifier: genericcamera1
+  image:
+    repository: ts-dockerhub.lsst.org/genericcamera
+    pullPolicy: Always
+  env:
+    RUN_ARG: 1
+  envSecrets:
+  - name: AWS_ACCESS_KEY_ID
+    secretName: lfa
+    secretKey: aws-access-key-id
+  - name: AWS_SECRET_ACCESS_KEY
+    secretName: lfa
+    secretKey: aws-secret-access-key
+  - name: MYS3_ACCESS_KEY
+    secretName: lfa
+    secretKey: aws-access-key-id
+  - name: MYS3_SECRET_KEY
+    secretName: lfa
+    secretKey: aws-secret-access-key
+  service:
+    enabled: true
+    port: 5013
+    type: LoadBalancer
diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml
new file mode 100644
index 0000000000..71ded1b43b
--- /dev/null
+++ b/applications/calsys/values.yaml
@@ -0,0 +1,63 @@
+csc_collector:
+  # -- This section holds secret specifications.
+  # Each object listed can have the following attributes defined:
+  # _name_ (The name used by pods to access the secret)
+  # _key_ (The key in the vault store where the secret resides)
+  # _type_ (OPTIONAL: The secret type. Defaults to Opaque.)
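For example, the `nexus3-docker` entry in the Tucson values file above requests a `kubernetes.io/dockerconfigjson` secret via the optional _type_ attribute. A hedged way to confirm the resulting secret type after deployment (the namespace name is an assumption):

```sh
# Verify the pull secret materialized with the non-default type requested
# in the secrets list; the "calsys" namespace is hypothetical.
kubectl get secret nexus3-docker -n calsys -o jsonpath='{.type}'
# expected output: kubernetes.io/dockerconfigjson
```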
+  secrets: []
+
+gcheaderservice1:
+  # -- Enable the GCHeaderService:1 CSC
+  enabled: false
+
+simulation-gencam:
+  # -- Enable the GenericCamera:1 CSC
+  enabled: false
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+  # -- Base URL for the environment
+  # @default -- Set by Argo CD
+  baseUrl: ""
+
+  # -- Host name for ingress
+  # @default -- Set by Argo CD
+  host: ""
+
+  # -- Base path for Vault secrets
+  # @default -- Set by Argo CD
+  vaultSecretsPath: ""
+
+  controlSystem:
+    # -- Application namespace for the control system deployment
+    # @default -- Set by ArgoCD
+    appNamespace: ""
+
+    # -- Image tag for the control system deployment
+    # @default -- Set by ArgoCD
+    imageTag: ""
+
+    # -- Site tag for the control system deployment
+    # @default -- Set by ArgoCD
+    siteTag: ""
+
+    # -- Topic name tag for the control system deployment
+    # @default -- Set by ArgoCD
+    topicName: ""
+
+    # -- Kafka broker address for the control system deployment
+    # @default -- Set by ArgoCD
+    kafkaBrokerAddress: ""
+
+    # -- Kafka topic replication factor for control system topics
+    # @default -- Set by ArgoCD
+    kafkaTopicReplicationFactor: ""
+
+    # -- Schema registry URL for the control system deployment
+    # @default -- Set by ArgoCD
+    schemaRegistryUrl: ""
+
+    # -- S3 endpoint (LFA) for the control system deployment
+    # @default -- Set by ArgoCD
+    s3EndpointUrl: ""
diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml
index aea788535f..41d5841a87 100644
--- a/applications/cert-manager/Chart.yaml
+++ b/applications/cert-manager/Chart.yaml
@@ -7,5 +7,5 @@ sources:
 - https://github.com/cert-manager/cert-manager
 dependencies:
 - name: cert-manager
-  version: v1.13.0
+  version: v1.13.3
   repository: https://charts.jetstack.io
diff --git a/applications/cert-manager/values-usdfdev.yaml b/applications/cert-manager/values-usdfdev.yaml
deleted file mode 100644
index 9a069163fb..0000000000
--- a/applications/cert-manager/values-usdfdev.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-solver:
-  route53:
-    aws_access_key_id: AKIAQSJOS2SFL5I4TYND
-    hosted_zone: Z0567328105IEHEMIXLCO
-    vault_secret_path: "secret/rubin/data-dev.lsst.cloud/cert-manager"
diff --git a/applications/control-system-test/Chart.yaml b/applications/control-system-test/Chart.yaml
new file mode 100644
index 0000000000..5d4cbb6cf2
--- /dev/null
+++ b/applications/control-system-test/Chart.yaml
@@ -0,0 +1,15 @@
+apiVersion: v2
+name: control-system-test
+version: 1.0.0
+description: Deployment for the Test CSCs and Integration Testing Workflows
+dependencies:
+- name: csc_collector
+  version: 1.0.0
+  repository: file://../../charts/csc_collector
+- name: csc
+  alias: test42
+  version: 1.0.0
+  repository: file://../../charts/csc
+- name: integration-testing
+  version: 1.0.0
+  condition: integration-testing.enabled
diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md
new file mode 100644
index 0000000000..f13374b774
--- /dev/null
+++ b/applications/control-system-test/README.md
@@ -0,0 +1,30 @@
+# control-system-test
+
+Deployment for the Test CSCs and Integration Testing Workflows
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
+| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment |
+| 
global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| integration-testing.enabled | bool | `false` | Enable the integration testing system | +| integration-testing.envEfd | string | `nil` | The Name of the EFD instance. | +| integration-testing.image.tag | string | `nil` | The image tag for the Integration Test runner container | +| integration-testing.jobLabelName | string | `"control-system-test"` | Label for jobs to get them to appear in application | +| integration-testing.persistentVolume.claimName | string | `"saved-reports"` | PVC name for saving the reports | +| integration-testing.persistentVolume.storage | string | `"1Gi"` | Storage size request for the PVC | +| integration-testing.reportLocation | string | `"/home/saluser/robotframework_EFD/Reports"` | Container location of the RobotFramework reports | +| integration-testing.s3Bucket | string | `nil` | The S3 bucket name to use | +| integration-testing.serviceAccount | string | `"integration-tests"` | This sets the service account name | +| integration-testing.workflowName | string | `"integration-test-workflow"` | Name for the top-level workflow | diff --git a/applications/control-system-test/charts/integration-testing/Chart.yaml b/applications/control-system-test/charts/integration-testing/Chart.yaml new file mode 100644 index 0000000000..16458ad1e8 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +name: integration-testing +description: Helm chart for Integration Testing Workflows. +version: 1.0.0 diff --git a/applications/control-system-test/charts/integration-testing/README.md b/applications/control-system-test/charts/integration-testing/README.md new file mode 100644 index 0000000000..311c7d112f --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/README.md @@ -0,0 +1,17 @@ +# integration-testing + +Helm chart for Integration Testing Workflows. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| envEfd | string | `nil` | The Name of the EFD instance. 
| +| image.tag | string | `nil` | The image tag for the Integration Test runner container | +| jobLabelName | string | `"control-system-test"` | Label for jobs to get them to appear in application | +| persistentVolume.claimName | string | `"saved-reports"` | PVC name for saving the reports | +| persistentVolume.storage | string | `"1Gi"` | Storage size request for the PVC | +| reportLocation | string | `"/home/saluser/robotframework_EFD/Reports"` | Container location of the RobotFramework reports | +| s3Bucket | string | `nil` | The S3 bucket name to use | +| serviceAccount | string | `"integration-tests"` | This sets the service account name | +| workflowName | string | `"integration-test-workflow"` | Name for the top-level workflow | diff --git a/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml new file mode 100644 index 0000000000..d9f801111f --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml @@ -0,0 +1,34 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: cleanup-reports-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + ttlStrategy: + secondsAfterCompletion: 1800 + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + entrypoint: cleanup-reports + templates: + - name: cleanup-reports + metadata: + labels: + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} + container: + image: alpine:latest + command: [sh, -c] + args: ["rm -f /pvc/*.* /pvc/STATE_FAILED"] + volumeMounts: + - name: testreports + mountPath: /pvc diff --git a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml new file mode 100644 index 0000000000..78c4fb11e5 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: integration-test-controller-configmap + labels: + # Note that this label is required for the informer to detect this ConfigMap. 
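A quick way to confirm that the required label (set on the next line of this ConfigMap) is present after deployment; the namespace is an assumption:

```sh
# The Argo Workflows informer only picks up ConfigMaps carrying this label;
# the "control-system-test" namespace is hypothetical.
kubectl get configmap integration-test-controller-configmap \
  -n control-system-test \
  -o jsonpath='{.metadata.labels.workflows\.argoproj\.io/configmap-type}'
# expected output: Parameter
```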
+ workflows.argoproj.io/configmap-type: Parameter +data: + artifactRepository: | # However, all nested maps must be strings + archiveLogs: true + s3: + endpoint: {{ $.Values.global.controlSystem.s3EndpointUrl | trimPrefix "https://" }} + bucket: {{ .Values.s3Bucket }} + insecure: false + accessKeySecret: + name: lfa + key: aws-access-key-id + secretKeySecret: + name: lfa + key: aws-secret-access-key diff --git a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml new file mode 100644 index 0000000000..bd691ca6b1 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml @@ -0,0 +1,262 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: imaging-test-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230601" + entrypoint: run-tests + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: + - name: auxtel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Housekeeping.list" + - name: jobname + value: auxtel-housekeeping + - name: maintel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_MainTel_Housekeeping.list" + - name: jobname + value: maintel-housekeeping + - name: auxtel-image-verification + depends: auxtel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Image_Verification.list" + - name: jobname + value: auxtel-image-verification + - name: auxtel-latiss-daytime-checkout + depends: auxtel-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_LATISS_Checkout.list" + - name: jobname + value: auxtel-latiss-daytime-checkout + - name: auxtel-telescope-dome-daytime-checkout + depends: auxtel-latiss-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" + - name: jobname + value: auxtel-telescope-dome-daytime-checkout + - name: auxtel-telescope-slew-take-image-daytime-checkout + depends: auxtel-telescope-dome-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Slew_and_Take_Image_Checkout.list" + - name: jobname + value: auxtel-telescope-slew-take-image-daytime-checkout + - name: auxtel-prep-flat + depends: auxtel-telescope-slew-take-image-daytime-checkout + templateRef: + name: integration-test-job-template + 
template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Prep_Flat.list" + - name: jobname + value: auxtel-prep-flat + - name: auxtel-flat-calibrations + depends: auxtel-prep-flat + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Flat_Calibrations.list" + - name: jobname + value: auxtel-flat-calibrations + - name: auxtel-ptc-calibrations + depends: auxtel-flat-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_PTC_Calibrations.list" + - name: jobname + value: auxtel-ptc-calibrations + - name: auxtel-prep-onsky + depends: auxtel-ptc-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Prep_Onsky.list" + - name: jobname + value: auxtel-prep-onsky + - name: auxtel-cwfs-align + depends: auxtel-prep-onsky + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_WEP_Align.list" + - name: jobname + value: auxtel-cwfs-align + - name: auxtel-acq-take-seq-pointing + depends: auxtel-cwfs-align + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_POINTING.list" + - name: jobname + value: auxtel-acq-take-seq-pointing + - name: auxtel-acq-take-seq-verify + depends: auxtel-acq-take-seq-pointing + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_VERIFY.list" + - name: jobname + value: auxtel-acq-take-seq-verify + - name: auxtel-acq-take-seq-test + depends: auxtel-acq-take-seq-verify + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_TEST.list" + - name: jobname + value: auxtel-acq-take-seq-test + - name: auxtel-acq-take-seq-nominal + depends: auxtel-acq-take-seq-test + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_NOMINAL.list" + - name: jobname + value: auxtel-acq-take-seq-nominal + - name: auxtel-stop + depends: auxtel-acq-take-seq-nominal + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Stop.list" + - name: jobname + value: auxtel-stop + - name: auxtel-shutdown + depends: auxtel-stop + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Shutdown.list" + - name: jobname + value: auxtel-shutdown + - name: enable-atcs + depends: auxtel-shutdown + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enable_ATCS.list" + - name: jobname + value: enable-atcs + - name: comcam-image-verification + depends: maintel-housekeeping + templateRef: + 
name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Image_Verification.list" + - name: jobname + value: comcam-image-verification + - name: comcam-calibrations + depends: comcam-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Calibrations.list" + - name: jobname + value: comcam-calibrations + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }} diff --git a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml new file mode 100644 index 0000000000..5c08c7195c --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml @@ -0,0 +1,66 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: integration-test-job-template +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + entrypoint: inttest-template + ttlStrategy: + secondsAfterCompletion: 1800 + imagePullSecrets: + - name: nexus3-docker + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + templates: + - name: inttest-template + inputs: + parameters: + - name: integrationtest + value: "-A Run-Robot.list" + - name: jobname + value: "myjob" + - name: reportname + value: "report.xml" + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/{{ printf "{{inputs.parameters.reportname}}" }} + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }}/{{ printf "{{inputs.parameters.reportname}}" }} + metadata: + labels: + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} + securityContext: + runAsUser: 73006 + runAsGroup: 73006 + fsGroup: 73006 + container: + command: [/home/saluser/.startup.sh] + name: test-{{ printf "{{inputs.parameters.jobname}}" }} + {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystem.imageTag }} + image: "ts-dockerhub.lsst.org/integrationtests:{{ $imageTag }}" + imagePullPolicy: Always + envFrom: + - configMapRef: + name: csc-env-config + env: + - name: ENV_EFD + value: {{ .Values.envEfd }} + - name: RUN_ARG + value: {{ printf "'{{inputs.parameters.integrationtest}}'" }} + - name: LSST_KAFKA_SECURITY_PASSWORD + valueFrom: + secretKeyRef: + name: ts-salkafka + key: ts-salkafka-password + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + readOnly: false diff --git a/applications/control-system-test/charts/integration-testing/templates/rbac.yaml b/applications/control-system-test/charts/integration-testing/templates/rbac.yaml new file mode 100644 index 0000000000..a417c9c9e7 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/rbac.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount }} + +--- 
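The WorkflowTemplates in this application are meant to be launched with a parameterized date key, which the job template above uses to build the S3 artifact path. A hedged invocation sketch, assuming the argo CLI is available and these templates are installed in a namespace named `control-system-test`:

```sh
# Launch the imaging test DAG from its WorkflowTemplate and follow progress;
# date-key defaults to "20230601" if the parameter is omitted.
argo submit --from workflowtemplate/imaging-test-workflow \
  -p date-key=20230601 -n control-system-test
argo watch @latest -n control-system-test
```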
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Values.serviceAccount }}-role +rules: + - apiGroups: ["batch"] + resources: ["jobs", "jobs/status"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["argoproj.io"] + resources: ["workflowtaskresults"] + verbs: ["create", "patch"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Values.serviceAccount }}-rolebinding +roleRef: + kind: Role + name: {{ .Values.serviceAccount }}-role + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount }} diff --git a/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml new file mode 100644 index 0000000000..bfe96f0150 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml @@ -0,0 +1,142 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: restart-test-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230601" + entrypoint: run-tests + # save-reports runs as an exit handler so the reports are archived even + # when the test DAG fails. + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: + - name: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Offline.list" + - name: jobname + value: cameras-offline + - name: standby + depends: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Standby.list" + - name: jobname + value: standby + - name: disabled + depends: standby + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Disabled.list" + - name: jobname + value: disabled + - name: enabled + depends: disabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enabled.list" + - name: jobname + value: enabled + - name: auxtel-housekeeping + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Housekeeping.list" + - name: jobname + value: auxtel-housekeeping + - name: maintel-housekeeping + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_MainTel_Housekeeping.list" + - name: jobname + value: maintel-housekeeping + - name: auxtel-image-verification + depends: auxtel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A 
Test_Report_AuxTel_Image_Verification.list" + - name: jobname + value: auxtel-image-verification + - name: comcam-image-verification + depends: maintel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Image_Verification.list" + - name: jobname + value: comcam-image-verification + - name: love-stress-test + depends: auxtel-image-verification && comcam-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_LOVE_Stress_Test.list" + - name: jobname + value: love-stress-test + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }} diff --git a/applications/control-system-test/charts/integration-testing/templates/saved-reports-pvc.yaml b/applications/control-system-test/charts/integration-testing/templates/saved-reports-pvc.yaml new file mode 100644 index 0000000000..53ed38d981 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/saved-reports-pvc.yaml @@ -0,0 +1,10 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.persistentVolume.claimName }} +spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: rook-ceph-block + resources: + requests: + storage: {{ .Values.persistentVolume.storage }} diff --git a/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml new file mode 100644 index 0000000000..3ff64caf36 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml @@ -0,0 +1,54 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: shutdown-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230327" + entrypoint: run-tests + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: + - name: shutdown + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Shutdown.list" + - name: jobname + value: shutdown + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }} diff --git a/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml 
b/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml new file mode 100644 index 0000000000..d9df3378d2 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml @@ -0,0 +1,45 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: simple-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: nexus3-docker + podMetadata: + labels: + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} + arguments: + parameters: + - name: date-key + value: "20230601" + entrypoint: run-tests + templates: + - name: run-tests + steps: + - - name: cleanup + templateRef: + name: cleanup-reports-workflow + template: cleanup-reports + - - name: standby + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Standby.list" + - name: jobname + value: simple-standby + - name: reportname + value: standby.xml diff --git a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml new file mode 100644 index 0000000000..8db41e3941 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml @@ -0,0 +1,329 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: {{ .Values.workflowName }} + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230327" + entrypoint: run-tests + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: + - name: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Offline.list" + - name: jobname + value: cameras-offline + - name: standby + depends: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Standby.list" + - name: jobname + value: standby + - name: disabled + depends: standby + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Disabled.list" + - name: jobname + value: disabled + - name: enabled + depends: disabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enabled.list" + - name: jobname + value: enabled + - name: auxtel-housekeeping + depends: enabled + templateRef: + name: 
integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Housekeeping.list" + - name: jobname + value: auxtel-housekeeping + - name: maintel-housekeeping + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_MainTel_Housekeeping.list" + - name: jobname + value: maintel-housekeeping + - name: auxtel-image-verification + depends: auxtel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Image_Verification.list" + - name: jobname + value: auxtel-image-verification + - name: auxtel-latiss-daytime-checkout + depends: auxtel-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_LATISS_Checkout.list" + - name: jobname + value: auxtel-latiss-daytime-checkout + - name: auxtel-telescope-dome-daytime-checkout + depends: auxtel-latiss-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" + - name: jobname + value: auxtel-telescope-dome-daytime-checkout + - name: auxtel-telescope-slew-take-image-daytime-checkout + depends: auxtel-telescope-dome-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Slew_and_Take_Image_Checkout.list" + - name: jobname + value: auxtel-telescope-slew-take-image-daytime-checkout + - name: auxtel-prep-flat + depends: auxtel-telescope-slew-take-image-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Prep_Flat.list" + - name: jobname + value: auxtel-prep-flat + - name: auxtel-flat-calibrations + depends: auxtel-prep-flat + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Flat_Calibrations.list" + - name: jobname + value: auxtel-flat-calibrations + - name: auxtel-ptc-calibrations + depends: auxtel-flat-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_PTC_Calibrations.list" + - name: jobname + value: auxtel-ptc-calibrations + - name: auxtel-prep-onsky + depends: auxtel-ptc-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Prep_Onsky.list" + - name: jobname + value: auxtel-prep-onsky + - name: auxtel-cwfs-align + depends: auxtel-prep-onsky + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_WEP_Align.list" + - name: jobname + value: auxtel-cwfs-align + - name: auxtel-acq-take-seq-pointing + depends: auxtel-cwfs-align + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + 
parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_POINTING.list" + - name: jobname + value: auxtel-acq-take-seq-pointing + - name: auxtel-acq-take-seq-verify + depends: auxtel-acq-take-seq-pointing + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_VERIFY.list" + - name: jobname + value: auxtel-acq-take-seq-verify + - name: auxtel-acq-take-seq-test + depends: auxtel-acq-take-seq-verify + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_TEST.list" + - name: jobname + value: auxtel-acq-take-seq-test + - name: auxtel-acq-take-seq-nominal + depends: auxtel-acq-take-seq-test + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_NOMINAL.list" + - name: jobname + value: auxtel-acq-take-seq-nominal + - name: auxtel-stop + depends: auxtel-acq-take-seq-nominal + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Stop.list" + - name: jobname + value: auxtel-stop + - name: auxtel-shutdown + depends: auxtel-stop + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Shutdown.list" + - name: jobname + value: auxtel-shutdown + - name: enable-atcs + depends: auxtel-shutdown + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enable_ATCS.list" + - name: jobname + value: enable-atcs + - name: comcam-image-verification + depends: maintel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Image_Verification.list" + - name: jobname + value: comcam-image-verification + - name: comcam-calibrations + depends: comcam-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Calibrations.list" + - name: jobname + value: comcam-calibrations + - name: love-stress-test + depends: comcam-calibrations && enable-atcs + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_LOVE_Stress_Test.list" + - name: jobname + value: love-stress-test + - name: shutdown + depends: love-stress-test + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Shutdown.list" + - name: jobname + value: shutdown + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }} diff --git a/applications/control-system-test/charts/integration-testing/values.yaml 
b/applications/control-system-test/charts/integration-testing/values.yaml new file mode 100644 index 0000000000..d88872a8d0 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/values.yaml @@ -0,0 +1,20 @@ +# -- The name of the EFD instance. +envEfd: +# -- The S3 bucket name to use +s3Bucket: +# -- Container location of the RobotFramework reports +reportLocation: /home/saluser/robotframework_EFD/Reports +image: + # -- The image tag for the Integration Test runner container + tag: +# -- Name for the top-level workflow +workflowName: integration-test-workflow +# -- Name of the service account that runs the test workflows +serviceAccount: integration-tests +persistentVolume: + # -- PVC name for saving the reports + claimName: saved-reports + # -- Storage size request for the PVC + storage: 1Gi +# -- Instance label applied to jobs so they appear in the Argo CD application +jobLabelName: control-system-test diff --git a/applications/control-system-test/values-tucson-teststand.yaml b/applications/control-system-test/values-tucson-teststand.yaml new file mode 100644 index 0000000000..361e1468e2 --- /dev/null +++ b/applications/control-system-test/values-tucson-teststand.yaml @@ -0,0 +1,21 @@ +csc_collector: + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + +test42: + image: + repository: ts-dockerhub.lsst.org/test + pullPolicy: Always + env: + RUN_ARG: 42 + +integration-testing: + enabled: true + envEfd: tucson_teststand_efd + s3Bucket: rubinobs-lfa-tuc diff --git a/applications/control-system-test/values.yaml b/applications/control-system-test/values.yaml new file mode 100644 index 0000000000..be5d0e15cf --- /dev/null +++ b/applications/control-system-test/values.yaml @@ -0,0 +1,59 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +integration-testing: + # -- Enable the integration testing system + enabled: false + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
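+# (These arrive as Helm parameters set on the Argo CD Application that +# Phalanx generates for each environment.)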
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/datalinker/secrets.yaml b/applications/datalinker/secrets.yaml new file mode 100644 index 0000000000..3f830741d4 --- /dev/null +++ b/applications/datalinker/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/datalinker/templates/ingress-image.yaml b/applications/datalinker/templates/ingress-image.yaml index 889ba5e5ca..a2aa58f7b7 100644 --- a/applications/datalinker/templates/ingress-image.yaml +++ b/applications/datalinker/templates/ingress-image.yaml @@ -9,6 +9,13 @@ config: scopes: all: - "read:image" + # Request a delegated token to use for making calls to Butler server with the + # end-user's credentials. + delegate: + internal: + service: "datalinker" + scopes: + - "read:image" template: metadata: name: {{ include "datalinker.fullname" . 
}}-image diff --git a/applications/datalinker/values-idfint.yaml b/applications/datalinker/values-idfint.yaml index e69de29bb2..288a3da54a 100644 --- a/applications/datalinker/values-idfint.yaml +++ b/applications/datalinker/values-idfint.yaml @@ -0,0 +1,2 @@ +config: + separateSecrets: true diff --git a/applications/datalinker/values-idfprod.yaml b/applications/datalinker/values-idfprod.yaml index e69de29bb2..288a3da54a 100644 --- a/applications/datalinker/values-idfprod.yaml +++ b/applications/datalinker/values-idfprod.yaml @@ -0,0 +1,2 @@ +config: + separateSecrets: true diff --git a/applications/datalinker/values-minikube.yaml b/applications/datalinker/values-usdfint.yaml similarity index 100% rename from applications/datalinker/values-minikube.yaml rename to applications/datalinker/values-usdfint.yaml diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml new file mode 100644 index 0000000000..73a46f3723 --- /dev/null +++ b/applications/eas/Chart.yaml @@ -0,0 +1,172 @@ +apiVersion: v2 +name: eas +version: 1.0.0 +description: Deployment for the Environmental Awareness Systems CSCs +dependencies: +- name: csc_collector + version: 1.0.0 + repository: file://../../charts/csc_collector +- name: csc + alias: auxtel-ess01 + version: 1.0.0 + condition: auxtel-ess01.enabled + repository: file://../../charts/csc +- name: csc + alias: auxtel-ess01-sim + version: 1.0.0 + condition: auxtel-ess01-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: auxtel-ess02 + version: 1.0.0 + condition: auxtel-ess02.enabled + repository: file://../../charts/csc +- name: csc + alias: auxtel-ess02-sim + version: 1.0.0 + condition: auxtel-ess02-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: auxtel-ess03 + version: 1.0.0 + condition: auxtel-ess03.enabled + repository: file://../../charts/csc +- name: csc + alias: auxtel-ess03-sim + version: 1.0.0 + condition: auxtel-ess03-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: auxtel-ess04 + version: 1.0.0 + condition: auxtel-ess04.enabled + repository: file://../../charts/csc +- name: csc + alias: auxtel-ess04-sim + version: 1.0.0 + condition: auxtel-ess04-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: calibhill-ess01 + version: 1.0.0 + condition: calibhill-ess01.enabled + repository: file://../../charts/csc +- name: csc + alias: calibhill-ess01-sim + version: 1.0.0 + condition: calibhill-ess01-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: dimm1 + version: 1.0.0 + condition: dimm1.enabled + repository: file://../../charts/csc +- name: csc + alias: dimm1-sim + version: 1.0.0 + condition: dimm1-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: dimm2 + version: 1.0.0 + condition: dimm2.enabled + repository: file://../../charts/csc +- name: csc + alias: dimm2-sim + version: 1.0.0 + condition: dimm2-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: dsm1 + version: 1.0.0 + condition: dsm1.enabled + repository: file://../../charts/csc +- name: csc + alias: dsm1-sim + version: 1.0.0 + condition: dsm1-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: dsm2 + version: 1.0.0 + condition: dsm2.enabled + repository: file://../../charts/csc +- name: csc + alias: dsm2-sim + version: 1.0.0 + condition: dsm2-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: m2-ess106 + version: 1.0.0 + condition: m2-ess106.enabled + repository: file://../../charts/csc +- name: csc + 
alias: m2-ess106-sim + version: 1.0.0 + condition: m2-ess106-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome-ess01 + version: 1.0.0 + condition: mtdome-ess01.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome-ess01-sim + version: 1.0.0 + condition: mtdome-ess01-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome-ess02 + version: 1.0.0 + condition: mtdome-ess02.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome-ess02-sim + version: 1.0.0 + condition: mtdome-ess02-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome-ess03 + version: 1.0.0 + condition: mtdome-ess03.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome-ess03-sim + version: 1.0.0 + condition: mtdome-ess03-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: tma-ess01 + version: 1.0.0 + condition: tma-ess01.enabled + repository: file://../../charts/csc +- name: csc + alias: tma-ess01-sim + version: 1.0.0 + condition: tma-ess01-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: tma-ess104 + version: 1.0.0 + condition: tma-ess104.enabled + repository: file://../../charts/csc +- name: csc + alias: tma-ess104-sim + version: 1.0.0 + condition: tma-ess104-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: tma-ess105 + version: 1.0.0 + condition: tma-ess105.enabled + repository: file://../../charts/csc +- name: csc + alias: tma-ess105-sim + version: 1.0.0 + condition: tma-ess105-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: weatherforecast + version: 1.0.0 + repository: file://../../charts/csc diff --git a/applications/eas/README.md b/applications/eas/README.md new file mode 100644 index 0000000000..11c38edd72 --- /dev/null +++ b/applications/eas/README.md @@ -0,0 +1,52 @@ +# eas + +Deployment for the Environmental Awareness Systems CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| auxtel-ess01-sim.enabled | bool | `false` | Enable the ESS:201 simulator CSC | +| auxtel-ess01.enabled | bool | `false` | Enable the ESS:201 CSC | +| auxtel-ess02-sim.enabled | bool | `false` | Enable the ESS:202 simulator CSC | +| auxtel-ess02.enabled | bool | `false` | Enable the ESS:202 CSC | +| auxtel-ess03-sim.enabled | bool | `false` | Enable the ESS:203 simulator CSC | +| auxtel-ess03.enabled | bool | `false` | Enable the ESS:203 CSC | +| auxtel-ess04-sim.enabled | bool | `false` | Enable the ESS:204 simulator CSC | +| auxtel-ess04.enabled | bool | `false` | Enable the ESS:204 CSC | +| calibhill-ess01-sim.enabled | bool | `false` | Enable the ESS:301 simulator CSC | +| calibhill-ess01.enabled | bool | `false` | Enable the ESS:301 CSC | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| dimm1-sim.enabled | bool | `false` | Enable the DIMM:1 simulator CSC | +| dimm1.enabled | bool | `false` | Enable the DIMM:1 CSC | +| dimm2-sim.enabled | bool | `false` | Enable the DIMM:2 simulator CSC | +| dimm2.enabled | bool | `false` | Enable the DIMM:2 CSC | +| dsm1-sim.enabled | bool | `false` | Enable the DSM:1 simulator CSC | +| dsm1.enabled | bool | `false` | Enable the DSM:1 CSC | +| dsm2-sim.enabled | bool | `false` | Enable the DSM:2 simulator CSC | +| dsm2.enabled | bool | `false` | Enable the DSM:2 CSC | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| m2-ess106-sim.enabled | bool | `false` | Enable the ESS:106 simulator CSC | +| m2-ess106.enabled | bool | `false` | Enable the ESS:106 CSC | +| mtdome-ess01-sim.enabled | bool | `false` | Enable the ESS:101 simulator CSC | +| mtdome-ess01.enabled | bool | `false` | Enable the ESS:101 CSC | +| mtdome-ess02-sim.enabled | bool | `false` | Enable the ESS:102 simulator CSC | +| mtdome-ess02.enabled | bool | `false` | Enable the ESS:102 CSC | +| mtdome-ess03-sim.enabled | bool | `false` | Enable the ESS:103 simulator CSC | +| mtdome-ess03.enabled | bool | `false` | Enable the ESS:103 CSC | +| tma-ess01-sim.enabled | bool | `false` | Enable the ESS:1 simulator CSC | +| tma-ess01.enabled | bool | `false` | Enable the ESS:1 CSC | +| tma-ess104-sim.enabled | bool | `false` | Enable the ESS:104 simulator CSC | +| tma-ess104.enabled | bool | `false` | Enable the ESS:104 CSC | +| tma-ess105-sim.enabled | bool | `false` | Enable the ESS:105 simulator CSC | +| tma-ess105.enabled | bool | `false` | Enable the ESS:105 CSC | diff --git a/applications/eas/values-tucson-teststand.yaml b/applications/eas/values-tucson-teststand.yaml new file mode 100644 index 0000000000..5e35acc017 --- /dev/null +++ b/applications/eas/values-tucson-teststand.yaml @@ -0,0 +1,162 @@ +csc_collector: + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: meteoblue + key: ts/software/meteoblue + +auxtel-ess01-sim: + enabled: true + classifier: ess201 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 201 --simulate + +auxtel-ess02-sim: + enabled: true + classifier: ess202 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 202 --simulate + +auxtel-ess03-sim: + enabled: true + 
classifier: ess203 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 203 --simulate + +auxtel-ess04-sim: + enabled: true + classifier: ess204 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 204 --simulate + +calibhill-ess01-sim: + enabled: true + classifier: ess301 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 301 --simulate + +dimm1-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/dimm + pullPolicy: Always + env: + RUN_ARG: 1 --simulate + +dimm2-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/dimm + pullPolicy: Always + env: + RUN_ARG: 2 --simulate + +dsm1-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/dsm + pullPolicy: Always + env: + CSC_INDEX: 1 + RUN_ARG: --simulate 1 --state enabled + +dsm2-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/dsm + pullPolicy: Always + env: + CSC_INDEX: 2 + RUN_ARG: --simulate 2 --state enabled + +m2-ess106-sim: + enabled: true + classifier: ess106 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 106 --simulate + +mtdome-ess01-sim: + enabled: true + classifier: ess101 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 101 --simulate + +mtdome-ess02-sim: + enabled: true + classifier: ess102 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 102 --simulate + +mtdome-ess03-sim: + enabled: true + classifier: ess103 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 103 --simulate + +tma-ess01-sim: + enabled: true + classifier: ess1 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 1 --simulate + +tma-ess104-sim: + enabled: true + classifier: ess104 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 104 --simulate + +tma-ess105-sim: + enabled: true + classifier: ess105 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 105 --simulate + +weatherforecast: + image: + repository: ts-dockerhub.lsst.org/weatherforecast + pullPolicy: Always + env: + RUN_ARG: --state enabled + envSecrets: + - name: METEOBLUE_API_KEY + secretName: meteoblue + secretKey: api-key diff --git a/applications/eas/values.yaml b/applications/eas/values.yaml new file mode 100644 index 0000000000..14b4035f93 --- /dev/null +++ b/applications/eas/values.yaml @@ -0,0 +1,183 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
+ secrets: [] + +auxtel-ess01: + # -- Enable the ESS:201 CSC + enabled: false + +auxtel-ess01-sim: + # -- Enable the ESS:201 simulator CSC + enabled: false + +auxtel-ess02: + # -- Enable the ESS:202 CSC + enabled: false + +auxtel-ess02-sim: + # -- Enable the ESS:202 simulator CSC + enabled: false + +auxtel-ess03: + # -- Enable the ESS:203 CSC + enabled: false + +auxtel-ess03-sim: + # -- Enable the ESS:203 simulator CSC + enabled: false + +auxtel-ess04: + # -- Enable the ESS:204 CSC + enabled: false + +auxtel-ess04-sim: + # -- Enable the ESS:204 simulator CSC + enabled: false + +calibhill-ess01: + # -- Enable the ESS:301 CSC + enabled: false + +calibhill-ess01-sim: + # -- Enable the ESS:301 simulator CSC + enabled: false + +dimm1: + # -- Enable the DIMM:1 CSC + enabled: false + +dimm1-sim: + # -- Enable the DIMM:1 simulator CSC + enabled: false + +dimm2: + # -- Enable the DIMM:2 CSC + enabled: false + +dimm2-sim: + # -- Enable the DIMM:2 simulator CSC + enabled: false + +dsm1: + # -- Enable the DSM:1 CSC + enabled: false + +dsm1-sim: + # -- Enable the DSM:1 simulator CSC + enabled: false + +dsm2: + # -- Enable the DSM:2 CSC + enabled: false + +dsm2-sim: + # -- Enable the DSM:2 simulator CSC + enabled: false + +m2-ess106: + # -- Enable the ESS:106 CSC + enabled: false + +m2-ess106-sim: + # -- Enable the ESS:106 simulator CSC + enabled: false + +mtdome-ess01: + # -- Enable the ESS:101 CSC + enabled: false + +mtdome-ess01-sim: + # -- Enable the ESS:101 simulator CSC + enabled: false + +mtdome-ess02: + # -- Enable the ESS:102 CSC + enabled: false + +mtdome-ess02-sim: + # -- Enable the ESS:102 simulator CSC + enabled: false + +mtdome-ess03: + # -- Enable the ESS:103 CSC + enabled: false + +mtdome-ess03-sim: + # -- Enable the ESS:103 simulator CSC + enabled: false + +tma-ess01: + # -- Enable the ESS:1 CSC + enabled: false + +tma-ess01-sim: + # -- Enable the ESS:1 simulator CSC + enabled: false + +tma-ess104: + # -- Enable the ESS:104 CSC + enabled: false + +tma-ess104-sim: + # -- Enable the ESS:104 simulator CSC + enabled: false + +tma-ess105: + # -- Enable the ESS:105 CSC + enabled: false + +tma-ess105-sim: + # -- Enable the ESS:105 simulator CSC + enabled: false + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/exposurelog/values-usdfdev.yaml b/applications/exposurelog/values-usdfdev.yaml new file mode 100644 index 0000000000..5153d2fde7 --- /dev/null +++ b/applications/exposurelog/values-usdfdev.yaml @@ -0,0 +1,6 @@ +config: + site_id: usdfdev + butler_uri_1: s3://rubin-summit-users/butler.yaml +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/exposurelog/values-usdfprod.yaml b/applications/exposurelog/values-usdfprod.yaml new file mode 100644 index 0000000000..8f4f585d48 --- /dev/null +++ b/applications/exposurelog/values-usdfprod.yaml @@ -0,0 +1,6 @@ +config: + site_id: usdfprod + butler_uri_1: s3://rubin-summit-users/butler.yaml +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/filestore-backup/Chart.yaml b/applications/filestore-backup/Chart.yaml new file mode 100644 index 0000000000..49468d7b3d --- /dev/null +++ b/applications/filestore-backup/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: filestore-backup +version: 1.0.0 +description: Tool to manage Google Filestore backups +sources: + - https://github.com/lsst-sqre/rubin-google-filestore-tools +appVersion: 0.1.3 diff --git a/applications/filestore-backup/README.md b/applications/filestore-backup/README.md new file mode 100644 index 0000000000..716863d752 --- /dev/null +++ b/applications/filestore-backup/README.md @@ -0,0 +1,33 @@ +# filestore-backup + +Tool to manage Google Filestore backups + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the filestore-backup pods | +| config.backup.debug | bool | `false` | Turn on debugging mode | +| config.backup.schedule | string | fields are minute hour day-of-month month day-of-week | Backup schedule, in Unix cron job format | +| config.fileShare | string | `"share1"` | File Share name for filestore instance. Always "share1" unless storage is on an Enterprise tier | +| config.instance | string | None, must be set | Filestore instance (e.g. 
"fshare-instance-dev") | +| config.purge.debug | bool | `false` | Turn on debugging mode | +| config.purge.keep | int | `6` | Number of backups to keep when purging | +| config.purge.schedule | string | fields are minute hour day-of-month month day-of-week | Purge schedule, in Unix cron job format: | +| config.zone | string | None, must be set | Zone for Filestore instance (e.g. "b" from "us-central1-b") | +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.gcpProjectId | string | Set by Argo CD | GCP Project ID | +| global.gcpRegion | string | Set by Argo CD | GCP Region | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the filestore-backup image | +| image.repository | string | `"ghcr.io/lsst-sqre/rubin-google-filestore-tools"` | Filestore-Backup image to use | +| image.tag | string | The appVersion of the chart | Tag of filestore-backup image to use | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selector rules for the filestore-backup pods | +| podAnnotations | object | `{}` | Annotations for the filestore-backup pods | +| resources | object | `{}` | Resource limits and requests for the filestore-backup pods | +| tolerations | list | `[]` | Tolerations for the filestore-backup pods | diff --git a/applications/moneypenny/templates/_helpers.tpl b/applications/filestore-backup/templates/_helpers.tpl similarity index 70% rename from applications/moneypenny/templates/_helpers.tpl rename to applications/filestore-backup/templates/_helpers.tpl index ff1f0f98a7..4a25052fc3 100644 --- a/applications/moneypenny/templates/_helpers.tpl +++ b/applications/filestore-backup/templates/_helpers.tpl @@ -2,7 +2,7 @@ {{/* Expand the name of the chart. */}} -{{- define "moneypenny.name" -}} +{{- define "filestore-backup.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -11,7 +11,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "moneypenny.fullname" -}} +{{- define "filestore-backup.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} @@ -27,16 +27,16 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "moneypenny.chart" -}} +{{- define "filestore-backup.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Common labels */}} -{{- define "moneypenny.labels" -}} -app.kubernetes.io/name: {{ include "moneypenny.name" . }} -helm.sh/chart: {{ include "moneypenny.chart" . }} +{{- define "filestore-backup.labels" -}} +app.kubernetes.io/name: {{ include "filestore-backup.name" . }} +helm.sh/chart: {{ include "filestore-backup.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} @@ -47,14 +47,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "moneypenny.selectorLabels" -}} -app.kubernetes.io/name: {{ include "moneypenny.name" . 
}} +{{- define "filestore-backup.selectorLabels" -}} +app.kubernetes.io/name: {{ include "filestore-backup.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "moneypenny.serviceAccountName" -}} -{{ default (include "moneypenny.fullname" .) .Values.serviceAccount.name }} -{{- end -}} diff --git a/applications/filestore-backup/templates/cronjob-create-backup.yaml b/applications/filestore-backup/templates/cronjob-create-backup.yaml new file mode 100644 index 0000000000..25192a942f --- /dev/null +++ b/applications/filestore-backup/templates/cronjob-create-backup.yaml @@ -0,0 +1,49 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: create-backup + labels: + {{- include "filestore-backup.labels" . | nindent 4 }} +spec: + schedule: {{ .Values.config.backup.schedule | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + serviceAccountName: "filestore-backup" + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.tolerations }} + affinity: + {{- toYaml . | nindent 12 }} + {{- end }} + containers: + - name: create-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" + value: {{ required ".Values.global.gcpProjectId must be set to a valid Google Project ID" .Values.global.gcpProjectId | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.config.zone }}" + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" + value: {{ .Values.config.instance | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" + value: {{ .Values.config.fileShare | quote }} + {{- with .Values.config.backup.debug }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" + value: "true" + {{- end }} + command: [ "create_backup" ] diff --git a/applications/filestore-backup/templates/cronjob-purge-backup.yaml b/applications/filestore-backup/templates/cronjob-purge-backup.yaml new file mode 100644 index 0000000000..298a7eb2e7 --- /dev/null +++ b/applications/filestore-backup/templates/cronjob-purge-backup.yaml @@ -0,0 +1,49 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: purge-backup + labels: + {{- include "filestore-backup.labels" . | nindent 4 }} +spec: + schedule: {{ .Values.config.purge.schedule | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + serviceAccountName: "filestore-backup" + {{- with .Values.tolerations }} + tolerations: {{ toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.tolerations }} + affinity: {{ toYaml . 
| nindent 12 }} + {{- end }} + containers: + - name: purge-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" + value: {{ required ".Values.global.gcpProjectId must be set to a valid Google Project ID" .Values.global.gcpProjectId | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.config.zone }}" + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" + value: {{ .Values.config.instance | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" + value: {{ .Values.config.fileShare | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_KEEP_BACKUPS" + value: {{ .Values.config.purge.keep | quote }} + {{- with .Values.config.purge.debug }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" + value: "true" + {{- end }} + command: [ "purge_backup" ] diff --git a/applications/filestore-backup/templates/serviceaccount.yaml b/applications/filestore-backup/templates/serviceaccount.yaml new file mode 100644 index 0000000000..65f0f1e837 --- /dev/null +++ b/applications/filestore-backup/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "filestore-backup" + labels: + {{- include "filestore-backup.labels" . | nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: "filestore-tool@{{ .Values.global.gcpProjectId }}.iam.gserviceaccount.com" diff --git a/applications/filestore-backup/values-idfdev.yaml b/applications/filestore-backup/values-idfdev.yaml new file mode 100644 index 0000000000..4818994c15 --- /dev/null +++ b/applications/filestore-backup/values-idfdev.yaml @@ -0,0 +1,8 @@ +config: + instance: "fshare-instance-dev" + zone: "b" + backup: + debug: true + purge: + debug: true + keep: 3 diff --git a/applications/filestore-backup/values.yaml b/applications/filestore-backup/values.yaml new file mode 100644 index 0000000000..0dd1d494e5 --- /dev/null +++ b/applications/filestore-backup/values.yaml @@ -0,0 +1,79 @@ +# Default values for filestore-backup. + +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "" + +# -- Resource limits and requests for the filestore-backup pods +resources: {} + +# -- Annotations for the filestore-backup pods +podAnnotations: {} + +# -- Node selector rules for the filestore-backup pods +nodeSelector: {} + +# -- Tolerations for the filestore-backup pods +tolerations: [] + +# -- Affinity rules for the filestore-backup pods +affinity: {} + +image: + # -- Filestore-Backup image to use + repository: ghcr.io/lsst-sqre/rubin-google-filestore-tools + + # -- Pull policy for the filestore-backup image + pullPolicy: "IfNotPresent" + + # -- Tag of filestore-backup image to use + # @default -- The appVersion of the chart + tag: "" + +config: + # -- Filestore instance (e.g. "fshare-instance-dev") + # @default -- None, must be set + instance: "" + + # -- Zone for Filestore instance (e.g. "b" from "us-central1-b") + # @default -- None, must be set + zone: "" + + # -- File Share name for filestore instance. 
Always "share1" unless + # storage is on an Enterprise tier + fileShare: "share1" + backup: + # -- Turn on debugging mode + debug: false + + # -- Backup schedule, in Unix cron job format + # @default -- fields are minute hour day-of-month month day-of-week + schedule: "0 10 * * *" + purge: + # -- Turn on debugging mode + debug: false + + # -- Purge schedule, in Unix cron job format: + # @default -- fields are minute hour day-of-month month day-of-week + schedule: "45 10 * * *" + + # -- Number of backups to keep when purging + keep: 6 + +global: + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- GCP Project ID + # @default -- Set by Argo CD + + gcpProjectId: "" + + # -- GCP Region + # @default -- Set by Argo CD + gcpRegion: "" diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index 211cd6641f..2bf9e7af96 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,11 +5,11 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.3.1 +appVersion: 9.6.1 dependencies: - name: redis - version: 1.0.8 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 0bf73c6bd9..9facb8b1e0 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,13 +17,14 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | -| cloudsql.resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | +| cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy pod | | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | +| config.cadcBaseUuid | string | Disabled | Whether to support the `/auth/cadc/userinfo` route. If set, this UUID is used as the namespace to generate UUID v5 `sub` claims returned by this route to meet the needs of CADC authentication code. | | config.cilogon.clientId | string | `""` | CILogon client ID. One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. 
| | config.cilogon.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | | config.cilogon.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | @@ -48,6 +49,7 @@ Authentication and identity system | config.ldap.groupBaseDn | string | None, must be set | Base DN for the LDAP search to find a user's groups | | config.ldap.groupMemberAttr | string | `"member"` | Member attribute of the object class. Values must match the username returned in the token from the OpenID Connect authentication server. | | config.ldap.groupObjectClass | string | `"posixGroup"` | Object class containing group information | +| config.ldap.groupSearchByDn | bool | `false` | Whether to search for group membership by user DN rather than bare usernames. Most LDAP servers use full DNs for group membership, so normally this should be set to true, but it requires `userBaseDn` also be set. | | config.ldap.kerberosConfig | string | Use anonymous binds | Enable GSSAPI (Kerberos) binds to LDAP using this `krb5.conf` file. If set, `ldap-keytab` must be set in the Gafaelfawr Vault secret. Set either this or `userDn`, not both. | | config.ldap.nameAttr | string | `"displayName"` | Attribute containing the user's full name | | config.ldap.uidAttr | string | Get UID from upstream authentication provider | Attribute containing the user's UID number (set to `uidNumber` for most LDAP servers) | @@ -83,17 +85,19 @@ Authentication and identity system | ingress.additionalHosts | list | `[]` | Defines additional FQDNs for Gafaelfawr. This doesn't work for cookie or browser authentication, but for token-based services like git-lfs or the webdav server it does. 
| | maintenance.affinity | object | `{}` | Affinity rules for Gafaelfawr maintenance and audit pods | | maintenance.auditSchedule | string | `"30 3 * * *"` | Cron schedule string for Gafaelfawr data consistency audit (in UTC) | +| maintenance.cleanupSeconds | int | 86400 (1 day) | How long to keep old jobs around before deleting them | +| maintenance.deadlineSeconds | int | 300 (5 minutes) | How long the job is allowed to run before it will be terminated | | maintenance.maintenanceSchedule | string | `"5 * * * *"` | Cron schedule string for Gafaelfawr periodic maintenance (in UTC) | | maintenance.nodeSelector | object | `{}` | Node selection rules for Gafaelfawr maintenance and audit pods | | maintenance.podAnnotations | object | `{}` | Annotations for Gafaelfawr maintenance and audit pods | -| maintenance.resources | object | `{}` | Resource limits and requests for Gafaelfawr maintenance and audit pods | +| maintenance.resources | object | See `values.yaml` | Resource limits and requests for Gafaelfawr maintenance and audit pods | | maintenance.tolerations | list | `[]` | Tolerations for Gafaelfawr maintenance and audit pods | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Gafaelfawr frontend pod | | operator.affinity | object | `{}` | Affinity rules for the token management pod | | operator.nodeSelector | object | `{}` | Node selection rules for the token management pod | | operator.podAnnotations | object | `{}` | Annotations for the token management pod | -| operator.resources | object | `{}` | Resource limits and requests for the Gafaelfawr Kubernetes operator | +| operator.resources | object | See `values.yaml` | Resource limits and requests for the Gafaelfawr Kubernetes operator. The limits are artificially higher since the operator pod is also where we manually run `gafaelfawr audit --fix`, which requires more CPU and memory. | | operator.tolerations | list | `[]` | Tolerations for the token management pod | | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | redis.affinity | object | `{}` | Affinity rules for the Redis pod | @@ -109,5 +113,5 @@ Authentication and identity system | redis.resources | object | See `values.yaml` | Resource limits and requests for the Redis pod | | redis.tolerations | list | `[]` | Tolerations for the Redis pod | | replicaCount | int | `1` | Number of web frontend pods to start | -| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | +| resources | object | See `values.yaml` | Resource limits and requests for the Gafaelfawr frontend pod | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | diff --git a/applications/gafaelfawr/crds/ingress.yaml b/applications/gafaelfawr/crds/ingress.yaml index d81837e8f4..89d7ddbe2a 100644 --- a/applications/gafaelfawr/crds/ingress.yaml +++ b/applications/gafaelfawr/crds/ingress.yaml @@ -172,6 +172,13 @@ spec: - true required: - anonymous + username: + type: string + description: >- + Restrict access to this ingress to the given username. All + other users, regardless of their scopes, will receive 403 + errors. The user's token must still satisfy any scope + constraints. template: type: object description: "The template used to create the ingress." 
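Note: the username restriction added to the GafaelfawrIngress CRD above composes with scope checks rather than replacing them. A minimal hypothetical GafaelfawrIngress using it might look as follows (resource names, hostname, scope, and backend service are illustrative, not taken from this change):

apiVersion: gafaelfawr.lsst.io/v1alpha1
kind: GafaelfawrIngress
metadata:
  name: example-restricted
config:
  baseUrl: "https://data.example.com"
  scopes:
    all:
      - "read:image"
  # Only this user may use the ingress; any other user receives a 403 even
  # if their token carries read:image.
  username: "bot-example"
template:
  metadata:
    name: example-restricted
  spec:
    rules:
      - host: "data.example.com"
        http:
          paths:
            - path: "/api/example"
              pathType: "Prefix"
              backend:
                service:
                  name: "example"
                  port:
                    number: 8080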
diff --git a/applications/gafaelfawr/templates/configmap.yaml b/applications/gafaelfawr/templates/configmap.yaml index e5c02c34d9..d5511ecc23 100644 --- a/applications/gafaelfawr/templates/configmap.yaml +++ b/applications/gafaelfawr/templates/configmap.yaml @@ -27,6 +27,9 @@ {{- if .Values.config.slackAlerts }} slackWebhookFile: "/etc/gafaelfawr/secrets/slack-webhook" {{- end }} + {{- if .Values.config.cadcBaseUuid }} + cadcBaseUuid: {{ .Values.config.cadcBaseUuid | quote }} + {{- end }} {{- if .Values.config.github.clientId }} @@ -126,6 +129,7 @@ {{- end }} groupObjectClass: {{ .Values.config.ldap.groupObjectClass | quote }} groupMemberAttr: {{ .Values.config.ldap.groupMemberAttr | quote }} + groupSearchByDn: {{ .Values.config.ldap.groupSearchByDn }} {{- if .Values.config.ldap.userBaseDn }} userBaseDn: {{ .Values.config.ldap.userBaseDn | quote }} userSearchAttr: {{ .Values.config.ldap.userSearchAttr | quote }} diff --git a/applications/gafaelfawr/templates/cronjob-audit.yaml b/applications/gafaelfawr/templates/cronjob-audit.yaml index 741313c352..a69bdfb895 100644 --- a/applications/gafaelfawr/templates/cronjob-audit.yaml +++ b/applications/gafaelfawr/templates/cronjob-audit.yaml @@ -10,6 +10,8 @@ spec: concurrencyPolicy: "Forbid" jobTemplate: spec: + activeDeadlineSeconds: {{ .Values.maintenance.deadlineSeconds }} + ttlSecondsAfterFinished: {{ .Values.maintenance.cleanupSeconds }} template: metadata: {{- with .Values.maintenance.podAnnotations }} diff --git a/applications/gafaelfawr/templates/cronjob-maintenance.yaml b/applications/gafaelfawr/templates/cronjob-maintenance.yaml index 22364d99e2..1635bcf17f 100644 --- a/applications/gafaelfawr/templates/cronjob-maintenance.yaml +++ b/applications/gafaelfawr/templates/cronjob-maintenance.yaml @@ -9,6 +9,8 @@ spec: concurrencyPolicy: "Forbid" jobTemplate: spec: + activeDeadlineSeconds: {{ .Values.maintenance.deadlineSeconds }} + ttlSecondsAfterFinished: {{ .Values.maintenance.cleanupSeconds }} template: metadata: {{- with .Values.maintenance.podAnnotations }} diff --git a/applications/gafaelfawr/values-base.yaml b/applications/gafaelfawr/values-base.yaml index 586d34c848..68f389572f 100644 --- a/applications/gafaelfawr/values-base.yaml +++ b/applications/gafaelfawr/values-base.yaml @@ -3,11 +3,28 @@ redis: storageClass: "rook-ceph-block" config: + logLevel: "DEBUG" slackAlerts: true databaseUrl: "postgresql://gafaelfawr@postgresdb01.ls.lsst.org/gafaelfawr" - github: - clientId: "ec88b9b897f302b620d1" + oidc: + clientId: "rsp-bts" + audience: "rsp-bts" + loginUrl: "https://keycloak.ls.lsst.org/realms/master/protocol/openid-connect/auth" + tokenUrl: "https://keycloak.ls.lsst.org/realms/master/protocol/openid-connect/token" + issuer: "https://keycloak.ls.lsst.org/realms/master" + scopes: + - "openid" + usernameClaim: "preferred_username" + + ldap: + url: "ldap://ipa.lsst.org" + userDn: "uid=svc_rsp,cn=users,cn=accounts,dc=lsst,dc=cloud" + userBaseDn: "cn=users,cn=accounts,dc=lsst,dc=cloud" + uidAttr: "uidNumber" + gidAttr: "gidNumber" + groupBaseDn: "cn=groups,cn=accounts,dc=lsst,dc=cloud" + groupSearchByDn: true # Support OpenID Connect clients like Chronograf. oidcServer: @@ -16,82 +33,22 @@ config: # Allow access by GitHub team. 
groupMapping: "admin:provision": - - github: - organization: "lsst-sqre" - team: "square" + - "sqre" "exec:admin": - - github: - organization: "lsst-sqre" - team: "square" + - "k8s-manke" + - "sqre" "exec:internal-tools": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "exec:notebook": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "exec:portal": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "read:image": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "read:tap": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "write:sasquatch": - - github: - organization: "lsst-sqre" - team: "square" + - "sqre" initialAdmins: - "afausti" diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml index 91f77427e0..c27d939d26 100644 --- a/applications/gafaelfawr/values-idfdev.yaml +++ b/applications/gafaelfawr/values-idfdev.yaml @@ -30,6 +30,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "db8626e0-3b93-45c0-89ab-3058b0ed39fe" + # User quota settings for services. quota: default: diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml index 9d85b88fe9..533e39f197 100644 --- a/applications/gafaelfawr/values-idfint.yaml +++ b/applications/gafaelfawr/values-idfint.yaml @@ -31,6 +31,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "dd5cd3ee-4239-48e4-b0e3-282f2328b9d1" + # User quota settings for services. quota: default: diff --git a/applications/gafaelfawr/values-idfprod.yaml b/applications/gafaelfawr/values-idfprod.yaml index dfdd5df5ea..a8cabefc8b 100644 --- a/applications/gafaelfawr/values-idfprod.yaml +++ b/applications/gafaelfawr/values-idfprod.yaml @@ -26,6 +26,9 @@ config: firestore: project: "rsp-firestore-stable-e8eb" + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "5f0eb655-0e72-4948-a6a5-a94c0be9019f" + # User quota settings for services. 
quota: default: diff --git a/applications/gafaelfawr/values-roundtable-prod.yaml b/applications/gafaelfawr/values-roundtable-prod.yaml index 82c14bd15d..b91f96b362 100644 --- a/applications/gafaelfawr/values-roundtable-prod.yaml +++ b/applications/gafaelfawr/values-roundtable-prod.yaml @@ -18,11 +18,22 @@ config: oidcServer: enabled: false + knownScopes: + "write:git-lfs": >- + Can write objects to Git LFS storage bucket + groupMapping: "exec:admin": - github: organization: "lsst-sqre" team: "square" + "write:git-lfs": + - github: + organization: "lsst" + team: "data-management" + - github: + organization: "lsst" + team: "simulations" initialAdmins: - "afausti" @@ -34,3 +45,8 @@ config: errorFooter: | To report problems or ask for help, contact #dm-square on the LSSTC Slack. + +ingress: + additionalHosts: + - "git-lfs.lsst.cloud" + - "git-lfs-rw.lsst.cloud" diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index 74d3c872c6..7056c16d50 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -4,6 +4,13 @@ replicaCount: 2 redis: persistence: storageClass: "wekafs--sdf-k8s01" + resources: + limits: + cpu: "1" + memory: "200Mi" + requests: + cpu: "50m" + memory: "50Mi" config: internalDatabase: true @@ -11,6 +18,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "efa0a347-b648-4948-a987-055efbf6802a" + oidc: clientId: rubin-usdf-rsp-dev audience: "rubin-usdf-rsp-dev" diff --git a/applications/gafaelfawr/values-usdfint.yaml b/applications/gafaelfawr/values-usdfint.yaml new file mode 100644 index 0000000000..db082d66f5 --- /dev/null +++ b/applications/gafaelfawr/values-usdfint.yaml @@ -0,0 +1,227 @@ +replicaCount: 2 + +# Use the CSI storage class so that we can use snapshots. +redis: + persistence: + storageClass: "wekafs--sdf-k8s01" + +config: + internalDatabase: true + + oidcServer: + enabled: true + + # Support generating user metadata for CADC authentication code. 
+ cadcBaseUuid: "82c6fc76-b7d3-4368-92a9-6a468dfa23dc" + + oidc: + clientId: vcluster--usdf-rsp-int + audience: "vcluster--usdf-rsp-int" + loginUrl: "https://dex.slac.stanford.edu/auth" + tokenUrl: "https://dex.slac.stanford.edu/token" + issuer: "https://dex.slac.stanford.edu" + scopes: + - "openid" + - "email" + - "groups" + - "profile" + usernameClaim: "name" + + ldap: + url: ldaps://ldap-unix.slac.stanford.edu:636 + groupBaseDn: ou=Group,dc=slac,dc=stanford,dc=edu + groupObjectClass: posixGroup + groupMemberAttr: memberUid + userBaseDn: ou=Accounts,dc=slac,dc=stanford,dc=edu + userSearchAttr: uid + addUserGroup: false + uidAttr: uidNumber + gidAttr: gidNumber + nameAttr: gecos + + groupMapping: + "admin:token": + - "rubinmgr" + - "unix-admin" + "exec:admin": + - "rubinmgr" + - "unix-admin" + "exec:notebook": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "exec:portal": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "exec:user": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "read:tap": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "read:image": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s 
+ - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "write:sasquatch": + - "rubinmgr" + - "unix-admin" + + initialAdmins: + - "afausti" + - "athor" + - "cbanek" + - "frossie" + - "jonathansick" + - "rra" + - "simonkrughoff" + - "ytl" + - "ppascual" diff --git a/applications/gafaelfawr/values-usdfprod.yaml b/applications/gafaelfawr/values-usdfprod.yaml index eb620aba7d..d7909f6996 100644 --- a/applications/gafaelfawr/values-usdfprod.yaml +++ b/applications/gafaelfawr/values-usdfprod.yaml @@ -11,6 +11,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "595f5a03-bef4-473b-8e5a-588d87f13799" + oidc: clientId: rubin-usdf-rsp audience: "rubin-usdf-rsp" diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index ed8e2eedd7..8c97dca5d9 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -21,7 +21,14 @@ image: tag: "" # -- Resource limits and requests for the Gafaelfawr frontend pod -resources: {} +# @default -- See `values.yaml` +resources: + limits: + cpu: "1" + memory: "300Mi" + requests: + cpu: "100m" + memory: "150Mi" # -- Annotations for the Gafaelfawr frontend pod podAnnotations: {} @@ -67,6 +74,12 @@ config: # `slack-webhook` secret must also be set. slackAlerts: false + # -- Whether to support the `/auth/cadc/userinfo` route. If set, this UUID + # is used as the namespace to generate UUID v5 `sub` claims returned by this + # route to meet the needs of CADC authentication code. + # @default -- Disabled + cadcBaseUuid: "" + github: # -- GitHub client ID. One and only one of this, `config.cilogon.clientId`, # or `config.oidc.clientId` must be set. @@ -186,6 +199,12 @@ config: # returned in the token from the OpenID Connect authentication server. groupMemberAttr: "member" + # -- Whether to search for group membership by user DN rather than bare + # usernames. Most LDAP servers use full DNs for group membership, so + # normally this should be set to true, but it requires `userBaseDn` also + # be set. 
+ groupSearchByDn: false + # -- Base DN for the LDAP search to find a user's entry # @default -- Get user metadata from the upstream authentication provider userBaseDn: "" @@ -297,7 +316,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.11" + tag: "1.33.16" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" @@ -312,7 +331,14 @@ cloudsql: serviceAccount: "" # -- Resource limits and requests for the Cloud SQL Proxy pod - resources: {} + # @default -- See `values.yaml` + resources: + limits: + cpu: "100m" + memory: "20Mi" + requests: + cpu: "5m" + memory: "7Mi" # -- Annotations for the Cloud SQL Proxy pod podAnnotations: {} @@ -333,8 +359,23 @@ maintenance: # -- Cron schedule string for Gafaelfawr periodic maintenance (in UTC) maintenanceSchedule: "5 * * * *" + # -- How long the job is allowed to run before it will be terminated + # @default -- 300 (5 minutes) + deadlineSeconds: 300 + + # -- How long to keep old jobs around before deleting them + # @default -- 86400 (1 day) + cleanupSeconds: 86400 + # -- Resource limits and requests for Gafaelfawr maintenance and audit pods - resources: {} + # @default -- See `values.yaml` + resources: + limits: + cpu: "1" + memory: "300Mi" + requests: + cpu: "100m" + memory: "150Mi" # -- Annotations for Gafaelfawr maintenance and audit pods podAnnotations: {} @@ -349,8 +390,17 @@ maintenance: affinity: {} operator: - # -- Resource limits and requests for the Gafaelfawr Kubernetes operator - resources: {} + # -- Resource limits and requests for the Gafaelfawr Kubernetes operator. + # The limits are artificially higher since the operator pod is also where we + # manually run `gafaelfawr audit --fix`, which requires more CPU and memory. 
+ # @default -- See `values.yaml` + resources: + limits: + cpu: "500m" + memory: "500Mi" + requests: + cpu: "10m" + memory: "150Mi" # -- Annotations for the token management pod podAnnotations: {} @@ -398,8 +448,10 @@ redis: resources: limits: cpu: "1" + memory: "40Mi" requests: - cpu: "100m" + cpu: "50m" + memory: "6Mi" # -- Pod annotations for the Redis pod podAnnotations: {} diff --git a/applications/giftless/Chart.yaml b/applications/giftless/Chart.yaml index 08dbe29ad5..23c43ec83e 100644 --- a/applications/giftless/Chart.yaml +++ b/applications/giftless/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: giftless -version: 0.0.1 +version: 1.0.0 description: Git-LFS server with GCS S3 backend, with Rubin-specific auth sources: - https://github.com/datopian/giftless diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 4979510629..c228c423dc 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -11,9 +11,11 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the giftless frontend pod | -| config | object | `{"bucketName":"","projectName":""}` | Configuration for giftless server | -| config.bucketName | string | Must be overridden in environment-specific values file | Bucket name for GCS LFS Object bucket | -| config.projectName | string | Must be overridden in environment-specific values file | Project name for GCS LFS Object bucket | +| config | object | `{"bucketName":"","serviceAccountReadonly":"","serviceAccountReadwrite":"","storageProjectName":""}` | Configuration for giftless server | +| config.bucketName | string | Must be overridden in environment-specific values file | Bucket name for GCS LFS Object Storage bucket | +| config.serviceAccountReadonly | string | Must be overridden in environment-specific values file | Read-only service account name for GCS LFS Object Storage bucket | +| config.serviceAccountReadwrite | string | Must be overridden in environment-specific values file | Read-write service account name for GCS LFS Object Storage bucket | +| config.storageProjectName | string | Must be overridden in environment-specific values file | Project name for GCS LFS Object Storage bucket | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the giftless image | @@ -28,6 +30,12 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | podAnnotations | object | `{}` | Annotations for the giftless frontend pod | | resources | object | `{}` | Resource limits and requests for the giftless frontend pod | | server.debug | bool | `false` | Turn on debugging mode | -| server.processes | int | `2` | Number of processes for server | -| server.threads | int | `2` | Number of threads per process | +| server.readonly | object | `{"processes":2,"replicas":1,"threads":2}` | Values for readonly server | +| server.readonly.processes | int | `2` | Number of processes for readonly server | +| server.readonly.replicas | int | `1` | Number of replicas for readonly server | +| server.readonly.threads | int | `2` | Number of threads per readonly process | +| server.readwrite | object | `{"processes":2,"replicas":1,"threads":2}` | Values for readwrite server | +| server.readwrite.processes | int | `2` 
| Number of processes for readwrite server | +| server.readwrite.replicas | int | `1` | Number of replicas for readwrite server | +| server.readwrite.threads | int | `2` | Number of threads per readwrite process | | tolerations | list | `[]` | Tolerations for the giftless frontend pod | diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index 7cb4901730..c0d486d9b3 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ include "giftless.fullname" . }} + name: {{ template "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . | nindent 4 }} data: @@ -14,9 +14,9 @@ data: options: storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" storage_options: - account_key_file: "/etc/secret/giftless-gcp-key.json" - project_name: {{ .Values.config.projectName | quote }} + project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} + serviceaccount_email: {{ .Values.config.serviceAccountReadonly }} --- apiVersion: v1 kind: ConfigMap @@ -34,6 +34,8 @@ data: options: storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" storage_options: - account_key_file: "/etc/secret/giftless-gcp-key.json" - project_name: {{ .Values.config.projectName | quote }} + project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} + serviceaccount_email: {{ .Values.config.serviceAccountReadwrite }} + noverify_upload: true + diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index ab17ea9b2c..96ab1977fc 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -1,20 +1,21 @@ # Note that this creates two nearly-identical deployments, one named -# "giftless" and one named "giftless-rw". The only real difference -# between them is that their configuration configmaps and secrets are -# different: one has the configuration for read-only access to the Git -# LFS server, and other has configuration for read-write access. It is -# possible that we might in future want to further split the -# configuration in order to allow, for instance, different numbers of -# processes and threads for the read-write and the read-only servers, on -# the grounds that our Git LFS usage is read-mostly. +# "giftless-ro" and one named "giftless-rw". The only real difference +# between them is that their configuration configmaps and +# serviceaccounts are different: one has the configuration for read-only +# access to the Git LFS server, and the other has configuration for +# read-write access. It is possible that we might in future want to +# further split the configuration in order to allow, for instance, +# different numbers of processes and threads for the read-write and the +# read-only servers, on the grounds that our Git LFS usage is +# read-mostly. apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . | nindent 4 }} spec: - replicas: 1 + replicas: {{ .Values.server.readonly.replicas }} selector: matchLabels: {{- include "giftless.selectorLabels" . | nindent 6 }} @@ -30,7 +31,7 @@ spec: labels: {{- include "giftless.selectorLabels" .
| nindent 8 }} spec: - automountServiceAccountToken: false + serviceAccountName: "git-lfs-ro" containers: - name: {{ .Chart.Name }} command: @@ -41,9 +42,9 @@ spec: - "-T" - "--die-on-term" - "--threads" - - "{{- .Values.server.threads }}" + - "{{- .Values.server.readonly.threads }}" - "-p" - - "{{- .Values.server.processes }}" + - "{{- .Values.server.readonly.processes }}" - "--manage-script-name" - "--callable" - "app" @@ -75,8 +76,6 @@ spec: mountPath: "/tmp" - name: "giftless-config" mountPath: "/etc/giftless" - - name: "giftless-secret" - mountPath: "/etc/secret" securityContext: runAsNonRoot: true runAsUser: 1000 @@ -86,10 +85,7 @@ spec: emptyDir: {} - name: "giftless-config" configMap: - name: {{ include "giftless.fullname" . | quote }} - - name: "giftless-secret" - secret: - secretName: {{ include "giftless.fullname" . | quote }} + name: {{ template "giftless.fullname" . }}-ro {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -110,7 +106,7 @@ metadata: labels: {{- include "giftless-rw.labels" . | nindent 4 }} spec: - replicas: 1 + replicas: {{ .Values.server.readwrite.replicas }} selector: matchLabels: {{- include "giftless-rw.selectorLabels" . | nindent 6 }} @@ -126,7 +122,7 @@ spec: labels: {{- include "giftless-rw.selectorLabels" . | nindent 8 }} spec: - automountServiceAccountToken: false + serviceAccountName: "git-lfs-rw" containers: - name: {{ .Chart.Name }} command: @@ -137,9 +133,9 @@ spec: - "-T" - "--die-on-term" - "--threads" - - "{{- .Values.server.threads }}" + - "{{- .Values.server.readwrite.threads }}" - "-p" - - "{{- .Values.server.processes }}" + - "{{- .Values.server.readwrite.processes }}" - "--manage-script-name" - "--callable" - "app" @@ -171,8 +167,6 @@ spec: mountPath: "/tmp" - name: "giftless-config" mountPath: "/etc/giftless" - - name: "giftless-secret" - mountPath: "/etc/secret" securityContext: runAsNonRoot: true runAsUser: 1000 @@ -183,9 +177,6 @@ spec: - name: "giftless-config" configMap: name: {{ template "giftless.fullname" . }}-rw - - name: "giftless-secret" - secret: - secretName: {{ include "giftless.fullname" . | quote }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index 499575871c..77e97b5227 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -1,7 +1,7 @@ apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . | nindent 4 }} config: @@ -15,7 +15,7 @@ template: {{- with .Values.ingress.annotations }} {{- toYaml . | nindent 6 }} {{- end }} - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro spec: tls: - hosts: @@ -29,7 +29,54 @@ template: pathType: "Prefix" backend: service: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro + port: + number: 5000 +--- +# +# We need this one because the default Giftless transfer implementation +# generates a Bearer token for verification...but since we're going +# through Gafaelfawr, that gets replaced with the Gafaelfawr token. +# Then verification fails but the upload succeeds. +# +# This just means Gafaelfawr lets any verification request through. +# That does mean that absolutely anyone can verify stored objects. 
+# Since we already provide exactly that service anonymously on the +# readonly endpoint, I don't think this changes anything. +# +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: {{ include "giftless.fullname" . }}-rw-anon-verify + labels: + {{- include "giftless.labels" . | nindent 4 }} +config: + baseUrl: "https://{{ .Values.ingress.hostname.readwrite }}" + scopes: + anonymous: true +template: + metadata: + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + nginx.ingress.kubernetes.io/use-regex: "true" + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 6 }} + {{- end }} + name: {{ include "giftless.fullname" . }}-rw-anon-verify + spec: + tls: + - hosts: + - {{ .Values.ingress.hostname.readwrite | quote }} + secretName: tls-rw + rules: + - host: {{ .Values.ingress.hostname.readwrite | quote }} + http: + paths: + - path: "/.*/objects/storage/verify$" + pathType: "ImplementationSpecific" + backend: + service: + name: {{ include "giftless.fullname" . }}-rw port: number: 5000 --- diff --git a/applications/giftless/templates/service.yaml b/applications/giftless/templates/service.yaml index 1ce6a9be64..31dd6357d5 100644 --- a/applications/giftless/templates/service.yaml +++ b/applications/giftless/templates/service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . | nindent 4 }} spec: diff --git a/applications/giftless/templates/serviceaccount.yaml b/applications/giftless/templates/serviceaccount.yaml new file mode 100644 index 0000000000..d7fcd57bb1 --- /dev/null +++ b/applications/giftless/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "git-lfs-ro" + labels: + {{- include "giftless.labels" . | nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: {{ required "config.serviceAccountReadonly must be set to a valid Google service account" .Values.config.serviceAccountReadonly | quote }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "git-lfs-rw" + labels: + {{- include "giftless-rw.labels" . | nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: {{ required "config.serviceAccountReadwrite must be set to a valid Google service account" .Values.config.serviceAccountReadwrite | quote }} diff --git a/applications/giftless/templates/vault-secrets.yaml b/applications/giftless/templates/vault-secrets.yaml deleted file mode 100644 index 0466225d3c..0000000000 --- a/applications/giftless/templates/vault-secrets.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: {{ include "giftless.fullname" . }} - labels: - {{- include "giftless.labels" . 
| nindent 4 }} -spec: - path: "{{ .Values.global.vaultSecretsPath }}/giftless" - type: "Opaque" diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 5bbf6b5651..14c7681165 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,3 +1,7 @@ +image: + pullPolicy: "Always" + repository: "docker.io/lsstsqre/giftless" + tag: "upstream" server: debug: true ingress: @@ -5,5 +9,7 @@ ingress: readonly: "git-lfs-dev.lsst.cloud" readwrite: "git-lfs-dev-rw.lsst.cloud" config: - projectName: "plasma-geode-127520" - bucketName: "rubin-gitlfs-experimental" + storageProjectName: "data-curation-prod-fbdb" + bucketName: "rubin-us-central1-git-lfs-dev" + serviceAccountReadonly: "git-lfs-ro@roundtable-dev-abe2.iam.gserviceaccount.com" + serviceAccountReadwrite: "git-lfs-rw@roundtable-dev-abe2.iam.gserviceaccount.com" diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml new file mode 100644 index 0000000000..9fcb5a17c2 --- /dev/null +++ b/applications/giftless/values-roundtable-prod.yaml @@ -0,0 +1,16 @@ +image: + pullPolicy: "Always" + repository: "docker.io/lsstsqre/giftless" + tag: "upstream" +server: + readonly: + replicas: 3 +ingress: + hostname: + readonly: "git-lfs.lsst.cloud" + readwrite: "git-lfs-rw.lsst.cloud" +config: + storageProjectName: "data-curation-prod-fbdb" + bucketName: "rubin-us-central1-git-lfs" + serviceAccountReadonly: "git-lfs-ro@roundtable-prod-f6fd.iam.gserviceaccount.com" + serviceAccountReadwrite: "git-lfs-rw@roundtable-prod-f6fd.iam.gserviceaccount.com" diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 0de26a224b..c1edd2c65b 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -48,19 +48,37 @@ ingress: server: # -- Turn on debugging mode debug: false - # -- Number of processes for server - processes: 2 - # -- Number of threads per process - threads: 2 + # -- Values for readonly server + readonly: + # -- Number of replicas for readonly server + replicas: 1 + # -- Number of processes for readonly server + processes: 2 + # -- Number of threads per readonly process + threads: 2 + # -- Values for readwrite server + readwrite: + # -- Number of replicas for readwrite server + replicas: 1 + # -- Number of processes for readwrite server + processes: 2 + # -- Number of threads per readwrite process + threads: 2 # -- Configuration for giftless server config: - # -- Project name for GCS LFS Object bucket + # -- Project name for GCS LFS Object Storage bucket # @default -- Must be overridden in environment-specific values file - projectName: "" - # -- Bucket name for GCS LFS Object bucket + storageProjectName: "" + # -- Bucket name for GCS LFS Object Storage bucket # @default -- Must be overridden in environment-specific values file bucketName: "" + # -- Read-only service account name for GCS LFS Object Storage bucket + # @default -- Must be overridden in environment-specific values file + serviceAccountReadonly: "" + # -- Read-write service account name for GCS LFS Object Storage bucket + # @default -- Must be overridden in environment-specific values file + serviceAccountReadwrite: "" global: # -- Base path for Vault secrets diff --git a/applications/hips/values-minikube.yaml b/applications/hips/values-minikube.yaml deleted file mode 100644 index 44e7bb33bc..0000000000 --- a/applications/hips/values-minikube.yaml +++ /dev/null 
@@ -1,4 +0,0 @@ -config: - gcsProject: "bogus" - gcsBucket: "bogus" - serviceAccount: "bogus" diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index a99c6145ca..0e0d226198 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.8.0 + version: 4.9.0 repository: https://kubernetes.github.io/ingress-nginx diff --git a/applications/jira-data-proxy/.helmignore b/applications/jira-data-proxy/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/jira-data-proxy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/jira-data-proxy/Chart.yaml b/applications/jira-data-proxy/Chart.yaml new file mode 100644 index 0000000000..bf316d6622 --- /dev/null +++ b/applications/jira-data-proxy/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: "1.0.0" +description: Jira API read-only proxy for Times Square users. +name: jira-data-proxy +sources: + - https://github.com/lsst-sqre/jira-data-proxy +type: application +version: 1.0.0 diff --git a/applications/jira-data-proxy/README.md b/applications/jira-data-proxy/README.md new file mode 100644 index 0000000000..925984cc76 --- /dev/null +++ b/applications/jira-data-proxy/README.md @@ -0,0 +1,32 @@ +# jira-data-proxy + +Jira API read-only proxy for Times Square users. + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the jira-data-proxy deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of jira-data-proxy deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of jira-data-proxy deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of jira-data-proxy deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of jira-data-proxy deployment pods | +| config.jiraUrl | string | `"https://jira.lsstcorp.org/"` | Jira base URL | +| config.logLevel | string | `"info"` | Logging level | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the jira-data-proxy image | +| image.repository | string | `"ghcr.io/lsst-sqre/jira-data-proxy"` | Image to use in the jira-data-proxy deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| ingress.path | string | `"/jira-data-proxy"` | Path prefix where jira-data-proxy is served | +| nodeSelector | object | `{}` | Node selection rules for the jira-data-proxy deployment pod | +| podAnnotations | object | `{}` | Annotations for the jira-data-proxy deployment pod | +| replicaCount | int | `2` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the jira-data-proxy deployment pod | +| tolerations | list | `[]` | Tolerations for the jira-data-proxy deployment pod | diff --git a/applications/jira-data-proxy/secrets.yaml b/applications/jira-data-proxy/secrets.yaml new file mode 100644 index 0000000000..de40ddcac1 --- /dev/null +++ b/applications/jira-data-proxy/secrets.yaml @@ -0,0 +1,4 @@ +JIRA_USERNAME: + description: JIRA account username. +JIRA_PASSWORD: + description: JIRA account password. diff --git a/applications/onepassword-connect-dev/templates/_helpers.tpl b/applications/jira-data-proxy/templates/_helpers.tpl similarity index 58% rename from applications/onepassword-connect-dev/templates/_helpers.tpl rename to applications/jira-data-proxy/templates/_helpers.tpl index 368309ecc2..4630659730 100644 --- a/applications/onepassword-connect-dev/templates/_helpers.tpl +++ b/applications/jira-data-proxy/templates/_helpers.tpl @@ -1,16 +1,16 @@ {{/* Create chart name and version as used by the chart label. */}} -{{- define "onepassword-connect-dev.chart" -}} +{{- define "jira-data-proxy.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} -{{- define "onepassword-connect-dev.labels" -}} -helm.sh/chart: {{ include "onepassword-connect-dev.chart" . }} -{{ include "onepassword-connect-dev.selectorLabels" . }} +{{- define "jira-data-proxy.labels" -}} +helm.sh/chart: {{ include "jira-data-proxy.chart" . }} +{{ include "jira-data-proxy.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -20,7 +20,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "onepassword-connect-dev.selectorLabels" -}} -app.kubernetes.io/name: "onepassword-connect-dev" +{{- define "jira-data-proxy.selectorLabels" -}} +app.kubernetes.io/name: "jira-data-proxy" app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} diff --git a/applications/jira-data-proxy/templates/configmap.yaml b/applications/jira-data-proxy/templates/configmap.yaml new file mode 100644 index 0000000000..2b7c79a267 --- /dev/null +++ b/applications/jira-data-proxy/templates/configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +data: + SAFIR_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + SAFIR_PATH_PREFIX: {{ .Values.ingress.path | quote }} + JIRA_BASE_URL: {{ .Values.config.jiraUrl | quote }} diff --git a/applications/jira-data-proxy/templates/deployment.yaml b/applications/jira-data-proxy/templates/deployment.yaml new file mode 100644 index 0000000000..a5325f7dcc --- /dev/null +++ b/applications/jira-data-proxy/templates/deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . 
| nindent 4 }} + app.kubernetes.io/component: "server" + app.kubernetes.io/part-of: "jira-data-proxy" +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "jira-data-proxy.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "jira-data-proxy.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: "server" + app.kubernetes.io/part-of: "jira-data-proxy" + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + envFrom: + - configMapRef: + name: "jira-data-proxy" + env: + - name: "JIRA_USERNAME" + valueFrom: + secretKeyRef: + name: "jira-data-proxy" + key: "JIRA_USERNAME" + - name: "JIRA_PASSWORD" + valueFrom: + secretKeyRef: + name: "jira-data-proxy" + key: "JIRA_PASSWORD" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/jira-data-proxy/templates/hpa.yaml b/applications/jira-data-proxy/templates/hpa.yaml new file mode 100644 index 0000000000..1b3370740e --- /dev/null +++ b/applications/jira-data-proxy/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "jira-data-proxy" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/moneypenny/templates/ingress.yaml b/applications/jira-data-proxy/templates/ingress.yaml similarity index 52% rename from applications/moneypenny/templates/ingress.yaml rename to applications/jira-data-proxy/templates/ingress.yaml index 566f195cd8..771d96fc5e 100644 --- a/applications/moneypenny/templates/ingress.yaml +++ b/applications/jira-data-proxy/templates/ingress.yaml @@ -1,31 +1,31 @@ apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: - name: {{ template "moneypenny.fullname" . }} + name: "jira-data-proxy" labels: - {{- include "moneypenny.labels" . | nindent 4 }} + {{- include "jira-data-proxy.labels" . 
| nindent 4 }} config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: all: - - "admin:provision" + - "exec:notebook" + loginRedirect: false # endpoint is for API use only template: metadata: - name: {{ template "moneypenny.fullname" . }} + name: "jira-data-proxy" + {{- with .Values.ingress.annotations }} annotations: - nginx.ingress.kubernetes.io/proxy-read-timeout: "310" - {{- with .Values.ingress.annotations }} {{- toYaml . | nindent 6 }} - {{- end }} + {{- end }} spec: rules: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/moneypenny" - pathType: Prefix + - path: {{ .Values.ingress.path | quote }} + pathType: "Prefix" backend: service: - name: {{ include "moneypenny.fullname" . }} + name: "jira-data-proxy" port: number: 8080 diff --git a/applications/moneypenny/templates/networkpolicy.yaml b/applications/jira-data-proxy/templates/networkpolicy.yaml similarity index 72% rename from applications/moneypenny/templates/networkpolicy.yaml rename to applications/jira-data-proxy/templates/networkpolicy.yaml index 850f72ad2a..affb92cc0e 100644 --- a/applications/moneypenny/templates/networkpolicy.yaml +++ b/applications/jira-data-proxy/templates/networkpolicy.yaml @@ -1,13 +1,11 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: {{ template "moneypenny.fullname" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} + name: "jira-data-proxy" spec: podSelector: matchLabels: - {{- include "moneypenny.selectorLabels" . | nindent 6 }} + {{- include "jira-data-proxy.selectorLabels" . | nindent 6 }} policyTypes: - Ingress ingress: diff --git a/applications/jira-data-proxy/templates/service.yaml b/applications/jira-data-proxy/templates/service.yaml new file mode 100644 index 0000000000..93e189e821 --- /dev/null +++ b/applications/jira-data-proxy/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "jira-data-proxy.selectorLabels" . | nindent 4 }} diff --git a/applications/jira-data-proxy/templates/vault-secret.yaml b/applications/jira-data-proxy/templates/vault-secret.yaml new file mode 100644 index 0000000000..609a812e36 --- /dev/null +++ b/applications/jira-data-proxy/templates/vault-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/jira-data-proxy" + type: Opaque diff --git a/applications/jira-data-proxy/values-idfdev.yaml b/applications/jira-data-proxy/values-idfdev.yaml new file mode 100644 index 0000000000..d31626eed3 --- /dev/null +++ b/applications/jira-data-proxy/values-idfdev.yaml @@ -0,0 +1,4 @@ +image: + pullPolicy: Always +config: + logLevel: "DEBUG" diff --git a/applications/moneypenny/values-minikube.yaml b/applications/jira-data-proxy/values-usdfdev.yaml similarity index 100% rename from applications/moneypenny/values-minikube.yaml rename to applications/jira-data-proxy/values-usdfdev.yaml diff --git a/applications/jira-data-proxy/values.yaml b/applications/jira-data-proxy/values.yaml new file mode 100644 index 0000000000..4002a96f0e --- /dev/null +++ b/applications/jira-data-proxy/values.yaml @@ -0,0 +1,74 @@ +# Default values for jira-data-proxy. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +config: + # -- Logging level + logLevel: "info" + + # -- Jira base URL + jiraUrl: "https://jira.lsstcorp.org/" + +# -- Number of web deployment pods to start +replicaCount: 2 + +image: + # -- Image to use in the jira-data-proxy deployment + repository: "ghcr.io/lsst-sqre/jira-data-proxy" + + # -- Pull policy for the jira-data-proxy image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + + # -- Path prefix where jira-data-proxy is served + path: "/jira-data-proxy" + +autoscaling: + # -- Enable autoscaling of jira-data-proxy deployment + enabled: false + + # -- Minimum number of jira-data-proxy deployment pods + minReplicas: 1 + + # -- Maximum number of jira-data-proxy deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of jira-data-proxy deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the jira-data-proxy deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the jira-data-proxy deployment pod +resources: {} + +# -- Node selection rules for the jira-data-proxy deployment pod +nodeSelector: {} + +# -- Tolerations for the jira-data-proxy deployment pod +tolerations: [] + +# -- Affinity rules for the jira-data-proxy deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/applications/kubernetes-replicator/Chart.yaml b/applications/kubernetes-replicator/Chart.yaml index dbb914dee2..0fa53d5822 100644 --- a/applications/kubernetes-replicator/Chart.yaml +++ b/applications/kubernetes-replicator/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/mittwald/kubernetes-replicator dependencies: - name: kubernetes-replicator - version: 2.9.1 + version: 2.9.2 repository: https://helm.mittwald.de diff --git a/applications/linters/secrets.yaml b/applications/linters/secrets.yaml new file mode 100644 index 0000000000..cf3da81fc7 --- /dev/null +++ b/applications/linters/secrets.yaml @@ -0,0 +1,12 @@ +aws: + description: >- + Shell commands to set the environment variables required for + authentication to AWS. + onepassword: + encoded: true +slack: + description: >- + Shell commands to set the environment variable pointing to a Slack + incoming webhook for reporting status. 
+ onepassword: + encoded: true diff --git a/applications/livetap/Chart.yaml b/applications/livetap/Chart.yaml index 8d2668f59b..650616a8d9 100644 --- a/applications/livetap/Chart.yaml +++ b/applications/livetap/Chart.yaml @@ -9,4 +9,4 @@ sources: dependencies: - name: cadc-tap version: 1.0.0 - repository: "file://../../charts/cadc-tap/" + repository: "file://../../charts/cadc-tap" diff --git a/applications/livetap/values-usdfint.yaml b/applications/livetap/values-usdfint.yaml new file mode 100644 index 0000000000..7d89dafb89 --- /dev/null +++ b/applications/livetap/values-usdfint.yaml @@ -0,0 +1,4 @@ +cadc-tap: + tapSchema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-livetap" diff --git a/applications/love/Chart.yaml b/applications/love/Chart.yaml new file mode 100644 index 0000000000..246cedd0e5 --- /dev/null +++ b/applications/love/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +name: love +version: 1.0.0 +description: Deployment for the LSST Operators Visualization Environment +dependencies: +- name: csc_collector + version: 1.0.0 + repository: file://../../charts/csc_collector +- name: csc + alias: love-commander + version: 1.0.0 + repository: file://../../charts/csc +- name: love-manager + version: 1.0.0 +- name: love-nginx + version: 1.0.0 +- name: love-producer + version: 1.0.0 diff --git a/applications/love/README.md b/applications/love/README.md new file mode 100644 index 0000000000..e875e93406 --- /dev/null +++ b/applications/love/README.md @@ -0,0 +1,184 @@ +# love + +Deployment for the LSST Operators Visualization Environment + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| love-manager.envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | +| love-manager.manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | +| love-manager.manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| love-manager.manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| love-manager.manager.frontend.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.manager.frontend.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| love-manager.manager.frontend.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| love-manager.manager.frontend.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| love-manager.manager.frontend.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| love-manager.manager.frontend.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| love-manager.manager.frontend.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| love-manager.manager.frontend.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| love-manager.manager.frontend.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one specified in the LOVE commander chart | +| love-manager.manager.frontend.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service.
Must match the one specified in the LOVE commander chart | +| love-manager.manager.frontend.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager frontend | +| love-manager.manager.frontend.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| love-manager.manager.frontend.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager frontend | +| love-manager.manager.frontend.env.DB_PORT | int | `5432` | The port for the database service | +| love-manager.manager.frontend.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager frontend | +| love-manager.manager.frontend.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| love-manager.manager.frontend.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| love-manager.manager.frontend.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | +| love-manager.manager.frontend.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.manager.frontend.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| love-manager.manager.frontend.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| love-manager.manager.frontend.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| love-manager.manager.frontend.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| love-manager.manager.frontend.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| love-manager.manager.frontend.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.manager.frontend.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| love-manager.manager.frontend.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager frontend admin user password secret key name | +| love-manager.manager.frontend.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager frontend authlist_user password secret key name | +| love-manager.manager.frontend.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager frontend LDAP binding password secret key name | +| love-manager.manager.frontend.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager frontend cmd_user user password secret key name | +| love-manager.manager.frontend.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.manager.frontend.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager frontend process connection password secret key name | +| love-manager.manager.frontend.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name.
Must match `redis.envSecrets.REDIS_PASS` | +| love-manager.manager.frontend.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager frontend secret secret key name | +| love-manager.manager.frontend.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager frontend user user password secret key name | +| love-manager.manager.frontend.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| love-manager.manager.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager frontend image | +| love-manager.manager.frontend.image.repository | string | `"lsstts/love-manager"` | The LOVE manager frontend image to use | +| love-manager.manager.frontend.nodeSelector | object | `{}` | Node selection rules for the LOVE manager frontend pods | +| love-manager.manager.frontend.ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.frontend.ports.node | int | `30000` | The port on the node for normal communications | +| love-manager.manager.frontend.readinessProbe | object | `{}` | Configuration for the LOVE manager frontend pods readiness probe | +| love-manager.manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas | +| love-manager.manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods | +| love-manager.manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods | +| love-manager.manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | +| love-manager.manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| love-manager.manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| love-manager.manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| love-manager.manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| love-manager.manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| love-manager.manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| love-manager.manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| love-manager.manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| love-manager.manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| love-manager.manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one specified in the LOVE commander chart | +| love-manager.manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service.
Must match the one specified in the LOVE commander chart | +| love-manager.manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | +| love-manager.manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| love-manager.manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | +| love-manager.manager.producers.env.DB_PORT | int | `5432` | The port for the database service | +| love-manager.manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | +| love-manager.manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | +| love-manager.manager.producers.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| love-manager.manager.producers.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| love-manager.manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| love-manager.manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| love-manager.manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| love-manager.manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| love-manager.manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| love-manager.manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.manager.producers.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| love-manager.manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | +| love-manager.manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | +| love-manager.manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | +| love-manager.manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | +| love-manager.manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | +| love-manager.manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name.
Must match `redis.envSecrets.REDIS_PASS` | +| love-manager.manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | +| love-manager.manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | +| love-manager.manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| love-manager.manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | +| love-manager.manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | +| love-manager.manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | +| love-manager.manager.producers.ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.producers.ports.node | int | `30000` | The port on the node for normal communcations | +| love-manager.manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | +| love-manager.manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | +| love-manager.manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods | +| love-manager.manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| love-manager.namespace | string | `"love"` | The overall namespace for the application | +| love-manager.redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | +| love-manager.redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | +| love-manager.redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name | +| love-manager.redis.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the redis image | +| love-manager.redis.image.repository | string | `"redis"` | The redis image to use | +| love-manager.redis.image.tag | string | `"5.0.3"` | The tag to use for the redis image | +| love-manager.redis.nodeSelector | object | `{}` | Node selection rules for the LOVE redis pods | +| love-manager.redis.port | int | `6379` | The redis port number | +| love-manager.redis.resources | object | `{}` | Resource specifications for the LOVE redis pods | +| love-manager.redis.tolerations | list | `[]` | Toleration specifications for the LOVE redis pods | +| love-manager.secret_path | string | `"lsst.local"` | The site-specific path to find Vault secrets | +| love-manager.viewBackup.affinity | object | `{}` | Affinity rules for the LOVE view backup pods | +| love-manager.viewBackup.enabled | bool | `false` | Whether view backup is active | +| love-manager.viewBackup.env | object | `{}` | Place to specify additional environment variables for the view backup job | +| love-manager.viewBackup.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| love-manager.viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image | +| love-manager.viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use | +| love-manager.viewBackup.image.tag | string | `"develop"` | 
The tag to use for the view backup image | +| love-manager.viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods | +| love-manager.viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods | +| love-manager.viewBackup.restartPolicy | string | `"Never"` | The restart policy type for the view backup cronjob | +| love-manager.viewBackup.schedule | string | `"0 0 1 1 *"` | The view backup job schedule in cron format | +| love-manager.viewBackup.tolerations | list | `[]` | Toleration specifications for the LOVE view backup pods | +| love-manager.viewBackup.ttlSecondsAfterFinished | string | `""` | Time after view backup job finishes before deletion (ALPHA) | +| love-nginx.affinity | object | `{}` | Affinity rules for the NGINX pod | +| love-nginx.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the NGINX image | +| love-nginx.image.repository | string | `"nginx"` | The NGINX image to use | +| love-nginx.image.tag | string | `"1.14.2"` | The tag to use for the NGINX image | +| love-nginx.imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | +| love-nginx.ingress.annotations | object | `{}` | Annotations for the NGINX ingress | +| love-nginx.ingress.className | string | `"nginx"` | Assign the Ingress class name | +| love-nginx.ingress.hostname | string | `"love.local"` | Hostname for the NGINX ingress | +| love-nginx.ingress.httpPath | string | `"/"` | Path name associated with the NGINX ingress | +| love-nginx.ingress.pathType | string | `""` | Set the Kubernetes path type for the NGINX ingress | +| love-nginx.initContainers.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the frontend image | +| love-nginx.initContainers.frontend.image.repository | string | `"lsstts/love-frontend"` | The frontend image to use | +| love-nginx.initContainers.frontend.image.tag | string | `nil` | | +| love-nginx.initContainers.manager.command | list | `["/bin/sh","-c","mkdir -p /usr/src/love-manager/media/thumbnails; mkdir -p /usr/src/love-manager/media/configs; cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -uv /usr/src/love/manager/ui_framework/fixtures/thumbnails/* /usr/src/love-manager/media/thumbnails; cp -uv /usr/src/love/manager/api/fixtures/configs/* /usr/src/love-manager/media/configs"]` | The command to execute for the love-manager static content | +| love-nginx.initContainers.manager.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the love-manager static content image | +| love-nginx.initContainers.manager.image.repository | string | `"lsstts/love-manager"` | The static love-manager content image to use | +| love-nginx.initContainers.manager.image.tag | string | `nil` | | +| love-nginx.loveConfig | string | `"{\n \"alarms\": {\n \"minSeveritySound\": \"serious\",\n \"minSeverityNotification\": \"warning\"\n },\n \"camFeeds\": {\n \"generic\": \"/gencam\",\n \"allSky\": \"/gencam\"\n }\n}\n"` | Configuration specificiation for the LOVE service | +| love-nginx.namespace | string | `"love"` | The overall namespace for the application | +| love-nginx.nginxConfig | string | `"server {\n listen 80;\n server_name localhost;\n location / {\n root /usr/src/love-frontend;\n try_files $uri$args $uri$args/ $uri/ /index.html;\n }\n location /manager {\n proxy_pass 
http://love-manager-service:8000;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header Host $host;\n proxy_redirect off;\n }\n location /manager/static {\n alias /usr/src/love-manager/static;\n }\n location /manager/media {\n alias /usr/src/love-manager/media;\n }\n}\n"` | Configuration specification for the NGINX service | +| love-nginx.nodeSelector | object | `{}` | Node selection rules for the NGINX pod | +| love-nginx.ports.container | int | `80` | Container port for the NGINX service | +| love-nginx.ports.node | int | `30000` | Node port for the NGINX service | +| love-nginx.resources | object | `{}` | Resource specifications for the NGINX pod | +| love-nginx.serviceType | string | `"ClusterIP"` | Service type specification | +| love-nginx.staticStore.accessMode | string | `"ReadWriteMany"` | The access mode for the NGINX static store | +| love-nginx.staticStore.claimSize | string | `"2Gi"` | The size of the NGINX static store request | +| love-nginx.staticStore.name | string | `"love-nginx-static"` | Label for the NGINX static store | +| love-nginx.staticStore.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | +| love-nginx.tolerations | list | `[]` | Toleration specifications for the NGINX pod | +| love-producer.affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | +| love-producer.annotations | object | `{}` | This allows for the specification of pod annotations. | +| love-producer.env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | +| love-producer.envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | +| love-producer.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | +| love-producer.image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | +| love-producer.image.tag | string | `nil` | | +| love-producer.nodeSelector | object | `{}` | Node selection rules applied to all LOVE producer pods | +| love-producer.producers | obj | `[]` | This sections sets the list of producers to use. The producers should be specified like: _name_: The identifying name for the CSC producer _csc_: _CSC name:index_ The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | +| love-producer.replicaCount | int | `1` | Set the replica count for the LOVE producers | +| love-producer.resources | object | `{}` | Resource specifications applied to all LOVE producer pods | +| love-producer.tolerations | list | `[]` | Toleration specifications applied to all LOVE producer pods | diff --git a/applications/love/charts/love-manager/Chart.yaml b/applications/love/charts/love-manager/Chart.yaml new file mode 100644 index 0000000000..cee16201a6 --- /dev/null +++ b/applications/love/charts/love-manager/Chart.yaml @@ -0,0 +1,4 @@ +name: love-manager +apiVersion: v2 +version: 1.0.0 +description: Helm chart for the LOVE manager service. 
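The `love-producer.producers` entry documented in the table above is easiest to read as a concrete values fragment. The sketch below is illustrative only: the producer names and the `CSC name:index` pairs are hypothetical placeholders, and the per-producer overrides shown are not chart defaults.

```yaml
love-producer:
  producers:
    # Minimal entry: an identifying name plus the CSC it follows.
    - name: ataos
      csc: ATAOS:0
    # Entry exercising the optional per-producer overrides.
    - name: mtm1m3
      csc: MTM1M3:0
      resources:
        requests:
          cpu: 500m
          memory: 512Mi
      nodeSelector:
        kubernetes.io/hostname: love-node-1
      tolerations: []
      affinity: {}
```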
diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md
new file mode 100644
index 0000000000..4bb92383ae
--- /dev/null
+++ b/applications/love/charts/love-manager/README.md
@@ -0,0 +1,130 @@
+# love-manager
+
+Helm chart for the LOVE manager service.
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets |
+| manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods |
+| manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active |
+| manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas |
+| manager.frontend.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas |
+| manager.frontend.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods |
+| manager.frontend.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods |
+| manager.frontend.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling |
+| manager.frontend.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling |
+| manager.frontend.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server |
+| manager.frontend.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server |
+| manager.frontend.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server |
+| manager.frontend.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one specified in the LOVE commander chart |
+| manager.frontend.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one specified in the LOVE commander chart |
+| manager.frontend.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager frontend |
+| manager.frontend.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service |
+| manager.frontend.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager frontend |
+| manager.frontend.env.DB_PORT | int | `5432` | The port for the database service |
+| manager.frontend.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager frontend |
+| manager.frontend.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance |
+| manager.frontend.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID |
+| manager.frontend.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host |
+| manager.frontend.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run |
+| manager.frontend.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance |
+| manager.frontend.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service |
+| manager.frontend.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service |
+| manager.frontend.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service |
+| manager.frontend.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage |
+| manager.frontend.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE |
+| manager.frontend.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE |
+| manager.frontend.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager frontend admin user password secret key name |
+| manager.frontend.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager frontend authlist_user password secret key name |
+| manager.frontend.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager frontend LDAP binding password secret key name |
+| manager.frontend.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager frontend cmd_user user password secret key name |
+| manager.frontend.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` |
+| manager.frontend.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager frontend process connection password secret key name |
+| manager.frontend.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` |
+| manager.frontend.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager frontend secret secret key name |
+| manager.frontend.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager frontend user user password secret key name |
+| manager.frontend.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled |
+| manager.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager frontend image |
+| manager.frontend.image.repository | string | `"lsstts/love-manager"` | The LOVE manager frontend image to use |
+| manager.frontend.nodeSelector | object | `{}` | Node selection rules for the LOVE manager frontend pods |
+| manager.frontend.ports.container | int | `8000` | The port on the container for normal communications |
+| manager.frontend.ports.node | int | `30000` | The port on the node for normal communications |
+| manager.frontend.readinessProbe | object | `{}` | Configuration for the LOVE manager frontend pods readiness probe |
+| manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas |
+| manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods |
+| manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods |
+| manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods |
+| manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active |
+| manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas |
+| manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas |
+| manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods |
+| manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods |
+| manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling |
+| manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling |
+| manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server |
+| manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server |
+| manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server |
+| manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one specified in the LOVE commander chart |
+| manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one specified in the LOVE commander chart |
+| manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers |
+| manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service |
+| manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers |
+| manager.producers.env.DB_PORT | int | `5432` | The port for the database service |
+| manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers |
+| manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE manager producers not query the commander |
+| manager.producers.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance |
+| manager.producers.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID |
+| manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run |
+| manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance |
+| manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service |
+| manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service |
+| manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service |
+| manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage |
+| manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE |
+| manager.producers.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE |
+| manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name |
+| manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name |
+| manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name |
+| manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name |
+| manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` |
+| manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name |
+| manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` |
+| manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name |
+| manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name |
+| manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled |
+| manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image |
+| manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use |
+| manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods |
+| manager.producers.ports.container | int | `8000` | The port on the container for normal communications |
+| manager.producers.ports.node | int | `30000` | The port on the node for normal communications |
+| manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe |
+| manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas |
+| manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods |
+| manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods |
+| namespace | string | `"love"` | The overall namespace for the application |
+| redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods |
+| redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service |
+| redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name |
+| redis.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the redis image |
+| redis.image.repository | string | `"redis"` | The redis image to use |
+| redis.image.tag | string | `"5.0.3"` | The tag to use for the redis image |
+| redis.nodeSelector | object | `{}` | Node selection rules for the LOVE redis pods |
+| redis.port | int | `6379` | The redis port number |
+| redis.resources | object | `{}` | Resource specifications for the LOVE redis pods |
+| redis.tolerations | list | `[]` | Toleration specifications for the LOVE redis pods |
+| secret_path | string | `"lsst.local"` | The site-specific path to find Vault secrets |
+| viewBackup.affinity | object | `{}` | Affinity rules for the LOVE view backup pods |
+| viewBackup.enabled | bool | `false` | Whether view backup is active |
+| viewBackup.env | object | `{}` | Place to specify additional environment variables for the view backup job |
+| viewBackup.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled |
+| viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image |
+| viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use |
+| viewBackup.image.tag | string | `"develop"` | The tag to use for the view backup image |
+| viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods |
+| viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods |
+| viewBackup.restartPolicy | string | `"Never"` | The restart policy type for the view backup cronjob |
+| viewBackup.schedule | string | `"0 0 1 1 *"` | The view backup job schedule in cron format |
+| viewBackup.tolerations | list | `[]` | Toleration specifications for the LOVE view backup pods |
+| viewBackup.ttlSecondsAfterFinished | string | `""` | Time after view backup job finishes before deletion (ALPHA) |
diff --git a/applications/love/charts/love-manager/templates/_helpers.tpl b/applications/love/charts/love-manager/templates/_helpers.tpl
new file mode 100644
index 0000000000..df3787c60b
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/_helpers.tpl
@@ -0,0 +1,220 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "love-manager.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "love-manager.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains .Release.Name $name }}
+{{- $name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Manager frontend fullname
+*/}}
+{{- define "love-manager-frontend.fullname" -}}
+{{ include "love-manager.fullname" . }}-frontend
+{{- end }}
+
+{{/*
+Manager producers fullname
+*/}}
+{{- define "love-manager-producers.fullname" -}}
+{{ include "love-manager.fullname" . }}-producers
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "love-manager.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "love-manager.labels" -}}
+helm.sh/chart: {{ include "love-manager.chart" . }}
+{{ include "love-manager.selectorLabels" . }}
+{{- end }}
+
+{{/*
+Manager Frontend Common labels
+*/}}
+{{- define "love-manager-frontend.labels" -}}
+helm.sh/chart: {{ include "love-manager.chart" . }}
+{{ include "love-manager-frontend.selectorLabels" . }}
+{{- end }}
+
+{{/*
+Manager Producers Common labels
+*/}}
+{{- define "love-manager-producers.labels" -}}
+helm.sh/chart: {{ include "love-manager.chart" . }}
+{{ include "love-manager-producers.selectorLabels" . }}
+{{- end }}
+
+{{/*
+Common Selector labels
+*/}}
+{{- define "love-manager.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "love-manager.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Manager Frontend Selector labels
+*/}}
+{{- define "love-manager-frontend.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "love-manager.name" . }}
+app.kubernetes.io/instance: {{ include "love-manager.name" . }}-frontend
+{{- end }}
+
+{{/*
+Manager Producers Selector labels
+*/}}
+{{- define "love-manager-producers.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "love-manager.name" . }}
+app.kubernetes.io/instance: {{ include "love-manager.name" . }}-producers
+{{- end }}
+
+{{/*
+Handle environment parameters
+*/}}
+{{- define "helpers.envFromList" -}}
+{{- $secretName := .secretName }}
+{{- range $var, $value := .env }}
+{{- $item := dict "var" $var "value" $value "secretName" $secretName }}
+{{ include "helpers.envType" $item }}
+{{- end }}
+{{- end }}
+
+{{/*
+Determine type of environment
+*/}}
+{{- define "helpers.envType" -}}
+- name: {{ .var }}
+{{- if ne .secretName "" }}
+  valueFrom:
+    secretKeyRef:
+      name: {{ .secretName }}-secrets
+      key: {{ .value }}
+{{- else }}
+  value: {{ .value | quote }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name for database.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "love-manager.database.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains .Release.Name $name }}
+{{- printf "%s-database" $name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s-database" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Common labels - database
+*/}}
+{{- define "love-manager.database.labels" -}}
+helm.sh/chart: {{ include "love-manager.chart" . }}
+{{ include "love-manager.database.selectorLabels" . }}
+{{- end }}
+
+{{/*
+Selector labels - database
+*/}}
+{{- define "love-manager.database.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "love-manager.name" . }}
+app.kubernetes.io/instance: {{ include "love-manager.database.fullname" . }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name for redis.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "love-manager.redis.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains .Release.Name $name }}
+{{- printf "%s-redis" $name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s-redis" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Common labels - redis
+*/}}
+{{- define "love-manager.redis.labels" -}}
+helm.sh/chart: {{ include "love-manager.chart" . }}
+{{ include "love-manager.redis.selectorLabels" . }}
+{{- end }}
+
+{{/*
+Selector labels - redis
+*/}}
+{{- define "love-manager.redis.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "love-manager.name" . }}
+app.kubernetes.io/instance: {{ include "love-manager.redis.fullname" . }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name for the view backup.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "love-manager.view-backup.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains .Release.Name $name }}
+{{- printf "%s-view-backup" $name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s-view-backup" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Common labels - view backup
+*/}}
+{{- define "love-manager.view-backup.labels" -}}
+helm.sh/chart: {{ include "love-manager.chart" . }}
+{{ include "love-manager.view-backup.selectorLabels" . }}
+{{- end }}
+
+{{/*
+Selector labels - view backup
+*/}}
+{{- define "love-manager.view-backup.selectorLabels" -}}
+type: love-manager-view-backup-job
+{{- end }}
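It helps to see what `helpers.envFromList` and `helpers.envType` above actually emit, since they drive every container `env:` block in this chart: entries rendered with an empty `secretName` become literal values, while entries rendered with a non-empty `secretName` become `secretKeyRef`s against a `<secretName>-secrets` secret. A sketch of the rendered output for one variable of each kind, assuming the chart default `envSecretKeyName: love`:

```yaml
# From env: {LOVE_SITE: "local"} rendered with secretName ""
- name: LOVE_SITE
  value: "local"
# From envSecrets: {DB_PASS: "db-pass"} rendered with secretName "love"
- name: DB_PASS
  valueFrom:
    secretKeyRef:
      name: love-secrets
      key: db-pass
```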
diff --git a/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml b/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml
new file mode 100644
index 0000000000..f57685aebe
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "love-manager-frontend.fullname" . }}
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+  labels:
+    {{- include "love-manager-frontend.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "love-manager-frontend.selectorLabels" . | nindent 6 }}
+  {{- if not .Values.manager.frontend.autoscaling.enabled }}
+  replicas: {{ .Values.manager.frontend.replicas }}
+  {{- end }}
+  template:
+    metadata:
+      labels:
+        {{- include "love-manager-frontend.selectorLabels" . | nindent 8 }}
+    spec:
+      containers:
+      - name: {{ include "love-manager-frontend.fullname" . }}
+        {{- $imageTag := .Values.manager.frontend.image.tag | default $.Values.global.controlSystem.imageTag }}
+        image: "{{ .Values.manager.frontend.image.repository }}:{{ $imageTag }}"
+        imagePullPolicy: {{ .Values.manager.frontend.image.pullPolicy }}
+        ports:
+        - containerPort: {{ .Values.manager.frontend.ports.container }}
+        env:
+          {{- $data := dict "env" .Values.manager.frontend.env "secretName" "" }}
+          {{- include "helpers.envFromList" $data | indent 10 }}
+          {{- if .Values.manager.frontend.envSecrets }}
+          {{- $data := dict "secretName" .Values.envSecretKeyName "env" .Values.manager.frontend.envSecrets }}
+          {{- include "helpers.envFromList" $data | indent 10 }}
+          {{- end }}
+        {{- with $.Values.manager.frontend.resources }}
+        resources:
+          {{- toYaml $.Values.manager.frontend.resources | nindent 10 }}
+        {{- end }}
+        {{- with $.Values.manager.frontend.readinessProbe }}
+        readinessProbe:
+          {{- toYaml $.Values.manager.frontend.readinessProbe | nindent 10 }}
+        {{- end }}
+      imagePullSecrets:
+      - name: nexus3-docker
+      {{- with $.Values.manager.frontend.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.manager.frontend.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.manager.frontend.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/applications/love/charts/love-manager/templates/manager-frontend-hpa.yaml b/applications/love/charts/love-manager/templates/manager-frontend-hpa.yaml
new file mode 100644
index 0000000000..12153a2d64
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/manager-frontend-hpa.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.manager.frontend.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "love-manager-frontend.fullname" . }}
+  labels:
+    {{- include "love-manager-frontend.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "love-manager-frontend.fullname" . }}
+  minReplicas: {{ .Values.manager.frontend.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.manager.frontend.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.manager.frontend.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.manager.frontend.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.manager.frontend.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.manager.frontend.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+  {{- if or .Values.manager.frontend.autoscaling.scaleUpPolicy .Values.manager.frontend.autoscaling.scaleDownPolicy }}
+  behavior:
+    {{- if .Values.manager.frontend.autoscaling.scaleUpPolicy }}
+    scaleUp:
+      {{- with .Values.manager.frontend.autoscaling.scaleUpPolicy }}
+      {{- toYaml . | nindent 6 }}
+      {{- end }}
+    {{- end }}
+    {{- if .Values.manager.frontend.autoscaling.scaleDownPolicy }}
+    scaleDown:
+      {{- with .Values.manager.frontend.autoscaling.scaleDownPolicy }}
+      {{- toYaml . | nindent 6 }}
+      {{- end }}
+    {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/applications/love/charts/love-manager/templates/manager-frontend-service.yaml b/applications/love/charts/love-manager/templates/manager-frontend-service.yaml
new file mode 100644
index 0000000000..6d9e2028f5
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/manager-frontend-service.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "love-manager-frontend.fullname" . }}-service
+  namespace: {{ .Values.namespace }}
+spec:
+  selector:
+    app.kubernetes.io/instance: {{ include "love-manager-frontend.fullname" . }}
+  ports:
+  - port: {{ .Values.manager.frontend.ports.container }}
+
diff --git a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml
new file mode 100644
index 0000000000..5bb7e050f1
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "love-manager-producers.fullname" . }}
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+  labels:
+    {{- include "love-manager-producers.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "love-manager-producers.selectorLabels" . | nindent 6 }}
+  {{- if not .Values.manager.producers.autoscaling.enabled }}
+  replicas: {{ .Values.manager.producers.replicas }}
+  {{- end }}
+  template:
+    metadata:
+      labels:
+        {{- include "love-manager-producers.selectorLabels" . | nindent 8 }}
+    spec:
+      containers:
+      - name: {{ include "love-manager-producers.fullname" . }}
+        {{- $imageTag := .Values.manager.producers.image.tag | default $.Values.global.controlSystem.imageTag }}
+        image: "{{ .Values.manager.producers.image.repository }}:{{ $imageTag }}"
+        imagePullPolicy: {{ .Values.manager.producers.image.pullPolicy }}
+        ports:
+        - containerPort: {{ .Values.manager.producers.ports.container }}
+        env:
+          {{- $data := dict "env" .Values.manager.producers.env "secretName" "" }}
+          {{- include "helpers.envFromList" $data | indent 10 }}
+          {{- if .Values.manager.producers.envSecrets }}
+          {{- $data := dict "secretName" .Values.envSecretKeyName "env" .Values.manager.producers.envSecrets }}
+          {{- include "helpers.envFromList" $data | indent 10 }}
+          {{- end }}
+        {{- with $.Values.manager.producers.resources }}
+        resources:
+          {{- toYaml $.Values.manager.producers.resources | nindent 10 }}
+        {{- end }}
+        {{- with $.Values.manager.producers.readinessProbe }}
+        readinessProbe:
+          {{- toYaml $.Values.manager.producers.readinessProbe | nindent 10 }}
+        {{- end }}
+      imagePullSecrets:
+      - name: nexus3-docker
+      {{- with $.Values.manager.producers.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.manager.producers.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.manager.producers.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml
new file mode 100644
index 0000000000..a44422835b
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.manager.producers.autoscaling.enabled }}
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "love-manager-producers.fullname" . }}
+  labels:
+    {{- include "love-manager-producers.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "love-manager-producers.fullname" . }}
+  minReplicas: {{ .Values.manager.producers.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.manager.producers.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+  {{- if or .Values.manager.producers.autoscaling.scaleUpPolicy .Values.manager.producers.autoscaling.scaleDownPolicy }}
+  behavior:
+    {{- if .Values.manager.producers.autoscaling.scaleUpPolicy }}
+    scaleUp:
+      {{- with .Values.manager.producers.autoscaling.scaleUpPolicy }}
+      {{- toYaml . | nindent 6 }}
+      {{- end }}
+    {{- end }}
+    {{- if .Values.manager.producers.autoscaling.scaleDownPolicy }}
+    scaleDown:
+      {{- with .Values.manager.producers.autoscaling.scaleDownPolicy }}
+      {{- toYaml . | nindent 6 }}
+      {{- end }}
+    {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/applications/love/charts/love-manager/templates/manager-producers-service.yaml b/applications/love/charts/love-manager/templates/manager-producers-service.yaml
new file mode 100644
index 0000000000..bf90a53f9b
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/manager-producers-service.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "love-manager-producers.fullname" . }}-service
+  namespace: {{ .Values.namespace }}
+spec:
+  selector:
+    app.kubernetes.io/instance: {{ include "love-manager-producers.fullname" . }}
+  ports:
+  - port: {{ .Values.manager.producers.ports.container }}
diff --git a/applications/love/charts/love-manager/templates/redis-configmap.yaml b/applications/love/charts/love-manager/templates/redis-configmap.yaml
new file mode 100644
index 0000000000..fcff21243d
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/redis-configmap.yaml
@@ -0,0 +1,8 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: redis-conf
+data:
+  redis.conf: |
+{{ .Values.redis.config | indent 4 }}
diff --git a/applications/love/charts/love-manager/templates/redis-deployment.yaml b/applications/love/charts/love-manager/templates/redis-deployment.yaml
new file mode 100644
index 0000000000..3e27f50898
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/redis-deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "love-manager.redis.fullname" . }}
+  namespace: {{ $.Values.global.controlSystemAppNamespace }}
+  labels:
+    {{- include "love-manager.redis.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "love-manager.redis.selectorLabels" . | nindent 6 }}
+  replicas: {{ .Values.redis.replicas | default 1 }}
+  template:
+    metadata:
+      labels:
+        {{- include "love-manager.redis.selectorLabels" . | nindent 8 }}
+    spec:
+      containers:
+      - name: {{ include "love-manager.redis.fullname" . }}
+        image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}"
+        imagePullPolicy: {{ .Values.redis.image.pullPolicy }}
+        command: [ "redis-server", "/data/redis.conf", "--appendonly", "yes", "--requirepass", "$(REDIS_PASS)" ]
+        ports:
+        - containerPort: {{ .Values.redis.port }}
+        env:
+          {{- $data := dict "env" .Values.redis.env "secretName" "" }}
+          {{- include "helpers.envFromList" $data | indent 10 }}
+          {{- $data := dict "env" .Values.redis.envSecrets "secretName" .Values.envSecretKeyName }}
+          {{- include "helpers.envFromList" $data | indent 10 }}
+        volumeMounts:
+        - mountPath: /data/redis.conf
+          readOnly: true
+          name: redis-conf
+        {{- with $.Values.redis.resources }}
+        resources:
+          {{- toYaml $.Values.redis.resources | nindent 10 }}
+        {{- end }}
+      volumes:
+      - name: redis-conf
+        configMap:
+          name: redis-conf
+          items:
+          - key: redis.conf
+            path: redis.conf
+      {{- with $.Values.redis.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.redis.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.redis.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/applications/love/charts/love-manager/templates/redis-service.yaml b/applications/love/charts/love-manager/templates/redis-service.yaml
new file mode 100644
index 0000000000..5afec4bc11
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/redis-service.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "love-manager.redis.fullname" . }}-service
+  namespace: {{ $.Values.global.controlSystemAppNamespace }}
+spec:
+  selector:
+    app.kubernetes.io/instance: {{ include "love-manager.redis.fullname" . }}
+  ports:
+  - port: {{ .Values.redis.port }}
+
diff --git a/applications/love/charts/love-manager/templates/vault-secret.yaml b/applications/love/charts/love-manager/templates/vault-secret.yaml
new file mode 100644
index 0000000000..e6e927d144
--- /dev/null
+++ b/applications/love/charts/love-manager/templates/vault-secret.yaml
@@ -0,0 +1,9 @@
+---
+apiVersion: ricoberger.de/v1alpha1
+kind: VaultSecret
+metadata:
+  name: {{ $.Values.envSecretKeyName }}-secrets
+  namespace: {{ $.Values.global.controlSystemAppNamespace }}
+spec:
+  path: {{ $.Values.global.vaultSecretsPath }}/ts/software/{{ $.Values.envSecretKeyName }}
+  type: Opaque
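Concretely, with the chart default `envSecretKeyName: love`, the VaultSecret template above renders a single secret resource. The `global.controlSystemAppNamespace` and `global.vaultSecretsPath` values are supplied per environment, so the namespace and the `secret/phalanx/lsst.local` prefix in this sketch are hypothetical placeholders:

```yaml
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
  name: love-secrets
  namespace: love
spec:
  path: secret/phalanx/lsst.local/ts/software/love
  type: Opaque
```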
}} + image: "{{ .Values.viewBackup.image.repository }}:{{ .Values.viewBackup.image.tag }}" + imagePullPolicy: {{ .Values.viewBackup.image.pullPolicy }} + env: + - name: PGHOST + value: {{ .Values.manager.frontend.env.DB_HOST | quote }} + - name: PGPORT + value: {{ .Values.manager.frontend.env.DB_PORT | quote }} + - name: PGDATABASE + value: {{ .Values.manager.frontend.env.DB_NAME | quote }} + - name: PGUSER + value: {{ .Values.manager.frontend.env.DB_USER | quote }} + - name: LOVE_SITE + value: {{ .Values.manager.frontend.env.LOVE_SITE | quote }} + {{- range $env_var, $env_value := .Values.viewBackup.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.namespace }}-secrets + key: {{ .Values.manager.frontend.envSecrets.DB_PASS }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Values.namespace }}-lfa + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.namespace }}-lfa + key: aws-secret-access-key + {{- with $.Values.viewBackup.resources }} + resources: + {{- toYaml $.Values.viewBackup.resources | nindent 16 }} + {{- end }} + restartPolicy: {{ .Values.viewBackup.restartPolicy }} + {{- if $.Values.viewBackup.image.nexus3 }} + imagePullSecrets: + - name: {{ $.Values.namespace }}-{{ $.Values.viewBackup.image.nexus3 }} + {{- end }} + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 12 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml $ | nindent 12 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml $ | nindent 12 }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml new file mode 100644 index 0000000000..2b6040b4c9 --- /dev/null +++ b/applications/love/charts/love-manager/values.yaml @@ -0,0 +1,271 @@ +# -- The overall namespace for the application +namespace: love +# -- The site-specific path to find Vault secrets +secret_path: lsst.local +# -- The top-level secret key name that houses the rest of the secrets +envSecretKeyName: love +manager: + frontend: + image: + # -- The LOVE manager frontend image to use + repository: lsstts/love-manager + # -- The pull policy on the LOVE manager frontend image + pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" + ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 + env: + # -- The site tag where LOVE is being run + LOVE_SITE: local + # -- The external URL from the NGINX server for LOVE + SERVER_URL: love.lsst.local + # -- The Kubernetes sub-path for LOVE + URL_SUBPATH: /love + # -- Set the manager to use LFA storage + REMOTE_STORAGE: true + # -- Set the hostname for the Jira instance + JIRA_API_HOSTNAME: jira.lsstcorp.org + # -- Set the Jira project ID + JIRA_PROJECT_ID: 14601 + # -- Set the URL for the OLE instance + OLE_API_HOSTNAME: site.lsst.local + # -- Set the URI for the 1st LDAP server + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local + # -- Set the URI for the 2nd LDAP server + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local + # -- Set the URI for the 3rd LDAP server + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local + # -- The URL path for the LOVE producer websocket host + LOVE_PRODUCER_WEBSOCKET_HOST: love-service/manager/ws/subscription 
+ # -- Label for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_HOSTNAME: love-commander-service + # -- Port number for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_PORT: 5000 + # -- The type of database engine being used for the LOVE manager frontend + DB_ENGINE: postgresql + # -- The name of the database being used for the LOVE manager frontend + DB_NAME: love + # -- The database user needed for access from the LOVE manager frontend + DB_USER: love + # -- The name of the database service + DB_HOST: love-manager-database-service + # -- The port for the database service + DB_PORT: 5432 + # -- The name of the redis service + REDIS_HOST: love-manager-redis-service + # -- The expiration time for the redis service + REDIS_CONFIG_EXPIRY: 5 + # -- The connection capacity for the redis service + REDIS_CONFIG_CAPACITY: 5000 + envSecrets: + # -- The LOVE manager frontend secret secret key name + SECRET_KEY: manager-secret-key + # -- The LOVE manager frontend process connection password secret key name + PROCESS_CONNECTION_PASS: process-connection-pass + # -- The LOVE manager frontend admin user password secret key name + ADMIN_USER_PASS: admin-user-pass + # -- The LOVE manager frontend user user password secret key name + USER_USER_PASS: user-user-pass + # -- The LOVE manager frontend cmd_user user password secret key name + CMD_USER_PASS: cmd-user-pass + # -- The LOVE manager frontend authlist_user password secret key name + AUTHLIST_USER_PASS: authlist-user-pass + # -- The LOVE manager frontend LDAP binding password secret key name + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + # -- The database password secret key name. + # Must match `database.envSecrets.POSTGRES_PASSWORD` + DB_PASS: db-pass + # -- The redis password secret key name. 
+ # Must match `redis.envSecrets.REDIS_PASS` + REDIS_PASS: redis-pass + # -- Set the default number of LOVE manager frontend pod replicas + replicas: 1 + autoscaling: + # -- Whether automatic horizontal scaling is active + enabled: true + # -- The allowed minimum number of replicas + minReplicas: 1 + # -- The allowed maximum number of replicas + maxReplicas: 100 + # -- The percentage of CPU utilization that will trigger the scaling + targetCPUUtilizationPercentage: 80 + # -- (int) The percentage of memory utilization that will trigger the scaling + targetMemoryUtilizationPercentage: "" + # -- Policy for scaling up manager pods + scaleUpPolicy: {} + # -- Policy for scaling down manager pods + scaleDownPolicy: {} + # -- Resource specifications for the LOVE manager frontend pods + resources: {} + # -- Node selection rules for the LOVE manager frontend pods + nodeSelector: {} + # -- Toleration specifications for the LOVE manager frontend pods + tolerations: [] + # -- Affinity rules for the LOVE manager frontend pods + affinity: {} + # -- Configuration for the LOVE manager frontend pods readiness probe + readinessProbe: {} + producers: + image: + # -- The LOVE manager producers image to use + repository: lsstts/love-manager + # -- The pull policy on the LOVE manager producers image + pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" + ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communications + node: 30000 + env: + # -- The site tag where LOVE is being run + LOVE_SITE: local + # -- The external URL from the NGINX server for LOVE + SERVER_URL: love.lsst.local + # -- The Kubernetes sub-path for LOVE + URL_SUBPATH: /love + # -- Set the manager to use LFA storage + REMOTE_STORAGE: true + # -- Set the hostname for the Jira instance + JIRA_API_HOSTNAME: jira.lsstcorp.org + # -- Set the Jira project ID + JIRA_PROJECT_ID: 14601 + # -- Set the URL for the OLE instance + OLE_API_HOSTNAME: site.lsst.local + # -- Set the URI for the 1st LDAP server + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local + # -- Set the URI for the 2nd LDAP server + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local + # -- Set the URI for the 3rd LDAP server + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local + # -- Have the LOVE producer managers not query commander + HEARTBEAT_QUERY_COMMANDER: false + # -- Label for the LOVE commander service. + # Must match the one specified in the LOVE commander chart + COMMANDER_HOSTNAME: love-commander-service + # -- Port number for the LOVE commander service.
+ # Must match the one specified in the LOVE commander chart + COMMANDER_PORT: 5000 + # -- The type of database engine being used for the LOVE manager producers + DB_ENGINE: postgresql + # -- The name of the database being used for the LOVE manager producers + DB_NAME: love + # -- The database user needed for access from the LOVE manager producers + DB_USER: love + # -- The name of the database service + DB_HOST: love-manager-database-service + # -- The port for the database service + DB_PORT: 5432 + # -- The name of the redis service + REDIS_HOST: love-manager-redis-service + # -- The expiration time for the redis service + REDIS_CONFIG_EXPIRY: 5 + # -- The connection capacity for the redis service + REDIS_CONFIG_CAPACITY: 5000 + envSecrets: + # -- The LOVE manager producers secret secret key name + SECRET_KEY: manager-secret-key + # -- The LOVE manager producers process connection password secret key name + PROCESS_CONNECTION_PASS: process-connection-pass + # -- The LOVE manager producers admin user password secret key name + ADMIN_USER_PASS: admin-user-pass + # -- The LOVE manager producers user user password secret key name + USER_USER_PASS: user-user-pass + # -- The LOVE manager producers cmd_user user password secret key name + CMD_USER_PASS: cmd-user-pass + # -- The LOVE manager producers authlist_user password secret key name + AUTHLIST_USER_PASS: authlist-user-pass + # -- The LOVE manager producers LDAP binding password secret key name + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + # -- The database password secret key name. + # Must match `database.envSecrets.POSTGRES_PASSWORD` + DB_PASS: db-pass + # -- The redis password secret key name. + # Must match `redis.envSecrets.REDIS_PASS` + REDIS_PASS: redis-pass + # -- Set the default number of LOVE manager producers pod replicas + replicas: 1 + autoscaling: + # -- Whether automatic horizontal scaling is active + enabled: true + # -- The allowed minimum number of replicas + minReplicas: 1 + # -- The allowed maximum number of replicas + maxReplicas: 100 + # -- The percentage of CPU utilization that will trigger the scaling + targetCPUUtilizationPercentage: 80 + # -- (int) The percentage of memory utilization that will trigger the scaling + targetMemoryUtilizationPercentage: "" + # -- Policy for scaling up manager pods + scaleUpPolicy: {} + # -- Policy for scaling down manager pods + scaleDownPolicy: {} + # -- Resource specifications for the LOVE manager producers pods + resources: {} + # -- Node selection rules for the LOVE manager producers pods + nodeSelector: {} + # -- Toleration specifications for the LOVE manager producers pods + tolerations: [] + # -- Affinity rules for the LOVE manager producers pods + affinity: {} + # -- Configuration for the LOVE manager producers pods readiness probe + readinessProbe: {} +redis: + image: + # -- The redis image to use + repository: redis + # -- The tag to use for the redis image + tag: 5.0.3 + # -- The pull policy for the redis image + pullPolicy: IfNotPresent + envSecrets: + # -- The redis password secret key name + REDIS_PASS: redis-pass + # -- The redis port number + port: 6379 + # -- Resource specifications for the LOVE redis pods + resources: {} + # -- Node selection rules for the LOVE redis pods + nodeSelector: {} + # -- Toleration specifications for the LOVE redis pods + tolerations: [] + # -- Affinity rules for the LOVE redis pods + affinity: {} + # -- Configuration specification for the redis service + config: | + timeout 60 +viewBackup: + # -- Whether view backup is active +
enabled: false + image: + # -- The view backup image to use + repository: lsstts/love-view-backup + # -- The tag to use for the view backup image + tag: develop + # -- The pull policy to use for the view backup image + pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" + # -- Place to specify additional environment variables for the view backup job + env: {} + # -- The view backup job schedule in cron format + schedule: "0 0 1 1 *" + # -- The restart policy type for the view backup cronjob + restartPolicy: Never + # -- Time after view backup job finishes before deletion (ALPHA) + ttlSecondsAfterFinished: "" + # -- Resource specifications for the LOVE view backup pods + resources: {} + # -- Node selection rules for the LOVE view backup pods + nodeSelector: {} + # -- Toleration specifications for the LOVE view backup pods + tolerations: [] + # -- Affinity rules for the LOVE view backup pods + affinity: {} diff --git a/applications/love/charts/love-nginx/Chart.yaml b/applications/love/charts/love-nginx/Chart.yaml new file mode 100644 index 0000000000..53060a9776 --- /dev/null +++ b/applications/love/charts/love-nginx/Chart.yaml @@ -0,0 +1,4 @@ +name: love-nginx +apiVersion: v2 +version: 1.0.0 +description: Helm chart for the LOVE Nginx server. diff --git a/applications/love/charts/love-nginx/README.md b/applications/love/charts/love-nginx/README.md new file mode 100644 index 0000000000..c383628235 --- /dev/null +++ b/applications/love/charts/love-nginx/README.md @@ -0,0 +1,38 @@ +# love-nginx + +Helm chart for the LOVE Nginx server. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the NGINX pod | +| image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the NGINX image | +| image.repository | string | `"nginx"` | The NGINX image to use | +| image.tag | string | `"1.14.2"` | The tag to use for the NGINX image | +| imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. 
If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | + | ingress.annotations | object | `{}` | Annotations for the NGINX ingress | + | ingress.className | string | `"nginx"` | Assign the Ingress class name | + | ingress.hostname | string | `"love.local"` | Hostname for the NGINX ingress | + | ingress.httpPath | string | `"/"` | Path name associated with the NGINX ingress | + | ingress.pathType | string | `""` | Set the Kubernetes path type for the NGINX ingress | + | initContainers.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the frontend image | + | initContainers.frontend.image.repository | string | `"lsstts/love-frontend"` | The frontend image to use | + | initContainers.frontend.image.tag | string | `nil` | | + | initContainers.manager.command | list | `["/bin/sh","-c","mkdir -p /usr/src/love-manager/media/thumbnails; mkdir -p /usr/src/love-manager/media/configs; cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -uv /usr/src/love/manager/ui_framework/fixtures/thumbnails/* /usr/src/love-manager/media/thumbnails; cp -uv /usr/src/love/manager/api/fixtures/configs/* /usr/src/love-manager/media/configs"]` | The command to execute for the love-manager static content | + | initContainers.manager.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the love-manager static content image | + | initContainers.manager.image.repository | string | `"lsstts/love-manager"` | The static love-manager content image to use | + | initContainers.manager.image.tag | string | `nil` | | + | loveConfig | string | `"{\n \"alarms\": {\n \"minSeveritySound\": \"serious\",\n \"minSeverityNotification\": \"warning\"\n },\n \"camFeeds\": {\n \"generic\": \"/gencam\",\n \"allSky\": \"/gencam\"\n }\n}\n"` | Configuration specification for the LOVE service | + | namespace | string | `"love"` | The overall namespace for the application | + | nginxConfig | string | `"server {\n listen 80;\n server_name localhost;\n location / {\n root /usr/src/love-frontend;\n try_files $uri$args $uri$args/ $uri/ /index.html;\n }\n location /manager {\n proxy_pass http://love-manager-service:8000;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header Host $host;\n proxy_redirect off;\n }\n location /manager/static {\n alias /usr/src/love-manager/static;\n }\n location /manager/media {\n alias /usr/src/love-manager/media;\n }\n}\n"` | Configuration specification for the NGINX service | + | nodeSelector | object | `{}` | Node selection rules for the NGINX pod | + | ports.container | int | `80` | Container port for the NGINX service | + | ports.node | int | `30000` | Node port for the NGINX service | + | resources | object | `{}` | Resource specifications for the NGINX pod | + | serviceType | string | `"ClusterIP"` | Service type specification | + | staticStore.accessMode | string | `"ReadWriteMany"` | The access mode for the NGINX static store | + | staticStore.claimSize | string | `"2Gi"` | The size of the NGINX static store request | + | staticStore.name | string | `"love-nginx-static"` | Label for the NGINX static store | + | staticStore.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | + | tolerations | list | `[]` | Toleration specifications for the NGINX pod | diff --git a/applications/love/charts/love-nginx/templates/_helpers.tpl
b/applications/love/charts/love-nginx/templates/_helpers.tpl new file mode 100644 index 0000000000..82f31f6cdf --- /dev/null +++ b/applications/love/charts/love-nginx/templates/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "love-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "love-nginx.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "love-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "love-nginx.labels" -}} +helm.sh/chart: {{ include "love-nginx.chart" . }} +{{ include "love-nginx.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels
*/}} +{{- define "love-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-nginx.name" . }} +app.kubernetes.io/instance: {{ include "love-nginx.name" . }} +{{- end }} diff --git a/applications/love/charts/love-nginx/templates/config.yaml b/applications/love/charts/love-nginx/templates/config.yaml new file mode 100644 index 0000000000..c1eee8e834 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/config.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-conf +data: + nginx.conf: | +{{ .Values.nginxConfig | indent 4 }} diff --git a/applications/love/charts/love-nginx/templates/ingress.yaml b/applications/love/charts/love-nginx/templates/ingress.yaml new file mode 100644 index 0000000000..693b94ff2c --- /dev/null +++ b/applications/love/charts/love-nginx/templates/ingress.yaml @@ -0,0 +1,27 @@ +--- +{{- if eq .Values.serviceType "ClusterIP" }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "love-nginx.name" . }}-ingress + namespace: {{ $.Values.global.controlSystem.appNamespace }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + - host: {{ .Values.ingress.hostname }} + http: + paths: + - path: {{ .Values.ingress.httpPath }} + pathType: {{ default "Prefix" .Values.ingress.pathType }} + backend: + service: + name: {{ include "love-nginx.name" .
}}-service + port: + number: {{ .Values.ports.container }} +{{- end }} diff --git a/applications/love/charts/love-nginx/templates/love-config.yaml b/applications/love/charts/love-nginx/templates/love-config.yaml new file mode 100644 index 0000000000..190a9e29a2 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/love-config.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: love-conf +data: + default.json: | +{{ .Values.loveConfig | indent 4 }} diff --git a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml new file mode 100644 index 0000000000..e2ca31535f --- /dev/null +++ b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "love-nginx.name" . }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} + labels: + {{- include "love-nginx.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "love-nginx.selectorLabels" . | nindent 6 }} + replicas: {{ .Values.replicas | default 1 }} + template: + metadata: + labels: + {{- include "love-nginx.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + initContainers: + - name: love-frontend + {{- $feImageTag := .Values.initContainers.frontend.image.tag | default $.Values.global.controlSystem.imageTag }} + image: "{{ .Values.initContainers.frontend.image.repository }}:{{ $feImageTag }}" + imagePullPolicy: {{ .Values.initContainers.frontend.image.pullPolicy }} + command: ["/bin/sh", "-c", "mkdir -p /usr/src/love-frontend; cp -Rv /usr/src/love/ /usr/src/love-frontend"] + volumeMounts: + - mountPath: /usr/src + name: {{ .Values.staticStore.name }} + - name: love-manager-static + {{- $mgImageTag := .Values.initContainers.manager.image.tag | default $.Values.global.controlSystem.imageTag }} + image: "{{ .Values.initContainers.manager.image.repository }}:{{ $mgImageTag }}" + imagePullPolicy: {{ .Values.initContainers.manager.image.pullPolicy }} + {{- with .Values.initContainers.manager.command }} + command: + {{- range $item := $.Values.initContainers.manager.command }} + - {{ $item | quote }} + {{- end }} + {{- end }} + volumeMounts: + - mountPath: /usr/src + name: {{ .Values.staticStore.name }} + containers: + - name: {{ include "love-nginx.name" . 
}} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.ports.container }} + volumeMounts: + - mountPath: /etc/nginx/conf.d + readOnly: true + name: nginx-conf + - mountPath: /usr/src + name: {{ .Values.staticStore.name }} + - mountPath: /usr/src/love-manager/media/configs + name: love-conf + {{- with $.Values.resources }} + resources: + {{- toYaml $.Values.resources | nindent 10 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + name: nginx-conf + items: + - key: nginx.conf + path: nginx.conf + - name: {{ .Values.staticStore.name }} + persistentVolumeClaim: + claimName: {{ .Values.staticStore.name }}-pvc + - name: love-conf + configMap: + name: love-conf + items: + - key: default.json + path: default.json + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} diff --git a/applications/love/charts/love-nginx/templates/service.yaml b/applications/love/charts/love-nginx/templates/service.yaml new file mode 100644 index 0000000000..70b2972a87 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/service.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "love-nginx.name" . }}-service + namespace: {{ $.Values.global.controlSystemAppNamespace }} +spec: + selector: + app.kubernetes.io/instance: {{ include "love-nginx.name" . }} + type: {{ .Values.serviceType }} + ports: + - port: {{ .Values.ports.container }} + targetPort: {{ .Values.ports.container }} + {{- if ne .Values.serviceType "ClusterIP" }} + nodePort: {{ .Values.ports.node }} + {{- end }} diff --git a/applications/love/charts/love-nginx/templates/volumeclaim.yaml b/applications/love/charts/love-nginx/templates/volumeclaim.yaml new file mode 100644 index 0000000000..3f09833420 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/volumeclaim.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.staticStore.name }}-pvc +spec: + accessModes: + - {{ .Values.staticStore.accessMode | quote }} + resources: + requests: + storage: {{ .Values.staticStore.claimSize }} + storageClassName: {{ .Values.staticStore.storageClass }} diff --git a/applications/love/charts/love-nginx/values.yaml b/applications/love/charts/love-nginx/values.yaml new file mode 100644 index 0000000000..bb8a20b5fb --- /dev/null +++ b/applications/love/charts/love-nginx/values.yaml @@ -0,0 +1,103 @@ +# -- The overall namespace for the application +namespace: love +image: + # -- The NGINX image to use + repository: nginx + # -- The tag to use for the NGINX image + tag: 1.14.2 + # -- The pull policy on the NGINX image + pullPolicy: IfNotPresent +# -- Service type specification +serviceType: ClusterIP +ports: + # -- Container port for the NGINX service + container: 80 + # -- Node port for the NGINX service + node: 30000 +ingress: + # -- Hostname for the NGINX ingress + hostname: love.local + # -- Path name associated with the NGINX ingress + httpPath: / + # -- Set the Kubernetes path type for the NGINX ingress + pathType: "" + # -- Assign the Ingress class name + className: nginx + # -- Annotations for the NGINX ingress + annotations: {} +# -- Configuration specification for the NGINX service +nginxConfig: | + server { + listen 80; + server_name 
localhost; + location / { + root /usr/src/love-frontend; + try_files $uri$args $uri$args/ $uri/ /index.html; + } + location /manager { + proxy_pass http://love-manager-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /manager/static { + alias /usr/src/love-manager/static; + } + location /manager/media { + alias /usr/src/love-manager/media; + } + } +# -- Configuration specification for the LOVE service +loveConfig: | + { + "alarms": { + "minSeveritySound": "serious", + "minSeverityNotification": "warning" + }, + "camFeeds": { + "generic": "/gencam", + "allSky": "/gencam" + } + } +# -- The list of pull secrets needed for the images. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (The label identifying the pull-secret to use) +imagePullSecrets: [] +initContainers: + frontend: + image: + # -- The frontend image to use + repository: lsstts/love-frontend + # str -- The tag to use for the frontend image + tag: + # -- The pull policy to use for the frontend image + pullPolicy: IfNotPresent + manager: + image: + # -- The static love-manager content image to use + repository: lsstts/love-manager + # str -- The tag to use for the love-manager static content image + tag: + # -- The pull policy to use for the love-manager static content image + pullPolicy: IfNotPresent + # -- The command to execute for the love-manager static content + command: ["/bin/sh", "-c", "mkdir -p /usr/src/love-manager/media/thumbnails; mkdir -p /usr/src/love-manager/media/configs; cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -uv /usr/src/love/manager/ui_framework/fixtures/thumbnails/* /usr/src/love-manager/media/thumbnails; cp -uv /usr/src/love/manager/api/fixtures/configs/* /usr/src/love-manager/media/configs"] +staticStore: + # -- Label for the NGINX static store + name: love-nginx-static + # -- The storage class to request the disk allocation from + storageClass: local-store + # -- The access mode for the NGINX static store + accessMode: ReadWriteMany + # -- The size of the NGINX static store request + claimSize: 2Gi +# -- Resource specifications for the NGINX pod +resources: {} +# -- Node selection rules for the NGINX pod +nodeSelector: {} +# -- Toleration specifications for the NGINX pod +tolerations: [] +# -- Affinity rules for the NGINX pod +affinity: {} diff --git a/applications/love/charts/love-producer/Chart.yaml b/applications/love/charts/love-producer/Chart.yaml new file mode 100644 index 0000000000..101bd0ad9e --- /dev/null +++ b/applications/love/charts/love-producer/Chart.yaml @@ -0,0 +1,4 @@ +name: love-producer +apiVersion: v2 +version: 1.0.0 +description: Helm chart for the LOVE producers. diff --git a/applications/love/charts/love-producer/README.md b/applications/love/charts/love-producer/README.md new file mode 100644 index 0000000000..7857e17d30 --- /dev/null +++ b/applications/love/charts/love-producer/README.md @@ -0,0 +1,20 @@ +# love-producer + +Helm chart for the LOVE producers. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | +| annotations | object | `{}` | This allows for the specification of pod annotations.
| + | env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environment variables | + | envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | + | image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | + | image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | + | image.tag | string | `nil` | | + | nodeSelector | object | `{}` | Node selection rules applied to all LOVE producer pods | + | producers | obj | `[]` | This section sets the list of producers to use. The producers should be specified like: _name_: The identifying name for the CSC producer _csc_: _CSC name:index_ The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | + | replicaCount | int | `1` | Set the replica count for the LOVE producers | + | resources | object | `{}` | Resource specifications applied to all LOVE producer pods | + | tolerations | list | `[]` | Toleration specifications applied to all LOVE producer pods | diff --git a/applications/love/charts/love-producer/templates/_helpers.tpl b/applications/love/charts/love-producer/templates/_helpers.tpl new file mode 100644 index 0000000000..af6ce0dc52 --- /dev/null +++ b/applications/love/charts/love-producer/templates/_helpers.tpl @@ -0,0 +1,53 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "love-producer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "love-producer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create app name from release and producer name. +*/}} +{{- define "love-producer.appName" -}} +{{ printf "%s-producer-%s" .Release.Name .Producer | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "love-producer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "love-producer.labels" -}} +helm.sh/chart: {{ include "love-producer.chart" . }} +{{ include "love-producer.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "love-producer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-producer.name" .
}} +{{- end }} diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml new file mode 100644 index 0000000000..fcc11046a0 --- /dev/null +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -0,0 +1,95 @@ +{{- range $producer := .Values.producers }} +{{ $appName := printf "%s-producer-%s" $.Release.Name $producer.name | trunc 63 | trimSuffix "-" }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $appName }} + labels: + {{- include "love-producer.labels" $ | nindent 4 }} +spec: + replicas: {{ $.Values.replicaCount }} + selector: + matchLabels: + {{- include "love-producer.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/instance: {{ $appName }} + template: + metadata: + {{- with $.Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "love-producer.selectorLabels" $ | nindent 8 }} + app.kubernetes.io/instance: {{ $appName }} + spec: + containers: + - name: {{ $producer.name }} + {{- $imageTag := $.Values.image.tag | default $.Values.global.controlSystem.imageTag }} + image: "{{ $.Values.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + envFrom: + - configMapRef: + name: csc-env-config + env: + - name: LOVE_CSC_PRODUCER + value: {{ $producer.csc | quote }} + - name: LSST_KAFKA_SECURITY_PASSWORD + valueFrom: + secretKeyRef: + name: ts-salkafka + key: ts-salkafka-password + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env_var, $env_value := $.Values.envSecrets }} + - name: {{ $env_var }} + valueFrom: + secretKeyRef: + name: love-secrets + key: {{ $env_value }} + {{- end }} + {{- if or $.Values.resources $producer.resources }} + {{- $resources := "" }} + {{- if $producer.resources }} + {{- $resources = $producer.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + imagePullSecrets: + - name: nexus3-docker + {{- if or $.Values.nodeSelector $producer.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $producer.nodeSelector }} + {{- $nodeSelector = $producer.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $producer.affinity }} + {{- $affinity := "" }} + {{- if $producer.affinity }} + {{- $affinity = $producer.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $producer.tolerations }} + {{- $tolerations := "" }} + {{- if $producer.tolerations }} + {{- $tolerations = $producer.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/love-producer/values.yaml b/applications/love/charts/love-producer/values.yaml new file mode 100644 index 0000000000..ca39d63d95 --- /dev/null +++ b/applications/love/charts/love-producer/values.yaml @@ -0,0 +1,35 @@ +# -- Set the replica count for the LOVE producers +replicaCount: 1 +image: + # -- The LOVE producer image to use + repository: lsstts/love-producer + # str -- The tag to use for the LOVE producer image + tag: + # -- The pull policy on the LOVE
producer image + pullPolicy: IfNotPresent +# -- This section holds a set of key, value pairs for environment variables +env: + WEBSOCKET_HOST: love-nginx/manager/ws/subscription +# -- This section holds a set of key, value pairs for secrets +envSecrets: + PROCESS_CONNECTION_PASS: process-connection-pass +# -- (obj) This section sets the list of producers to use. +# The producers should be specified like: +# _name_: The identifying name for the CSC producer +# _csc_: _CSC name:index_ +# The following attributes are optional +# _resources_ (A resource object specification) +# _nodeSelector_ (A node selector object specification) +# _tolerations_ (A list of tolerations) +# _affinity_ (An affinity object specification) +producers: [] +# -- This allows for the specification of pod annotations. +annotations: {} +# -- Resource specifications applied to all LOVE producer pods +resources: {} +# -- Node selection rules applied to all LOVE producer pods +nodeSelector: {} +# -- Toleration specifications applied to all LOVE producer pods +tolerations: [] +# -- Affinity rules applied to all LOVE producer pods +affinity: {} diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml new file mode 100644 index 0000000000..858f7a6e3a --- /dev/null +++ b/applications/love/values-tucson-teststand.yaml @@ -0,0 +1,338 @@ +csc_collector: + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + +love-commander: + image: + repository: ts-dockerhub.lsst.org/love-commander + pullPolicy: Always + env: + S3_INSTANCE: tuc + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + service: + enabled: true + port: 5000 + type: ClusterIP + +love-manager: + manager: + frontend: + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + SERVER_URL: tucson-teststand.lsst.codes + OLE_API_HOSTNAME: tucson-teststand.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.tu.lsst.org + DB_HOST: postgresdb01.tu.lsst.org + LOVE_SITE: tucson + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + producers: + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + SERVER_URL: tucson-teststand.lsst.codes + OLE_API_HOSTNAME: tucson-teststand.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.tu.lsst.org + DB_HOST: postgresdb01.tu.lsst.org + LOVE_SITE: tucson + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 +
periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + redis: + image: + repository: redis + tag: '7' + pullPolicy: IfNotPresent + config: | + timeout 60 + viewBackup: + enabled: true + image: + repository: ts-dockerhub.lsst.org/love-view-backup + pullPolicy: Always + schedule: 0 12 * * * + +love-nginx: + image: + repository: nginx + tag: 1.25.1 + pullPolicy: Always + ingress: + hostname: tucson-teststand.lsst.codes + httpPath: /love + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + imagePullSecrets: + - name: nexus3-docker + initContainers: + frontend: + image: + repository: ts-dockerhub.lsst.org/love-frontend-k8s + pullPolicy: Always + manager: + image: + repository: ts-dockerhub.lsst.org/love-manager-static + pullPolicy: Always + command: + - /bin/sh + - -c + - mkdir -p /usr/src/love-manager; cp -Rv /usr/src/love/manager/media /usr/src/love-manager; cp -Rv /usr/src/love/manager/static /usr/src/love-manager + staticStore: + name: love-nginx-static + storageClass: rook-ceph-block + accessMode: ReadWriteOnce + claimSize: 2Gi + nginxConfig: | + server { + listen 80; + server_name localhost; + location /love { + root /usr/src/love-frontend; + try_files $uri$args $uri$args/ $uri/ /love/index.html; + } + location /love/manager { + client_max_body_size 5M; + proxy_pass http://love-manager-frontend-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/producers { + proxy_pass http://love-manager-producers-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/media { + alias /usr/src/love-manager/media; + } + location /love/manager/static { + alias /usr/src/love-manager/static; + } + location /love/manager/media { + alias /usr/src/love-manager/media; + } + location /love/simcam { + proxy_pass http://simulation-gencam-service.calsys:5013/; + proxy_set_header Host $host/love; + } + } + loveConfig: | + { + "alarms": { + "minSeveritySound": "mute", + "minSeverityNotification": "mute" + }, + "camFeeds": { + "simcam": "/love/simcam" + }, + "efd": { + "defaultEfdInstance": "tucson_teststand_efd", + "urlStatus": "https://tucson-teststand.lsst.codes/influxdb/health" + }, + "sal": { + "urlStatus": "https://tucson-teststand.lsst.codes/sasquatch-rest-proxy/brokers", + "expectedBrokerList": [0, 1, 2] + } + } + +love-producer: + image: + repository: ts-dockerhub.lsst.org/love-producer + pullPolicy: Always + env: + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription + resources: + requests: + cpu: 10m + memory: 100Mi + limits: + cpu: 100m + memory: 300Mi + producers: + - name: ataos + csc: ATAOS:0 + - name: atcamera + csc: ATCamera:0 + - name: atdome + csc: ATDome:0 + - name: atdometrajectory + csc: ATDomeTrajectory:0 + - name: atheaderservice + csc: ATHeaderService:0 + - name: athexapod + csc: ATHexapod:0 + - name: atmcs + csc: ATMCS:0 + - name: atocps + csc: OCPS:1 + - name: atoods + csc: ATOODS:0 + - name: atpneumatics + csc: ATPneumatics:0 + - name: atptg + csc: ATPtg:0 + - name: atscheduler + csc: Scheduler:2 + - name: atscriptqueue + csc: ScriptQueue:2 + - name: atspectrograph + csc: ATSpectrograph:0 + - name: 
authorize + csc: Authorize:0 + - name: auxteless01 + csc: ESS:201 + - name: auxteless02 + csc: ESS:202 + - name: auxteless03 + csc: ESS:203 + - name: auxteless04 + csc: ESS:204 + - name: calibhilless01 + csc: ESS:301 + - name: camerahexapod + csc: MTHexapod:1 + - name: cccamera + csc: CCCamera:0 + - name: ccheaderservice + csc: CCHeaderService:0 + - name: ccoods + csc: CCOODS:0 + - name: ccocps + csc: OCPS:2 + - name: dimm1 + csc: DIMM:1 + - name: dimm2 + csc: DIMM:2 + - name: dsm1 + csc: DSM:1 + - name: dsm2 + csc: DSM:2 + - name: gcheaderservice1 + csc: GCHeaderService:1 + - name: genericcamera1 + csc: GenericCamera:1 + - name: lasertracker1 + csc: LaserTracker:1 + - name: love + csc: LOVE:0 + - name: m2ess106 + csc: ESS:106 + - name: m2hexapod + csc: MTHexapod:2 + - name: mtaircompressor1 + csc: MTAirCompressor:1 + - name: mtaircompressor2 + csc: MTAirCompressor:2 + - name: mtaos + csc: MTAOS:0 + - name: mtdome + csc: MTDome:0 + - name: mtdomeess01 + csc: ESS:101 + - name: mtdomeess02 + csc: ESS:102 + - name: mtdomeess03 + csc: ESS:103 + - name: mtdometrajectory + csc: MTDomeTrajectory:0 + - name: mtm1m3 + csc: MTM1M3:0 + - name: mtm2 + csc: MTM2:0 + - name: mtmount + csc: MTMount:0 + - name: mtptg + csc: MTPtg:0 + - name: mtrotator + csc: MTRotator:0 + - name: mtscheduler + csc: Scheduler:1 + - name: mtscriptqueue + csc: ScriptQueue:1 + - name: tmaess01 + csc: ESS:1 + - name: tmaess104 + csc: ESS:104 + - name: tmaess105 + csc: ESS:105 + - name: watcher + csc: Watcher:0 + - name: weatherforecast + csc: WeatherForecast:0 diff --git a/applications/love/values.yaml b/applications/love/values.yaml new file mode 100644 index 0000000000..346ac1d5b0 --- /dev/null +++ b/applications/love/values.yaml @@ -0,0 +1,55 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
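+# As a sketch, that injection is the equivalent of Helm parameters such as +# --set global.controlSystem.appNamespace=love; the actual parameter values +# come from the Argo CD Application definition, not from these files.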
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/mobu/Chart.yaml b/applications/mobu/Chart.yaml index 9d1709f040..c348afc8e8 100644 --- a/applications/mobu/Chart.yaml +++ b/applications/mobu/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: mobu version: 1.0.0 -description: Continuous integration testing +description: "Continuous integration testing" sources: - - https://github.com/lsst-sqre/mobu -appVersion: 6.1.1 + - "https://github.com/lsst-sqre/mobu" +appVersion: 7.0.0 diff --git a/applications/mobu/templates/deployment.yaml b/applications/mobu/templates/deployment.yaml index d80bb97975..95b7299cb9 100644 --- a/applications/mobu/templates/deployment.yaml +++ b/applications/mobu/templates/deployment.yaml @@ -25,27 +25,27 @@ spec: - name: {{ .Chart.Name }} env: {{- if .Values.config.slackAlerts }} - - name: "ALERT_HOOK" + - name: "MOBU_ALERT_HOOK" valueFrom: secretKeyRef: name: {{ template "mobu.fullname" . }}-secret key: "ALERT_HOOK" {{- end }} {{- if .Values.config.autostart }} - - name: "AUTOSTART" + - name: "MOBU_AUTOSTART_PATH" value: "/etc/mobu/autostart.yaml" {{- end }} - - name: "ENVIRONMENT_URL" + - name: "MOBU_ENVIRONMENT_URL" value: {{ .Values.global.baseUrl }} - - name: "GAFAELFAWR_TOKEN" + - name: "MOBU_GAFAELFAWR_TOKEN" valueFrom: secretKeyRef: name: {{ template "mobu.fullname" . 
}}-gafaelfawr-token key: "token" - - name: "SAFIR_PATH_PREFIX" + - name: "MOBU_PATH_PREFIX" value: {{ .Values.config.pathPrefix | quote }} {{- if (not .Values.config.debug) }} - - name: "SAFIR_PROFILE" + - name: "MOBU_LOGGING_PROFILE" value: "production" {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 37d3033f71..19c702a5af 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -1,10 +1,10 @@ config: debug: true autostart: - - name: "weekly" + - name: "recommended" count: 1 users: - - username: "bot-mobu-weekly" + - username: "bot-mobu-recommended" scopes: - "exec:notebook" - "exec:portal" @@ -13,11 +13,8 @@ config: business: type: "NotebookRunner" options: - image: - image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "tutorial" count: 1 @@ -31,13 +28,10 @@ config: business: type: "NotebookRunner" options: - image: - image_class: "latest-weekly" repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" repo_branch: "prod" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -45,7 +39,7 @@ config: - username: "bot-mobu-tap" scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 6f5f7d0beb..6a0cdf5515 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -1,22 +1,5 @@ config: autostart: - - name: "nublado2" - count: 1 - users: - - username: "bot-mobu-nublado2" - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - repo_url: "https://github.com/lsst-sqre/system-test.git" - repo_branch: "prod" - max_executions: 1 - url_prefix: "/n2" - restart: true - name: "recommended" count: 1 users: @@ -31,7 +14,6 @@ config: options: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "weekly" count: 1 @@ -45,11 +27,8 @@ config: business: type: "NotebookRunner" options: - image: - image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "tutorial" count: 1 @@ -63,13 +42,10 @@ config: business: type: "NotebookRunner" options: - image: - image_class: "latest-weekly" repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" repo_branch: "prod" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -77,7 +53,7 @@ config: - username: "bot-mobu-tap" scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-idfprod.yaml b/applications/mobu/values-idfprod.yaml index 0f6813639a..ca438e299c 100644 --- a/applications/mobu/values-idfprod.yaml +++ b/applications/mobu/values-idfprod.yaml @@ -15,7 +15,6 @@ config: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" max_executions: 1 - use_cachemachine: false restart: true - name: "quickbeam" count: 1 @@ -33,7 +32,6 @@ config: repo_branch: 
"prod" idle_time: 900 delete_lab: false - use_cachemachine: false restart: true - name: "tutorial" count: 1 @@ -51,7 +49,25 @@ config: repo_branch: "prod" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false + restart: true + - name: "tutorial-weekly" + count: 1 + users: + - username: "bot-mobu-tutorial-weekly" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + image: + image_class: "latest-weekly" + repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" + repo_branch: "prod" + max_executions: 1 + working_directory: "notebooks/tutorial-notebooks" restart: true - name: "tap" count: 1 @@ -59,7 +75,7 @@ config: - username: "bot-mobu-tap" scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml index 1a55036ea1..facf862243 100644 --- a/applications/mobu/values-usdfdev.yaml +++ b/applications/mobu/values-usdfdev.yaml @@ -19,37 +19,16 @@ config: image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - - name: "weekly" + - name: "tap" count: 1 users: - username: "bot-mobu02" uidnumber: 45693 gidnumber: 1126 - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - image: - image_class: "latest-weekly" - repo_url: "https://github.com/lsst-sqre/system-test.git" - repo_branch: "prod" - use_cachemachine: false - restart: true - - name: "tap" - count: 1 - users: - - username: "bot-mobu03" - uidnumber: 45694 - gidnumber: 1126 scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-usdfint.yaml b/applications/mobu/values-usdfint.yaml new file mode 100644 index 0000000000..84c264637d --- /dev/null +++ b/applications/mobu/values-usdfint.yaml @@ -0,0 +1,22 @@ +config: + debug: true + autostart: + - name: "firefighter" + count: 1 + users: + - username: "bot-mobu03" + uidnumber: 45694 + gidnumber: 1126 + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + image: + image_class: "latest-weekly" + repo_url: "https://github.com/lsst-sqre/system-test.git" + repo_branch: "prod" + restart: true diff --git a/applications/mobu/values-usdfprod.yaml b/applications/mobu/values-usdfprod.yaml index 3bd79a6fe1..b04c82af58 100644 --- a/applications/mobu/values-usdfprod.yaml +++ b/applications/mobu/values-usdfprod.yaml @@ -17,7 +17,6 @@ config: options: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -27,7 +26,7 @@ config: gidnumber: 1126 scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/moneypenny/Chart.yaml b/applications/moneypenny/Chart.yaml deleted file mode 100644 index 9c0ba6863a..0000000000 --- a/applications/moneypenny/Chart.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v2 -appVersion: "1.0.0" -name: moneypenny -description: User provisioning actions -sources: - - https://github.com/lsst-sqre/moneypenny - - https://github.com/lsst-sqre/farthing - - https://github.com/lsst-sqre/inituserhome 
-version: 1.0.2 -annotations: - phalanx.lsst.io/docs: | - - id: "SQR-052" - title: >- - Proposal for privilege separation in RSP Notebook Aspect containers - url: "https://sqr-052.lsst.io/" diff --git a/applications/moneypenny/README.md b/applications/moneypenny/README.md deleted file mode 100644 index 1cfedae207..0000000000 --- a/applications/moneypenny/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# moneypenny - -User provisioning actions - -## Source Code - -* -* -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the vo-cutouts frontend pod | -| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the moneypenny image | -| image.repository | string | `"lsstsqre/moneypenny"` | moneypenny image to use | -| image.tag | string | The appVersion of the chart | Tag of moneypenny image to use | -| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the vo-cutouts frontend pod | -| orders.commission | list | `[{"image":"lsstsqre/farthing","name":"farthing","securityContext":{"allowPrivilegeEscalation":false,"runAsNonRootUser":true,"runAsUser":1000}}]` | List of specifications for containers to run to commission a new user. Each member of the list should set a container `name`, `image`, and `securityContext` and may contain `volumeMounts`. | -| orders.retire | list | `[{"image":"lsstsqre/farthing","name":"farthing","securityContext":{"allowPrivilegeEscalation":false,"runAsNonRootUser":true,"runAsUser":1000}}]` | List of specifications for containers to run to retire a user. Each member of the list should set a container `name`, `image`, and `securityContext` and may contain `volumeMounts`. | -| orders.volumes | list | `[]` | Additional volumes to mount when commissioning or retiring users. | -| podAnnotations | object | `{}` | Annotations for the vo-cutouts frontend pod | -| quips | string | A small selection | Moneypenny quotes | -| replicaCount | int | `1` | Number of pods to start | -| resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod | -| serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | diff --git a/applications/moneypenny/templates/cm-m-config.yaml b/applications/moneypenny/templates/cm-m-config.yaml deleted file mode 100644 index 5dedc2a46d..0000000000 --- a/applications/moneypenny/templates/cm-m-config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "moneypenny.fullname" . }}-m-config - labels: - {{- include "moneypenny.labels" . 
| nindent 4 }} -data: - m.yaml: | - {{- toYaml .Values.orders | nindent 4 }} diff --git a/applications/moneypenny/templates/cm-quips.yaml b/applications/moneypenny/templates/cm-quips.yaml deleted file mode 100644 index a0e9f928ba..0000000000 --- a/applications/moneypenny/templates/cm-quips.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "moneypenny.fullname" . }}-quips - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -data: - quips.txt: | - {{- .Values.quips | nindent 4 }} diff --git a/applications/moneypenny/templates/configmap.yaml b/applications/moneypenny/templates/configmap.yaml deleted file mode 100644 index 646d1c8042..0000000000 --- a/applications/moneypenny/templates/configmap.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "moneypenny.fullname" .}} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -data: - SAFIR_NAME: "moneypenny" - SAFIR_PROFILE: "production" - SAFIR_LOGGER: "moneypenny" - SAFIR_LOG_LEVEL: "INFO" - DOCKER_SECRET_NAME: "pull-secret" diff --git a/applications/moneypenny/templates/deployment.yaml b/applications/moneypenny/templates/deployment.yaml deleted file mode 100644 index 2684cf8eea..0000000000 --- a/applications/moneypenny/templates/deployment.yaml +++ /dev/null @@ -1,96 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "moneypenny.fullname" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "moneypenny.selectorLabels" . | nindent 6 }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/config-m: {{ include (print $.Template.BasePath "/cm-m-config.yaml") . | sha256sum }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "moneypenny.selectorLabels" . | nindent 8 }} - spec: - imagePullSecrets: - - name: "pull-secret" - serviceAccountName: {{ include "moneypenny.serviceAccountName" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 - containers: - - name: "moneypenny" - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "all" - readOnlyRootFilesystem: true - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - envFrom: - - configMapRef: - name: {{ template "moneypenny.fullname" . }} - ports: - - name: "http" - containerPort: 8080 - protocol: "TCP" - livenessProbe: - httpGet: - path: "/" - port: "http" - readinessProbe: - httpGet: - path: "/" - port: "http" - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - name: "m-config" - mountPath: "/opt/lsst/software/moneypenny/config/M" - readOnly: true - - name: "quips" - mountPath: "/opt/lsst/software/moneypenny/config/quips" - readOnly: true - - name: "podinfo" - mountPath: "/etc/podinfo" - readOnly: true - volumes: - - name: "m-config" - configMap: - name: {{ template "moneypenny.fullname" . }}-m-config - - name: "quips" - configMap: - name: {{ template "moneypenny.fullname" . }}-quips - - name: "podinfo" - downwardAPI: - items: - - path: "name" - fieldRef: - fieldPath: "metadata.name" - - path: "uid" - fieldRef: - fieldPath: "metadata.uid" - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/applications/moneypenny/templates/role.yaml b/applications/moneypenny/templates/role.yaml deleted file mode 100644 index 0e730dd5fa..0000000000 --- a/applications/moneypenny/templates/role.yaml +++ /dev/null @@ -1,21 +0,0 @@ -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "moneypenny.serviceAccountName" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: - - "pods" - verbs: - - "create" - - "delete" - - "get" - - "list" - - "watch" - - apiGroups: [""] - resources: ["configmaps"] - verbs: - - "create" - - "delete" diff --git a/applications/moneypenny/templates/rolebinding.yaml b/applications/moneypenny/templates/rolebinding.yaml deleted file mode 100644 index 169978eeaf..0000000000 --- a/applications/moneypenny/templates/rolebinding.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "moneypenny.serviceAccountName" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ include "moneypenny.serviceAccountName" . }} -roleRef: - kind: Role - name: {{ include "moneypenny.serviceAccountName" . }} - apiGroup: rbac.authorization.k8s.io diff --git a/applications/moneypenny/templates/service.yaml b/applications/moneypenny/templates/service.yaml deleted file mode 100644 index 2b7d9b8da7..0000000000 --- a/applications/moneypenny/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "moneypenny.fullname" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -spec: - type: "ClusterIP" - ports: - - name: "http" - protocol: "TCP" - port: 8080 - targetPort: "http" - selector: - {{- include "moneypenny.selectorLabels" . | nindent 4 }} diff --git a/applications/moneypenny/templates/serviceaccount.yaml b/applications/moneypenny/templates/serviceaccount.yaml deleted file mode 100644 index 963cbe100d..0000000000 --- a/applications/moneypenny/templates/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "moneypenny.serviceAccountName" . }} - labels: - {{- include "moneypenny.labels" . 
| nindent 4 }} diff --git a/applications/moneypenny/values-base.yaml b/applications/moneypenny/values-base.yaml deleted file mode 100644 index e8bf412e25..0000000000 --- a/applications/moneypenny/values-base.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: nfs-jhome.ls.lsst.org - path: /jhome diff --git a/applications/moneypenny/values-ccin2p3.yaml b/applications/moneypenny/values-ccin2p3.yaml deleted file mode 100644 index e653e165c2..0000000000 --- a/applications/moneypenny/values-ccin2p3.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - hostPath: - path: /data/rsp/home - type: Directory diff --git a/applications/moneypenny/values-idfint.yaml b/applications/moneypenny/values-idfint.yaml deleted file mode 100644 index bf3fa84444..0000000000 --- a/applications/moneypenny/values-idfint.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: 10.22.240.130 - path: /share1/home diff --git a/applications/moneypenny/values-roe.yaml b/applications/moneypenny/values-roe.yaml deleted file mode 100644 index 0dbe21c7f7..0000000000 --- a/applications/moneypenny/values-roe.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: 192.41.122.33 - path: /jhome diff --git a/applications/moneypenny/values-summit.yaml b/applications/moneypenny/values-summit.yaml deleted file mode 100644 index 1436234dbd..0000000000 --- a/applications/moneypenny/values-summit.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: nfs1.cp.lsst.org - path: /jhome diff --git a/applications/moneypenny/values-tucson-teststand.yaml b/applications/moneypenny/values-tucson-teststand.yaml deleted file mode 100644 index 845233c931..0000000000 --- a/applications/moneypenny/values-tucson-teststand.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: nfs-jhome.tu.lsst.org - path: /jhome diff --git a/applications/moneypenny/values-usdfdev.yaml b/applications/moneypenny/values-usdfdev.yaml deleted file mode 100644 index fe9848cc82..0000000000 --- a/applications/moneypenny/values-usdfdev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -orders: - commission: [] - retire: [] diff --git a/applications/moneypenny/values-usdfprod.yaml b/applications/moneypenny/values-usdfprod.yaml deleted file mode 100644 index fe9848cc82..0000000000 --- 
a/applications/moneypenny/values-usdfprod.yaml +++ /dev/null @@ -1,3 +0,0 @@ -orders: - commission: [] - retire: [] diff --git a/applications/moneypenny/values.yaml b/applications/moneypenny/values.yaml deleted file mode 100644 index 743e2bc0e9..0000000000 --- a/applications/moneypenny/values.yaml +++ /dev/null @@ -1,118 +0,0 @@ -# Default values for moneypenny. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# -- Override the base name for resources -nameOverride: "" - -# -- Override the full name for resources (includes the release name) -fullnameOverride: "" - -# -- Number of pods to start -replicaCount: 1 - -image: - # -- moneypenny image to use - repository: "lsstsqre/moneypenny" - - # -- Pull policy for the moneypenny image - pullPolicy: "IfNotPresent" - - # -- Tag of moneypenny image to use - # @default -- The appVersion of the chart - tag: "" - -serviceAccount: - # -- Name of the service account to use - # @default -- Name based on the fullname template - name: "" - -ingress: - # -- Additional annotations to add to the ingress - annotations: {} - -orders: - # -- List of specifications for containers to run to commission a new user. - # Each member of the list should set a container `name`, `image`, and - # `securityContext` and may contain `volumeMounts`. - commission: - - name: farthing - image: lsstsqre/farthing - securityContext: - runAsUser: 1000 - runAsNonRootUser: true - allowPrivilegeEscalation: false - - # -- List of specifications for containers to run to retire a user. Each - # member of the list should set a container `name`, `image`, and - # `securityContext` and may contain `volumeMounts`. - retire: - - name: farthing - image: lsstsqre/farthing - securityContext: - runAsUser: 1000 - runAsNonRootUser: true - allowPrivilegeEscalation: false - - # -- Additional volumes to mount when commissioning or retiring users. - volumes: [] - -# -- Resource limits and requests for the vo-cutouts frontend pod -resources: {} - -# -- Annotations for the vo-cutouts frontend pod -podAnnotations: {} - -# -- Node selector rules for the vo-cutouts frontend pod -nodeSelector: {} - -# -- Tolerations for the vo-cutouts frontend pod -tolerations: [] - -# -- Affinity rules for the vo-cutouts frontend pod -affinity: {} - -# -- Moneypenny quotes -# @default -- A small selection -quips: | - Flattery will get you nowhere... but don't stop trying. - % - You never take me to dinner looking like this, James. You never take me to dinner, period. - % - M: (on intercom) Miss Moneypenny, give 007 the password we've agreed - with Japanese SIS. - Moneypenny: Yes, Sir. We tried to think of something that you wouldn't - forget. - Bond: Yes? - Moneypenny: I... love... you. Repeat it please, to make sure you get it. - Bond: Don't worry, I get it. Sayonara. - % - My problem is, James, you never do anything with me. - % - I didn't know you were a music lover. Any time you want to come over and hear my Barry Manilow collection... - % - Someday you'll have to make good on your innuendos. - % - You always were a cunning linguist, James. - % - Bond: (about getting shot) In your defense, a moving target is harder to hit. - Moneypenny: Then you'd better keep moving. - % - Moneypenny: Cut-throat razor. How very traditional. - Bond: Well, I like to do some things the old-fashioned way. - Moneypenny: Sometimes the old ways are best. - -# The following will be set by parameters injected by Argo CD and should not -# be set in the individual environment values files. 
-global: - # -- Base URL for the environment - # @default -- Set by Argo CD - baseUrl: "" - - # -- Host name for ingress - # @default -- Set by Argo CD - host: "" - - # -- Base path for Vault secrets - # @default -- Set by Argo CD - vaultSecretsPath: "" diff --git a/applications/monitoring/Chart.yaml b/applications/monitoring/Chart.yaml index b34119eed7..fb706e2ce6 100644 --- a/applications/monitoring/Chart.yaml +++ b/applications/monitoring/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: monitoring -version: 0.0.1 +version: 1.0.0 description: Chronograf-based UI for monitoring (data stored in InfluxDBv2) sources: - https://github.com/lsst-sqre/rubin-influx-tools diff --git a/applications/monitoring/secrets.yaml b/applications/monitoring/secrets.yaml new file mode 100644 index 0000000000..7dfd1a819c --- /dev/null +++ b/applications/monitoring/secrets.yaml @@ -0,0 +1,15 @@ +GH_CLIENT_SECRET: + description: >- + ? +INFLUXDB_TOKEN: + description: >- + ? +TOKEN_SECRET: + description: >- + ? +admin-token: + description: >- + ? +influx-alert-token: + description: >- + ? diff --git a/applications/monitoring/values-roundtable-dev.yaml b/applications/monitoring/values-roundtable-dev.yaml index ec948c5cce..232dd87c7b 100644 --- a/applications/monitoring/values-roundtable-dev.yaml +++ b/applications/monitoring/values-roundtable-dev.yaml @@ -1,4 +1,8 @@ chronograf: + persistence: + enabled: true + size: 1Gi + storageClass: standard-rwo env: GH_CLIENT_ID: "e85fe410b0021a251180" cronjob: diff --git a/applications/narrativelog/Chart.yaml b/applications/narrativelog/Chart.yaml index 73f54a8182..089e6748c7 100644 --- a/applications/narrativelog/Chart.yaml +++ b/applications/narrativelog/Chart.yaml @@ -12,4 +12,4 @@ version: 1.0.0 # number should be incremented each time you make changes to the # application. Versions are not expected to follow Semantic Versioning. They # should reflect the version the application is using. 
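The `chronograf.persistence` block added to `values-roundtable-dev.yaml` above is consumed by the upstream Chronograf chart, which renders it into a PersistentVolumeClaim so dashboard state survives pod restarts. A minimal sketch of the claim those three values imply, assuming the upstream chart's usual behavior (the claim name is illustrative, not the chart's actual generated name):

```yaml
# Hypothetical PVC corresponding to persistence.enabled=true,
# size=1Gi, storageClass=standard-rwo in the values above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: monitoring-chronograf  # illustrative; the chart templates the real name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: standard-rwo
  resources:
    requests:
      storage: 1Gi
```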
-appVersion: 0.5.1 +appVersion: 0.6.1 diff --git a/applications/narrativelog/values-usdfdev.yaml b/applications/narrativelog/values-usdfdev.yaml new file mode 100644 index 0000000000..c7c8760ec3 --- /dev/null +++ b/applications/narrativelog/values-usdfdev.yaml @@ -0,0 +1,5 @@ +config: + site_id: usdfdev +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/narrativelog/values-usdfprod.yaml b/applications/narrativelog/values-usdfprod.yaml new file mode 100644 index 0000000000..bf9b05e6b1 --- /dev/null +++ b/applications/narrativelog/values-usdfprod.yaml @@ -0,0 +1,5 @@ +config: + site_id: usdfprod +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml b/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml index 0f7e1444fc..7cabb3eb3b 100644 --- a/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml +++ b/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml @@ -5,8 +5,8 @@ knative: lsstcamUrl: http://prompt-proto-service-lsstcam.prompt-proto-service-lsstcam/next-visit kafka: - schemaRegistryUrl: http://10.99.65.182:8081 - sasquatchAddress: 10.96.224.141:9094 + schemaRegistryUrl: http://10.96.181.159:8081 + sasquatchAddress: 10.100.226.209:9094 consumerGroup: test-group-3 nextVisitTopic: test.next-visit diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index 2a7acc1739..b6f0c92aed 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: noteburst version: 1.0.0 -appVersion: "0.7.1" +appVersion: "0.8.0" description: Noteburst is a notebook execution service for the Rubin Science Platform. type: application home: https://noteburst.lsst.io/ @@ -13,7 +13,7 @@ maintainers: dependencies: - name: redis - version: 1.0.8 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/noteburst/README.md b/applications/noteburst/README.md index 2d56c582c5..4e7cf96373 100644 --- a/applications/noteburst/README.md +++ b/applications/noteburst/README.md @@ -22,7 +22,7 @@ Noteburst is a notebook execution service for the Rubin Science Platform. | config.nubladoControllerPathPrefix | string | `"/nublado"` | URL path prefix for the Nublado JupyterLab Controller service | | config.worker.identities | list | `[]` | Science Platform user identities that workers can acquire. Each item is an object with username and uuid keys | | config.worker.imageReference | string | `""` | Nublado image reference, applicable when imageSelector is "reference" | -| config.worker.imageSelector | string | `"weekly"` | Nublado image stream to select: "recommended", "weekly" or "reference" | +| config.worker.imageSelector | string | `"recommended"` | Nublado image stream to select: "recommended", "weekly" or "reference" | | config.worker.jobTimeout | int | `300` | The default notebook execution timeout, in seconds. | | config.worker.keepAlive | string | `"normal"` | Worker keep alive mode: "normal", "fast", "disabled" | | config.worker.tokenLifetime | string | `"2419200"` | Worker token lifetime, in seconds. 
| diff --git a/applications/noteburst/values.yaml b/applications/noteburst/values.yaml index 34ecd9bc3f..37c2119e75 100644 --- a/applications/noteburst/values.yaml +++ b/applications/noteburst/values.yaml @@ -118,7 +118,7 @@ config: tokenScopes: "exec:notebook,read:image,read:tap,read:alertdb" # -- Nublado image stream to select: "recommended", "weekly" or "reference" - imageSelector: "weekly" + imageSelector: "recommended" # -- Nublado image reference, applicable when imageSelector is "reference" imageReference: "" diff --git a/applications/nublado-fileservers/Chart.yaml b/applications/nublado-fileservers/Chart.yaml new file mode 100644 index 0000000000..7f44131510 --- /dev/null +++ b/applications/nublado-fileservers/Chart.yaml @@ -0,0 +1,3 @@ +apiVersion: v2 +name: nublado-fileservers +version: 1.0.0 diff --git a/applications/nublado-fileservers/templates/_helpers.tpl b/applications/nublado-fileservers/templates/_helpers.tpl new file mode 100644 index 0000000000..b8c17a8560 --- /dev/null +++ b/applications/nublado-fileservers/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nublado-fileservers.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "nublado-fileservers.labels" -}} +helm.sh/chart: {{ include "nublado-fileservers.chart" . }} +{{ include "nublado-fileservers.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "nublado-fileservers.selectorLabels" -}} +app.kubernetes.io/name: "nublado-fileservers" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/nublado-fileservers/templates/networkpolicy.yaml b/applications/nublado-fileservers/templates/networkpolicy.yaml new file mode 100644 index 0000000000..da7ec8c714 --- /dev/null +++ b/applications/nublado-fileservers/templates/networkpolicy.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "nublado-fileservers" + labels: + {{- include "nublado-fileservers.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + nublado.lsst.io/category: "fileserver" + policyTypes: + - Ingress + ingress: + - from: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8000 diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 9133539d24..8714a9229c 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -3,16 +3,15 @@ name: nublado version: 1.0.0 description: JupyterHub and custom spawner for the Rubin Science Platform sources: - - https://github.com/lsst-sqre/jupyterlab-controller - - https://github.com/lsst-sqre/rsp-restspawner -home: https://github.com/lsst-sqre/jupyterlab-controller -appVersion: 0.7.3 + - https://github.com/lsst-sqre/nublado +home: https://nublado.lsst.io/ +appVersion: 4.0.2 dependencies: - name: jupyterhub # This is the Zero To Jupyterhub version, *not* the version of the # Jupyterhub package itself. 
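The `nublado-fileservers` NetworkPolicy above admits only TCP traffic to port 8000, and only from pods carrying the Gafaelfawr ingress label, to pods the Nublado controller marks as file servers. A minimal sketch of the two sides of that selector relationship, with pod names that are illustrative only:

```yaml
# Hypothetical pod metadata showing the labels the policy matches.
# Target side: a user file server pod created by the Nublado controller.
apiVersion: v1
kind: Pod
metadata:
  name: fileserver-example-user  # illustrative
  namespace: fileservers
  labels:
    nublado.lsst.io/category: "fileserver"  # matched by spec.podSelector
---
# Source side: an ingress pod, in any namespace, labeled for Gafaelfawr.
apiVersion: v1
kind: Pod
metadata:
  name: ingress-nginx-example  # illustrative
  labels:
    gafaelfawr.lsst.io/ingress: "true"  # matched by the ingress "from" rule
```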
- version: "2.0.0" + version: "3.2.1" repository: https://jupyterhub.github.io/helm-chart/ annotations: diff --git a/applications/nublado/README.md b/applications/nublado/README.md index b4e666b5f9..d5cdce7a0e 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -2,24 +2,43 @@ JupyterHub and custom spawner for the Rubin Science Platform -**Homepage:** +**Homepage:** ## Source Code -* -* +* ## Values | Key | Type | Default | Description | |-----|------|---------|-------------| -| controller.affinity | object | `{}` | Affinity rules for the lab controller pod | -| controller.config.fileserver.enabled | bool | `false` | Enable fileserver management | -| controller.config.fileserver.image | string | `"ghcr.io/lsst-sqre/worblehat"` | Image for fileserver container | -| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user fileservers | -| controller.config.fileserver.pullPolicy | string | `"IfNotPresent"` | Pull policy for fileserver container | -| controller.config.fileserver.tag | string | `"0.1.0"` | Tag for fileserver container | -| controller.config.fileserver.timeout | int | `3600` | Timeout for user fileservers, in seconds | +| cloudsql.affinity | object | `{}` | Affinity rules for the Cloud SQL Auth Proxy pod | +| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with Cloud SQL databases on Google Cloud | +| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | +| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a Cloud SQL PostgreSQL instance | +| cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Auth Proxy pod | +| cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Auth Proxy pod | +| cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy pod | +| cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `cloud-sql-proxy` Kubernetes service account and has the `cloudsql.client` role | +| cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Auth Proxy pod | +| controller.affinity | object | `{}` | Affinity rules for the Nublado controller | +| controller.config.fileserver.affinity | object | `{}` | Affinity rules for user file server pods | +| controller.config.fileserver.application | string | `"nublado-fileservers"` | Argo CD application in which to collect user file servers | +| controller.config.fileserver.creationTimeout | int | `120` | Timeout to wait for Kubernetes to create file servers, in seconds | +| controller.config.fileserver.deleteTimeout | int | 60 (1 minute) | Timeout for deleting a user's file server from Kubernetes, in seconds | +| controller.config.fileserver.enabled | bool | `false` | Enable user file servers | +| controller.config.fileserver.idleTimeout | int | `3600` | Timeout for idle user fileservers, in seconds | +| controller.config.fileserver.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for file server image | +| controller.config.fileserver.image.repository | string | `"ghcr.io/lsst-sqre/worblehat"` | File server image to use | +| 
controller.config.fileserver.image.tag | string | `"0.1.0"` | Tag of file server image to use | +| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user file servers | +| controller.config.fileserver.nodeSelector | object | `{}` | Node selector rules for user file server pods | +| controller.config.fileserver.pathPrefix | string | `"/files"` | Path prefix for user file servers | +| controller.config.fileserver.resources | object | See `values.yaml` | Resource requests and limits for user file servers | +| controller.config.fileserver.tolerations | list | `[]` | Tolerations for user file server pods | +| controller.config.fileserver.volumeMounts | list | `[]` | Volumes that should be made available via WebDAV | | controller.config.images.aliasTags | list | `[]` | Additional tags besides `recommendedTag` that should be recognized as aliases. | | controller.config.images.cycle | string | `nil` | Restrict images to this SAL cycle, if given. | | controller.config.images.numDailies | int | `3` | Number of most-recent dailies to prepull. | @@ -28,30 +47,40 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.images.pin | list | `[]` | List of additional image tags to prepull. Listing the image tagged as recommended here is recommended when using a Docker image source to ensure its name can be expanded properly in the menu. | | controller.config.images.recommendedTag | string | `"recommended"` | Tag marking the recommended image (shown first in the menu) | | controller.config.images.source | object | None, must be specified | Source for prepulled images. For Docker, set `type` to `docker`, `registry` to the hostname and `repository` to the name of the repository. For Google Artifact Repository, set `type` to `google`, `location` to the region, `projectId` to the Google project, `repository` to the name of the repository, and `image` to the name of the image. | -| controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab. | +| controller.config.lab.affinity | object | `{}` | Affinity rules for user lab pods | +| controller.config.lab.application | string | `"nublado-users"` | Argo CD application in which to collect user lab objects | +| controller.config.lab.deleteTimeout | int | 60 (1 minute) | Timeout for deleting a user's lab resources from Kubernetes in seconds | +| controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab | +| controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | -| controller.config.lab.initcontainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image reference), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with `allowPrivilegeEscalation` true. Otherwise it will, run as UID 1000. | +| controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumeMounts` (similar to the main `volumeMounts` configuration).
If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | +| controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. | +| controller.config.lab.nodeSelector | object | `{}` | Node selector rules for user lab pods | +| controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | +| controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | -| controller.config.lab.sizes | object | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI prefixes for memory are supported. `large`) | -| controller.config.lab.volumes | list | `[]` | Volumes that should be mounted in lab pods. This supports NFS, HostPath, and PVC volume types (differentiated in source.type) | -| controller.config.safir.logLevel | string | `"INFO"` | Level of Python logging | -| controller.config.safir.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | +| controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and `large` with `small` as the default) | Available lab sizes. Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. | +| controller.config.lab.spawnTimeout | int | `600` | How long to wait for Kubernetes to spawn a lab in seconds. This should generally be shorter than the spawn timeout set in JupyterHub. | +| controller.config.lab.tolerations | list | `[]` | Tolerations for user lab pods | +| controller.config.lab.volumeMounts | list | `[]` | Volumes that should be mounted in lab pods. | +| controller.config.lab.volumes | list | `[]` | Volumes that will be in lab pods or init containers. This supports NFS, HostPath, and PVC volume types (differentiated in source.type).
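The `controller.config.lab.volumes` and `controller.config.lab.volumeMounts` rows above document the new split schema: each volume is declared once under a `name`, and labs and init containers attach it by `volumeName`. A minimal sketch of the shape, with the NFS server and paths as placeholders (the environment values files later in this diff show real instances):

```yaml
# Sketch only; server and paths are placeholders.
controller:
  config:
    lab:
      volumes:
        - name: "home"
          source:
            type: "nfs"
            server: "nfs.example.org"
            serverPath: "/home"
      volumeMounts:
        - containerPath: "/home"
          volumeName: "home"
```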
| +| controller.config.logLevel | string | `"INFO"` | Level of Python logging | +| controller.config.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | | controller.googleServiceAccount | string | None, must be set when using Google Artifact Registry | If Google Artifact Registry is used as the image source, the Google service account that has an IAM binding to the `nublado-controller` Kubernetes service account and has the Artifact Registry reader role | -| controller.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the nublado image | -| controller.image.repository | string | `"ghcr.io/lsst-sqre/jupyterlab-controller"` | nublado image to use | -| controller.image.tag | string | The appVersion of the chart | Tag of nublado image to use | -| controller.ingress.annotations | object | `{}` | Additional annotations to add for the lab controller pod ingress | -| controller.nodeSelector | object | `{}` | Node selector rules for the lab controller pod | -| controller.podAnnotations | object | `{}` | Annotations for the lab controller pod | -| controller.resources | object | `{}` | Resource limits and requests for the lab controller pod | +| controller.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the controller image | +| controller.image.repository | string | `"ghcr.io/lsst-sqre/nublado-controller"` | Nublado controller image to use | +| controller.image.tag | string | The appVersion of the chart | Tag of Nublado controller image to use | +| controller.ingress.annotations | object | `{}` | Additional annotations to add for the Nublado controller ingress | +| controller.nodeSelector | object | `{}` | Node selector rules for the Nublado controller | +| controller.podAnnotations | object | `{}` | Annotations for the Nublado controller | +| controller.resources | object | See `values.yaml` | Resource limits and requests for the Nublado controller | | controller.slackAlerts | bool | `false` | Whether to enable Slack alerts. If set to true, `slack_webhook` must be set in the corresponding Nublado Vault secret. | -| controller.tolerations | list | `[]` | Tolerations for the lab controller pod | +| controller.tolerations | list | `[]` | Tolerations for the Nublado controller | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | hub.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | -| hub.timeout.spawn | int | `600` | Timeout for the Kubernetes spawn process in seconds. (Allow long enough to pull uncached images if needed.) | | hub.timeout.startup | int | `90` | Timeout for JupyterLab to start. Currently this sometimes takes over 60 seconds for reasons we don't understand. | | jupyterhub.cull.enabled | bool | `true` | Enable the lab culler. 
| | jupyterhub.cull.every | int | 600 (10 minutes) | How frequently to check for idle labs in seconds | @@ -69,20 +98,18 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.hub.extraEnv | object | Gets `JUPYTERHUB_CRYPT_KEY` from `nublado-secret` | Additional environment variables to set | | jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | | jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | -| jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"0.3.2"` | Tag of image to use for JupyterHub | +| jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/nublado-jupyterhub"` | Image to use for JupyterHub | +| jupyterhub.hub.image.tag | string | `"4.0.2"` | Tag of image to use for JupyterHub | | jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | | jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | | jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | -| jupyterhub.ingress.enabled | bool | `false` | Whether to enable the default ingress | +| jupyterhub.ingress.enabled | bool | `false` | Whether to enable the default ingress. Should always be disabled since we install our own `GafaelfawrIngress` | | jupyterhub.prePuller.continuous.enabled | bool | `false` | Whether to run the JupyterHub continuous prepuller (the Nublado controller does its own prepulling) | | jupyterhub.prePuller.hook.enabled | bool | `false` | Whether to run the JupyterHub hook prepuller (the Nublado controller does its own prepulling) | | jupyterhub.proxy.chp.networkPolicy.interNamespaceAccessLabels | string | `"accept"` | Enable access to the proxy from other namespaces, since we put each user's lab environment in its own namespace | | jupyterhub.proxy.service.type | string | `"ClusterIP"` | Only expose the proxy to the cluster, overriding the default of exposing the proxy directly to the Internet | | jupyterhub.scheduling.userPlaceholder.enabled | bool | `false` | Whether to spawn placeholder pods representing fake users to force autoscaling in advance of running out of resources | | jupyterhub.scheduling.userScheduler.enabled | bool | `false` | Whether the user scheduler should be enabled | -| jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | Whether to configure iptables to block cloud metadata endpoints. This is unnecessary in our environments (they are blocked by cluster configuration) and thus is disabled to reduce complexity. | -| jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | Start command for labs | -| jupyterhub.singleuser.defaultUrl | string | `"/lab"` | Default URL prefix for lab endpoints | | proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | +| secrets.installTsSalKafkaSecret | bool | `false` | Whether to install the T&S SAL Kafka secret. 
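The new `secrets.installTsSalKafkaSecret` flag documented above gates the extra `VaultSecret` resource added to `templates/vault-secrets.yaml` later in this diff. A sketch of how a Telescope and Site environment values file might opt in (hypothetical; none of the environment files shown in this diff set the flag):

```yaml
# Hypothetical environment values snippet enabling the T&S SAL Kafka secret.
secrets:
  installTsSalKafkaSecret: true
```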
| | secrets.templateSecrets | bool | `false` | Whether to use the new secrets management mechanism. If enabled, the Vault nublado secret will be split into a nublado secret for JupyterHub and a nublado-lab-secret secret used as a source for secret values for the user's lab. | diff --git a/applications/nublado2/secrets-idfdev.yaml b/applications/nublado/secrets-idfint.yaml similarity index 90% rename from applications/nublado2/secrets-idfdev.yaml rename to applications/nublado/secrets-idfint.yaml index 97d5af3ca8..6f66967c08 100644 --- a/applications/nublado2/secrets-idfdev.yaml +++ b/applications/nublado/secrets-idfint.yaml @@ -2,6 +2,8 @@ description: >- Google Cloud Storage credentials to the Butler data store, formatted using AWS syntax for use with boto. + onepassword: + encoded: true "butler-gcs-idf-creds.json": description: >- Google Cloud Storage credentials to the Butler data store in the native @@ -13,3 +15,5 @@ "postgres-credentials.txt": description: >- PostgreSQL credentials in its pgpass format for the Butler database. + onepassword: + encoded: true diff --git a/applications/nublado/secrets-idfprod.yaml b/applications/nublado/secrets-idfprod.yaml new file mode 100644 index 0000000000..6f66967c08 --- /dev/null +++ b/applications/nublado/secrets-idfprod.yaml @@ -0,0 +1,19 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + onepassword: + encoded: true +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. +"butler-hmac-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the private + key syntax used for HMACs. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + onepassword: + encoded: true diff --git a/applications/nublado/templates/cloudsql-deployment.yaml b/applications/nublado/templates/cloudsql-deployment.yaml new file mode 100644 index 0000000000..f0eb9449b4 --- /dev/null +++ b/applications/nublado/templates/cloudsql-deployment.yaml @@ -0,0 +1,63 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cloud-sql-proxy + labels: + {{- include "nublado.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.cloudsql.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/component: "cloud-sql-proxy" + template: + metadata: + {{- with .Values.cloudsql.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app.kubernetes.io/component: "cloud-sql-proxy" + spec: + serviceAccountName: "cloud-sql-proxy" + containers: + - name: "cloud-sql-proxy" + command: + - "/cloud_sql_proxy" + - "-ip_address_types=PRIVATE" + - "-log_debug_stdout=true" + - "-structured_logs=true" + - "-instances={{ required "cloudsql.instanceConnectionName must be specified" .Values.cloudsql.instanceConnectionName }}=tcp:0.0.0.0:5432" + image: "{{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }}" + imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy | quote }} + ports: + - containerPort: 5432 + name: "http" + protocol: "TCP" + {{- with .Values.cloudsql.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + {{- with .Values.cloudsql.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.cloudsql.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.cloudsql.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/nublado/templates/cloudsql-networkpolicy.yaml b/applications/nublado/templates/cloudsql-networkpolicy.yaml new file mode 100644 index 0000000000..114540980c --- /dev/null +++ b/applications/nublado/templates/cloudsql-networkpolicy.yaml @@ -0,0 +1,26 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "cloud-sql-proxy" + labels: + {{- include "nublado.labels" . | nindent 4 }} +spec: + podSelector: + # This policy controls inbound and outbound access to the Cloud SQL Proxy. + matchLabels: + {{- include "nublado.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "cloud-sql-proxy" + policyTypes: + - Ingress + ingress: + # Allow inbound access to the Cloud SQL Proxy from the Hub. + - from: + - podSelector: + matchLabels: + app: "jupyterhub" + component: "hub" + ports: + - protocol: "TCP" + port: 5432 +{{- end }} diff --git a/applications/nublado/templates/cloudsql-service.yaml b/applications/nublado/templates/cloudsql-service.yaml new file mode 100644 index 0000000000..3c29083064 --- /dev/null +++ b/applications/nublado/templates/cloudsql-service.yaml @@ -0,0 +1,16 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: "cloud-sql-proxy" + labels: + {{- include "nublado.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - protocol: "TCP" + port: 5432 + targetPort: "http" + selector: + app.kubernetes.io/component: "cloud-sql-proxy" +{{- end }} diff --git a/applications/nublado/templates/cloudsql-serviceaccount.yaml b/applications/nublado/templates/cloudsql-serviceaccount.yaml new file mode 100644 index 0000000000..69cd1acc71 --- /dev/null +++ b/applications/nublado/templates/cloudsql-serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-sql-proxy + labels: + {{- include "nublado.labels" . | nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: {{ required "cloudsql.serviceAccount must be set to a valid Google service account" .Values.cloudsql.serviceAccount | quote }} +{{- end }} diff --git a/applications/nublado/templates/controller-deployment.yaml b/applications/nublado/templates/controller-deployment.yaml index 660b274c03..800fc2cb41 100644 --- a/applications/nublado/templates/controller-deployment.yaml +++ b/applications/nublado/templates/controller-deployment.yaml @@ -36,8 +36,6 @@ spec: image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.controller.image.pullPolicy | quote }} env: - - name: DOCKER_SECRET_NAME - value: "pull-secret" - name: EXTERNAL_INSTANCE_URL value: {{ .Values.global.baseUrl | quote }} {{- if .Values.controller.slackAlerts }} @@ -70,8 +68,6 @@ spec: {{- end }} - name: "podinfo" mountPath: "/etc/podinfo" - - name: "tmp" - mountPath: "/tmp" {{- with .Values.controller.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} @@ -106,5 +102,3 @@ spec: - path: "uid" fieldRef: fieldPath: "metadata.uid" - - name: "tmp" - emptyDir: {} diff --git a/applications/nublado/templates/controller-ingress-admin.yaml b/applications/nublado/templates/controller-ingress-admin.yaml index 3d797d9a5a..43043b1cc4 100644 --- a/applications/nublado/templates/controller-ingress-admin.yaml +++ b/applications/nublado/templates/controller-ingress-admin.yaml @@ -8,7 +8,7 @@ config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: all: - - "admin:jupyterlab" + - "exec:admin" template: metadata: name: "controller-admin" @@ -17,7 +17,7 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: {{ .Values.controller.config.safir.pathPrefix | quote }} + - path: {{ .Values.controller.config.pathPrefix | quote }} pathType: "Prefix" backend: service: diff --git a/applications/nublado/templates/controller-ingress-anonymous.yaml b/applications/nublado/templates/controller-ingress-anonymous.yaml index c41858c1e9..5148c0f253 100644 --- a/applications/nublado/templates/controller-ingress-anonymous.yaml +++ b/applications/nublado/templates/controller-ingress-anonymous.yaml @@ -16,21 +16,21 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: "{{ .Values.controller.config.safir.pathPrefix }}/openapi.json" + - path: "{{ .Values.controller.config.pathPrefix }}/openapi.json" pathType: "Exact" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/docs" + - path: "{{ .Values.controller.config.pathPrefix }}/docs" pathType: "Exact" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/redoc" + - path: "{{ .Values.controller.config.pathPrefix }}/redoc" pathType: "Exact" backend: service: diff --git a/applications/nublado/templates/controller-ingress-files.yaml b/applications/nublado/templates/controller-ingress-files.yaml index 77b125044c..03abf181c9 100644 --- a/applications/nublado/templates/controller-ingress-files.yaml +++ b/applications/nublado/templates/controller-ingress-files.yaml @@ -1,3 +1,4 @@ +{{- if .Values.controller.config.fileserver.enabled -}} apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: @@ -21,10 +22,11 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: "/files" + - path: {{ .Values.controller.config.fileserver.pathPrefix | quote }} pathType: "Prefix" backend: service: name: "nublado-controller" port: number: 80 +{{- end }} diff --git a/applications/nublado/templates/controller-ingress-hub.yaml b/applications/nublado/templates/controller-ingress-hub.yaml new file mode 100644 index 0000000000..2c1c00c611 --- /dev/null +++ b/applications/nublado/templates/controller-ingress-hub.yaml @@ -0,0 +1,28 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "nublado-controller-hub" + labels: + {{- include "nublado.labels" . 
| nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "admin:jupyterlab" +template: + metadata: + name: "controller-hub" + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + spec: + rules: + - host: {{ .Values.global.host | quote }} + http: + paths: + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/labs" + pathType: "Prefix" + backend: + service: + name: "nublado-controller" + port: + number: 80 diff --git a/applications/nublado/templates/controller-ingress-user.yaml b/applications/nublado/templates/controller-ingress-user.yaml index 2f6894df1d..45549f4703 100644 --- a/applications/nublado/templates/controller-ingress-user.yaml +++ b/applications/nublado/templates/controller-ingress-user.yaml @@ -21,28 +21,28 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/labs/.*/create" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/labs/.*/create" pathType: "ImplementationSpecific" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/labs/.*/events" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/labs/.*/events" pathType: "ImplementationSpecific" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/lab-form" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/lab-form" pathType: "Prefix" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/user-status" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/user-status" pathType: "Exact" backend: service: diff --git a/applications/nublado/templates/hub-configmap.yaml b/applications/nublado/templates/hub-configmap.yaml index 22bf56bc36..f76116475e 100644 --- a/applications/nublado/templates/hub-configmap.yaml +++ b/applications/nublado/templates/hub-configmap.yaml @@ -6,11 +6,10 @@ metadata: {{- include "nublado.labels" . | nindent 4 }} data: 00_nublado.py: | - import rsp_restspawner - - # Use our authenticator and spawner. - c.JupyterHub.authenticator_class = "rsp_restspawner.GafaelfawrAuthenticator" - c.JupyterHub.spawner_class = "rsp_restspawner.RSPRestSpawner" + # Use our authenticator and spawner. Both register custom entry points, + # so the full module and class name is not required. + c.JupyterHub.authenticator_class = "gafaelfawr" + c.JupyterHub.spawner_class = "nublado" # Set internal Hub API URL. c.JupyterHub.hub_connect_url = ( @@ -30,13 +29,13 @@ data: # Use JupyterLab by default. c.Spawner.default_url = "/lab" - # Allow ten minutes for the lab to spawn in case it needs to be pulled. - c.Spawner.start_timeout = {{ .Values.hub.timeout.spawn }} + # How long to wait for Kubernetes to start the lab. This must match the + # corresponding setting in the Nublado controller. + c.Spawner.start_timeout = {{ .Values.controller.config.lab.spawnTimeout }} - # Allow 90 seconds for JupyterLab to start. For reasons we do not yet - # understand, it is often glacially slow and sometimes takes over 60 - # seconds. + # How long to wait for the JupyterLab process to respond to network + # connections after the pod has started running. c.Spawner.http_timeout = {{ .Values.hub.timeout.startup }} # Configure the URL to the lab controller. 
- c.RSPRestSpawner.controller_url = "{{ .Values.global.baseUrl }}{{ .Values.controller.config.safir.pathPrefix }}" + c.NubladoSpawner.controller_url = "{{ .Values.global.baseUrl }}{{ .Values.controller.config.pathPrefix }}" diff --git a/applications/nublado/templates/vault-secrets.yaml b/applications/nublado/templates/vault-secrets.yaml index 592042f21b..6b3df719a0 100644 --- a/applications/nublado/templates/vault-secrets.yaml +++ b/applications/nublado/templates/vault-secrets.yaml @@ -59,3 +59,15 @@ spec: path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson {{- end }} +{{- if .Values.secrets.installTsSalKafkaSecret }} +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: kafka-secret + labels: + {{- include "nublado.labels" . | nindent 4 }} +spec: + path: "{{- .Values.global.vaultSecretsPath }}/ts/software/ts-salkafka" + type: Opaque +{{- end }} diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 5e7eb18580..4d7e510c72 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -8,10 +8,9 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 32 - recommended_tag: "recommended_c0032" + cycle: 34 + recommended_tag: "recommended_c0034" lab: - pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: @@ -19,50 +18,72 @@ controller: LSST_DDS_INTERFACE: "net1" LSST_DDS_PARTITION_PREFIX: "base" LSST_SITE: "base" - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" + initContainers: + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" + privileged: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" + pullSecret: "pull-secret" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" - mode: "rw" - source: - type: "nfs" - serverPath: "/jhome" - server: "nfs-jhome.ls.lsst.org" - - containerPath: "/project" - mode: "rw" - source: - type: "nfs" - serverPath: "/project" - server: "nfs-project.ls.lsst.org" - - containerPath: "/scratch" - mode: "rw" - source: - type: "nfs" - serverPath: "/scratch" - server: "nfs-scratch.ls.lsst.org" - - containerPath: "/datasets" - mode: "rw" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "nfs-lsstdata.ls.lsst.org" - - containerPath: "/repo/LATISS" - mode: "rw" - source: - type: "nfs" - serverPath: "/auxtel/repo/LATISS" - server: "nfs-auxtel.ls.lsst.org" - - containerPath: "/net/obs-env" - mode: "rw" - source: - type: "nfs" - serverPath: "/obs-env" - server: "nfs-obsenv.ls.lsst.org" - - containerPath: "/data/lsstdata/BTS/auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata/BTS/auxtel" - server: "nfs-auxtel.ls.lsst.org" + - name: "home" + source: + type: "nfs" + serverPath: "/rsphome" + server: "nfs-rsphome.ls.lsst.org" + - name: "project" + source: + type: "nfs" + serverPath: "/project" + server: "nfs-project.ls.lsst.org" + - name: "scratch" + source: + type: "nfs" + serverPath: "/scratch" + server: "nfs-scratch.ls.lsst.org" + - name: "datasets" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "nfs-lsstdata.ls.lsst.org" + - name: "latiss" + source: + type: "nfs" + serverPath: "/auxtel/repo/LATISS" + server: "nfs-auxtel.ls.lsst.org" + - name: "obs-env" + source: + type: "nfs" + 
serverPath: "/obs-env" + server: "nfs-obsenv.ls.lsst.org" + - name: "auxtel" + source: + type: "nfs" + serverPath: "/auxtel/lsstdata/BTS/auxtel" + server: "nfs-auxtel.ls.lsst.org" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" + - containerPath: "/datasets" + volumeName: "datasets" + - containerPath: "/repo/LATISS" + volumeName: "latiss" + - containerPath: "/net/obs-env" + volumeName: "obs-env" + - containerPath: "/data/lsstdata/BTS/auxtel" + volumeName: "auxtel" jupyterhub: cull: diff --git a/applications/nublado/values-ccin2p3.yaml b/applications/nublado/values-ccin2p3.yaml new file mode 100644 index 0000000000..58d5ae0294 --- /dev/null +++ b/applications/nublado/values-ccin2p3.yaml @@ -0,0 +1,55 @@ +controller: + config: + images: + source: + type: "docker" + registry: "registry.hub.docker.com" + repository: "lsstsqre/sciplat-lab" + lab: + env: + AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" + AUTO_REPO_BRANCH: "prod" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" + CULL_KERNEL_IDLE_TIMEOUT: "432000" + CULL_KERNEL_CONNECTED: "True" + CULL_KERNEL_INTERVAL: "300" + CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" + CULL_TERMINAL_INTERVAL: "300" + NO_ACTIVITY_TIMEOUT: "432000" + homedirPrefix: "/homedirs" + homedirSchema: "initialThenUsername" + homedirSuffix: "rsp_home" + initContainers: + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" + privileged: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" + pullSecret: "pull-secret" + volumes: + - name: "home" + source: + type: "hostPath" + path: "/pbs/home" + volumeMounts: + - containerMount: "/home" + volumeName: "home" + +proxy: + ingress: + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "50s" + nginx.ingress.kubernetes.io/proxy-read-timeout: "50s" + nginx.ingress.kubernetes.io/client-max-body-size: "50m" + +jupyterhub: + hub: + db: + upgrade: true + cull: + timeout: 432000 + every: 300 + maxAge: 2160000 diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 90fb492f09..053f136afe 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -2,11 +2,12 @@ controller: googleServiceAccount: "nublado-controller@science-platform-dev-7696.iam.gserviceaccount.com" slackAlerts: true config: - safir: - logLevel: "DEBUG" + logLevel: "DEBUG" fileserver: enabled: true - timeout: 43200 + volumeMounts: + - containerPath: "/home" + volumeName: "home" images: source: type: "google" @@ -14,10 +15,6 @@ controller: projectId: "rubin-shared-services-71ec" repository: "sciplat" image: "sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 lab: env: AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -27,17 +24,14 @@ controller: GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" S3_ENDPOINT_URL: "https://storage.googleapis.com" initContainers: - - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - mode: "rw" - source: - type: nfs - serverPath: "/share1/home" - server: "10.87.86.26" - + volumeName: "home" secrets: - 
secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" @@ -48,28 +42,37 @@ controller: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" - mode: "rw" + - name: "home" source: type: nfs serverPath: "/share1/home" server: "10.87.86.26" - - containerPath: "/project" - mode: "rw" + - name: "project" source: type: nfs serverPath: "/share1/project" server: "10.87.86.26" - - containerPath: "/scratch" - mode: "rw" + - name: "scratch" source: type: nfs serverPath: "/share1/scratch" server: "10.87.86.26" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" jupyterhub: hub: db: - url: "postgresql://nublado3@postgres.postgres/nublado3" - + url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" +hub: + internalDatabase: false +cloudsql: + enabled: true + instanceConnectionName: "science-platform-dev-7696:us-central1:science-platform-dev-e9e11de2" + serviceAccount: "nublado@science-platform-dev-7696.iam.gserviceaccount.com" secrets: templateSecrets: true diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 7c794d2cd1..1bd223b5b4 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -4,6 +4,9 @@ controller: config: fileserver: enabled: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" images: source: type: "google" @@ -11,10 +14,6 @@ controller: projectId: "rubin-shared-services-71ec" repository: "sciplat" image: "sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 lab: env: AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -33,31 +32,15 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - - sizes: - small: - cpu: 1.0 - memory: 4Gi - medium: - cpu: 2.0 - memory: 8Gi - large: - cpu: 4.0 - memory: 16Gi - huge: - cpu: 8.0 - memory: 32Gi initContainers: - - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - mode: "rw" - source: - serverPath: "/share1/home" - server: "10.22.240.130" - type: "nfs" + volumeName: "home" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" @@ -67,25 +50,42 @@ controller: secretKey: "butler-hmac-idf-creds.json" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" + sizes: + - size: small + cpu: 1.0 + memory: 4Gi + - size: medium + cpu: 2.0 + memory: 8Gi + - size: large + cpu: 4.0 + memory: 16Gi + - size: huge + cpu: 8.0 + memory: 32Gi volumes: - - containerPath: "/home" - mode: "rw" + - name: "home" source: serverPath: "/share1/home" server: "10.22.240.130" type: "nfs" - - containerPath: "/project" - mode: "rw" + - name: "project" source: serverPath: "/share1/project" server: "10.22.240.130" type: "nfs" - - containerPath: "/scratch" - mode: "rw" + - name: "scratch" source: serverPath: "/share1/scratch" server: "10.22.240.130" type: "nfs" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" jupyterhub: hub: @@ -93,8 +93,8 @@ jupyterhub: ServerApp: 
shutdown_no_activity_timeout: 432000 db: - url: "postgresql://nublado3@postgres.postgres/nublado3" - + url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" + upgrade: true cull: enabled: true users: false @@ -102,3 +102,12 @@ jupyterhub: timeout: 432000 every: 300 maxAge: 2160000 + +hub: + internalDatabase: false +cloudsql: + enabled: true + instanceConnectionName: "science-platform-int-dc5d:us-central1:science-platform-int-8f439af2" + serviceAccount: "nublado@science-platform-int-dc5d.iam.gserviceaccount.com" +secrets: + templateSecrets: true diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index d7b2704849..764adc9635 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -9,10 +9,6 @@ controller: projectId: "rubin-shared-services-71ec" repository: "sciplat" image: "sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 lab: env: AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -24,28 +20,15 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - - sizes: - small: - cpu: 1.0 - memory: 4Gi - medium: - cpu: 2.0 - memory: 8Gi - large: - cpu: 4.0 - memory: 16Gi initContainers: - - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - mode: "rw" - source: - serverPath: "/share1/home" - server: "10.13.105.122" - type: "nfs" + volumeName: "home" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" @@ -55,25 +38,39 @@ controller: secretKey: "butler-hmac-idf-creds.json" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" + sizes: + - size: small + cpu: 1.0 + memory: 4Gi + - size: medium + cpu: 2.0 + memory: 8Gi + - size: large + cpu: 4.0 + memory: 16Gi volumes: - - containerPath: "/home" - mode: "rw" + - name: "home" source: serverPath: "/share1/home" server: "10.13.105.122" type: "nfs" - - containerPath: "/project" - mode: "rw" + - name: "project" source: serverPath: "/share1/project" server: "10.13.105.122" type: "nfs" - - containerPath: "/scratch" - mode: "rw" + - name: "scratch" source: serverPath: "/share1/scratch" server: "10.13.105.122" type: "nfs" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" jupyterhub: hub: @@ -81,8 +78,8 @@ jupyterhub: ServerApp: shutdown_no_activity_timeout: 432000 db: - url: "postgresql://nublado3@postgres.postgres/nublado3" - + url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" + upgrade: true cull: enabled: true users: false @@ -90,3 +87,11 @@ jupyterhub: timeout: 432000 every: 300 maxAge: 2160000 +hub: + internalDatabase: false +cloudsql: + enabled: true + instanceConnectionName: "science-platform-stable-6994:us-central1:science-platform-stable-0c29612b" + serviceAccount: "nublado@science-platform-stable-6994.iam.gserviceaccount.com" +secrets: + templateSecrets: true diff --git a/applications/nublado/values-roe.yaml b/applications/nublado/values-roe.yaml new file mode 100644 index 0000000000..04cb8d0f55 --- /dev/null +++ b/applications/nublado/values-roe.yaml @@ -0,0 +1,45 @@ +controller: + config: + images: + source: + type: "docker" + 
registry: "registry.hub.docker.com" + repository: "lsstsqre/sciplat-lab" + lab: + env: + AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" + AUTO_REPO_BRANCH: "prod" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" + initContainers: + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" + privileged: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" + pullSecret: "pull-secret" + volumes: + - name: "data" + source: + serverPath: "/data" + server: "192.41.122.33" + type: "nfs" + - name: "home" + source: + serverPath: "/jhome" + server: "192.41.122.33" + type: "nfs" + - name: "datasets" + source: + serverPath: "/datasets" + server: "192.41.122.33" + type: "nfs" + volumeMounts: + - containerPath: "/data" + volumeName: "data" + - containerPath: "/home" + volumeName: "home" + - containerPath: "/datasets" + volumeName: "datasets" diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 63d3cf7483..5faa7dbb87 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -8,10 +8,9 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 32 - recommended_tag: "recommended_c0032" + cycle: 34 + recommended_tag: "recommended_c0034" lab: - pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: @@ -19,70 +18,100 @@ controller: LSST_DDS_INTERFACE: "net1" LSST_DDS_PARTITION_PREFIX: "summit" LSST_SITE: "summit" - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" + initContainers: + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" + privileged: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" + pullSecret: "pull-secret" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" - mode: "rw" - source: - type: "nfs" - serverPath: "/jhome" - server: "nfs1.cp.lsst.org" - - containerPath: "/project" - mode: "rw" - source: - type: "nfs" - serverPath: "/project" - server: "nfs1.cp.lsst.org" - - containerPath: "/scratch" - mode: "rw" - source: - type: "nfs" - serverPath: "/scratch" - server: "nfs1.cp.lsst.org" - - containerPath: "/repo/LATISS" - mode: "rw" - source: - type: "nfs" - serverPath: "/auxtel/repo/LATISS" - server: "nfs-auxtel.cp.lsst.org" - - containerPath: "/repo/LSSTComCam" - mode: "rw" - source: - type: "nfs" - serverPath: "/repo/LSSTComCam" - server: "comcam-archiver.cp.lsst.org" - - containerPath: "/net/obs-env" - mode: "rw" - source: - type: "nfs" - serverPath: "/obs-env" - server: "nfs-obsenv.cp.lsst.org" - - containerPath: "/readonly/lsstdata/other" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "nfs1.cp.lsst.org" - - containerPath: "/readonly/lsstdata/comcam" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "comcam-archiver.cp.lsst.org" - - containerPath: "/readonly/lsstdata/auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata" - server: "nfs-auxtel.cp.lsst.org" - - containerPath: "/data/lsstdata/base/comcam" - source: - type: "nfs" - serverPath: "/lsstdata/base/comcam" - server: "comcam-archiver.cp.lsst.org" - - containerPath: "/data/lsstdata/base/auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata/base/auxtel" - server: "nfs-auxtel.cp.lsst.org" + - name: "home" + source: + 
type: "nfs" + serverPath: "/jhome" + server: "nfs1.cp.lsst.org" + - name: "project" + source: + type: "nfs" + serverPath: "/project" + server: "nfs1.cp.lsst.org" + - name: "scratch" + source: + type: "nfs" + serverPath: "/scratch" + server: "nfs1.cp.lsst.org" + - name: "latiss" + source: + type: "nfs" + serverPath: "/auxtel/repo/LATISS" + server: "nfs-auxtel.cp.lsst.org" + - name: "lsstcomcam" + source: + type: "nfs" + serverPath: "/repo/LSSTComCam" + server: "comcam-archiver.cp.lsst.org" + - name: "obs-env" + source: + type: "nfs" + serverPath: "/obs-env" + server: "nfs-obsenv.cp.lsst.org" + - name: "lsstdata-other" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "nfs1.cp.lsst.org" + - name: "lsstdata-comcam" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "comcam-archiver.cp.lsst.org" + - name: "lsstdata-auxtel" + source: + type: "nfs" + serverPath: "/auxtel/lsstdata" + server: "nfs-auxtel.cp.lsst.org" + - name: "lsstdata-base-comcam" + source: + type: "nfs" + serverPath: "/lsstdata/base/comcam" + server: "comcam-archiver.cp.lsst.org" + - name: "lsstdata-base-auxtel" + source: + type: "nfs" + serverPath: "/auxtel/lsstdata/base/auxtel" + server: "nfs-auxtel.cp.lsst.org" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" + - containerPath: "/repo/LATISS" + volumeName: "latiss" + - containerPath: "/repo/LSSTComCam" + volumeName: "lsstcomcam" + - containerPath: "/net/obs-env" + volumeName: "obs-env" + - containerPath: "/readonly/lsstdata/other" + volumeName: "lsstdata-other" + - containerPath: "/readonly/lsstdata/comcam" + volumeName: "lsstdata-comcam" + - containerPath: "/readonly/lsstdata/auxtel" + volumeName: "lsstdata-auxtel" + - containerPath: "/data/lsstdata/base/comcam" + volumeName: "lsstdata-base-comcam" + - containerPath: "/data/lsstdata/base/auxtel" + volumeName: "lsstdata-base-auxtel" jupyterhub: cull: @@ -92,7 +121,6 @@ jupyterhub: every: 300 maxAge: 2160000 hub: - baseUrl: "/n3" db: upgrade: true url: "postgresql://nublado3@postgresdb01.cp.lsst.org/nublado3" diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 82a329ec79..ce7f8b97cb 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -8,72 +8,102 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 32 - recommended_tag: "recommended_c0032" + cycle: null + recommended_tag: "recommended_k0001" lab: - pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" + LSST_SITE: tucson LSST_DDS_INTERFACE: net1 LSST_DDS_PARTITION_PREFIX: tucson - LSST_SITE: tucson + LSST_TOPIC_SUBNAME: sal + LSST_KAFKA_PASSFILE: "/opt/lsst/software/jupyterlab/secrets/kafka_credentials.txt" + LSST_KAFKA_BROKER_ADDR: sasquatch-kafka-brokers.sasquatch:9092 + LSST_SCHEMA_REGISTRY_URL: http://sasquatch-schema-registry.sasquatch:8081 PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" + pullSecret: "pull-secret" + secrets: + - secretName: "kafka-secret" + secretKey: "kafka_credentials.txt" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + initContainers: + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" + privileged: true + volumeMounts: + - containerPath: 
"/home" + volumeName: "home" volumes: - - containerPath: "/home" - mode: "rw" + - name: "home" source: type: "nfs" serverPath: "/jhome" server: "nfs-jhome.tu.lsst.org" - - containerPath: "/project" - mode: "rw" + - name: "project" source: type: "nfs" serverPath: "/project" server: "nfs-project.tu.lsst.org" - - containerPath: "/scratch" - mode: "rw" + - name: "scratch" source: type: "nfs" serverPath: "/scratch" server: "nfs-scratch.tu.lsst.org" - - containerPath: "/datasets" - mode: "rw" + - name: "datasets" source: type: "nfs" serverPath: "/lsstdata" server: "nfs-lsstdata.tu.lsst.org" - - containerPath: "/repo/LATISS" - mode: "rw" + - name: "latiss" source: type: "nfs" serverPath: "/auxtel/repo/LATISS" server: "nfs-auxtel.tu.lsst.org" - - containerPath: "/net/obs-env" - mode: "rw" + - name: "obs-env" source: type: "nfs" serverPath: "/obs-env" server: "nfs-obsenv.tu.lsst.org" - - containerPath: "/repo/LSSTComCam" - mode: "rw" + - name: "lsstcomcan" source: type: "nfs" serverPath: "/repo/LSSTComCam" server: "comcam-archiver.tu.lsst.org" - - containerPath: "/data/lsstdata/TTS/auxtel" + - name: "auxtel" source: type: "nfs" serverPath: "/auxtel/lsstdata/TTS/auxtel" server: "nfs-auxtel.tu.lsst.org" - - containerPath: "/data/lsstdata/TTS/comcam" + - name: "comcam" source: type: "nfs" serverPath: "/lsstdata/TTS/comcam" server: "comcam-archiver.tu.lsst.org" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" + - containerPath: "/datasets" + volumeName: "datasets" + - containerPath: "/repo/LATISS" + volumeName: "latiss" + - containerPath: "/net/obs-env" + volumeName: "obs-env" + - containerPath: "/repo/LSSTComCam" + volumeName: "lsstcomcam" + - containerPath: "/data/lsstdata/TTS/auxtel" + volumeName: "auxtel" + - containerPath: "/data/lsstdata/TTS/comcam" + volumeName: "comcam" jupyterhub: cull: @@ -86,3 +116,6 @@ jupyterhub: db: upgrade: true url: "postgresql://nublado3@postgresdb01.tu.lsst.org/nublado3" + +secrets: + installTsSalKafkaSecret: true diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 8b24353338..cab1342b14 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -1,26 +1,14 @@ controller: config: - safir: - logLevel: "DEBUG" - fileserver: - enabled: false - timeout: 21600 - + logLevel: "DEBUG" images: source: type: "docker" registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 - + pin: + - "w_2023_47" lab: - pullSecret: "pull-secret" - - homedirSchema: "initialThenUsername" - env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -32,56 +20,53 @@ controller: http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" - - files: + homedirSchema: "initialThenUsername" + nss: # Add rubin_users group (there is not yet a simpler way to do this). 
- /etc/group: - contents: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - utmp:x:22: - tape:x:33: - utempter:x:35: - video:x:39: - ftp:x:50: - lock:x:54: - tss:x:59: - audio:x:63: - dbus:x:81: - screen:x:84: - nobody:x:99: - users:x:100: - systemd-journal:x:190: - systemd-network:x:192: - cgred:x:997: - ssh_keys:x:998: - input:x:999: - rubin_users:x:4085: - + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" - volumes: - - containerPath: "/home" - mode: "rw" + - name: "sdf-home" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -90,9 +75,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/project" - subPath: "g" - mode: "rw" + - name: "sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -101,18 +84,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/sdf/group/rubin" - mode: "rw" - source: - type: "persistentVolumeClaim" - storageClassName: "sdf-group-rubin" - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: "1Gi" - - containerPath: "/sdf/data/rubin" - mode: "rw" + - name: "sdf-data-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -121,8 +93,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/scratch" - mode: "rw" + - name: "sdf-scratch" source: type: "persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -131,8 +102,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/rubin" - mode: "rw" + - name: "fs-ddn-sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -141,8 +111,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/lsst" - mode: "rw" + - name: "fs-ddn-sdf-group-lsst" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" @@ -151,6 +120,22 @@ controller: resources: requests: storage: "1Gi" + volumeMounts: + - containerPath: "/home" + volumeName: "sdf-home" + - containerPath: "/project" + subPath: "g" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/group/rubin" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/data/rubin" + volumeName: "sdf-data-rubin" + - containerPath: "/scratch" + volumeName: "sdf-scratch" + - containerPath: "/fs/ddn/sdf/group/rubin" + volumeName: "fs-ddn-sdf-group-rubin" + - containerPath: "/fs/ddn/sdf/group/lsst" + volumeName: "fs-ddn-sdf-group-lsst" proxy: ingress: @@ -164,6 +149,7 @@ jupyterhub: baseUrl: "/nb" db: url: "postgresql://nublado3@postgres.postgres/nublado3" + upgrade: true cull: timeout: 432000 every: 300 diff --git a/applications/nublado/values-usdfint.yaml b/applications/nublado/values-usdfint.yaml new file 
mode 100644 index 0000000000..cab1342b14 --- /dev/null +++ b/applications/nublado/values-usdfint.yaml @@ -0,0 +1,156 @@ +controller: + config: + logLevel: "DEBUG" + images: + source: + type: "docker" + registry: "docker-registry.slac.stanford.edu" + repository: "lsstsqre/sciplat-lab" + pin: + - "w_2023_47" + lab: + env: + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" + DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" + HUB_ROUTE: "/nb/hub" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" + PGUSER: "rubin" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" + https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" + no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" + homedirSchema: "initialThenUsername" + nss: + # Add rubin_users group (there is not yet a simpler way to do this). + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + pullSecret: "pull-secret" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + volumes: + - name: "sdf-home" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-home" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - name: "sdf-group-rubin" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - name: "sdf-data-rubin" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-data-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - name: "sdf-scratch" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-scratch" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - name: "fs-ddn-sdf-group-rubin" + source: + type: "persistentVolumeClaim" + storageClassName: "fs-ddn-sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - name: "fs-ddn-sdf-group-lsst" + source: + type: "persistentVolumeClaim" + storageClassName: "fs-ddn-sdf-group-lsst" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + volumeMounts: + - containerPath: "/home" + volumeName: "sdf-home" + - containerPath: "/project" + subPath: "g" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/group/rubin" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/data/rubin" + volumeName: "sdf-data-rubin" + - containerPath: "/scratch" + volumeName: "sdf-scratch" + - containerPath: "/fs/ddn/sdf/group/rubin" + volumeName: "fs-ddn-sdf-group-rubin" + - containerPath: "/fs/ddn/sdf/group/lsst" + volumeName: "fs-ddn-sdf-group-lsst" + +proxy: + ingress: + annotations: + # proxy-body-size is temporary until USDF uses our normal ingress-nginx, + # which already 
configures a larger value. + nginx.ingress.kubernetes.io/proxy-body-size: "50m" + +jupyterhub: + hub: + baseUrl: "/nb" + db: + url: "postgresql://nublado3@postgres.postgres/nublado3" + upgrade: true + cull: + timeout: 432000 + every: 300 + maxAge: 2160000 diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index 5c8dfb9c4f..e0c2309934 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -1,26 +1,14 @@ controller: config: - safir: - logLevel: "DEBUG" - fileserver: - enabled: false - timeout: 21600 - + logLevel: "DEBUG" images: source: type: "docker" registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 - + pin: + - "w_2023_47" lab: - pullSecret: "pull-secret" - - homedirSchema: "initialThenUsername" - env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -32,56 +20,53 @@ controller: http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" - - files: + homedirSchema: "initialThenUsername" + nss: # Add rubin_users group (there is not yet a simpler way to do this). - /etc/group: - contents: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - utmp:x:22: - tape:x:33: - utempter:x:35: - video:x:39: - ftp:x:50: - lock:x:54: - tss:x:59: - audio:x:63: - dbus:x:81: - screen:x:84: - nobody:x:99: - users:x:100: - systemd-journal:x:190: - systemd-network:x:192: - cgred:x:997: - ssh_keys:x:998: - input:x:999: - rubin_users:x:4085: - + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" - volumes: - - containerPath: "/home" - mode: "rw" + - name: "sdf-home" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -90,9 +75,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/project" - subPath: "g" - mode: "rw" + - name: "sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -101,18 +84,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/sdf/group/rubin" - mode: "rw" - source: - type: "persistentVolumeClaim" - storageClassName: "sdf-group-rubin" - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: "1Gi" - - containerPath: "/sdf/data/rubin" - mode: "rw" + - name: "sdf-data-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -121,8 +93,7 @@ controller: resources: requests: 
storage: "1Gi" - - containerPath: "/scratch" - mode: "rw" + - name: "sdf-scratch" source: type: "persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -131,8 +102,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/rubin" - mode: "rw" + - name: "fs-ddn-sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -141,8 +111,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/lsst" - mode: "rw" + - name: "fs-ddn-sdf-group-lsst" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" @@ -151,6 +120,22 @@ controller: resources: requests: storage: "1Gi" + volumeMounts: + - containerPath: "/home" + volumeName: "sdf-home" + - containerPath: "/project" + subPath: "g" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/group/rubin" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/data/rubin" + volumeName: "sdf-data-rubin" + - containerPath: "/scratch" + volumeName: "sdf-scratch" + - containerPath: "/fs/ddn/sdf/group/rubin" + volumeName: "fs-ddn-sdf-group-rubin" + - containerPath: "/fs/ddn/sdf/group/lsst" + volumeName: "fs-ddn-sdf-group-lsst" proxy: ingress: @@ -163,6 +148,7 @@ jupyterhub: hub: db: url: "postgresql://nublado3@postgres.postgres/nublado3" + upgrade: true cull: timeout: 432000 every: 300 diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index e23d3133d9..29d1e83e63 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -1,66 +1,120 @@ # Default values for Nublado. controller: + # -- Affinity rules for the Nublado controller + affinity: {} + + # -- If Google Artifact Registry is used as the image source, the Google + # service account that has an IAM binding to the `nublado-controller` + # Kubernetes service account and has the Artifact Registry reader role + # @default -- None, must be set when using Google Artifact Registry + googleServiceAccount: "" + image: - # -- nublado image to use - repository: ghcr.io/lsst-sqre/jupyterlab-controller + # -- Nublado controller image to use + repository: "ghcr.io/lsst-sqre/nublado-controller" - # -- Pull policy for the nublado image - pullPolicy: IfNotPresent + # -- Pull policy for the controller image + pullPolicy: "IfNotPresent" - # -- Tag of nublado image to use + # -- Tag of Nublado controller image to use # @default -- The appVersion of the chart tag: "" - # -- Affinity rules for the lab controller pod - affinity: {} + ingress: + # -- Additional annotations to add for the Nublado controller ingress + annotations: {} - # -- Node selector rules for the lab controller pod + # -- Node selector rules for the Nublado controller nodeSelector: {} - # -- Annotations for the lab controller pod + # -- Annotations for the Nublado controller podAnnotations: {} - # -- Resource limits and requests for the lab controller pod - resources: {} - - # -- Tolerations for the lab controller pod - tolerations: [] - - ingress: - # -- Additional annotations to add for the lab controller pod ingress - annotations: {} - - # -- If Google Artifact Registry is used as the image source, the Google - # service account that has an IAM binding to the `nublado-controller` - # Kubernetes service account and has the Artifact Registry reader role - # @default -- None, must be set when using Google Artifact Registry - googleServiceAccount: "" + # -- Resource limits and requests for the Nublado controller + # @default -- See `values.yaml` + resources: + limits: + 
cpu: "0.25" + memory: "200Mi" + requests: + cpu: "0.05" + memory: "120Mi" # -- Whether to enable Slack alerts. If set to true, `slack_webhook` must be # set in the corresponding Nublado Vault secret. slackAlerts: false + # -- Tolerations for the Nublado controller + tolerations: [] + # Passed as YAML to the lab controller. config: + # -- Level of Python logging + logLevel: "INFO" + + # -- Path prefix that will be routed to the controller + pathPrefix: "/nublado" + fileserver: - # -- Enable fileserver management + # -- Enable user file servers enabled: false - # -- Image for fileserver container - image: ghcr.io/lsst-sqre/worblehat + # -- Affinity rules for user file server pods + affinity: {} + + # -- Argo CD application in which to collect user file servers + application: "nublado-fileservers" - # -- Tag for fileserver container - tag: 0.1.0 + # -- Timeout to wait for Kubernetes to create file servers, in seconds + creationTimeout: 120 - # -- Pull policy for fileserver container - pullPolicy: IfNotPresent + # -- Timeout for deleting a user's file server from Kubernetes, in + # seconds + # @default -- 60 (1 minute) + deleteTimeout: 60 - # -- Timeout for user fileservers, in seconds - timeout: 3600 + # -- Timeout for idle user fileservers, in seconds + idleTimeout: 3600 - # -- Namespace for user fileservers - namespace: fileservers + image: + # -- File server image to use + repository: "ghcr.io/lsst-sqre/worblehat" + + # -- Pull policy for file server image + pullPolicy: "IfNotPresent" + + # -- Tag of file server image to use + tag: "0.1.0" + + # -- Namespace for user file servers + namespace: "fileservers" + + # -- Node selector rules for user file server pods + nodeSelector: {} + + # -- Path prefix for user file servers + pathPrefix: "/files" + + # -- Resource requests and limits for user file servers + # @default -- See `values.yaml` + resources: + requests: + cpu: 0.1 + memory: "1Gi" + limits: + cpu: 1 + memory: "10Gi" + + # -- Tolerations for user file server pods + tolerations: [] + + # -- Volumes that should be made available via WebDAV + volumeMounts: [] + # volumeMounts: + # - containerPath: "/project" + # readOnly: true + # volumeName: "project" images: # -- Source for prepulled images. For Docker, set `type` to `docker`, @@ -97,7 +151,18 @@ controller: aliasTags: [] lab: - # -- Environment variables to set for every user lab. + # -- Affinity rules for user lab pods + affinity: {} + + # -- Argo CD application in which to collect user lab objects + application: "nublado-users" + + # -- Timeout for deleting a user's lab resources from Kubernetes in + # seconds + # @default -- 60 (1 minute) + deleteTimeout: 60 + + # -- Environment variables to set for every user lab # @default -- See `values.yaml` env: API_ROUTE: "/api" @@ -110,13 +175,113 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" # Also from group? TAP_ROUTE: "/api/tap" + # -- Extra annotations to add to user lab pods + extraAnnotations: {} + + # -- Files to be mounted as ConfigMaps inside the user lab pod. + # `contents` contains the file contents. Set `modify` to true to make + # the file writable in the pod. + # @default -- See `values.yaml` + files: + /opt/lsst/software/jupyterlab/lsst_dask.yml: | + # No longer used, but preserves compatibility with runlab.sh + dask_worker.yml: | + enabled: false + /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template: | + # Licensed under the Apache License, Version 2.0 (the "License"); + # You may not use this file except in compliance with the License. 
+ # You may obtain a copy of the License at + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Authors: + # - Wen Guan, , 2020 + [common] + # if logdir is configured, idds will write to idds.log in this + # directory, else idds will go to stdout/stderr. With supervisord, + # it's good to write to stdout/stderr, then supervisord can manage + # and rotate logs. + # logdir = /var/log/idds + loglevel = INFO + [rest] + host = https://iddsserver.cern.ch:443/idds + #url_prefix = /idds + #cacher_dir = /tmp + cacher_dir = /data/idds + # -- Containers run as init containers with each user pod. Each should - # set `name`, `image` (a Docker image reference), and `privileged`, and - # may contain `volumes` (similar to the main `volumes` - # configuration). If `privileged` is true, the container will run as - # root with `allowPrivilegeEscalation` true. Otherwise it will, run as - # UID 1000. - initcontainers: [] + # set `name`, `image` (a Docker image and pull policy specification), + # and `privileged`, and may contain `volumeMounts` (similar to the main + # `volumeMounts` configuration). If `privileged` is true, the container + # will run as root with all capabilities. Otherwise it will run as the + # user. + initContainers: [] + + # -- Prefix for namespaces for user labs. To this will be added a dash + # (`-`) and the user's username. + namespacePrefix: "nublado" + + # -- Node selector rules for user lab pods + nodeSelector: {} + + nss: + # -- Base `/etc/passwd` file for lab containers + # @default -- See `values.yaml` + basePasswd: | + root:x:0:0:root:/root:/bin/bash + bin:x:1:1:bin:/bin:/sbin/nologin + daemon:x:2:2:daemon:/sbin:/sbin/nologin + adm:x:3:4:adm:/var/adm:/sbin/nologin + lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin + sync:x:5:0:sync:/sbin:/bin/sync + shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown + halt:x:7:0:halt:/sbin:/sbin/halt + mail:x:8:12:mail:/var/spool/mail:/sbin/nologin + operator:x:11:0:operator:/root:/sbin/nologin + games:x:12:100:games:/usr/games:/sbin/nologin + ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin + tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin + dbus:x:81:81:System message bus:/:/sbin/nologin + nobody:x:99:99:Nobody:/:/sbin/nologin + systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin + lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash + + # -- Base `/etc/group` file for lab containers + # @default -- See `values.yaml` + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: # -- Pull secret to use for labs. Set to the string `pull-secret` to use @@ -129,130 +294,49 @@ controller: # the normal pull secret from Vault. # that key. secrets: [] - # -- Available lab sizes. Names must be chosen from `fine`, + # -- Available lab sizes. Sizes must be chosen from `fine`, # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, # `gargantuan`, and `colossal` in that order. Each should specify the - # maximum CPU equivalents and memory. SI prefixes for memory are - supported. + maximum CPU equivalents and memory. SI suffixes for memory are + supported.
Sizes will be shown in the order defined here, and the + # first defined size will be the default. # @default -- See `values.yaml` (specifies `small`, `medium`, and - # `large`) + # `large` with `small` as the default) sizes: - small: + - size: "small" cpu: 1.0 - memory: 4Gi - medium: + memory: "4Gi" + - size: "medium" cpu: 2.0 - memory: 8Gi - large: + memory: "8Gi" + - size: "large" cpu: 4.0 - memory: 16Gi + memory: "16Gi" + + # -- How long to wait for Kubernetes to spawn a lab in seconds. This + # should generally be shorter than the spawn timeout set in JupyterHub. + spawnTimeout: 600 + + # -- Tolerations for user lab pods + tolerations: [] - # -- Volumes that should be mounted in lab pods. This supports NFS, - # HostPath, and PVC volume types (differentiated in source.type) + # -- Volumes that will be in lab pods or init containers. This supports + # NFS, HostPath, and PVC volume types (differentiated in source.type). volumes: [] # volumes: - # - containerPath: "/project" - # mode: "rw" + # - name: "project" # source: # type: nfs + # readOnly: true # serverPath: "/share1/project" # server: "10.87.86.26" - # -- Files to be mounted as ConfigMaps inside the user lab pod. - # `contents` contains the file contents. Set `modify` to true to make - # the file writable in the pod. - # @default -- See `values.yaml` - files: - /etc/passwd: - modify: true - contents: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash - /etc/group: - modify: true - contents: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - utmp:x:22: - tape:x:33: - utempter:x:35: - video:x:39: - ftp:x:50: - lock:x:54: - tss:x:59: - audio:x:63: - dbus:x:81: - screen:x:84: - nobody:x:99: - users:x:100: - systemd-journal:x:190: - systemd-network:x:192: - cgred:x:997: - ssh_keys:x:998: - input:x:999: - /opt/lsst/software/jupyterlab/lsst_dask.yml: - modify: false - contents: | - # No longer used, but preserves compatibility with runlab.sh - dask_worker.yml: | - enabled: false - /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template: - modify: false - contents: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. 
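(An aside on the volume schema that recurs throughout this change: the old single `volumes` list, whose entries carried `containerPath` and `mode`, is split into named `volumes` and separate `volumeMounts` that reference them. A minimal sketch assembled entirely from values that appear in the values-summit.yaml hunks above, shown together only to make the cross-references visible:

volumes:
  - name: "home"              # declare the volume once, by name
    source:
      type: "nfs"
      serverPath: "/jhome"
      server: "nfs1.cp.lsst.org"
volumeMounts:
  - containerPath: "/home"    # mount it in the lab...
    volumeName: "home"
initContainers:
  - name: "inithome"
    image:
      repository: "ghcr.io/lsst-sqre/nublado-inithome"
      tag: "4.0.2"
    privileged: true
    volumeMounts:
      - containerPath: "/home"  # ...and reuse it in the init container
        volumeName: "home"

Because a mount refers to a volume by name, the init container reuses the `home` definition rather than repeating the NFS source, which is what removes the duplicated `source` blocks from the old `initdir` configuration.)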
- # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - safir: - # -- Level of Python logging - logLevel: "INFO" - - # -- Path prefix that will be routed to the controller - pathPrefix: "/nublado" + # -- Volumes that should be mounted in lab pods. + volumeMounts: [] + # volumeMounts: + # - containerPath: "/project" + # readOnly: true + # volumeName: "project" # JupyterHub configuration handled directly by this chart rather than by Zero # to JupyterHub. @@ -263,10 +347,6 @@ hub: internalDatabase: true timeout: - # -- Timeout for the Kubernetes spawn process in seconds. (Allow long - # enough to pull uncached images if needed.) - spawn: 600 - # -- Timeout for JupyterLab to start. Currently this sometimes takes over # 60 seconds for reasons we don't understand. startup: 90 @@ -284,6 +364,9 @@ proxy: # Configuration for Nublado secrets management. secrets: + # -- Whether to install the T&S SAL Kafka secret. + installTsSalKafkaSecret: false + # -- Whether to use the new secrets management mechanism. If enabled, the # Vault nublado secret will be split into a nublado secret for JupyterHub # and a nublado-lab-secret secret used as a source for secret values for the @@ -298,16 +381,16 @@ jupyterhub: image: # -- Image to use for JupyterHub - name: ghcr.io/lsst-sqre/rsp-restspawner + name: "ghcr.io/lsst-sqre/nublado-jupyterhub" # -- Tag of image to use for JupyterHub - tag: 0.3.2 + tag: "4.0.2" # -- Resource limits and requests resources: limits: - cpu: 900m - memory: 1Gi # Should support about 200 users + cpu: "900m" + memory: "1Gi" # Should support about 200 users db: # -- Type of database to use @@ -319,7 +402,7 @@ jupyterhub: # -- URL of PostgreSQL server # @default -- Use the in-cluster PostgreSQL installed by Phalanx - url: "postgresql://jovyan@postgres.postgres/jupyterhub" + url: "postgresql://nublado3@postgres.postgres/jupyterhub" # -- Security context for JupyterHub container containerSecurityContext: @@ -382,30 +465,17 @@ jupyterhub: # controller does its own prepulling) enabled: false - singleuser: - cloudMetadata: - # -- Whether to configure iptables to block cloud metadata endpoints. - # This is unnecessary in our environments (they are blocked by cluster - # configuration) and thus is disabled to reduce complexity. - blockWithIptables: false - - # -- Start command for labs - cmd: "/opt/lsst/software/jupyterlab/runlab.sh" - - # -- Default URL prefix for lab endpoints - defaultUrl: "/lab" - proxy: service: # -- Only expose the proxy to the cluster, overriding the default of # exposing the proxy directly to the Internet - type: ClusterIP + type: "ClusterIP" chp: networkPolicy: # -- Enable access to the proxy from other namespaces, since we put # each user's lab environment in its own namespace - interNamespaceAccessLabels: accept + interNamespaceAccessLabels: "accept" # This currently causes Minikube deployment in GH-actions to fail. # We want it sometime but it's not critical; it will help with @@ -418,7 +488,8 @@ jupyterhub: # repeat the global host name and manually configure authentication, we # instead install our own GafaelfawrIngress. ingress: - # -- Whether to enable the default ingress + # -- Whether to enable the default ingress. 
Should always be disabled + # since we install our own `GafaelfawrIngress` enabled: false cull: @@ -454,6 +525,53 @@ jupyterhub: # autoscaling in advance of running out of resources enabled: false +cloudsql: + # -- Enable the Cloud SQL Auth Proxy, used with Cloud SQL databases on + # Google Cloud + enabled: false + + # -- Affinity rules for the Cloud SQL Auth Proxy pod + affinity: {} + + image: + # -- Cloud SQL Auth Proxy image to use + repository: "gcr.io/cloudsql-docker/gce-proxy" + + # -- Pull policy for Cloud SQL Auth Proxy images + pullPolicy: "IfNotPresent" + + # -- Cloud SQL Auth Proxy tag to use + tag: "1.33.16" + + # -- Instance connection name for a Cloud SQL PostgreSQL instance + # @default -- None, must be set if Cloud SQL Auth Proxy is enabled + instanceConnectionName: "" + + # -- Resource limits and requests for the Cloud SQL Proxy pod + # @default -- See `values.yaml` + resources: + limits: + cpu: "100m" + memory: "20Mi" + requests: + cpu: "5m" + memory: "7Mi" + + # -- Annotations for the Cloud SQL Auth Proxy pod + podAnnotations: {} + + # -- Node selection rules for the Cloud SQL Auth Proxy pod + nodeSelector: {} + + # -- The Google service account that has an IAM binding to the + # `cloud-sql-proxy` Kubernetes service account and has the `cloudsql.client` + # role + # @default -- None, must be set if Cloud SQL Auth Proxy is enabled + serviceAccount: "" + + # -- Tolerations for the Cloud SQL Auth Proxy pod + tolerations: [] + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. global: diff --git a/applications/nublado2/Chart.yaml b/applications/nublado2/Chart.yaml deleted file mode 100644 index d758b2bf68..0000000000 --- a/applications/nublado2/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: nublado2 -version: 1.0.0 -description: JupyterHub for the Rubin Science Platform -home: https://github.com/lsst-sqre/nublado2 -sources: - - https://github.com/lsst-sqre/nublado2 -# This version is not used directly. Also update the tag in values.yaml. -appVersion: "2.6.1" - -# Match the jupyterhub Helm chart for kubeVersion -kubeVersion: ">=1.20.0-0" -dependencies: - - name: jupyterhub - # This is the Zero To Jupyterhub version, *not* the version of the - # Jupyterhub package itself. - version: "2.0.0" - repository: https://jupyterhub.github.io/helm-chart/ - -annotations: - phalanx.lsst.io/docs: | - - id: "DMTN-164" - title: "Nublado v2 Architecture" - url: "https://dmtn-164.lsst.io/" diff --git a/applications/nublado2/README.md b/applications/nublado2/README.md deleted file mode 100644 index 8f4eb07907..0000000000 --- a/applications/nublado2/README.md +++ /dev/null @@ -1,119 +0,0 @@ -# nublado2 - -JupyterHub for the Rubin Science Platform - -**Homepage:** - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| config.base_url | string | `""` | base_url must be set in each instantiation of this chart to the URL of the primary ingress. It's used to construct API requests to the authentication service (which should go through the ingress). | -| config.butler_secret_path | string | `""` | butler_secret_path must be set here, because it's passed through to the lab rather than being part of the Hub configuration. | -| config.cachemachine_image_policy | string | `"available"` | Cachemachine image policy: "available" or "desired". Use "desired" at instances with streaming image support. 
| -| config.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | -| config.lab_environment | object | See `values.yaml` | Environment variables to set in spawned lab containers. Each value will be expanded using Jinja 2 templating. | -| config.pinned_images | list | `[]` | images to pin to spawner menu | -| config.pull_secret_path | string | `""` | pull_secret_path must also be set here; it specifies resources in the lab namespace | -| config.shutdown_on_logout | bool | `true` | shut down user pods on logout. Superfluous, because our LogoutHandler enforces this in any event, but nice to make explicit. | -| config.sizes | list | `[{"cpu":1,"name":"Small","ram":"4096M"},{"cpu":2,"name":"Medium","ram":"8192M"},{"cpu":4,"name":"Large","ram":"16384M"}]` | definitions of Lab sizes available in a given instance | -| config.user_resources_template | string | See `values.yaml` | Templates for the user resources to create for each lab spawn. This is a string that can be templated and then loaded as YAML to generate a list of Kubernetes objects to create. | -| config.volume_mounts | list | `[]` | Where to mount volumes for a particular instance | -| config.volumes | list | `[]` | Volumes to use for a particular instance | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| jupyterhub.cull.enabled | bool | `true` | | -| jupyterhub.cull.every | int | `600` | | -| jupyterhub.cull.maxAge | int | `5184000` | | -| jupyterhub.cull.removeNamedServers | bool | `true` | | -| jupyterhub.cull.timeout | int | `2592000` | | -| jupyterhub.cull.users | bool | `true` | | -| jupyterhub.hub.authenticatePrometheus | bool | `false` | | -| jupyterhub.hub.baseUrl | string | `"/nb"` | | -| jupyterhub.hub.config.Authenticator.enable_auth_state | bool | `true` | | -| jupyterhub.hub.config.JupyterHub.authenticator_class | string | `"nublado2.auth.GafaelfawrAuthenticator"` | | -| jupyterhub.hub.config.ServerApp.shutdown_no_activity_timeout | int | `604800` | | -| jupyterhub.hub.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | -| jupyterhub.hub.containerSecurityContext.runAsGroup | int | `768` | | -| jupyterhub.hub.containerSecurityContext.runAsUser | int | `768` | | -| jupyterhub.hub.db.password | string | `"true"` | | -| jupyterhub.hub.db.type | string | `"postgres"` | | -| jupyterhub.hub.db.url | string | `"postgresql://jovyan@postgres.postgres/jupyterhub"` | | -| jupyterhub.hub.existingSecret | string | `"nublado2-secret"` | | -| jupyterhub.hub.extraConfig."nublado.py" | string | `"import nublado2.hub_config\nnublado2.hub_config.HubConfig().configure(c)\n"` | | -| jupyterhub.hub.extraVolumeMounts[0].mountPath | string | `"/etc/jupyterhub/nublado_config.yaml"` | | -| jupyterhub.hub.extraVolumeMounts[0].name | string | `"nublado-config"` | | -| jupyterhub.hub.extraVolumeMounts[0].subPath | string | `"nublado_config.yaml"` | | -| jupyterhub.hub.extraVolumeMounts[1].mountPath | string | `"/etc/keys/gafaelfawr-token"` | | -| jupyterhub.hub.extraVolumeMounts[1].name | string | `"nublado-gafaelfawr"` | | -| jupyterhub.hub.extraVolumeMounts[1].subPath | string | `"token"` | | -| jupyterhub.hub.extraVolumes[0].configMap.name | string | `"nublado-config"` | | -| jupyterhub.hub.extraVolumes[0].name | string | `"nublado-config"` | | -| jupyterhub.hub.extraVolumes[1].name | string | `"nublado-gafaelfawr"` | | -| 
jupyterhub.hub.extraVolumes[1].secret.secretName | string | `"gafaelfawr-token"` | | -| jupyterhub.hub.image.name | string | `"lsstsqre/nublado2"` | | -| jupyterhub.hub.image.tag | string | `"2.6.1"` | | -| jupyterhub.hub.loadRoles.self.scopes[0] | string | `"admin:servers!user"` | | -| jupyterhub.hub.loadRoles.self.scopes[1] | string | `"read:metrics"` | | -| jupyterhub.hub.loadRoles.server.scopes[0] | string | `"inherit"` | | -| jupyterhub.hub.networkPolicy.enabled | bool | `false` | | -| jupyterhub.hub.resources.limits.cpu | string | `"900m"` | | -| jupyterhub.hub.resources.limits.memory | string | `"1Gi"` | | -| jupyterhub.imagePullSecrets[0].name | string | `"pull-secret"` | | -| jupyterhub.ingress.annotations | object | See `values.yaml` | Extra annotations to add to the ingress | -| jupyterhub.ingress.enabled | bool | `true` | | -| jupyterhub.ingress.ingressClassName | string | `"nginx"` | | -| jupyterhub.ingress.pathSuffix | string | `"*"` | | -| jupyterhub.prePuller.continuous.enabled | bool | `false` | | -| jupyterhub.prePuller.hook.enabled | bool | `false` | | -| jupyterhub.proxy.chp.networkPolicy.interNamespaceAccessLabels | string | `"accept"` | | -| jupyterhub.proxy.service.type | string | `"ClusterIP"` | | -| jupyterhub.scheduling.userPlaceholder.enabled | bool | `false` | | -| jupyterhub.scheduling.userScheduler.enabled | bool | `false` | | -| jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | | -| jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | | -| jupyterhub.singleuser.defaultUrl | string | `"/lab"` | | -| jupyterhub.singleuser.extraAnnotations."argocd.argoproj.io/compare-options" | string | `"IgnoreExtraneous"` | | -| jupyterhub.singleuser.extraAnnotations."argocd.argoproj.io/sync-options" | string | `"Prune=false"` | | -| jupyterhub.singleuser.extraLabels."argocd.argoproj.io/instance" | string | `"nublado-users"` | | -| jupyterhub.singleuser.extraLabels."hub.jupyter.org/network-access-hub" | string | `"true"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[0].mountPath | string | `"/etc/dask"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[0].name | string | `"dask"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[1].mountPath | string | `"/opt/lsst/software/jupyterlab/panda"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[1].name | string | `"idds-config"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[2].mountPath | string | `"/tmp"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[2].name | string | `"tmp"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[3].mountPath | string | `"/opt/lsst/software/jupyterlab/butler-secret"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[3].name | string | `"butler-secret"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[4].mountPath | string | `"/opt/lsst/software/jupyterlab/environment"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[4].name | string | `"lab-environment"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].mountPath | string | `"/etc/passwd"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].name | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].readOnly | bool | `true` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].subPath | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[6].mountPath | string | `"/etc/group"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[6].name | string | `"group"` | | -| 
jupyterhub.singleuser.storage.extraVolumeMounts[6].readOnly | bool | `true` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[6].subPath | string | `"group"` | | -| jupyterhub.singleuser.storage.extraVolumes[0].configMap.name | string | `"dask"` | | -| jupyterhub.singleuser.storage.extraVolumes[0].name | string | `"dask"` | | -| jupyterhub.singleuser.storage.extraVolumes[1].configMap.name | string | `"idds-config"` | | -| jupyterhub.singleuser.storage.extraVolumes[1].name | string | `"idds-config"` | | -| jupyterhub.singleuser.storage.extraVolumes[2].emptyDir | object | `{}` | | -| jupyterhub.singleuser.storage.extraVolumes[2].name | string | `"tmp"` | | -| jupyterhub.singleuser.storage.extraVolumes[3].name | string | `"butler-secret"` | | -| jupyterhub.singleuser.storage.extraVolumes[3].secret.secretName | string | `"butler-secret"` | | -| jupyterhub.singleuser.storage.extraVolumes[4].configMap.defaultMode | int | `420` | | -| jupyterhub.singleuser.storage.extraVolumes[4].configMap.name | string | `"lab-environment"` | | -| jupyterhub.singleuser.storage.extraVolumes[4].name | string | `"lab-environment"` | | -| jupyterhub.singleuser.storage.extraVolumes[5].configMap.defaultMode | int | `420` | | -| jupyterhub.singleuser.storage.extraVolumes[5].configMap.name | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumes[5].name | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumes[6].configMap.defaultMode | int | `420` | | -| jupyterhub.singleuser.storage.extraVolumes[6].configMap.name | string | `"group"` | | -| jupyterhub.singleuser.storage.extraVolumes[6].name | string | `"group"` | | -| jupyterhub.singleuser.storage.type | string | `"none"` | | -| network_policy.enabled | bool | `true` | | diff --git a/applications/nublado2/secrets.yaml b/applications/nublado2/secrets.yaml deleted file mode 100644 index 15d1b5eeba..0000000000 --- a/applications/nublado2/secrets.yaml +++ /dev/null @@ -1,17 +0,0 @@ -cryptkeeper_key: - description: "Encryption key for internal key management." - generate: - type: password -crypto_key: - description: "Encryption key for JupyterHub stored state." - generate: - type: password -hub_db_password: - description: "Password to authenticate to the JupyterHub session database." - generate: - type: password - if: config.internalDatabase -proxy_token: - description: "Token authenticating JupyterHub to the proxy server." - generate: - type: password diff --git a/applications/nublado2/templates/clusterrole.yaml b/applications/nublado2/templates/clusterrole.yaml deleted file mode 100644 index cc8a8b5e99..0000000000 --- a/applications/nublado2/templates/clusterrole.yaml +++ /dev/null @@ -1,28 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "nublado2.fullname" . 
}}-hub -rules: -- apiGroups: [""] - resources: ["pods","events", "namespaces", "serviceaccounts", "services", - "persistentvolumeclaims", "persistentvolumes", "resourcequotas", - "configmaps", "pods/log", "pods/exec"] - verbs: ["get", "list", "create", "watch", "delete", "update", "patch"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "create", "delete"] -- apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] -- apiGroups: ["rbac.authorization.k8s.io"] - resources: ["roles", "rolebindings"] - verbs: ["get", "list", "create", "delete"] -- apiGroups: ["argoproj.io"] - resources: ["workflows", "workflows/finalizers"] - verbs: ["get", "list", "create", "watch", "delete", "update", "patch"] -- apiGroups: ["argoproj.io"] - resources: ["workflowtemplates", "workflowtemplates/finalizers"] - verbs: ["get", "list", "watch"] -- apiGroups: ["ricoberger.de"] - resources: ["vaultsecrets"] - verbs: ["get", "create", "delete", "list"] diff --git a/applications/nublado2/templates/clusterrolebinding.yaml b/applications/nublado2/templates/clusterrolebinding.yaml deleted file mode 100644 index cdb0c5fd53..0000000000 --- a/applications/nublado2/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "nublado2.fullname" . }}-hub -subjects: - # Note: this service account is created by the jupyterhub subchart - - kind: ServiceAccount - name: hub - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ template "nublado2.fullname" . }}-hub - apiGroup: rbac.authorization.k8s.io diff --git a/applications/nublado2/templates/gafaelfawr-token.yaml b/applications/nublado2/templates/gafaelfawr-token.yaml deleted file mode 100644 index 06a9822b82..0000000000 --- a/applications/nublado2/templates/gafaelfawr-token.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: gafaelfawr.lsst.io/v1alpha1 -kind: GafaelfawrServiceToken -metadata: - name: "gafaelfawr-token" - labels: - {{- include "nublado2.labels" . | nindent 4 }} -spec: - service: "bot-nublado2" - scopes: - - "admin:provision" diff --git a/applications/nublado2/templates/netpol.yaml b/applications/nublado2/templates/netpol.yaml deleted file mode 100644 index 91da074252..0000000000 --- a/applications/nublado2/templates/netpol.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if .Values.network_policy.enabled }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: hub - labels: - {{- include "nublado2.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - app: jupyterhub - component: hub - release: {{ .Release.Name }} - policyTypes: - - Ingress - - ingress: - # allowed pods (hub.jupyter.org/network-access-hub) --> hub - - ports: - - port: http - - port: 8081 - from: - - podSelector: - matchLabels: - hub.jupyter.org/network-access-hub: "true" - namespaceSelector: {} -{{- end }} diff --git a/applications/nublado2/templates/nublado-config.yaml b/applications/nublado2/templates/nublado-config.yaml deleted file mode 100644 index fbc234d394..0000000000 --- a/applications/nublado2/templates/nublado-config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: nublado-config - labels: - {{- include "nublado2.labels" . 
| nindent 4 }} -data: - nublado_config.yaml: | - {{- toYaml .Values.config | nindent 4 }} diff --git a/applications/nublado2/templates/vault-secrets.yaml b/applications/nublado2/templates/vault-secrets.yaml deleted file mode 100644 index 962d6c1896..0000000000 --- a/applications/nublado2/templates/vault-secrets.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: "nublado2-secret" -spec: - path: "{{- .Values.global.vaultSecretsPath }}/nublado2" - type: Opaque - - templates: - {{- /* dump in values.yaml for jupyterhub, without changing it */}} - {{- /* this is copied from the zero-to-jupyterhub chart where it does this */}} - {{- $values := merge dict .Values.jupyterhub }} - {{- /* passthrough subset of Chart / Release */}} - {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }} - {{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }} - values.yaml: {{ $values | toYaml | quote }} - - {{- /* dump in the rest of the keys in this path and their values */}} - {{- /* this uses the templating provided by vault-secrets-operator */}} - hub.db.password: "{% .Secrets.hub_db_password %}" - hub.config.JupyterHub.cookie_secret: "{% .Secrets.crypto_key %}" - hub.config.CryptKeeper.keys: "{% .Secrets.cryptkeeper_key %}" - hub.config.ConfigurableHTTPProxy.auth_token: "{% .Secrets.proxy_token %}" ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: pull-secret - labels: - {{- include "nublado2.labels" . | nindent 4 }} -spec: - path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" - type: kubernetes.io/dockerconfigjson diff --git a/applications/nublado2/values-base.yaml b/applications/nublado2/values-base.yaml deleted file mode 100644 index f546535e2f..0000000000 --- a/applications/nublado2/values-base.yaml +++ /dev/null @@ -1,81 +0,0 @@ -jupyterhub: - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - ingress: - hosts: ["base-lsp.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://base-lsp.lsst.codes/login" - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/dds" - hub: - baseUrl: "/n2" - db: - upgrade: true - url: "postgresql://jovyan@postgresdb01.ls.lsst.org/jupyterhub" - -config: - base_url: "https://base-lsp.lsst.codes" - butler_secret_path: "secret/k8s_operator/base-lsp.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/base-lsp.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - LSST_DDS_INTERFACE: net1 - LSST_DDS_PARTITION_PREFIX: base - LSST_SITE: base - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "oods" - volumes: - - name: home - nfs: - path: /jhome - server: nfs-jhome.ls.lsst.org - - name: project - nfs: - path: /project - server: nfs-project.ls.lsst.org - - name: scratch - nfs: - path: /scratch - server: nfs-scratch.ls.lsst.org - - name: datasets - nfs: - path: /lsstdata - server: nfs-lsstdata.ls.lsst.org - - name: auxtel-butler - nfs: - path: /auxtel/repo/LATISS - server: nfs-auxtel.ls.lsst.org - - name: auxtel-oods - nfs: - path: /auxtel/lsstdata/BTS/auxtel - server: nfs-auxtel.ls.lsst.org - readOnly: true - - name: obs-env - nfs: - path: /obs-env - server: 
nfs-obsenv.ls.lsst.org - volume_mounts: - - name: home - mountPath: /home - - name: datasets - mountPath: /datasets - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - - name: auxtel-butler - mountPath: /repo/LATISS - - name: auxtel-oods - mountPath: /data/lsstdata/BTS/auxtel - readOnly: true - - name: obs-env - mountPath: /net/obs-env diff --git a/applications/nublado2/values-ccin2p3.yaml b/applications/nublado2/values-ccin2p3.yaml deleted file mode 100644 index 33e2c594ba..0000000000 --- a/applications/nublado2/values-ccin2p3.yaml +++ /dev/null @@ -1,209 +0,0 @@ -jupyterhub: - debug: - enabled: true - hub: - db: - upgrade: true - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["data-dev.lsst.eu"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://data-dev.lsst.eu/login" - nginx.ingress.kubernetes.io/auth-url: "https://data-dev.lsst.eu/auth?scope=exec:notebook&notebook=true" - nginx.ingress.kubernetes.io/proxy-connect-timeout: "50s" - nginx.ingress.kubernetes.io/proxy-read-timeout: "50s" - nginx.ingress.kubernetes.io/client-max-body-size: "50m" - nginx.ingress.kubernetes.io/proxy-body-size: "50m" - -config: - base_url: "https://data-dev.lsst.eu" - butler_secret_path: "secret/k8s_operator/rsp-cc/butler-secret" - pull_secret_path: "secret/k8s_operator/rsp-cc/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_KERNEL_CONNECTED: "True" - CULL_KERNEL_INTERVAL: "300" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - CULL_TERMINAL_INTERVAL: "300" - pinned_images: - - image_url: registry.hub.docker.com/lsstsqre/sciplat-lab:recommended - name: Recommended - volumes: - - name: home - hostPath: - path: /pbs/home - - volume_mounts: - - name: home - mountPath: /home - - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - lsst_lcl:x:1000:{{ user }} - tss:x:59: - cgred:x:997: - screen:x:84: - jovyan:x:768:{{ user }}{% for g in groups %} - {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - 
systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user[0] }}/{{ user }}/rsp_home:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml deleted file mode 100644 index c5812aedb8..0000000000 --- a/applications/nublado2/values-idfint.yaml +++ /dev/null @@ -1,246 +0,0 @@ -jupyterhub: - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["data-int.lsst.cloud"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://data-int.lsst.cloud/login" -config: - base_url: "https://data-int.lsst.cloud" - butler_secret_path: "secret/k8s_operator/data-int.lsst.cloud/butler-secret" - pull_secret_path: 
"secret/k8s_operator/data-int.lsst.cloud/pull-secret" - cachemachine_image_policy: "desired" - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - S3_ENDPOINT_URL: "https://storage.googleapis.com" - GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/butler-secret/butler-gcs-idf-creds.json" - DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - PANDA_AUTH: oidc - PANDA_VERIFY_HOST: "off" - PANDA_AUTH_VO: Rubin - PANDA_URL_SSL: https://pandaserver-doma.cern.ch:25443/server/panda - PANDA_URL: http://pandaserver-doma.cern.ch:25080/server/panda - IDDS_CONFIG: /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template - PANDA_CONFIG_ROOT: "~" - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - sizes: - - name: Small - cpu: 1 - ram: 4096M - - name: Medium - cpu: 2 - ram: 8192M - - name: Large - cpu: 4 - ram: 16384M - - name: Huge - cpu: 8 - ram: 32768M - volumes: - - name: home - nfs: - path: /share1/home - server: 10.22.240.130 - - name: project - nfs: - path: /share1/project - server: 10.22.240.130 - - name: scratch - nfs: - path: /share1/scratch - server: 10.22.240.130 - volume_mounts: - - name: home - mountPath: /home - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - # Workaround to impose resource quotas at IDF - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - lsst_lcl:x:1000:{{ user }} - tss:x:59: - cgred:x:997: - screen:x:84: - jovyan:x:768:{{ user }}{% for g in groups %} - {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash - tss:x:59:59:Account used by the trousers package to sandbox the tcsd 
daemon:/dev/null:/sbin/nologin - {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds.cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - - apiVersion: v1 - kind: ResourceQuota - metadata: - name: user-quota - namespace: "{{ user_namespace }}" - spec: - hard: - limits.cpu: 9 - limits.memory: 27Gi diff --git a/applications/nublado2/values-minikube.yaml b/applications/nublado2/values-minikube.yaml deleted file mode 100644 index 01245f89b7..0000000000 --- a/applications/nublado2/values-minikube.yaml +++ /dev/null @@ -1,23 +0,0 @@ -jupyterhub: - hub: - resources: {} - debug: - enabled: true - ingress: - hosts: ["minikube.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://minikube.lsst.codes/login" -config: - base_url: "https://minikube.lsst.codes" - butler_secret_path: "secret/k8s_operator/minikube.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/minikube.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - volumes: - - 
name: home - emptyDir: {} - volume_mounts: - - name: home - mountPath: /home diff --git a/applications/nublado2/values-roe.yaml b/applications/nublado2/values-roe.yaml deleted file mode 100644 index 7ff9ae4f8f..0000000000 --- a/applications/nublado2/values-roe.yaml +++ /dev/null @@ -1,44 +0,0 @@ -jupyterhub: - ingress: - hosts: ["rsp.lsst.ac.uk"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://rsp.lsst.ac.uk/login" - nginx.ingress.kubernetes.io/auth-url: "https://rsp.lsst.ac.uk/auth?scope=exec:notebook&notebook=true" - -config: - base_url: "https://rsp.lsst.ac.uk" - butler_secret_path: "secret/k8s_operator/roe/butler-secret" - pull_secret_path: "secret/k8s_operator/roe/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - pinned_images: - - image_url: registry.hub.docker.com/lsstsqre/sciplat-lab:recommended - name: Recommended - volumes: - - name: data - nfs: - path: /data - server: 192.41.122.33 - - name: home - nfs: - path: /jhome - server: 192.41.122.33 - - name: datasets - nfs: - path: /datasets - server: 192.41.122.33 - volume_mounts: - - name: data - mountPath: /data - - name: home - mountPath: /home - - name: datasets - mountPath: /datasets - -vault_secret_path: "secret/k8s_operator/roe/nublado2" - -pull-secret: - enabled: true - path: "secret/k8s_operator/roe/pull-secret" diff --git a/applications/nublado2/values-summit.yaml b/applications/nublado2/values-summit.yaml deleted file mode 100644 index 3f8f14c394..0000000000 --- a/applications/nublado2/values-summit.yaml +++ /dev/null @@ -1,105 +0,0 @@ -jupyterhub: - ingress: - hosts: ["summit-lsp.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://summit-lsp.lsst.codes/login" - hub: - db: - upgrade: true - url: "postgresql://jovyan@postgresdb01.cp.lsst.org/jupyterhub" - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/dds" - -config: - base_url: "https://summit-lsp.lsst.codes" - butler_secret_path: "secret/k8s_operator/summit-lsp.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/summit-lsp.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - LSST_DDS_INTERFACE: net1 - LSST_DDS_PARTITION_PREFIX: summit - LSST_SITE: summit - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "oods" - volumes: - - name: home - nfs: - path: /jhome - server: nfs1.cp.lsst.org - - name: project - nfs: - path: /project - server: nfs1.cp.lsst.org - - name: scratch - nfs: - path: /scratch - server: nfs1.cp.lsst.org - - name: auxtel - nfs: - path: /auxtel/lsstdata - server: nfs-auxtel.cp.lsst.org - readOnly: true - - name: comcam - nfs: - path: /lsstdata - server: comcam-archiver.cp.lsst.org - readOnly: true - - name: other - nfs: - path: /lsstdata - server: nfs1.cp.lsst.org - readOnly: true - - name: latiss - nfs: - path: /auxtel/repo/LATISS - server: nfs-auxtel.cp.lsst.org - - name: base-auxtel - nfs: - path: /auxtel/lsstdata/base/auxtel - server: nfs-auxtel.cp.lsst.org - readOnly: true - - name: lsstcomcam - nfs: - path: /repo/LSSTComCam - server: comcam-archiver.cp.lsst.org - - name: base-comcam - nfs: - path: /lsstdata/base/comcam - server: comcam-archiver.cp.lsst.org - readOnly: true - - name: 
obs-env - nfs: - path: /obs-env - server: nfs-obsenv.cp.lsst.org - volume_mounts: - - name: home - mountPath: /home - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - - name: auxtel - mountPath: /readonly/lsstdata/auxtel - readOnly: true - - name: comcam - mountPath: /readonly/lsstdata/comcam - readOnly: true - - name: other - mountPath: /readonly/lsstdata/other - readOnly: true - - name: latiss - mountPath: /repo/LATISS - - name: base-auxtel - mountPath: /data/lsstdata/base/auxtel - readOnly: true - - name: lsstcomcam - mountPath: /repo/LSSTComCam - - name: base-comcam - mountPath: /data/lsstdata/base/comcam - readOnly: true - - name: obs-env - mountPath: /net/obs-env diff --git a/applications/nublado2/values-tucson-teststand.yaml b/applications/nublado2/values-tucson-teststand.yaml deleted file mode 100644 index 1594760b74..0000000000 --- a/applications/nublado2/values-tucson-teststand.yaml +++ /dev/null @@ -1,95 +0,0 @@ -jupyterhub: - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - ingress: - hosts: ["tucson-teststand.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://tucson-teststand.lsst.codes/login" - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/dds" - hub: - baseUrl: "/n2" - db: - upgrade: true - url: "postgresql://jovyan@squoint.tu.lsst.org/jupyterhub" - -config: - base_url: "https://tucson-teststand.lsst.codes" - butler_secret_path: "secret/k8s_operator/tucson-teststand.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/tucson-teststand.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - LSST_DDS_INTERFACE: net1 - LSST_DDS_PARTITION_PREFIX: tucson - LSST_SITE: tucson - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "oods" - volumes: - - name: home - nfs: - path: /jhome - server: nfs-jhome.tu.lsst.org - - name: project - nfs: - path: /project - server: nfs-project.tu.lsst.org - - name: scratch - nfs: - path: /scratch - server: nfs-scratch.tu.lsst.org - - name: datasets - nfs: - path: /lsstdata - server: nfs-lsstdata.tu.lsst.org - - name: auxtel-butler - nfs: - path: /auxtel/repo/LATISS - server: nfs-auxtel.tu.lsst.org - - name: auxtel-oods - nfs: - path: /auxtel/lsstdata/TTS/auxtel - server: nfs-auxtel.tu.lsst.org - readOnly: true - - name: comcam-butler - nfs: - path: /repo/LSSTComCam - server: comcam-archiver.tu.lsst.org - - name: comcam-oods - nfs: - path: /lsstdata/TTS/comcam - server: comcam-archiver.tu.lsst.org - readOnly: true - - name: obs-env - nfs: - path: /obs-env - server: nfs-obsenv.tu.lsst.org - volume_mounts: - - name: home - mountPath: /home - - name: datasets - mountPath: /datasets - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - - name: auxtel-butler - mountPath: /repo/LATISS - - name: auxtel-oods - mountPath: /data/lsstdata/TTS/auxtel - readOnly: true - - name: comcam-butler - mountPath: /repo/LSSTComCam - - name: comcam-oods - mountPath: /data/lsstdata/TTS/comcam - readOnly: true - - name: obs-env - mountPath: /net/obs-env diff --git a/applications/nublado2/values-usdfdev.yaml b/applications/nublado2/values-usdfdev.yaml deleted file mode 100644 index 77d80bc08e..0000000000 --- 
a/applications/nublado2/values-usdfdev.yaml +++ /dev/null @@ -1,410 +0,0 @@ -jupyterhub: - - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["usdf-rsp-dev.slac.stanford.edu"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://usdf-rsp-dev.slac.stanford.edu/login" - nginx.ingress.kubernetes.io/auth-url: "https://usdf-rsp-dev.slac.stanford.edu/auth?scope=exec:notebook&notebook=true" - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30s" - nginx.ingress.kubernetes.io/proxy-read-timeout: "20s" - nginx.ingress.kubernetes.io/client-max-body-size: "50m" - nginx.ingress.kubernetes.io/proxy-body-size: "50m" - -config: - base_url: "https://usdf-rsp-dev.slac.stanford.edu" - butler_secret_path: "secret/rubin/usdf-rsp-dev/butler-secret" - pull_secret_path: "secret/rubin/usdf-rsp-dev/pull-secret" - cachemachine_image_policy: "desired" - - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "rubin" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_KERNEL_CONNECTED: "True" - CULL_KERNEL_INTERVAL: "300" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - CULL_TERMINAL_INTERVAL: "300" - http_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 - https_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 - no_proxy: hub.nublado2,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1 - - volumes: - - name: home - persistentVolumeClaim: - claimName: sdf-home - - name: sdf-group-rubin - persistentVolumeClaim: - claimName: sdf-group-rubin - - name: sdf-data-rubin - persistentVolumeClaim: - claimName: sdf-data-rubin - - name: fs-ddn-sdf-group-rubin - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-rubin - - name: sdf-scratch - persistentVolumeClaim: - claimName: sdf-scratch - - name: fs-ddn-sdf-group-lsst - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-lsst - volume_mounts: - - name: home - mountPath: "/home/" - - name: sdf-data-rubin - mountPath: /repo - subPath: repo - - name: sdf-group-rubin - mountPath: /project - subPath: g - - name: sdf-group-rubin - mountPath: /sdf/group/rubin - - name: sdf-data-rubin - mountPath: /sdf/data/rubin - - name: sdf-scratch - mountPath: /scratch - - name: fs-ddn-sdf-group-rubin - mountPath: /fs/ddn/sdf/group/rubin - - name: fs-ddn-sdf-group-lsst - mountPath: /fs/ddn/sdf/group/lsst - - - # Workaround to impose resource quotas at IDF - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - 
users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - tss:x:59: - cgred:x:997: - screen:x:84: - provisionator:x:769: - rubin_users:x:4085:{% for group in groups %} - {{ group.name }}:x:{{ group.id }}:{{ user }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: gshadow - namespace: "{{ user_namespace }}" - data: - gshadow: | - root:!:: - bin:!:: - daemon:!:: - sys:!:: - adm:!:: - tty:!:: - disk:!:: - lp:!:: - mem:!:: - kmem:!:: - wheel:!:: - cdrom:!:: - mail:!:: - man:!:: - dialout:!:: - floppy:!:: - games:!:: - tape:!:: - video:!:: - ftp:!:: - lock:!:: - audio:!:: - nobody:!:: - users:!:: - utmp:!:: - utempter:!:: - input:!:: - systemd-journal:!:: - systemd-network:!:: - dbus:!:: - ssh_keys:!:: - tss:!:: - cgred:!:: - screen:!:: - provisionator:!:: - rubin_users:!::{% for g in groups %} - {{ g.name }}:!::{{ user }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - provisionator:x:769:769:Lab provisioning user:/home/provisionator:/bin/bash - {{ user }}:x:{{ uid }}:4085:rubin_users:/home/{{ user[0] }}/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: shadow - namespace: "{{ user_namespace }}" - data: - shadow: | - root:*:18000:0:99999:7::: - bin:*:18000:0:99999:7::: - daemon:*:18000:0:99999:7::: - adm:*:18000:0:99999:7::: - lp:*:18000:0:99999:7::: - sync:*:18000:0:99999:7::: - shutdown:*:18000:0:99999:7::: - halt:*:18000:0:99999:7::: - mail:*:18000:0:99999:7::: - operator:*:18000:0:99999:7::: - games:*:18000:0:99999:7::: - ftp:*:18000:0:99999:7::: - nobody:*:18000:0:99999:7::: - systemd-network:*:18000:0:99999:7::: - dbus:*:18000:0:99999:7::: - lsst_lcl:*:18000:0:99999:7::: - tss:*:18000:0:99999:7::: - provisionator:*:18000:0:99999:7::: - {{user}}:*:18000:0:99999:7::: - - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. 
- # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - - apiVersion: v1 - kind: ResourceQuota - metadata: - name: user-quota - namespace: "{{ user_namespace }}" - spec: - hard: - limits.cpu: 9 - limits.memory: 27Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-data-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-data-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-home - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-home - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-lsst - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-lsst - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-scratch - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-scratch - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - -vault_secret_path: "secret/rubin/usdf-rsp-dev/nublado2" - -pull-secret: - enabled: true - path: "secret/rubin/usdf-rsp-dev/pull-secret" diff --git a/applications/nublado2/values-usdfprod.yaml b/applications/nublado2/values-usdfprod.yaml deleted file mode 100644 index 
690fd7c7bd..0000000000 --- a/applications/nublado2/values-usdfprod.yaml +++ /dev/null @@ -1,410 +0,0 @@ -jupyterhub: - - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["usdf-rsp.slac.stanford.edu"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://usdf-rsp.slac.stanford.edu/login" - nginx.ingress.kubernetes.io/auth-url: "https://usdf-rsp.slac.stanford.edu/auth?scope=exec:notebook&notebook=true" - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30s" - nginx.ingress.kubernetes.io/proxy-read-timeout: "20s" - nginx.ingress.kubernetes.io/client-max-body-size: "50m" - nginx.ingress.kubernetes.io/proxy-body-size: "50m" - -config: - base_url: "https://usdf-rsp.slac.stanford.edu" - butler_secret_path: "secret/rubin/usdf-rsp/butler-secret" - pull_secret_path: "secret/rubin/usdf-rsp/pull-secret" - cachemachine_image_policy: "desired" - - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "rubin" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_KERNEL_CONNECTED: "True" - CULL_KERNEL_INTERVAL: "300" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - CULL_TERMINAL_INTERVAL: "300" - http_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 - https_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 - no_proxy: hub.nublado2,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1 - - volumes: - - name: home - persistentVolumeClaim: - claimName: sdf-home - - name: sdf-group-rubin - persistentVolumeClaim: - claimName: sdf-group-rubin - - name: sdf-data-rubin - persistentVolumeClaim: - claimName: sdf-data-rubin - - name: fs-ddn-sdf-group-rubin - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-rubin - - name: sdf-scratch - persistentVolumeClaim: - claimName: sdf-scratch - - name: fs-ddn-sdf-group-lsst - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-lsst - volume_mounts: - - name: home - mountPath: "/home/" - - name: sdf-data-rubin - mountPath: /repo - subPath: repo - - name: sdf-group-rubin - mountPath: /project - subPath: g - - name: sdf-group-rubin - mountPath: /sdf/group/rubin - - name: sdf-data-rubin - mountPath: /sdf/data/rubin - - name: sdf-scratch - mountPath: /scratch - - name: fs-ddn-sdf-group-rubin - mountPath: /fs/ddn/sdf/group/rubin - - name: fs-ddn-sdf-group-lsst - mountPath: /fs/ddn/sdf/group/lsst - - - # Workaround to impose resource quotas at IDF - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: 
- users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - tss:x:59: - cgred:x:997: - screen:x:84: - provisionator:x:769: - rubin_users:x:4085:{% for group in groups %} - {{ group.name }}:x:{{ group.id }}:{{ user }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: gshadow - namespace: "{{ user_namespace }}" - data: - gshadow: | - root:!:: - bin:!:: - daemon:!:: - sys:!:: - adm:!:: - tty:!:: - disk:!:: - lp:!:: - mem:!:: - kmem:!:: - wheel:!:: - cdrom:!:: - mail:!:: - man:!:: - dialout:!:: - floppy:!:: - games:!:: - tape:!:: - video:!:: - ftp:!:: - lock:!:: - audio:!:: - nobody:!:: - users:!:: - utmp:!:: - utempter:!:: - input:!:: - systemd-journal:!:: - systemd-network:!:: - dbus:!:: - ssh_keys:!:: - tss:!:: - cgred:!:: - screen:!:: - provisionator:!:: - rubin_users:!::{% for g in groups %} - {{ g.name }}:!::{{ user }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - provisionator:x:769:769:Lab provisioning user:/home/provisionator:/bin/bash - {{ user }}:x:{{ uid }}:4085:rubin_users:/home/{{ user[0] }}/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: shadow - namespace: "{{ user_namespace }}" - data: - shadow: | - root:*:18000:0:99999:7::: - bin:*:18000:0:99999:7::: - daemon:*:18000:0:99999:7::: - adm:*:18000:0:99999:7::: - lp:*:18000:0:99999:7::: - sync:*:18000:0:99999:7::: - shutdown:*:18000:0:99999:7::: - halt:*:18000:0:99999:7::: - mail:*:18000:0:99999:7::: - operator:*:18000:0:99999:7::: - games:*:18000:0:99999:7::: - ftp:*:18000:0:99999:7::: - nobody:*:18000:0:99999:7::: - systemd-network:*:18000:0:99999:7::: - dbus:*:18000:0:99999:7::: - lsst_lcl:*:18000:0:99999:7::: - tss:*:18000:0:99999:7::: - provisionator:*:18000:0:99999:7::: - {{user}}:*:18000:0:99999:7::: - - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. 
- # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - - apiVersion: v1 - kind: ResourceQuota - metadata: - name: user-quota - namespace: "{{ user_namespace }}" - spec: - hard: - limits.cpu: 9 - limits.memory: 27Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-data-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-data-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-home - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-home - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-lsst - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-lsst - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-scratch - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-scratch - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - -vault_secret_path: "secret/rubin/usdf-rsp/nublado2" - -pull-secret: - enabled: true - path: "secret/rubin/usdf-rsp/pull-secret" diff --git a/applications/nublado2/values.yaml b/applications/nublado2/values.yaml deleted file mode 100644 index 8585f00d47..0000000000 --- 
a/applications/nublado2/values.yaml +++ /dev/null @@ -1,420 +0,0 @@ -# Default values for nublado2. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -jupyterhub: - hub: - authenticatePrometheus: false - image: - name: lsstsqre/nublado2 - tag: "2.6.1" - resources: - limits: - cpu: 900m - memory: 1Gi # Should support about 200 users - config: - Authenticator: - enable_auth_state: true - JupyterHub: - authenticator_class: nublado2.auth.GafaelfawrAuthenticator - ServerApp: - shutdown_no_activity_timeout: 604800 # one week - db: - # Password comes from the nublado2-secret. - type: "postgres" - password: "true" - url: "postgresql://jovyan@postgres.postgres/jupyterhub" - containerSecurityContext: - runAsUser: 768 - runAsGroup: 768 - allowPrivilegeEscalation: false - baseUrl: "/nb" - # Note: this has to match up with the kubernetes secret created by the - # vault secret, and since you can't put templating in a values file, I'm - # just setting the name here. - existingSecret: "nublado2-secret" - extraConfig: - nublado.py: | - import nublado2.hub_config - nublado2.hub_config.HubConfig().configure(c) - extraVolumes: - - name: nublado-config - configMap: - name: nublado-config - - name: nublado-gafaelfawr - secret: - secretName: gafaelfawr-token - extraVolumeMounts: - - name: nublado-config - mountPath: /etc/jupyterhub/nublado_config.yaml - subPath: nublado_config.yaml - - name: nublado-gafaelfawr - mountPath: /etc/keys/gafaelfawr-token - subPath: token - # We still have to use our own, enabled at the top level, which is - # similar but not identical. This one still doesn't work, even if - # you explicitly enable port 8081 so the labs can talk to the Hub. - networkPolicy: - enabled: false - loadRoles: - self: - scopes: ['admin:servers!user', 'read:metrics'] - server: - scopes: ['inherit'] # Let server use API like user - - prePuller: - continuous: - enabled: false - hook: - enabled: false - - singleuser: - cloudMetadata: - blockWithIptables: false - cmd: "/opt/lsst/software/jupyterlab/runlab.sh" - defaultUrl: "/lab" - extraAnnotations: - argocd.argoproj.io/compare-options: 'IgnoreExtraneous' - argocd.argoproj.io/sync-options: 'Prune=false' - extraLabels: - hub.jupyter.org/network-access-hub: 'true' - argocd.argoproj.io/instance: 'nublado-users' - storage: - extraVolumes: - - name: dask - configMap: - name: dask - - name: idds-config - configMap: - name: idds-config - - name: tmp - emptyDir: {} - - name: butler-secret - secret: - secretName: butler-secret - - name: lab-environment - configMap: - defaultMode: 420 - name: lab-environment - - name: passwd - configMap: - defaultMode: 420 - name: passwd - - name: group - configMap: - defaultMode: 420 - name: group - extraVolumeMounts: - - name: dask - mountPath: /etc/dask - - name: idds-config - mountPath: /opt/lsst/software/jupyterlab/panda - - name: tmp - mountPath: /tmp - - name: butler-secret - mountPath: /opt/lsst/software/jupyterlab/butler-secret - - name: lab-environment - mountPath: /opt/lsst/software/jupyterlab/environment - - name: passwd - mountPath: /etc/passwd - readOnly: true - subPath: passwd - - name: group - mountPath: /etc/group - readOnly: true - subPath: group - type: none - - proxy: - service: - type: ClusterIP - chp: - networkPolicy: - interNamespaceAccessLabels: accept - # This currently causes Minikube deployment in GH-actions to fail. 
- # We want it sometime but it's not critical; it will help with - # scale-down - # pdb: - # enabled: true - # minAvailable: 1 - - # Any instantiation of this chart must also set ingress.hosts and add - # the nginx.ingress.kubernetes.io/auth-signin annotation pointing to the - # appropriate fully-qualified URLs for the Gafaelfawr /login route. - ingress: - enabled: true - - # -- Extra annotations to add to the ingress - # @default -- See `values.yaml` - annotations: - nginx.ingress.kubernetes.io/auth-method: "GET" - nginx.ingress.kubernetes.io/auth-response-headers: "Authorization,Cookie,X-Auth-Request-Email,X-Auth-Request-User,X-Auth-Request-Token" - nginx.ingress.kubernetes.io/auth-url: "http://gafaelfawr.gafaelfawr.svc.cluster.local:8080/auth?scope=exec:notebook&notebook=true&minimum_lifetime=2160000" - nginx.ingress.kubernetes.io/configuration-snippet: | - auth_request_set $auth_www_authenticate $upstream_http_www_authenticate; - auth_request_set $auth_status $upstream_http_x_error_status; - auth_request_set $auth_error_body $upstream_http_x_error_body; - error_page 403 = @autherror; - nginx.ingress.kubernetes.io/proxy-send-timeout: "300" - nginx.ingress.kubernetes.io/proxy-read-timeout: "300" - ingressClassName: "nginx" - pathSuffix: "*" - - cull: - enabled: true - timeout: 2592000 # 30 days -- shorten later - every: 600 # Check every ten minutes - users: true # log out user when we cull - removeNamedServers: true # Post-stop hook may already do this - maxAge: 5184000 # 60 days -- shorten later - - imagePullSecrets: - - name: pull-secret - - scheduling: - userScheduler: - enabled: false - userPlaceholder: - enabled: false - -config: - # -- Whether to use the cluster-internal PostgreSQL server instead of an - # external server. This is not used directly by the Nublado chart, but - # controls how the database password is managed. - internalDatabase: true - # -- base_url must be set in each instantiation of this chart to the URL of - # the primary ingress. It's used to construct API requests to the - # authentication service (which should go through the ingress). - base_url: "" - # -- butler_secret_path must be set here, because it's passed through to - # the lab rather than being part of the Hub configuration. - butler_secret_path: "" - # -- pull_secret_path must also be set here; it specifies resources in - # the lab namespace - pull_secret_path: "" - # -- images to pin to spawner menu - pinned_images: [] - # -- Cachemachine image policy: "available" or "desired". Use - # "desired" at instances with streaming image support. - cachemachine_image_policy: "available" - # -- shut down user pods on logout. Superfluous, because our - # LogoutHandler enforces this in any event, but nice to make explicit. - shutdown_on_logout: true - # -- definitions of Lab sizes available in a given instance - sizes: - - name: Small - cpu: 1 - ram: 4096M - - name: Medium - cpu: 2 - ram: 8192M - - name: Large - cpu: 4 - ram: 16384M - # -- Volumes to use for a particular instance - volumes: [] - # -- Where to mount volumes for a particular instance - volume_mounts: [] - - # -- Environment variables to set in spawned lab containers. Each value will - # be expanded using Jinja 2 templating. 
- # @default -- See `values.yaml` - lab_environment: - EXTERNAL_INSTANCE_URL: "{{ base_url }}" - FIREFLY_ROUTE: /portal/app - HUB_ROUTE: "{{ nublado_base_url }}" - JS9_ROUTE: /js9 - API_ROUTE: /api - TAP_ROUTE: /api/tap - SODA_ROUTE: /api/image/soda - WORKFLOW_ROUTE: /wf - AUTO_REPO_URLS: https://github.com/lsst-sqre/notebook-demo - NO_SUDO: "TRUE" - EXTERNAL_GID: "{{ gid if gid else uid }}" - EXTERNAL_GROUPS: "{{ external_groups }}" - EXTERNAL_UID: "{{ uid }}" - ACCESS_TOKEN: "{{ token }}" - IMAGE_DIGEST: "{{ options.image_info.digest }}" - IMAGE_DESCRIPTION: "{{ options.image_info.display_name }}" - RESET_USER_ENV: "{{ options.reset_user_env }}" - # We need to set CLEAR_DOTLOCAL until all images that didn't know - # about RESET_USER_ENV have aged out (late 2022) - CLEAR_DOTLOCAL: "{{ options.reset_user_env }}" - DEBUG: "{{ options.debug }}" - - # -- Templates for the user resources to create for each lab spawn. This is - # a string that can be templated and then loaded as YAML to generate a list - # of Kubernetes objects to create. - # @default -- See `values.yaml` - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - lsst_lcl:x:1000:{{ user }} - tss:x:59: - cgred:x:997: - screen:x:84: - jovyan:x:768:{{ user }}{% for g in groups %} - {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. 
- # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - -# Built-in network policy doesn't quite work (Labs can't talk to Hub, -# even with port 8081 explicitly enabled), so let's use our own for now. -network_policy: - enabled: true - -# The following will be set by parameters injected by Argo CD and should not -# be set in the individual environment values files. -global: - # -- Base path for Vault secrets - # @default -- Set by Argo CD - vaultSecretsPath: "" diff --git a/applications/obsloctap/README.md b/applications/obsloctap/README.md index c6cc3d3a5a..7dabea9cf2 100644 --- a/applications/obsloctap/README.md +++ b/applications/obsloctap/README.md @@ -11,6 +11,7 @@ Publish observing schedule | Key | Type | Default | Description | |-----|------|---------|-------------| | config.persistentVolumeClaims | list | `[]` | PersistentVolumeClaims to create. | +| config.separateSecrets | bool | `false` | Whether to use the new secrets management scheme | | config.volume_mounts | list | `[]` | Mount points for additional volumes | | config.volumes | list | `[]` | Additional volumes to attach | | environment | object | `{}` | Environment variables (e.g. butler configuration/auth parms) for panel | diff --git a/applications/obsloctap/secrets.yaml b/applications/obsloctap/secrets.yaml new file mode 100644 index 0000000000..3f830741d4 --- /dev/null +++ b/applications/obsloctap/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. 
+ copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/obsloctap/templates/deployment.yaml b/applications/obsloctap/templates/deployment.yaml index 8e0fe7ffc8..f878880fb3 100644 --- a/applications/obsloctap/templates/deployment.yaml +++ b/applications/obsloctap/templates/deployment.yaml @@ -18,7 +18,7 @@ spec: # butler-secrets-raw is the secrets we get from vault - name: "butler-secrets-raw" secret: - secretName: "butler-secret" + secretName: {{ include "obsloctap.fullname" . }} # butler-secrets are the copied and chmoded versions - name: "butler-secrets" emptyDir: {} diff --git a/applications/obsloctap/templates/vault-secrets.yaml b/applications/obsloctap/templates/vault-secrets.yaml index 2a0c967229..2229f2438a 100644 --- a/applications/obsloctap/templates/vault-secrets.yaml +++ b/applications/obsloctap/templates/vault-secrets.yaml @@ -1,10 +1,13 @@ ---- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: butler-secret + name: {{ template "obsloctap.fullname" . }} labels: {{- include "obsloctap.labels" . | nindent 4 }} spec: +{{- if .Values.config.separateSecrets }} + path: "{{ .Values.global.vaultSecretsPath }}/obsloctap" +{{- else }} path: "{{ .Values.global.vaultSecretsPath }}/butler-secret" +{{- end }} type: Opaque diff --git a/applications/obsloctap/values.yaml b/applications/obsloctap/values.yaml index bc23d3ab77..7b38506bcf 100644 --- a/applications/obsloctap/values.yaml +++ b/applications/obsloctap/values.yaml @@ -16,7 +16,6 @@ ingress: # -- Additional annotations to add to the ingress annotations: {} - config: # -- Additional volumes to attach volumes: [] @@ -27,6 +26,8 @@ config: # -- PersistentVolumeClaims to create. persistentVolumeClaims: [] + # -- Whether to use the new secrets management scheme + separateSecrets: false # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. 
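The obsloctap change above gates the VaultSecret path on the new config.separateSecrets flag. As a minimal sketch of what the updated applications/obsloctap/templates/vault-secrets.yaml renders to, assuming an illustrative global.vaultSecretsPath of "secret/phalanx/idfdev" and a release whose fullname resolves to "obsloctap" (neither value is taken from this diff; labels omitted for brevity):

# With config.separateSecrets: false (the default), the chart keeps reading
# the shared Butler secret, so existing environments are unaffected:
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
  name: obsloctap
spec:
  path: "secret/phalanx/idfdev/butler-secret"
  type: Opaque
---
# With config.separateSecrets: true, the chart switches to the per-application
# path populated by the new secrets management scheme, whose expected keys are
# declared in the new applications/obsloctap/secrets.yaml:
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
  name: obsloctap
spec:
  path: "secret/phalanx/idfdev/obsloctap"
  type: Opaque

In either case the resulting Kubernetes secret is now named after the release fullname rather than the hard-coded "butler-secret", which is why the deployment's butler-secrets-raw volume above changes to secretName: {{ include "obsloctap.fullname" . }}.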
diff --git a/applications/obssys/Chart.yaml b/applications/obssys/Chart.yaml new file mode 100644 index 0000000000..5ee1f1eafa --- /dev/null +++ b/applications/obssys/Chart.yaml @@ -0,0 +1,33 @@ +apiVersion: v2 +name: obssys +version: 1.0.0 +description: Deployment for the Observatory System CSCs +dependencies: +- name: csc_collector + version: 1.0.0 + repository: file://../../charts/csc_collector +- name: csc + alias: atqueue + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: atscheduler + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: authorize + version: 1.0.0 + condition: authorize.enabled + repository: file://../../charts/csc +- name: csc + alias: mtqueue + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: mtscheduler + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: watcher + version: 1.0.0 + repository: file://../../charts/csc diff --git a/applications/obssys/README.md b/applications/obssys/README.md new file mode 100644 index 0000000000..6890f6ad30 --- /dev/null +++ b/applications/obssys/README.md @@ -0,0 +1,21 @@ +# obssys + +Deployment for the Observatory System CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| authorize.enabled | bool | `false` | Enable the Authorize CSC | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml new file mode 100644 index 0000000000..7be607d4ae --- /dev/null +++ b/applications/obssys/values-tucson-teststand.yaml @@ -0,0 +1,227 @@ +namespace: &ns obssys + +csc_collector: + namespace: *ns + + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + - name: butler-secret + key: butler-secret + - name: love + key: ts/software/love + +atqueue: + namespace: *ns + classifier: scriptqueue2 + image: + repository: ts-dockerhub.lsst.org/scriptqueue + 
pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + RUN_ARG: 2 --state enabled + USER_USERNAME: user + butlerSecret: + containerPath: &abS-cP /home/saluser/.lsst + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *abS-cP + nfsMountpoint: + - name: auxtel-gen3-butler + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS + - name: auxtel-gen3-oods + containerPath: /data/lsstdata/TTS/auxtel + readOnly: true + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata/TTS/auxtel + - name: comcam-gen3-butler + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam + - name: comcam-gen3-oods + containerPath: /data/lsstdata/TTS/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/comcam + - name: project-shared + containerPath: /project + readOnly: false + server: nfs-project.tu.lsst.org + serverPath: /project + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scriptqueue + topologyKey: "kubernetes.io/hostname" + +atscheduler: + namespace: *ns + classifier: scheduler2 + image: + repository: ts-dockerhub.lsst.org/scheduler + pullPolicy: Always + env: + INDEX: 2 + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + nfsMountpoint: + - name: rubin-sim-data + containerPath: /home/saluser/rubin_sim_data + readOnly: false + server: nfs-scratch.tu.lsst.org + serverPath: /scratch/scheduler + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scheduler + topologyKey: "kubernetes.io/hostname" + +authorize: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/authorize + pullPolicy: Always + env: + RUN_ARG: --state enabled + AUTHLIST_USER_NAME: authlist_user + envSecrets: + - name: AUTHLIST_USER_PASS + secretName: love + secretKey: authlist-user-pass + +mtqueue: + namespace: *ns + classifier: scriptqueue1 + image: + repository: ts-dockerhub.lsst.org/scriptqueue + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + RUN_ARG: 1 --state enabled + USER_USERNAME: user + butlerSecret: + containerPath: &mbS-cP /home/saluser/.lsst + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *mbS-cP + nfsMountpoint: + - name: auxtel-gen3-butler + containerPath: /repo/LATISS + readOnly: false + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS + - name: auxtel-gen3-oods + containerPath: /data/lsstdata/TTS/auxtel + readOnly: true + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata/TTS/auxtel + - name: comcam-gen3-butler + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam + - name: comcam-gen3-oods + containerPath: /data/lsstdata/TTS/comcam + readOnly: true + server: 
comcam-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/comcam + - name: project-shared + containerPath: /project + readOnly: false + server: nfs-project.tu.lsst.org + serverPath: /project + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scriptqueue + topologyKey: "kubernetes.io/hostname" + +mtscheduler: + namespace: *ns + classifier: scheduler1 + image: + repository: ts-dockerhub.lsst.org/scheduler + pullPolicy: Always + env: + INDEX: 1 + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + nfsMountpoint: + - name: rubin-sim-data + containerPath: /home/saluser/rubin_sim_data + readOnly: false + server: nfs-scratch.tu.lsst.org + serverPath: /scratch/scheduler + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scheduler + topologyKey: "kubernetes.io/hostname" + +watcher: + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/watcher + pullPolicy: Always diff --git a/applications/obssys/values.yaml b/applications/obssys/values.yaml new file mode 100644 index 0000000000..aa900f48bb --- /dev/null +++ b/applications/obssys/values.yaml @@ -0,0 +1,59 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +authorize: + # -- Enable the Authorize CSC + enabled: false + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/fileservers/Chart.yaml b/applications/ocps-uws-job/Chart.yaml similarity index 61% rename from applications/fileservers/Chart.yaml rename to applications/ocps-uws-job/Chart.yaml index 1190f73e60..aa41890510 100644 --- a/applications/fileservers/Chart.yaml +++ b/applications/ocps-uws-job/Chart.yaml @@ -1,3 +1,3 @@ apiVersion: v2 -name: fileservers +name: ocps-uws-job version: 1.0.0 diff --git a/applications/onepassword-connect-dev/README.md b/applications/onepassword-connect-dev/README.md deleted file mode 100644 index e0bd21762a..0000000000 --- a/applications/onepassword-connect-dev/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# onepassword-connect-dev - -1Password API server (dev) - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| idfdev.connect.applicationName | string | `"connect-idfdev"` | Name of the Kubernetes Deployment | -| idfdev.connect.credentialsKey | string | `"op-session"` | Name of key inside secret containing 1Password credentials | -| idfdev.connect.credentialsName | string | `"idfdev-secret"` | Name of secret containing the 1Password credentials | -| idfdev.connect.serviceType | string | `"ClusterIP"` | Type of service to create | diff --git a/applications/onepassword-connect-dev/secrets.yaml b/applications/onepassword-connect-dev/secrets.yaml deleted file mode 100644 index eac7d46c60..0000000000 --- a/applications/onepassword-connect-dev/secrets.yaml +++ /dev/null @@ -1,5 +0,0 @@ -idfdev: - description: >- - Credentials used by the 1Password Connect API server to access the vault - for the IDF dev (data-dev.lsst.cloud) environment. This secret can be - changed at any time. diff --git a/applications/onepassword-connect/.helmignore b/applications/onepassword-connect/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/onepassword-connect/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/applications/onepassword-connect-dev/Chart.yaml b/applications/onepassword-connect/Chart.yaml
similarity index 77%
rename from applications/onepassword-connect-dev/Chart.yaml
rename to applications/onepassword-connect/Chart.yaml
index 65de2dccf6..d808bd3756 100644
--- a/applications/onepassword-connect-dev/Chart.yaml
+++ b/applications/onepassword-connect/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
-description: 1Password API server (dev)
-name: onepassword-connect-dev
+description: 1Password API server
+name: onepassword-connect
 type: application
 version: 1.0.0

@@ -8,7 +8,6 @@ dependencies:
 - name: connect
   version: 1.14.0
   repository: https://1password.github.io/connect-helm-charts/
-  alias: idfdev

 annotations:
   phalanx.lsst.io/docs: |
diff --git a/applications/onepassword-connect/README.md b/applications/onepassword-connect/README.md
new file mode 100644
index 0000000000..24e2c7f2b9
--- /dev/null
+++ b/applications/onepassword-connect/README.md
@@ -0,0 +1,14 @@
+# onepassword-connect
+
+1Password API server
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| connect.connect.credentialsKey | string | `"op-session"` | Name of key inside secret containing 1Password credentials |
+| connect.connect.credentialsName | string | `"onepassword-connect-secret"` | Name of secret containing the 1Password credentials |
+| connect.connect.serviceType | string | `"ClusterIP"` | Type of service to create |
+| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
+| global.host | string | Set by Argo CD | Host name for ingress |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
diff --git a/applications/onepassword-connect/secrets.yaml b/applications/onepassword-connect/secrets.yaml
new file mode 100644
index 0000000000..a3906d8255
--- /dev/null
+++ b/applications/onepassword-connect/secrets.yaml
@@ -0,0 +1,14 @@
+op-session:
+  description: >-
+    Credentials used by the 1Password Connect API server to access 1Password
+    vaults. This credential is created when the secrets automation workflow
+    is set up and has access to all of the vaults served by that 1Password
+    Connect server. It is separate from the 1Password Connect tokens, which
+    are issued for each environment and have access only to the vault for
+    that environment.
+
+    This is a base64-encoded version of the credentials file for the Connect
+    server created as part of the secrets automation workflow. (In other
+    words, the static secret itself is the base64-encoded version of the
+    JSON, and when written into a Kubernetes ``Secret`` resource, it will be
+    base64-encoded twice.) This secret can be changed at any time.
diff --git a/applications/onepassword-connect/templates/_helpers.tpl b/applications/onepassword-connect/templates/_helpers.tpl
new file mode 100644
index 0000000000..0ca015ad61
--- /dev/null
+++ b/applications/onepassword-connect/templates/_helpers.tpl
@@ -0,0 +1,26 @@
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "onepassword-connect.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "onepassword-connect.labels" -}}
+helm.sh/chart: {{ include "onepassword-connect.chart" . 
}} +{{ include "onepassword-connect.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "onepassword-connect.selectorLabels" -}} +app.kubernetes.io/name: "onepassword-connect" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/onepassword-connect-dev/templates/idfdev-ingress.yaml b/applications/onepassword-connect/templates/ingress.yaml similarity index 68% rename from applications/onepassword-connect-dev/templates/idfdev-ingress.yaml rename to applications/onepassword-connect/templates/ingress.yaml index b80a06c5bb..2639dc2627 100644 --- a/applications/onepassword-connect-dev/templates/idfdev-ingress.yaml +++ b/applications/onepassword-connect/templates/ingress.yaml @@ -1,9 +1,9 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ .Values.idfdev.connect.applicationName | quote }} + name: {{ .Values.connect.connect.applicationName | quote }} labels: - {{- include "onepassword-connect-dev.labels" . | nindent 4 }} + {{- include "onepassword-connect.labels" . | nindent 4 }} annotations: nginx.ingress.kubernetes.io/rewrite-target: "/$1" nginx.ingress.kubernetes.io/ssl-redirect: "true" @@ -14,10 +14,10 @@ spec: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/1password/idfdev/(.*)" + - path: "/1password/(.*)" pathType: "ImplementationSpecific" backend: service: - name: {{ .Values.idfdev.connect.applicationName | quote }} + name: {{ .Values.connect.connect.applicationName | quote }} port: name: "connect-api" diff --git a/applications/onepassword-connect-dev/templates/idfdev-vault-secrets.yaml b/applications/onepassword-connect/templates/vault-secrets.yaml similarity index 51% rename from applications/onepassword-connect-dev/templates/idfdev-vault-secrets.yaml rename to applications/onepassword-connect/templates/vault-secrets.yaml index d8d5bbdf93..fb9172122b 100644 --- a/applications/onepassword-connect-dev/templates/idfdev-vault-secrets.yaml +++ b/applications/onepassword-connect/templates/vault-secrets.yaml @@ -1,11 +1,9 @@ apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: "idfdev-secret" + name: {{ .Values.connect.connect.credentialsName | quote }} labels: - {{- include "onepassword-connect-dev.labels" . | nindent 4 }} + {{- include "onepassword-connect.labels" . 
| nindent 4 }} spec: - path: "{{ .Values.global.vaultSecretsPath }}/onepassword-connect-dev" + path: "{{ .Values.global.vaultSecretsPath }}/onepassword-connect" type: "Opaque" - templates: - op-session: "{% .Secrets.idfdev %}" diff --git a/applications/onepassword-connect-dev/values-roundtable-dev.yaml b/applications/onepassword-connect/values-roundtable-dev.yaml similarity index 100% rename from applications/onepassword-connect-dev/values-roundtable-dev.yaml rename to applications/onepassword-connect/values-roundtable-dev.yaml diff --git a/src/phalanx/testing/__init__.py b/applications/onepassword-connect/values-roundtable-prod.yaml similarity index 100% rename from src/phalanx/testing/__init__.py rename to applications/onepassword-connect/values-roundtable-prod.yaml diff --git a/applications/onepassword-connect-dev/values.yaml b/applications/onepassword-connect/values.yaml similarity index 80% rename from applications/onepassword-connect-dev/values.yaml rename to applications/onepassword-connect/values.yaml index 2af6cb4f5e..330aca9fb1 100644 --- a/applications/onepassword-connect-dev/values.yaml +++ b/applications/onepassword-connect/values.yaml @@ -1,14 +1,11 @@ -# Default values for onepassword-connect-dev. +# Default values for onepassword-connect. # This is a YAML-formatted file. # Declare variables to be passed into your templates. -idfdev: +connect: connect: - # -- Name of the Kubernetes Deployment - applicationName: "connect-idfdev" - # -- Name of secret containing the 1Password credentials - credentialsName: "idfdev-secret" + credentialsName: "onepassword-connect-secret" # -- Name of key inside secret containing 1Password credentials credentialsKey: "op-session" diff --git a/applications/ook/Chart.yaml b/applications/ook/Chart.yaml index 6ad1aeae30..e5de862ee2 100644 --- a/applications/ook/Chart.yaml +++ b/applications/ook/Chart.yaml @@ -2,7 +2,10 @@ apiVersion: v2 name: ook version: 1.0.0 appVersion: "0.9.0" -description: Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io. +description: > + Ook is the librarian service for Rubin Observatory. Ook indexes + documentation content into the Algolia search engine that powers the Rubin + Observatory documentation portal, lsst.io. type: application home: https://ook.lsst.io/ sources: diff --git a/applications/ook/README.md b/applications/ook/README.md index 6487a2508b..cc05df847e 100644 --- a/applications/ook/README.md +++ b/applications/ook/README.md @@ -1,6 +1,6 @@ # ook -Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io. +Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, lsst.io. **Homepage:** diff --git a/applications/ook/secrets.yaml b/applications/ook/secrets.yaml new file mode 100644 index 0000000000..29d39798cc --- /dev/null +++ b/applications/ook/secrets.yaml @@ -0,0 +1,26 @@ +ALGOLIA_APP_ID: + description: >- + The ID of the Algolia application. +ALGOLIA_API_KEY: + description: >- + The admin API key for the Algolia application. +OOK_GITHUB_APP_ID: + description: >- + The ID of the GitHub App shared by all Squarebot services. 
+ copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_ID +OOK_GITHUB_APP_PRIVATE_KEY: + description: >- + The private key for the GitHub App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_PRIVATE_KEY +ca.crt: + description: >- + The cluster CA certificate for the Kubernetes cluster. This is available + on the Kafka resource in the sasquatch application under the + ``status.listeners[].certificate`` field. + copy: + application: squarebot + key: ca.crt diff --git a/applications/plot-navigator/Chart.yaml b/applications/plot-navigator/Chart.yaml index 8d6f724b52..33aa44d9e2 100644 --- a/applications/plot-navigator/Chart.yaml +++ b/applications/plot-navigator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: plot-navigator description: Panel-based plot viewer -version: 1.7.0 +version: 1.0.0 sources: - https://github.com/lsst-dm/pipetask-plot-navigator appVersion: "0.10.2" diff --git a/applications/plot-navigator/README.md b/applications/plot-navigator/README.md index 6ca85a93d7..87a645d585 100644 --- a/applications/plot-navigator/README.md +++ b/applications/plot-navigator/README.md @@ -11,6 +11,7 @@ Panel-based plot viewer | Key | Type | Default | Description | |-----|------|---------|-------------| | config.persistentVolumeClaims | list | `[]` | PersistentVolumeClaims to create. | +| config.separateSecrets | bool | `false` | Whether to use the new secrets management scheme | | config.volume_mounts | list | `[]` | Mount points for additional volumes | | config.volumes | list | `[]` | Additional volumes to attach | | environment | object | `{}` | Environment variables (e.g. butler configuration/auth parms) for panel | @@ -18,5 +19,5 @@ Panel-based plot viewer | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.repository | string | `"ghcr.io/lsst-dm/pipetask-plot-navigator"` | plot-navigator image to use | -| image.tag | string | `""` | | +| image.tag | string | The appVersion of the chart | Tag of plot-navigator image to use | | ingress.annotations | object | `{}` | Additional annotations to add to the ingress | diff --git a/applications/plot-navigator/secrets.yaml b/applications/plot-navigator/secrets.yaml new file mode 100644 index 0000000000..3f830741d4 --- /dev/null +++ b/applications/plot-navigator/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/plot-navigator/templates/deployment.yaml b/applications/plot-navigator/templates/deployment.yaml index e2c8cf52f2..fbd8bad61c 100644 --- a/applications/plot-navigator/templates/deployment.yaml +++ b/applications/plot-navigator/templates/deployment.yaml @@ -14,13 +14,11 @@ spec: labels: {{- include "plot-navigator.selectorLabels" . 
| nindent 8 }} spec: - imagePullSecrets: - - name: "pull-secret" volumes: # butler-secrets-raw is the secrets we get from vault - name: "butler-secrets-raw" secret: - secretName: "butler-secret" + secretName: {{ include "plot-navigator.fullname" . }} # butler-secrets are the copied and chmoded versions - name: "butler-secrets" emptyDir: {} diff --git a/applications/plot-navigator/templates/vault-secrets.yaml b/applications/plot-navigator/templates/vault-secrets.yaml index c189eb29c7..43310ae6b9 100644 --- a/applications/plot-navigator/templates/vault-secrets.yaml +++ b/applications/plot-navigator/templates/vault-secrets.yaml @@ -1,20 +1,13 @@ ---- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: butler-secret + name: {{ template "plot-navigator.fullname" . }} labels: {{- include "plot-navigator.labels" . | nindent 4 }} spec: +{{- if .Values.config.separateSecrets }} + path: "{{ .Values.global.vaultSecretsPath }}/plot-navigator" +{{- else }} path: "{{ .Values.global.vaultSecretsPath }}/butler-secret" +{{- end }} type: Opaque ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: pull-secret - labels: - {{- include "plot-navigator.labels" . | nindent 4 }} -spec: - path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" - type: kubernetes.io/dockerconfigjson diff --git a/applications/plot-navigator/values-idfint.yaml b/applications/plot-navigator/values-idfint.yaml index 4dc30dc478..2a8515e988 100644 --- a/applications/plot-navigator/values-idfint.yaml +++ b/applications/plot-navigator/values-idfint.yaml @@ -3,3 +3,5 @@ environment: PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" S3_ENDPOINT_URL: "https://storage.googleapis.com" +config: + separateSecrets: true diff --git a/applications/plot-navigator/values-usdfint.yaml b/applications/plot-navigator/values-usdfint.yaml new file mode 100644 index 0000000000..72f5b541b2 --- /dev/null +++ b/applications/plot-navigator/values-usdfint.yaml @@ -0,0 +1,26 @@ +environment: + DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" + PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" + PGUSER: "rubin" + AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + BUTLER_DEFAULT_REPO: "/repo/main" + +config: + volumes: + - name: sdf-group-rubin + persistentVolumeClaim: + claimName: sdf-group-rubin + - name: sdf-data-rubin + persistentVolumeClaim: + claimName: sdf-data-rubin + volume_mounts: + - name: sdf-group-rubin + mountPath: /sdf/group/rubin + - name: sdf-data-rubin + mountPath: /sdf/data/rubin + persistentVolumeClaims: + - name: sdf-group-rubin + storageClassName: sdf-group-rubin + - name: sdf-data-rubin + storageClassName: sdf-data-rubin diff --git a/applications/plot-navigator/values.yaml b/applications/plot-navigator/values.yaml index 29ed4cb802..3a808b27c6 100644 --- a/applications/plot-navigator/values.yaml +++ b/applications/plot-navigator/values.yaml @@ -1,6 +1,9 @@ image: # -- plot-navigator image to use repository: ghcr.io/lsst-dm/pipetask-plot-navigator + + # -- Tag of plot-navigator image to use + # @default -- The appVersion of the chart tag: "" # -- Environment variables (e.g. 
butler configuration/auth parms) for panel @@ -10,7 +13,6 @@ ingress: # -- Additional annotations to add to the ingress annotations: {} - config: # -- Additional volumes to attach volumes: [] @@ -21,6 +23,8 @@ config: # -- PersistentVolumeClaims to create. persistentVolumeClaims: [] + # -- Whether to use the new secrets management scheme + separateSecrets: false # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index a9004bb2c4..632e887269 100644 --- a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -9,7 +9,7 @@ appVersion: "suit-2023.2.3" dependencies: - name: redis - version: 1.0.8 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/portal/values-usdfint.yaml b/applications/portal/values-usdfint.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/postgres/secrets.yaml b/applications/postgres/secrets.yaml index 5e03d36d1a..ed33a80e1d 100644 --- a/applications/postgres/secrets.yaml +++ b/applications/postgres/secrets.yaml @@ -10,12 +10,6 @@ gafaelfawr_password: copy: application: gafaelfawr key: database-password -jupyterhub_password: - description: "Password for the Nublado v2 JupyterHub session database." - if: jupyterhub_db - copy: - application: nublado2 - key: hub_db_password lovelog_password: description: "Password for the lovelog database." if: lovelog_db diff --git a/applications/postgres/values-idfdev.yaml b/applications/postgres/values-idfdev.yaml deleted file mode 100644 index 20c336e86a..0000000000 --- a/applications/postgres/values-idfdev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -nublado3_db: - user: "nublado3" - db: "nublado3" diff --git a/applications/postgres/values-usdfint.yaml b/applications/postgres/values-usdfint.yaml new file mode 100644 index 0000000000..dbc5324ac3 --- /dev/null +++ b/applications/postgres/values-usdfint.yaml @@ -0,0 +1,11 @@ +jupyterhub_db: + user: 'jovyan' + db: 'jupyterhub' +nublado3_db: + user: 'nublado3' + db: 'nublado3' +gafaelfawr_db: + user: 'gafaelfawr' + db: 'gafaelfawr' + +postgresStorageClass: 'wekafs--sdf-k8s01' diff --git a/applications/production-tools/Chart.yaml b/applications/production-tools/Chart.yaml index 95add46d37..3df45ea6a5 100644 --- a/applications/production-tools/Chart.yaml +++ b/applications/production-tools/Chart.yaml @@ -1,7 +1,6 @@ apiVersion: v2 name: production-tools version: 1.0.0 -dependencies: description: A collection of utility pages for monitoring data processing. sources: - https://github.com/lsst-dm/production_tools diff --git a/applications/production-tools/README.md b/applications/production-tools/README.md index cb7fa475cb..f2d753296d 100644 --- a/applications/production-tools/README.md +++ b/applications/production-tools/README.md @@ -11,6 +11,7 @@ A collection of utility pages for monitoring data processing. | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the production-tools deployment pod | +| config.separateSecrets | bool | `false` | Whether to use the new secrets management scheme | | environment | object | `{}` | | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | @@ -18,7 +19,7 @@ A collection of utility pages for monitoring data processing. 
| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the production-tools image | | image.repository | string | `"lsstdm/production_tools"` | Image to use in the production-tools deployment | -| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| image.tag | string | The appVersion of the chart | Tag of production-tools image to use | | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selection rules for the production-tools deployment pod | diff --git a/applications/production-tools/secrets.yaml b/applications/production-tools/secrets.yaml new file mode 100644 index 0000000000..3f830741d4 --- /dev/null +++ b/applications/production-tools/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/production-tools/templates/deployment.yaml b/applications/production-tools/templates/deployment.yaml index af46c2995e..932771af6e 100644 --- a/applications/production-tools/templates/deployment.yaml +++ b/applications/production-tools/templates/deployment.yaml @@ -29,7 +29,7 @@ spec: # butler-secrets-raw is the secrets we get from vault - name: "butler-secrets-raw" secret: - secretName: "butler-secret" + secretName: {{ include "production-tools.fullname" . }} # butler-secrets are the copied and chmoded versions - name: "butler-secrets" emptyDir: {} diff --git a/applications/production-tools/templates/vault-secrets.yaml b/applications/production-tools/templates/vault-secrets.yaml index 0b90cc3b7a..e93329880b 100644 --- a/applications/production-tools/templates/vault-secrets.yaml +++ b/applications/production-tools/templates/vault-secrets.yaml @@ -1,12 +1,15 @@ ---- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: butler-secret + name: {{ template "production-tools.fullname" . }} labels: {{- include "production-tools.labels" . 
| nindent 4 }} spec: +{{- if .Values.config.separateSecrets }} + path: "{{ .Values.global.vaultSecretsPath }}/production-tools" +{{- else }} path: "{{ .Values.global.vaultSecretsPath }}/butler-secret" +{{- end }} type: Opaque --- apiVersion: ricoberger.de/v1alpha1 diff --git a/applications/production-tools/values-idfint.yaml b/applications/production-tools/values-idfint.yaml index b89176b204..c846f462a2 100644 --- a/applications/production-tools/values-idfint.yaml +++ b/applications/production-tools/values-idfint.yaml @@ -3,3 +3,5 @@ environment: LOG_BUCKET: "drp-us-central1-logging" LOG_PREFIX: "Panda-RubinLog" WEB_CONCURRENCY: "4" +config: + separateSecrets: true diff --git a/applications/production-tools/values.yaml b/applications/production-tools/values.yaml index d0196e8401..a405a3726f 100644 --- a/applications/production-tools/values.yaml +++ b/applications/production-tools/values.yaml @@ -12,7 +12,8 @@ image: # -- Pull policy for the production-tools image pullPolicy: IfNotPresent - # -- Overrides the image tag whose default is the chart appVersion. + # -- Tag of production-tools image to use + # @default -- The appVersion of the chart tag: "" # -- Override the base name for resources @@ -31,6 +32,10 @@ ingress: # -- Additional annotations for the ingress rule annotations: {} +config: + # -- Whether to use the new secrets management scheme + separateSecrets: false + # -- Resource limits and requests for the production-tools deployment pod resources: {} diff --git a/applications/prompt-proto-service-hsc/Chart.yaml b/applications/prompt-proto-service-hsc/Chart.yaml index 7afc1a667c..4b7b508ea2 100644 --- a/applications/prompt-proto-service-hsc/Chart.yaml +++ b/applications/prompt-proto-service-hsc/Chart.yaml @@ -4,14 +4,17 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles HSC images. -home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index fa19d57c47..0c5921e80d 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -2,43 +2,38 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles HSC images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values | Key | Type | Default | Description | |-----|------|---------|-------------| | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. 
| -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. | | prompt-proto-service.instrument.name | string | `"HSC"` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). 
| | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | | prompt-proto-service.s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index fb1b435a9d..a5bb1093c6 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -5,14 +5,15 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. 
tag: latest instrument: - pipelines: (survey="SURVEY")=[${PROMPT_PROTOTYPE_DIR}/pipelines/HSC/ApPipe.yaml] + pipelines: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] calibRepo: s3://rubin:rubin-pp-users/central_repo/ + calibRepoPguser: hsc_prompt s3: imageBucket: rubin:rubin-pp @@ -25,11 +26,5 @@ prompt-proto-service: apdb: url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: # TODO: remove on DM-40839 - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 - db: ppcentralbutler - user: pp fullnameOverride: "prompt-proto-service-hsc" diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 87e306249d..9208930e56 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -12,7 +12,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent @@ -23,7 +23,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: HSC # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -32,6 +32,10 @@ prompt-proto-service: # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. + # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. + # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images @@ -53,37 +57,21 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). 
- imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: "" # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: "" # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). - # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/applications/prompt-proto-service-latiss/Chart.yaml b/applications/prompt-proto-service-latiss/Chart.yaml index a265da32fe..cb9215abe4 100644 --- a/applications/prompt-proto-service-latiss/Chart.yaml +++ b/applications/prompt-proto-service-latiss/Chart.yaml @@ -4,14 +4,17 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LATISS images. -home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index ca527039dd..5a8a15fbdf 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -2,43 +2,38 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LATISS images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values | Key | Type | Default | Description | |-----|------|---------|-------------| | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. 
| -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. | | prompt-proto-service.instrument.name | string | `"LATISS"` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `"latiss_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). 
| | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | | prompt-proto-service.s3.endpointUrl | string | `""` | | diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index b55cdc8572..5d7718ff1c 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -5,17 +5,22 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. 
- tag: AuxTel20230524-w_2023_20
+ tag: latest
 instrument:
- calibRepo: s3://rubin-summit-users/
+ pipelines: >-
+ (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml,
+ ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml]
+ calibRepo: s3://rubin:rubin-pp-users/central_repo/
+ calibRepoPguser: latiss_prompt
 s3:
- imageBucket: rubin-pp
+ imageBucket: rubin:rubin-pp
 endpointUrl: https://s3dfrgw.slac.stanford.edu
+ disableBucketValidation: '1'
 imageNotifications:
 kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092
@@ -23,9 +28,5 @@ prompt-proto-service:
 apdb:
 url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu/lsst-devl
- ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839
-
- registry: # TODO: remove on DM-40839
- ip: usdf-butler.slac.stanford.edu:5432
 fullnameOverride: "prompt-proto-service-latiss"
diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml
index 9d69579cf4..6ad883454d 100644
--- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml
+++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml
@@ -5,21 +5,28 @@ prompt-proto-service:
 revision: "12"
 image:
- repository: ghcr.io/lsst-dm/prompt-proto-service
 pullPolicy: IfNotPresent
 # Overrides the image tag whose default is the chart appVersion.
- tag: d_2023_09_22
+ tag: d_2023_12_18
 instrument:
 pipelines: >-
- (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/ApPipe.yaml,
- ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml,
- ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml]
- (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/ApPipe.yaml,
- ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml,
- ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml]
- (survey="spec")=[] (survey="spec_with_rotation")=[] (survey="spec_bright")=[] (survey="spec_bright_with_rotation")=[] (survey="spec_pole")=[] (survey="spec_pole_with_rotation")=[] (survey="")=[]
- calibRepo: /app/butler
+ (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml,
+ ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml,
+ ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml]
+ (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml,
+ ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml,
+ ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml]
+ (survey="spec")=[]
+ (survey="spec-survey")=[]
+ (survey="spec_with_rotation")=[]
+ (survey="spec_bright")=[]
+ (survey="spec_bright_with_rotation")=[]
+ (survey="spec_pole")=[]
+ (survey="spec_pole_with_rotation")=[]
+ (survey="")=[]
+ calibRepo: s3://rubin-summit-users
+ calibRepoPguser: rubin
 s3:
 imageBucket: rubin-summit
@@ -28,14 +35,13 @@ prompt-proto-service:
 imageNotifications:
 kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092
 topic: rubin-prompt-processing-prod
+ # The scheduler adds an extra 60-80-second delay for the first visit in a
+ # sequence, and files can take up to 20 seconds to arrive. This scheduler
+ # delay is associated with CWFS engineering data and should not apply to
+ # other cameras.
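
Spelled out, the comment above implies a worst case of roughly 80 + 20 = 100 seconds, so the production value that follows leaves about ten seconds of margin, while the new chart default of `'20'` budgets for file arrival alone. A minimal sketch of the override:

```yaml
prompt-proto-service:
  imageNotifications:
    # 80 s worst-case scheduler delay + 20 s file arrival, plus ~10 s margin.
    # Quoted because the chart treats imageTimeout as a string.
    imageTimeout: '110'
```
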
+ imageTimeout: '110' apdb: url: postgresql://rubin@usdf-prompt-processing.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: - ip: usdf-butler.slac.stanford.edu:5432 # TODO: remove on DM-40839 - centralRepoFile: true logLevel: lsst.resources=DEBUG diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 62a2cec5d9..34ae28ceee 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -13,7 +13,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent @@ -24,7 +24,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: LATISS # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -32,7 +32,11 @@ prompt-proto-service: # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set - calibRepo: s3://rubin-summit-users/ + calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. + # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. + # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images @@ -53,37 +57,21 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). - imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: lsstdb1 # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: rubin # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. 
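
The apdb change above collapses the deprecated `ip`, `db`, and `user` keys (all tagged `TODO: remove on DM-40839`) into the single SQLAlchemy-form `url`. A minimal sketch of the consolidated section, reusing the usdfprod endpoint shown above:

```yaml
prompt-proto-service:
  apdb:
    # One SQLAlchemy URL replaces the deprecated ip/db/user triple.
    url: postgresql://rubin@usdf-prompt-processing.slac.stanford.edu:5432/lsst-devl
    namespace: pp_apdb
```
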
centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). - # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/applications/prompt-proto-service-lsstcam/Chart.yaml b/applications/prompt-proto-service-lsstcam/Chart.yaml index f35a54ad71..8e47a14f03 100644 --- a/applications/prompt-proto-service-lsstcam/Chart.yaml +++ b/applications/prompt-proto-service-lsstcam/Chart.yaml @@ -4,14 +4,17 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTCam images. -home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index 587a346d60..e07d659977 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -2,43 +2,38 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTCam images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values | Key | Type | Default | Description | |-----|------|---------|-------------| | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). 
| +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. | | prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. 
| Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | | prompt-proto-service.s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml index 228fe82b20..3dc7d35210 100644 --- a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml @@ -5,13 +5,14 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: latest instrument: calibRepo: s3://rubin-summit-users/ + calibRepoPguser: rubin s3: imageBucket: rubin:rubin-pp @@ -24,9 +25,5 @@ prompt-proto-service: apdb: url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: # TODO: remove on DM-40839 - ip: usdf-butler.slac.stanford.edu:5432 fullnameOverride: "prompt-proto-service-lsstcam" diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index 918c2acdb7..4d70040810 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -12,7 +12,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent @@ -23,7 +23,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: "" # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -32,6 +32,10 @@ prompt-proto-service: # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. 
# @default -- None, must be set calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. + # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. + # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images @@ -53,37 +57,21 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). - imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: lsstdb1 # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: rubin # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). - # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/applications/prompt-proto-service-lsstcomcam/Chart.yaml b/applications/prompt-proto-service-lsstcomcam/Chart.yaml index 201c406019..ab5e410dad 100644 --- a/applications/prompt-proto-service-lsstcomcam/Chart.yaml +++ b/applications/prompt-proto-service-lsstcomcam/Chart.yaml @@ -4,14 +4,17 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTComCam images. 
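
The `logLevel` setting above takes Middleware's `--log-level` syntax of `component=LEVEL` pairs; the usdfprod override earlier in this diff is a one-pair example. A minimal sketch:

```yaml
prompt-proto-service:
  # component=LEVEL pairs in Middleware's --log-level format; this adds DEBUG
  # output for lsst.resources on top of the documented defaults
  # (prompt_processing at DEBUG, other LSST code at INFO, third-party at WARNING).
  logLevel: "lsst.resources=DEBUG"
```
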
-home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 14ea0bb548..426556533e 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -2,42 +2,37 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTComCam images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values | Key | Type | Default | Description | |-----|------|---------|-------------| -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. 
| | prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. 
| | prompt-proto-service.s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml index fd58db9178..2b9f48237e 100644 --- a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml @@ -5,13 +5,14 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: latest instrument: calibRepo: s3://rubin-summit-users/ + calibRepoPguser: rubin s3: imageBucket: rubin:rubin-pp @@ -24,9 +25,5 @@ prompt-proto-service: apdb: url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: # TODO: remove on DM-40839 - ip: usdf-butler.slac.stanford.edu:5432 fullnameOverride: "prompt-proto-service-lsstcomcam" diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index cacc5b24ae..7fa43f4768 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -12,7 +12,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent @@ -23,7 +23,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: "" # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -32,6 +32,10 @@ prompt-proto-service: # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. + # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. + # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images @@ -53,37 +57,21 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). 
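
The `disableBucketValidation` knob exists because Ceph multi-tenant bucket names embed a colon (`tenant:bucket`), which standard client-side S3 name checking rejects. The usdfdev overrides in this diff pair the two settings, roughly:

```yaml
prompt-proto-service:
  s3:
    # Ceph multi-tenant "tenant:bucket" form fails normal S3 name validation,
    # so validation is switched off below.
    imageBucket: rubin:rubin-pp
    endpointUrl: https://s3dfrgw.slac.stanford.edu
    disableBucketValidation: '1'
```
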
- imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: lsstdb1 # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: rubin # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). - # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/applications/rubintv/Chart.yaml b/applications/rubintv/Chart.yaml index fb493a331f..28e84ca86a 100644 --- a/applications/rubintv/Chart.yaml +++ b/applications/rubintv/Chart.yaml @@ -3,9 +3,9 @@ name: rubintv version: 1.0.0 description: Real-time display front end sources: - - https://github.com/lsst-sqre/rubintv + - https://github.com/lsst-ts/rubintv appVersion: 0.1.0 dependencies: - name: redis - version: 1.0.8 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/rubintv/README.md b/applications/rubintv/README.md index 16cae3fda9..19fd8fa5c1 100644 --- a/applications/rubintv/README.md +++ b/applications/rubintv/README.md @@ -4,7 +4,7 @@ Real-time display front end ## Source Code -* +* ## Values @@ -12,9 +12,9 @@ Real-time display front end |-----|------|---------|-------------| | frontend.affinity | object | `{}` | Affinity rules for the rubintv frontend pod | | frontend.debug | bool | `false` | If set to true, enable more verbose logging. | -| frontend.image | object | `{"pullPolicy":"IfNotPresent","repository":"ghcr.io/lsst-sqre/rubintv","tag":""}` | Settings for rubintv OCI image | +| frontend.image | object | `{"pullPolicy":"IfNotPresent","repository":"ghcr.io/lsst-ts/rubintv","tag":""}` | Settings for rubintv OCI image | | frontend.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the rubintv image | -| frontend.image.repository | string | `"ghcr.io/lsst-sqre/rubintv"` | rubintv frontend image to use | +| frontend.image.repository | string | `"ghcr.io/lsst-ts/rubintv"` | rubintv frontend image to use | | frontend.image.tag | string | The appVersion of the chart | Tag of rubintv image to use | | frontend.nodeSelector | object | `{}` | Node selector rules for the rubintv frontend pod | | frontend.pathPrefix | string | `"/rubintv"` | Prefix for rubintv's frontend API routes. 
| diff --git a/applications/rubintv/values-base.yaml b/applications/rubintv/values-base.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/rubintv/values.yaml b/applications/rubintv/values.yaml index 4fc58e1983..8225d1a40a 100644 --- a/applications/rubintv/values.yaml +++ b/applications/rubintv/values.yaml @@ -25,7 +25,7 @@ frontend: # -- Settings for rubintv OCI image image: # -- rubintv frontend image to use - repository: "ghcr.io/lsst-sqre/rubintv" + repository: "ghcr.io/lsst-ts/rubintv" # -- Pull policy for the rubintv image pullPolicy: "IfNotPresent" diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index edb149fcd3..5fac64841f 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -25,10 +25,9 @@ dependencies: condition: source-influxdb.enabled version: 4.12.5 repository: https://helm.influxdata.com/ - - name: influxdb2 - condition: influxdb2.enabled - version: 2.1.1 - repository: https://helm.influxdata.com/ + - name: influxdb-enterprise + condition: influxdb-enterprise.enabled + version: 1.0.0 - name: kafka-connect-manager alias: kafka-connect-manager condition: kafka-connect-manager.enabled @@ -37,6 +36,10 @@ dependencies: alias: source-kafka-connect-manager condition: source-kafka-connect-manager.enabled version: 1.0.0 + - name: kafka-connect-manager + alias: kafka-connect-manager-enterprise + condition: kafka-connect-manager-enterprise.enabled + version: 1.0.0 - name: chronograf condition: chronograf.enabled version: 1.2.6 diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index aec807a257..94d95c871b 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -13,16 +13,17 @@ Rubin Observatory's telemetry service. | bucketmapper.image.repository | string | `"ghcr.io/lsst-sqre/rubin-influx-tools"` | repository for rubin-influx-tools | | bucketmapper.image.tag | string | `"0.2.0"` | tag for rubin-influx-tools | | chronograf.enabled | bool | `true` | Enable Chronograf. | -| chronograf.env | object | `{"BASE_PATH":"/chronograf","CUSTOM_AUTO_REFRESH":"1s=1000","HOST_PAGE_DISABLED":true}` | Chronograf environment variables. | +| chronograf.env | object | `{"BASE_PATH":"/chronograf","HOST_PAGE_DISABLED":true}` | Chronograf environment variables. | | chronograf.envFromSecret | string | `"sasquatch"` | Chronograf secrets, expected keys generic_client_id, generic_client_secret and token_secret. | -| chronograf.image | object | `{"repository":"quay.io/influxdb/chronograf","tag":"1.9.4"}` | Chronograf image tag. | +| chronograf.image | object | `{"repository":"quay.io/influxdb/chronograf","tag":"1.10.2"}` | Chronograf image tag. | | chronograf.ingress | object | disabled | Chronograf ingress configuration. | | chronograf.persistence | object | `{"enabled":true,"size":"100Gi"}` | Chronograf data persistence configuration. 
|
| chronograf.resources.limits.cpu | int | `4` | |
| chronograf.resources.limits.memory | string | `"64Gi"` | |
| chronograf.resources.requests.cpu | int | `1` | |
| chronograf.resources.requests.memory | string | `"4Gi"` | |
-| influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
+| influxdb-enterprise | object | `{"enabled":false}` | Override influxdb-enterprise configuration. |
+| influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
| influxdb-staging.enabled | bool | `false` | Enable InfluxDB staging deployment. |
| influxdb-staging.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. |
| influxdb-staging.ingress | object | disabled | InfluxDB ingress configuration. |
@@ -34,7 +35,7 @@ Rubin Observatory's telemetry service.
| influxdb-staging.resources.requests.cpu | int | `8` | |
| influxdb-staging.resources.requests.memory | string | `"96Gi"` | |
| influxdb-staging.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxdb-user and influxdb-password keys from secret. |
-| influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
+| influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":1000,"query-timeout":"30s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
| influxdb.enabled | bool | `true` | Enable InfluxDB. |
| influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. |
| influxdb.ingress | object | disabled | InfluxDB ingress configuration. |
@@ -46,38 +47,13 @@
| influxdb.resources.requests.cpu | int | `8` | |
| influxdb.resources.requests.memory | string | `"96Gi"` | |
| influxdb.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxdb-user and influxdb-password keys from secret. |
-| influxdb2.adminUser.bucket | string | `"default"` | Admin default bucket. |
-| influxdb2.adminUser.existingSecret | string | `"sasquatch"` | Get admin-password/admin-token keys from secret. |
-| influxdb2.adminUser.organization | string | `"default"` | Admin default organization. |
-| influxdb2.enabled | bool | `false` | |
-| influxdb2.env[0].name | string | `"INFLUXD_STORAGE_WAL_FSYNC_DELAY"` | |
-| influxdb2.env[0].value | string | `"100ms"` | |
-| influxdb2.env[1].name | string | `"INFLUXD_HTTP_IDLE_TIMEOUT"` | |
-| influxdb2.env[1].value | string | `"0"` | |
-| influxdb2.env[2].name | string | `"INFLUXD_FLUX_LOG_ENABLED"` | |
-| influxdb2.env[2].value | string | `"true"` | |
-| influxdb2.env[3].name | string | `"INFLUXD_LOG_LEVEL"` | |
-| influxdb2.env[3].value | string | `"debug"` | |
-| influxdb2.image.tag | string | `"2.7.1-alpine"` | |
-| influxdb2.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/api/v2/$2"` | |
-| influxdb2.ingress.className | string | `"nginx"` | |
-| influxdb2.ingress.enabled | bool | `false` | InfluxDB2 ingress configuration |
-| influxdb2.ingress.hostname | string | `""` | |
-| influxdb2.ingress.path | string | `"/influxdb2(/|$)(.*)"` | |
-| influxdb2.initScripts.enabled | bool | `true` | InfluxDB2 initialization scripts |
-| influxdb2.initScripts.scripts."init.sh" | string | `"#!/bin/bash\ninflux bucket create --name telegraf-kafka-consumer --org default\n"` | |
-| influxdb2.persistence.enabled | bool | `true` | Enable persistent volume claim. By default storageClass is undefined choosing the default provisioner (standard on GKE). |
-| influxdb2.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments. |
-| influxdb2.resources.limits.cpu | int | `8` | |
-| influxdb2.resources.limits.memory | string | `"96Gi"` | |
-| influxdb2.resources.requests.cpu | int | `8` | |
-| influxdb2.resources.requests.memory | string | `"16Gi"` | |
| kafdrop.enabled | bool | `true` | Enable Kafdrop. |
| kafka-connect-manager | object | `{}` | Override kafka-connect-manager configuration. |
+| kafka-connect-manager-enterprise | object | `{"enabled":false}` | Override kafka-connect-manager-enterprise configuration. |
| kapacitor.enabled | bool | `true` | Enable Kapacitor. |
| kapacitor.envVars | object | `{"KAPACITOR_SLACK_ENABLED":true}` | Kapacitor environment variables. |
| kapacitor.existingSecret | string | `"sasquatch"` | InfluxDB credentials, use influxdb-user and influxdb-password keys from secret. |
-| kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.0"}` | Kapacitor image tag. |
+| kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.1"}` | Kapacitor image tag. |
| kapacitor.influxURL | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB connection URL. |
| kapacitor.persistence | object | `{"enabled":true,"size":"100Gi"}` | Kapacitor data persistence configuration. |
| kapacitor.resources.limits.cpu | int | `4` | |
@@ -85,7 +61,7 @@
| kapacitor.resources.requests.cpu | int | `1` | |
| kapacitor.resources.requests.memory | string | `"1Gi"` | |
| rest-proxy | object | `{"enabled":false}` | Override rest-proxy configuration. |
-| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
+| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":1000,"query-timeout":"30s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
| source-influxdb.enabled | bool | `false` | Enable InfluxDB source deployment. |
| source-influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. |
| source-influxdb.ingress | object | disabled | InfluxDB ingress configuration. |
@@ -101,7 +77,7 @@
| source-kapacitor.enabled | bool | `false` | Enable Kapacitor. |
| source-kapacitor.envVars | object | `{"KAPACITOR_SLACK_ENABLED":true}` | Kapacitor environment variables. |
| source-kapacitor.existingSecret | string | `"sasquatch"` | InfluxDB credentials, use influxdb-user and influxdb-password keys from secret. |
-| source-kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.0"}` | Kapacitor image tag. |
+| source-kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.1"}` | Kapacitor image tag. |
| source-kapacitor.influxURL | string | `"http://sasquatch-influxdb-staging.sasquatch:8086"` | InfluxDB connection URL. |
| source-kapacitor.persistence | object | `{"enabled":true,"size":"100Gi"}` | Kapacitor data persistence configuration. |
| source-kapacitor.resources.limits.cpu | int | `4` | |
@@ -109,16 +85,76 @@
| source-kapacitor.resources.requests.cpu | int | `1` | |
| source-kapacitor.resources.requests.memory | string | `"1Gi"` | |
| squareEvents.enabled | bool | `false` | Enable the Square Events subchart with topic and user configurations. |
-| strimzi-kafka | object | `{}` | Override strimzi-kafka configuration. |
+| strimzi-kafka | object | `{"connect":{"enabled":true},"kafka":{"listeners":{"external":{"enabled":true},"plain":{"enabled":true},"tls":{"enabled":true}}}}` | Override strimzi-kafka subchart configuration. |
| strimzi-registry-operator | object | `{"clusterName":"sasquatch","clusterNamespace":"sasquatch","operatorNamespace":"sasquatch"}` | strimzi-registry-operator configuration. |
| telegraf-kafka-consumer | object | `{}` | Override telegraf-kafka-consumer configuration.
| +| influxdb-enterprise.bootstrap.auth.secretName | string | `"sasquatch"` | | +| influxdb-enterprise.bootstrap.ddldml | object | `{}` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| influxdb-enterprise.data.config.antiEntropy.enabled | bool | `true` | | +| influxdb-enterprise.data.config.cluster.log-queries-after | string | `"15s"` | | +| influxdb-enterprise.data.config.cluster.max-concurrent-queries | int | `1000` | | +| influxdb-enterprise.data.config.cluster.query-timeout | string | `"300s"` | | +| influxdb-enterprise.data.config.continuousQueries.enabled | bool | `false` | | +| influxdb-enterprise.data.config.data.cache-max-memory-size | int | `0` | | +| influxdb-enterprise.data.config.data.trace-logging-enabled | bool | `true` | | +| influxdb-enterprise.data.config.data.wal-fsync-delay | string | `"100ms"` | | +| influxdb-enterprise.data.config.hintedHandoff.max-size | int | `107374182400` | | +| influxdb-enterprise.data.config.http.auth-enabled | bool | `true` | | +| influxdb-enterprise.data.config.http.flux-enabled | bool | `true` | | +| influxdb-enterprise.data.config.logging.level | string | `"debug"` | | +| influxdb-enterprise.data.env | object | `{}` | | +| influxdb-enterprise.data.image | object | `{}` | | +| influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | +| influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | +| influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | +| influxdb-enterprise.data.ingress.className | string | `"nginx"` | | +| influxdb-enterprise.data.ingress.enabled | bool | `false` | | +| influxdb-enterprise.data.ingress.hostname | string | `""` | | +| influxdb-enterprise.data.ingress.path | string | `"/influxdb-enterprise-data(/|$)(.*)"` | | +| influxdb-enterprise.data.persistence.enabled | bool | `false` | | +| influxdb-enterprise.data.replicas | int | `1` | | +| influxdb-enterprise.data.resources | object | `{}` | | +| influxdb-enterprise.data.service.type | string | `"ClusterIP"` | | +| influxdb-enterprise.fullnameOverride | string | `""` | | +| influxdb-enterprise.imagePullSecrets | list | `[]` | | +| influxdb-enterprise.license.secret.key | string | `"json"` | | +| influxdb-enterprise.license.secret.name | string | `"influxdb-enterprise-license"` | | +| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| 
influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | |
+| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"meta"` | |
+| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | |
+| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | |
+| influxdb-enterprise.meta.env | object | `{}` | |
+| influxdb-enterprise.meta.image | object | `{}` | |
+| influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | |
+| influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | |
+| influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | |
+| influxdb-enterprise.meta.ingress.className | string | `"nginx"` | |
+| influxdb-enterprise.meta.ingress.enabled | bool | `false` | |
+| influxdb-enterprise.meta.ingress.hostname | string | `""` | |
+| influxdb-enterprise.meta.ingress.path | string | `"/influxdb-enterprise-meta(/|$)(.*)"` | |
+| influxdb-enterprise.meta.persistence.enabled | bool | `false` | |
+| influxdb-enterprise.meta.podDisruptionBudget.minAvailable | int | `2` | |
+| influxdb-enterprise.meta.replicas | int | `3` | |
+| influxdb-enterprise.meta.resources | object | `{}` | |
+| influxdb-enterprise.meta.service.type | string | `"ClusterIP"` | |
+| influxdb-enterprise.meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | |
+| influxdb-enterprise.nameOverride | string | `""` | |
+| influxdb-enterprise.serviceAccount.annotations | object | `{}` | |
+| influxdb-enterprise.serviceAccount.create | bool | `false` | |
+| influxdb-enterprise.serviceAccount.name | string | `""` | |
| kafdrop.affinity | object | `{}` | Affinity configuration. |
| kafdrop.cmdArgs | string | `"--message.format=AVRO --topic.deleteEnabled=false --topic.createEnabled=false"` | Command line arguments to Kafdrop. |
| kafdrop.existingSecret | string | `""` | Existing k8s secret used to set kafdrop environment variables. Set SCHEMAREGISTRY_AUTH for basic auth credentials in the form username:password |
| kafdrop.host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). |
| kafdrop.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
| kafdrop.image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. |
-| kafdrop.image.tag | string | `"3.31.0"` | Kafdrop image version. |
+| kafdrop.image.tag | string | `"4.0.1"` | Kafdrop image version. |
| kafdrop.ingress.annotations | object | `{}` | Ingress annotations. |
| kafdrop.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. |
| kafdrop.ingress.hostname | string | `""` | Ingress hostname. |
@@ -154,12 +190,12 @@ Rubin Observatory's telemetry service.
| kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. |
| kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. |
| kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. |
-| kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. |
-| kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. |
-| kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. |
-| kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. |
-| kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. |
-| kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. |
+| kafka-connect-manager.influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. |
+| kafka-connect-manager.influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. |
+| kafka-connect-manager.influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. |
+| kafka-connect-manager.influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. |
+| kafka-connect-manager.influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. |
+| kafka-connect-manager.influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. |
| kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. |
| kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maximum number of tasks to run the connector. |
| kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. |
@@ -200,13 +236,74 @@
| kafka-connect-manager.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. |
| kafka-connect-manager.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. |
| kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. |
+| kafka-connect-manager-enterprise.enabled | bool | `true` | Enable Kafka Connect Manager. |
+| kafka-connect-manager-enterprise.env.kafkaBrokerUrl | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka broker URL. |
+| kafka-connect-manager-enterprise.env.kafkaConnectUrl | string | `"http://sasquatch-connect-api.sasquatch:8083"` | Kafka Connect URL. |
+| kafka-connect-manager-enterprise.env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. |
+| kafka-connect-manager-enterprise.image.pullPolicy | string | `"IfNotPresent"` | |
+| kafka-connect-manager-enterprise.image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | |
+| kafka-connect-manager-enterprise.image.tag | string | `"1.3.1"` | |
+| kafka-connect-manager-enterprise.influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new Kafka topics. |
+| kafka-connect-manager-enterprise.influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. |
+| kafka-connect-manager-enterprise.influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. |
+| kafka-connect-manager-enterprise.influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documentation for details. |
+| kafka-connect-manager-enterprise.influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. |
+| kafka-connect-manager-enterprise.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. |
+| kafka-connect-manager-enterprise.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. |
+| kafka-connect-manager-enterprise.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. |
+| kafka-connect-manager-enterprise.influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. |
+| kafka-connect-manager-enterprise.influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. |
+| kafka-connect-manager-enterprise.influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. |
+| kafka-connect-manager-enterprise.influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. |
+| kafka-connect-manager-enterprise.influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. |
+| kafka-connect-manager-enterprise.influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. |
+| kafka-connect-manager-enterprise.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. |
+| kafka-connect-manager-enterprise.influxdbSink.tasksMax | int | `1` | Maximum number of tasks to run the connector. |
+| kafka-connect-manager-enterprise.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. |
+| kafka-connect-manager-enterprise.jdbcSink.autoCreate | string | `"true"` | Whether to automatically create the destination table. |
+| kafka-connect-manager-enterprise.jdbcSink.autoEvolve | string | `"false"` | Whether to automatically add columns in the table schema. |
+| kafka-connect-manager-enterprise.jdbcSink.batchSize | string | `"3000"` | Specifies how many records to attempt to batch together for insertion into the destination table. |
+| kafka-connect-manager-enterprise.jdbcSink.connectionUrl | string | `"jdbc:postgresql://localhost:5432/mydb"` | Database connection URL.
| +| kafka-connect-manager-enterprise.jdbcSink.dbTimezone | string | `"UTC"` | Name of the JDBC timezone that should be used in the connector when inserting time-based values. | +| kafka-connect-manager-enterprise.jdbcSink.enabled | bool | `false` | Whether the JDBC Sink connector is deployed. | +| kafka-connect-manager-enterprise.jdbcSink.insertMode | string | `"insert"` | The insertion mode to use. Supported modes are: `insert`, `upsert` and `update`. | +| kafka-connect-manager-enterprise.jdbcSink.maxRetries | string | `"10"` | The maximum number of times to retry on errors before failing the task. | +| kafka-connect-manager-enterprise.jdbcSink.name | string | `"postgres-sink"` | Name of the connector to create. | +| kafka-connect-manager-enterprise.jdbcSink.retryBackoffMs | string | `"3000"` | The time in milliseconds to wait following an error before a retry attempt is made. | +| kafka-connect-manager-enterprise.jdbcSink.tableNameFormat | string | `"${topic}"` | A format string for the destination table name. | +| kafka-connect-manager-enterprise.jdbcSink.tasksMax | string | `"10"` | Number of Kafka Connect tasks. | +| kafka-connect-manager-enterprise.jdbcSink.topicRegex | string | `".*"` | Regex for selecting topics. | +| kafka-connect-manager-enterprise.s3Sink.behaviorOnNullValues | string | `"fail"` | How to handle records with a null value (for example, Kafka tombstone records). Valid options are ignore and fail. | +| kafka-connect-manager-enterprise.s3Sink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | +| kafka-connect-manager-enterprise.s3Sink.enabled | bool | `false` | Whether the Amazon S3 Sink connector is deployed. | +| kafka-connect-manager-enterprise.s3Sink.excludedTopicRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | +| kafka-connect-manager-enterprise.s3Sink.flushSize | string | `"1000"` | Number of records written to store before invoking file commits. | +| kafka-connect-manager-enterprise.s3Sink.locale | string | `"en-US"` | The locale to use when partitioning with TimeBasedPartitioner. | +| kafka-connect-manager-enterprise.s3Sink.name | string | `"s3-sink"` | Name of the connector to create. | +| kafka-connect-manager-enterprise.s3Sink.partitionDurationMs | string | `"3600000"` | The duration of a partition in milliseconds, used by TimeBasedPartitioner. Default is 1h for an hourly based partitioner. | +| kafka-connect-manager-enterprise.s3Sink.pathFormat | string | `"'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH"` | Pattern used to format the path in the S3 object name. | +| kafka-connect-manager-enterprise.s3Sink.rotateIntervalMs | string | `"600000"` | The time interval in milliseconds to invoke file commits. Set to 10 minutes by default. | +| kafka-connect-manager-enterprise.s3Sink.s3BucketName | string | `""` | s3 bucket name. The bucket must already exist at the s3 provider. | +| kafka-connect-manager-enterprise.s3Sink.s3PartRetries | int | `3` | Maximum number of retry attempts for failed requests. Zero means no retries. | +| kafka-connect-manager-enterprise.s3Sink.s3PartSize | int | `5242880` | The Part Size in S3 Multi-part Uploads. Valid Values: [5242880,…,2147483647] | +| kafka-connect-manager-enterprise.s3Sink.s3Region | string | `"us-east-1"` | s3 region | +| kafka-connect-manager-enterprise.s3Sink.s3RetryBackoffMs | int | `200` | How long to wait in milliseconds before attempting the first retry of a failed S3 request. 
| +| kafka-connect-manager-enterprise.s3Sink.s3SchemaCompatibility | string | `"NONE"` | s3 schema compatibility | +| kafka-connect-manager-enterprise.s3Sink.schemaCacheConfig | int | `5000` | The size of the schema cache used in the Avro converter. | +| kafka-connect-manager-enterprise.s3Sink.storeUrl | string | `""` | The object storage connection URL, for non-AWS s3 providers. | +| kafka-connect-manager-enterprise.s3Sink.tasksMax | int | `1` | Number of Kafka Connect tasks. | +| kafka-connect-manager-enterprise.s3Sink.timestampExtractor | string | `"Record"` | The extractor determines how to obtain a timestamp from each record. | +| kafka-connect-manager-enterprise.s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. | +| kafka-connect-manager-enterprise.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. | +| kafka-connect-manager-enterprise.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. | +| kafka-connect-manager-enterprise.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | | rest-proxy.affinity | object | `{}` | Affinity configuration. | | rest-proxy.configurationOverrides | object | `{"access.control.allow.headers":"origin,content-type,accept,authorization","access.control.allow.methods":"GET,POST,PUT,DELETE","client.sasl.mechanism":"SCRAM-SHA-512","client.security.protocol":"SASL_PLAINTEXT"}` | Kafka REST configuration options | | rest-proxy.customEnv | string | `nil` | Kafka REST additional env variables | | rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| rest-proxy.image.tag | string | `"7.4.1"` | Kafka REST proxy image tag. | +| rest-proxy.image.tag | string | `"7.5.3"` | Kafka REST proxy image tag. | | rest-proxy.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | rest-proxy.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | rest-proxy.ingress.hostname | string | `""` | Ingress hostname. | @@ -240,12 +337,12 @@ Rubin Observatory's telemetry service. | source-kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | source-kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | source-kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | -| source-kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | -| source-kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | -| source-kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. 
| -| source-kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | -| source-kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | -| source-kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. | +| source-kafka-connect-manager.influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. | +| source-kafka-connect-manager.influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. | +| source-kafka-connect-manager.influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. | +| source-kafka-connect-manager.influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| source-kafka-connect-manager.influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| source-kafka-connect-manager.influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. | | source-kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | | source-kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maxium number of tasks to run the connector. | | source-kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | @@ -288,30 +385,41 @@ Rubin Observatory's telemetry service. | source-kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | | square-events.cluster.name | string | `"sasquatch"` | | | strimzi-kafka.cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | -| strimzi-kafka.connect.enabled | bool | `true` | Enable Kafka Connect. | +| strimzi-kafka.cluster.releaseLabel | string | `"site-prom"` | Site wide label required for gathering Prometheus metrics if they are enabled. | +| strimzi-kafka.connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | +| strimzi-kafka.connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | +| strimzi-kafka.connect.enabled | bool | `false` | Enable Kafka Connect. | | strimzi-kafka.connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | strimzi-kafka.kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | -| strimzi-kafka.kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. 
| -| strimzi-kafka.kafka.config."log.retention.hours" | int | `72` | Number of days for a topic's data to be retained. | +| strimzi-kafka.kafka.config."log.retention.bytes" | string | `"350000000000"` | How much disk space Kafka will ensure is available, set to 70% of the data partition size. | +| strimzi-kafka.kafka.config."log.retention.hours" | int | `48` | Number of hours for a topic's data to be retained. | | strimzi-kafka.kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka. | -| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `4320` | Number of minutes for a consumer group's offsets to be retained. | +| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be retained. | | strimzi-kafka.kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. | | strimzi-kafka.kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. | +| strimzi-kafka.kafka.disruption_tolerance | int | `0` | Number of down brokers that the system can tolerate. | | strimzi-kafka.kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. | | strimzi-kafka.kafka.externalListener.bootstrap.host | string | `""` | Name used for TLS hostname verification. | | strimzi-kafka.kafka.externalListener.bootstrap.loadBalancerIP | string | `""` | The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the loadBalancerIP when a load balancer is created. This field is ignored if the cloud provider does not support the feature. Once the IP address is provisioned this option make it possible to pin the IP address. We can request the same IP next time it is provisioned. This is important because it lets us configure a DNS record, associating a hostname with that pinned IP address. | | strimzi-kafka.kafka.externalListener.brokers | list | `[]` | Borkers configuration. host is used in the brokers' advertised.brokers configuration and for TLS hostname verification. The format is a list of maps. | | strimzi-kafka.kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | strimzi-kafka.kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. | -| strimzi-kafka.kafka.listeners.external.enabled | bool | `true` | Whether external listener is enabled. | -| strimzi-kafka.kafka.listeners.plain.enabled | bool | `true` | Whether internal plaintext listener is enabled. | -| strimzi-kafka.kafka.listeners.tls.enabled | bool | `true` | Whether internal TLS listener is enabled. | +| strimzi-kafka.kafka.listeners.external.enabled | bool | `false` | Whether external listener is enabled. | +| strimzi-kafka.kafka.listeners.plain.enabled | bool | `false` | Whether internal plaintext listener is enabled. | +| strimzi-kafka.kafka.listeners.tls.enabled | bool | `false` | Whether internal TLS listener is enabled. | +| strimzi-kafka.kafka.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | strimzi-kafka.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. 
| | strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | | strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | | strimzi-kafka.kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. | +| strimzi-kafka.kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging for pod | +| strimzi-kafka.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter | +| strimzi-kafka.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | +| strimzi-kafka.kafkaExporter.logging | string | `"info"` | Logging level | +| strimzi-kafka.kafkaExporter.resources | object | `{}` | Resource specification for Kafka exporter | +| strimzi-kafka.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | | strimzi-kafka.mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | | strimzi-kafka.mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. | | strimzi-kafka.mirrormaker2.replication.policy.separator | string | "" | Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. | @@ -325,13 +433,14 @@ Rubin Observatory's telemetry service. | strimzi-kafka.registry.ingress.hostname | string | `""` | Hostname for the Schema Registry. | | strimzi-kafka.registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | strimzi-kafka.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | -| strimzi-kafka.users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). | -| strimzi-kafka.users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager | -| strimzi-kafka.users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing | -| strimzi-kafka.users.replicator.enabled | bool | `false` | Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) | -| strimzi-kafka.users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) | -| strimzi-kafka.users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. | +| strimzi-kafka.users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). 
| +| strimzi-kafka.users.kafkaConnectManager.enabled | bool | `false` | Enable user kafka-connect-manager | +| strimzi-kafka.users.promptProcessing.enabled | bool | `false` | Enable user prompt-processing | +| strimzi-kafka.users.replicator.enabled | bool | `false` | Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) | +| strimzi-kafka.users.telegraf.enabled | bool | `false` | Enable user telegraf (deployed by parent Sasquatch chart) | +| strimzi-kafka.users.tsSalKafka.enabled | bool | `false` | Enable user ts-salkafka, used at the telescope environments | | strimzi-kafka.zookeeper.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["zookeeper"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Zookeeper pod assignment. | +| strimzi-kafka.zookeeper.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | strimzi-kafka.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | strimzi-kafka.zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | | strimzi-kafka.zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | @@ -343,21 +452,17 @@ Rubin Observatory's telemetry service. | telegraf-kafka-consumer.env[0].name | string | `"TELEGRAF_PASSWORD"` | | | telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.key | string | `"telegraf-password"` | Telegraf KafkaUser password. | | telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| telegraf-kafka-consumer.env[1].name | string | `"INFLUXDB_TOKEN"` | | -| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.key | string | `"admin-token"` | InfluxDB v2 admin token. | +| telegraf-kafka-consumer.env[1].name | string | `"INFLUXDB_USER"` | | +| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | | telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| telegraf-kafka-consumer.env[2].name | string | `"INFLUXDB_USER"` | | -| telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | +| telegraf-kafka-consumer.env[2].name | string | `"INFLUXDB_PASSWORD"` | | +| telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| telegraf-kafka-consumer.env[3].name | string | `"INFLUXDB_PASSWORD"` | | -| telegraf-kafka-consumer.env[3].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | -| telegraf-kafka-consumer.env[3].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | telegraf-kafka-consumer.image.pullPolicy | string | IfNotPresent | Image pull policy. | -| telegraf-kafka-consumer.image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. | -| telegraf-kafka-consumer.image.tag | string | `"avrounions"` | Telegraf image tag. | +| telegraf-kafka-consumer.image.repo | string | `"quay.io/influxdb/telegraf-nightly"` | Telegraf image repository. | +| telegraf-kafka-consumer.image.tag | string | `"latest"` | Telegraf image tag. | | telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. 
| | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. | -| telegraf-kafka-consumer.influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. | | telegraf-kafka-consumer.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | telegraf-kafka-consumer.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | | telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. | diff --git a/applications/sasquatch/charts/influxdb-enterprise/.helmignore b/applications/sasquatch/charts/influxdb-enterprise/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/sasquatch/charts/influxdb-enterprise/Chart.yaml b/applications/sasquatch/charts/influxdb-enterprise/Chart.yaml new file mode 100644 index 0000000000..462b948693 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +name: influxdb-enterprise +version: 1.0.0 +description: Run InfluxDB Enterprise on Kubernetes +sources: + - https://github.com/influxdata/influxdb +appVersion: 1.11.3 diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md new file mode 100644 index 0000000000..0a2013d54e --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -0,0 +1,72 @@ +# influxdb-enterprise + +Run InfluxDB Enterprise on Kubernetes + +## Source Code + +* <https://github.com/influxdata/influxdb> + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| bootstrap.auth.secretName | string | `"sasquatch"` | | +| bootstrap.ddldml | object | `{}` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| data.config.antiEntropy.enabled | bool | `true` | | +| data.config.cluster.log-queries-after | string | `"15s"` | | +| data.config.cluster.max-concurrent-queries | int | `1000` | | +| data.config.cluster.query-timeout | string | `"300s"` | | +| data.config.continuousQueries.enabled | bool | `false` | | +| data.config.data.cache-max-memory-size | int 
| `0` | | +| data.config.data.trace-logging-enabled | bool | `true` | | +| data.config.data.wal-fsync-delay | string | `"100ms"` | | +| data.config.hintedHandoff.max-size | int | `107374182400` | | +| data.config.http.auth-enabled | bool | `true` | | +| data.config.http.flux-enabled | bool | `true` | | +| data.config.logging.level | string | `"debug"` | | +| data.env | object | `{}` | | +| data.image | object | `{}` | | +| data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | +| data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | +| data.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | +| data.ingress.className | string | `"nginx"` | | +| data.ingress.enabled | bool | `false` | | +| data.ingress.hostname | string | `""` | | +| data.ingress.path | string | `"/influxdb-enterprise-data(/|$)(.*)"` | | +| data.persistence.enabled | bool | `false` | | +| data.replicas | int | `1` | | +| data.resources | object | `{}` | | +| data.service.type | string | `"ClusterIP"` | | +| fullnameOverride | string | `""` | | +| imagePullSecrets | list | `[]` | | +| license.secret.key | string | `"json"` | | +| license.secret.name | string | `"influxdb-enterprise-license"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"meta"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| meta.env | object | `{}` | | +| meta.image | object | `{}` | | +| meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | +| meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | +| meta.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | +| meta.ingress.className | string | `"nginx"` | | +| meta.ingress.enabled | bool | `false` | | +| meta.ingress.hostname | string | `""` | | +| meta.ingress.path | string | `"/influxdb-enterprise-meta(/|$)(.*)"` | | +| meta.persistence.enabled | bool | `false` | | +| meta.podDisruptionBudget.minAvailable | int | `2` | | +| meta.replicas | int | `3` | | +| meta.resources | object | `{}` | | +| meta.service.type | string | `"ClusterIP"` | | +| meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | | +| nameOverride | string | `""` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `false` | | +| serviceAccount.name | string | `""` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/NOTES.txt b/applications/sasquatch/charts/influxdb-enterprise/templates/NOTES.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/_helpers.tpl b/applications/sasquatch/charts/influxdb-enterprise/templates/_helpers.tpl new file mode 100644 index 
0000000000..581879ec0a --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "influxdb-enterprise.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "influxdb-enterprise.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "influxdb-enterprise.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "influxdb-enterprise.labels" -}} +helm.sh/chart: {{ include "influxdb-enterprise.chart" . }} +{{ include "influxdb-enterprise.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "influxdb-enterprise.selectorLabels" -}} +app.kubernetes.io/name: {{ include "influxdb-enterprise.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account +*/}} +{{- define "influxdb-enterprise.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "influxdb-enterprise.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "influxdb-enterprise.image" -}} +{{- $dataTagName := (printf "%s-%s" .chart.AppVersion .podtype) -}} +{{- if (.imageroot) }} +{{- if (.imageroot.tag) -}} +{{- $dataTagName = .imageroot.tag -}} +{{- end -}} +{{- if (.imageroot.addsuffix) -}} +{{- $dataTagName = printf "%s-%s" $dataTagName .podtype -}} +{{- end -}} +{{- end }} +image: "{{ .podvals.image.repository | default "influxdb" }}:{{ $dataTagName }}" +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml new file mode 100644 index 0000000000..46c3381491 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml @@ -0,0 +1,141 @@ +{{- if or .Values.bootstrap.auth.secretName (or .Values.bootstrap.ddldml.raw .Values.bootstrap.ddldml.configMap) -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-bootstrap + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-delete-policy": hook-succeeded +spec: + activeDeadlineSeconds: 300 + backoffLimit: 10 + template: + metadata: + labels: + {{- include "influxdb-enterprise.selectorLabels" . 
| nindent 8 }} + spec: + {{- if .Values.bootstrap.ddldml.configMap }} + volumes: + - name: ddldml + configMap: + name: {{ .Values.bootstrap.ddldml.configMap }} + {{ end }} + restartPolicy: OnFailure + serviceAccountName: {{ template "influxdb-enterprise.serviceAccountName" . }} + # Consider this a pipeline of setup components. + # Each is executed in order until all of them complete successfully. + # This means that each command must be idempotent. + initContainers: + {{- if .Values.bootstrap.auth.secretName }} + - name: auth + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + # Exposing these environment variables makes this command idempotent, + # as even if the authentication has been set up, we can still execute the command + # and it won't error as nothing has changed + env: + - name: INFLUX_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-user" + - name: INFLUX_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-password" + command: + - influx + args: + - -host + - {{ include "influxdb-enterprise.fullname" . }}-data + - -execute + - CREATE USER $(INFLUX_USERNAME) WITH PASSWORD '$(INFLUX_PASSWORD)' WITH ALL PRIVILEGES + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 100m + memory: 50Mi + {{ end }} + {{- if .Values.bootstrap.ddldml.configMap }} + - name: ddl + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + {{- if .Values.bootstrap.auth.secretName }} + env: + - name: INFLUX_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-user" + - name: INFLUX_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-password" + {{ end }} + volumeMounts: + - name: ddldml + mountPath: /ddldml + command: + - influx + args: + - -host + - {{ include "influxdb-enterprise.fullname" . }}-data + - -import + - -path + - /ddldml/ddl + resources: + {{- toYaml .Values.bootstrap.ddldml.resources | nindent 10 }} + {{ end }} + {{- if .Values.bootstrap.ddldml.configMap }} + - name: dml + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + {{- if .Values.bootstrap.auth.secretName }} + env: + - name: INFLUX_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-user" + - name: INFLUX_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-password" + {{ end }} + volumeMounts: + - name: ddldml + mountPath: /ddldml + command: + - influx + args: + - -host + - {{ include "influxdb-enterprise.fullname" . 
}}-data + - -import + - -path + - /ddldml/dml + resources: + {{- toYaml .Values.bootstrap.ddldml.resources | nindent 10 }} + {{ end }} + containers: + - name: success + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + command: + - echo + args: + - "Bootstrap Success" + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 100m + memory: 50Mi +{{ end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml new file mode 100644 index 0000000000..3d6c6bcdcc --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml @@ -0,0 +1,148 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data + labels: + app.kubernetes.io/component: data + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +data: + influxdb.conf: |+ + bind-address = ":8088" + reporting-disabled = false + + [http] + {{- range $key, $value := index .Values.data.config.http }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [enterprise] + {{ if .Values.license.key }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-key = "{{ .Values.license.key }}" #✨ mutually exclusive with license-path + {{ else if .Values.license.secret }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-path = "/var/run/secrets/influxdb/license.json" + {{ end }} + + [meta] + dir = "/var/lib/influxdb/meta" + + [hinted-handoff] + dir = "/var/lib/influxdb/hh" + {{- range $key, $value := index .Values.data.config.hintedHandoff }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value | int }} + {{- end }} + {{- end }} + + [data] + dir = "/var/lib/influxdb/data" + wal-dir = "/var/lib/influxdb/wal" + {{- range $key, $value := index .Values.data.config.data }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [anti-entropy] + {{- range $key, $value := index .Values.data.config.antiEntropy }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [cluster] + {{- range $key, $value := index .Values.data.config.cluster }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [continuous_queries] + {{- range $key, $value := index .Values.data.config.continuousQueries }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [logging] + {{- range $key, $value := index .Values.data.config.logging }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + + entrypoint.pl: |+ + #!/usr/bin/env perl + $ENV{INFLUXDB_HOSTNAME} = `hostname -f`; + 
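+    # `hostname -f` output ends with a trailing newline; the next line strips it.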
$ENV{INFLUXDB_HOSTNAME} =~ s/\n$//; + + {{ if .Values.data.preruncmds }} + # These are commands that will run before influxdb is initialized + {{- range .Values.data.preruncmds }} + {{ if .description }} + # {{ .description }} + {{- end }} + system('{{ .cmd }}'); + {{- end }} + {{ end }} + + $pid = fork(); + + # Inside this conditional is our child process, which + # will run `influxd` + if($pid == 0) { + exec('influxd') or die("Failed to execute influxd: $!\n"); + } + + $SIG{HUP} = sub { kill 'HUP', $pid }; + $SIG{TERM} = sub { kill 'TERM', $pid }; + $SIG{KILL} = sub { kill 'KILL', $pid }; + + # Register data node with meta leader + my $protocol = "http"; + my $meta_service = $ENV{RELEASE_NAME} . "-meta"; + + # We're not going to define an exit strategy for failure here. + # This should be handled by the probes on the pods + while (1) { + # There's no LWP/Simple available in our images, so forking out to curl 😥 + print "\n\n\nREGISTER WITH META SERVICE\n\n\n"; + $exit_code = system('curl', '-XPOST', '--silent', '--fail', '--retry', '5', '--retry-delay', '0', "-Faddr=$ENV{INFLUXDB_HOSTNAME}:8088", "$protocol://$meta_service:8091/add-data"); + + if ($exit_code == 0) { + $| = 1; + last; + } + print "\n\n\nFailed: $!\n\n\n"; + $| = 1; + + exit 255 + } + + waitpid($pid, 0); + exit $? diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-ingress.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-ingress.yaml new file mode 100644 index 0000000000..e3dd191841 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-ingress.yaml @@ -0,0 +1,32 @@ +{{- if .Values.data.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data + labels: + influxdb.influxdata.com/component: data +{{- include "influxdb-enterprise.labels" . | nindent 4 }} +{{- if .Values.data.ingress.annotations }} + annotations: +{{ toYaml .Values.data.ingress.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.data.ingress.className }} + ingressClassName: {{ .Values.data.ingress.className }} +{{- end }} + rules: +{{- if .Values.data.ingress.hostname }} + - host: {{ .Values.data.ingress.hostname | quote }} + http: +{{- else }} + - http: +{{- end }} + paths: + - path: {{ .Values.data.ingress.path }} + pathType: Prefix + backend: + service: + name: {{ template "influxdb-enterprise.fullname" . }}-data + port: + number: 8086 +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-service.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-service.yaml new file mode 100644 index 0000000000..fabcbc596c --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-service.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.data.service.annotations }} + annotations: +{{ toYaml .Values.data.service.annotations | indent 4 }} +{{- end }} + name: {{ template "influxdb-enterprise.fullname" . }}-data + labels: + influxdb.influxdata.com/component: data + {{- include "influxdb-enterprise.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.data.service.type }} +{{- if (eq "ClusterIP" .Values.data.service.type) }} + clusterIP: None +{{- end }} + publishNotReadyAddresses: true + ports: + - port: 8086 + protocol: TCP + name: http +{{- if .Values.data.service.nodePort }} + nodePort: {{ .Values.data.service.nodePort }} +{{- end }} + - port: 8088 + protocol: TCP + name: rpc + - port: 2003 + # Graphite supports TCP and UDP, + # so this should __maybe__ be configurable + # Though most use TCP + protocol: TCP + name: graphite + - port: 4242 + protocol: TCP + name: opentsdb + # LoadBalancer service type only allows for one protocol + # disabling UDP ports +{{- if (ne "LoadBalancer" .Values.data.service.type) }} + - port: 25826 + protocol: UDP + name: collectd + - port: 8089 + protocol: UDP + name: udp +{{- end }} + selector: + influxdb.influxdata.com/component: data +{{- include "influxdb-enterprise.selectorLabels" . | nindent 4 }} +{{- if .Values.data.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.data.service.loadBalancerIP }} +{{- end }} +{{- if .Values.data.service.externalIPs }} + externalIPs: +{{ toYaml .Values.data.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.data.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.data.service.externalTrafficPolicy }} +{{- end }} \ No newline at end of file diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml new file mode 100644 index 0000000000..fa28e08cf4 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml @@ -0,0 +1,141 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.data.replicas | default 3 }} + podManagementPolicy: Parallel + serviceName: {{ include "influxdb-enterprise.fullname" . }}-data + selector: + matchLabels: + influxdb.influxdata.com/component: data + {{- include "influxdb-enterprise.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.data.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + influxdb.influxdata.com/component: data + {{- include "influxdb-enterprise.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.data.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "influxdb-enterprise.serviceAccountName" . }} + volumes: + {{ if not .Values.data.persistence.enabled }} + - name: {{ include "influxdb-enterprise.fullname" . }}-data-data + emptyDir: {} + {{ end }} + - name: config + configMap: + name: {{ include "influxdb-enterprise.fullname" . 
}}-data + {{- if .Values.license.secret }} + - name: license + secret: + secretName: {{ .Values.license.secret.name }} + items: + - key: {{ .Values.license.secret.key }} + path: license.json + {{- end }} + containers: + - name: {{ .Chart.Name }} + command: + - "/usr/bin/perl" + args: + - "/etc/influxdb/entrypoint.pl" + securityContext: + {{- toYaml .Values.data.securityContext | nindent 12 }} + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 10 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + env: + - name: RELEASE_NAME + value: {{ include "influxdb-enterprise.fullname" . }} + {{- if .Values.data.env }} +{{ toYaml .Values.data.env | indent 10 }} + {{- end}} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + ports: + - name: http + containerPort: 8086 + protocol: TCP + - name: raft + containerPort: 8088 + protocol: TCP + - name: udp + containerPort: 8089 + protocol: UDP + - name: graphite + containerPort: 2003 + protocol: TCP + - name: opentsdb + containerPort: 4242 + protocol: TCP + - name: collectd + containerPort: 25826 + protocol: UDP + livenessProbe: + httpGet: + path: /ping + port: http + readinessProbe: + initialDelaySeconds: 30 + httpGet: + path: /ping + port: http + volumeMounts: + - name: config + mountPath: /etc/influxdb + - name: {{ include "influxdb-enterprise.fullname" . }}-data-data + mountPath: /var/lib/influxdb + {{- if .Values.license.secret }} + - name: license + mountPath: /var/run/secrets/influxdb/ + {{- end }} + resources: + {{- toYaml .Values.data.resources | nindent 12 }} + {{- with .Values.data.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.data.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.data.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if and .Values.data.persistence.enabled (not .Values.data.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data-data + annotations: + {{- range $key, $value := .Values.data.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + - {{ .Values.data.persistence.accessMode | quote}} + resources: + requests: + storage: {{ .Values.data.persistence.size | quote }} + {{- if .Values.data.persistence.storageClass }} + {{- if (eq "-" .Values.data.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.data.persistence.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml new file mode 100644 index 0000000000..6b9d8b5ef9 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + labels: + app.kubernetes.io/component: meta + {{- include "influxdb-enterprise.labels" . 
| nindent 4 }} +data: + influxdb-meta.conf: |+ + bind-address = ":8091" + reporting-disabled = false + + [enterprise] + {{ if .Values.license.key }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-key = "{{ .Values.license.key }}" #✨ mutually exclusive with license-path + {{ else if .Values.license.secret }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-path = "/var/run/secrets/influxdb/license.json" + {{ end }} + + [meta] + dir = "/var/lib/influxdb/meta" + + + entrypoint.pl: |+ + #!/usr/bin/env perl + $ENV{INFLUXDB_HOSTNAME} = `hostname -f`; + $ENV{INFLUXDB_HOSTNAME} =~ s/\n$//; + + {{ if .Values.meta.preruncmds }} + # These are commands that will run before influxdb is initialized + {{- range .Values.meta.preruncmds }} + {{ if .description }} + # {{ .description }} + {{- end }} + system('{{ .cmd }}'); + {{- end }} + {{ end }} + + $pid = fork(); + + # Inside this conditional is our child process, which + # will run `influxd-meta` + if($pid == 0) { + exec('influxd-meta') or die("Failed to execute influxd-meta: $!\n"); + } + + $SIG{HUP} = sub { kill 'HUP', $pid }; + $SIG{TERM} = sub { kill 'TERM', $pid }; + $SIG{KILL} = sub { kill 'KILL', $pid }; + + # Register meta node + my $meta_leader = $ENV{INFLUXDB_HOSTNAME}; + $meta_leader =~ s/-[0-9]+./-0./; + + # We're not going to define an exit strategy for failure here. + # This should be handled by the probes on the pods + while (1) { + if($meta_leader eq $ENV{INFLUXDB_HOSTNAME}) { + system('influxd-ctl', 'add-meta', "$ENV{INFLUXDB_HOSTNAME}:8091"); + } else { + system('influxd-ctl', 'join', "$meta_leader:8091"); + } + + if ($? == 0) { + last; + } + + # Wait a few seconds and try again + # Maybe we should implement some rudimentary backoff + sleep(2); + } + + waitpid($pid, 0); + exit $? diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-ingress.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-ingress.yaml new file mode 100644 index 0000000000..bc7ecc42d1 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-ingress.yaml @@ -0,0 +1,32 @@ +{{- if .Values.meta.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + labels: + influxdb.influxdata.com/component: meta +{{- include "influxdb-enterprise.labels" . | nindent 4 }} +{{- if .Values.meta.ingress.annotations }} + annotations: +{{ toYaml .Values.meta.ingress.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.meta.ingress.className }} + ingressClassName: {{ .Values.meta.ingress.className }} +{{- end }} + rules: +{{- if .Values.meta.ingress.hostname }} + - host: {{ .Values.meta.ingress.hostname | quote }} + http: +{{- else }} + - http: +{{- end }} + paths: + - path: {{ .Values.meta.ingress.path }} + pathType: Prefix + backend: + service: + name: {{ template "influxdb-enterprise.fullname" . 
}}-meta + port: + number: 8091 +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-service.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-service.yaml new file mode 100644 index 0000000000..177f6f172c --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-service.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.meta.service.annotations }} + annotations: +{{ toYaml .Values.meta.service.annotations | indent 4 }} +{{- end }} + name: {{ template "influxdb-enterprise.fullname" . }}-meta + labels: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +spec: + type: {{ .Values.meta.service.type }} +{{- if (eq "ClusterIP" .Values.meta.service.type) }} + clusterIP: None +{{- end }} + publishNotReadyAddresses: true + ports: + - port: 8089 + protocol: TCP + name: raft + - port: 8091 + protocol: TCP + name: http +{{- if .Values.meta.service.nodePort }} + nodePort: {{ .Values.meta.service.nodePort }} +{{- end }} + selector: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.selectorLabels" . | nindent 4 }} +{{- if .Values.meta.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.meta.service.loadBalancerIP }} +{{- end }} +{{- if .Values.meta.service.externalIPs }} + externalIPs: +{{ toYaml .Values.meta.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.meta.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.meta.service.externalTrafficPolicy }} +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml new file mode 100644 index 0000000000..beff940f34 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml @@ -0,0 +1,131 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.meta.replicas | default 3 }} + podManagementPolicy: Parallel + serviceName: {{ include "influxdb-enterprise.fullname" . }}-meta + selector: + matchLabels: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.meta.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.meta.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "influxdb-enterprise.serviceAccountName" . }} + volumes: + {{ if not .Values.meta.persistence.enabled }} + - name: {{ include "influxdb-enterprise.fullname" . }}-meta-data + emptyDir: {} + {{ end }} + - name: config + configMap: + name: {{ include "influxdb-enterprise.fullname" . 
}}-meta + {{- if .Values.license.secret }} + - name: license + secret: + secretName: {{ .Values.license.secret.name }} + items: + - key: {{ .Values.license.secret.key }} + path: license.json + {{- end }} + containers: + - name: {{ .Chart.Name }} + command: + - "/usr/bin/perl" + args: + - "/etc/influxdb/entrypoint.pl" + securityContext: + {{- toYaml .Values.meta.securityContext | nindent 12 }} + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.meta "podtype" "meta") | indent 10 }} + imagePullPolicy: {{ .Values.meta.image.pullPolicy }} + env: + - name: INFLUXDB_META_INTERNAL_SHARED_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.meta.sharedSecret.secretName }} + key: secret + {{- if .Values.meta.env }} +{{ toYaml .Values.meta.env | indent 12 }} + {{- end}} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + ports: + - name: http + containerPort: 8091 + protocol: TCP + - name: raft + containerPort: 8089 + protocol: TCP + livenessProbe: + httpGet: + path: /ping + port: http + readinessProbe: + httpGet: + path: /ping + port: http + volumeMounts: + - name: config + mountPath: /etc/influxdb + - name: {{ include "influxdb-enterprise.fullname" . }}-meta-data + mountPath: /var/lib/influxdb + {{- if .Values.license.secret }} + - name: license + mountPath: /var/run/secrets/influxdb/ + {{- end }} + resources: + {{- toYaml .Values.meta.resources | nindent 12 }} + {{- with .Values.meta.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.meta.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.meta.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if and .Values.meta.persistence.enabled (not .Values.meta.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta-data + annotations: + {{- range $key, $value := .Values.meta.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + - {{ .Values.meta.persistence.accessMode | quote}} + resources: + requests: + storage: {{ .Values.meta.persistence.size | quote }} + {{- if .Values.meta.persistence.storageClass }} + {{- if (eq "-" .Values.meta.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.meta.persistence.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/serviceaccount.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/serviceaccount.yaml new file mode 100644 index 0000000000..9e1fc427a1 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "influxdb-enterprise.serviceAccountName" . }} +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml new file mode 100644 index 0000000000..9d96fadd26 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -0,0 +1,273 @@ +# Default values for influxdb-enterprise. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +nameOverride: "" +fullnameOverride: "" +imagePullSecrets: [] + +# License-key and license-path are mutually exclusive. Use only one and leave the other blank. +license: + # You can put your license key here for testing this chart out, + # but we STRONGLY recommend using a license file stored in a secret + # when you ship to production. + # key: "" + # secret: + # name: influxdb-license + # key: json + secret: + name: influxdb-enterprise-license + key: json +# Service account to use for deployment +# If the name is not specified, the default account will be used +serviceAccount: + create: false + name: '' + annotations: {} + +## The name of a secret in the same kubernetes namespace which contains values +## to be added to the environment. +## This can be used, for example, to set the INFLUXDB_ENTERPRISE_LICENSE_KEY +## or INFLUXDB_ENTERPRISE_LICENSE_PATH environment variable. +# envFromSecret: influxdb-license + +# A secret with keys "influxdb-user" and "influxdb-password" is required +# This bootstrap configuration allows you to configure +# some parts of the InfluxDB system at install time. +# +# This job ONLY runs once, after the first `helm upgrade --install` +# or `helm install` +# +# This job WILL NOT run on upgrades +# +bootstrap: + # This section allows you to enable authentication + # of the data nodes, which will create a username + # and password for your "admin" account. + # A secret should be provided, which will have the keys + # "influxdb-user" and "influxdb-password" available. + auth: + secretName: sasquatch + # This section allows you to use DDL and DML to define + # databases, retention policies, and inject some data. + # When using the configMap setting, the keys "ddl" and "dml" + # must exist, even if one of them is empty. + # DDL is executed before DML, to enforce databases and retention policies + # to exist. 
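+  # As an illustration only (the ConfigMap name and the DDL statement below
+  # are hypothetical examples, not chart defaults), a matching ConfigMap
+  # might look like:
+  #
+  #   apiVersion: v1
+  #   kind: ConfigMap
+  #   metadata:
+  #     name: influxdb-ddl-dml
+  #   data:
+  #     ddl: |
+  #       CREATE DATABASE "example" WITH DURATION 30d NAME "rp30d"
+  #     dml: ""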
+  ddldml: {} + # configMap: influxdb-ddl-dml + # resources: {} + + +# Sets the tagged version of the Docker image to run; defaults to latest. +# When pulling from the influx repo, a suffix is appended per node type, e.g. influxdb:1.8.0-meta and influxdb:1.8.0-data. +# If ignoresuffix is set to true, the suffix won't be added. +#image: +# tag: 1.11.3 +# ignoresuffix: false + +meta: + replicas: 3 + image: {} + # override: true + # pullPolicy: IfNotPresent + # repository: influxdb + # nodeSelector: {} + # tolerations: [] + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: influxdb.influxdata.com/component + operator: In + values: + - meta + topologyKey: kubernetes.io/hostname + # podAnnotations: {} + # + # podSecurityContext: {} + # fsGroup: 2000 + # + # This allows you to run the pods as a non-privileged user; set to the desired uid + # securityContext: {} + # runAsUser: 2000 + # runAsGroup: 2000 + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + # + # These are the commands that will be run before influxdb is started + # preruncmds: + # - cmd: ls -l + # description: We want to see what's in the directory + # - cmd: stat $HOME/somefile + # description: And we run a second command + # This secret needs a key called "secret" whose value is a long random string + # Please see docs for shared-internal-secret: + # https://docs.influxdata.com/enterprise_influxdb/v1.8/administration/config-data-nodes/#meta-internal-shared-secret + sharedSecret: + secretName: influxdb-enterprise-shared-secret + # + service: + ## Specify a service type + ## ClusterIP is default + ## ref: http://kubernetes.io/docs/user-guide/services/ + type: ClusterIP + # loadBalancerIP: "" + # externalIPs: [] + # externalTrafficPolicy: "" + # nodePort: 30086 + ## Add annotations to service + # annotations: {} + # InfluxDB ingress configuration. + ingress: + enabled: false + hostname: "" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + className: "nginx" + path: /influxdb-enterprise-meta(/|$)(.*) + ## Persist data to a persistent volume + persistence: + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + ## influxdb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + # annotations: + # accessMode: ReadWriteOnce + # size: 8Gi + # Pick one of maxUnavailable or minAvailable + podDisruptionBudget: + # maxUnavailable: 2 + minAvailable: 2 + ## Additional meta container environment variables.
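+  ## e.g., using InfluxDB's config-to-environment override convention
+  ## (variable name is illustrative; check it against your InfluxDB version):
+  ## INFLUXDB_REPORTING_DISABLED: "true"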
+  env: {} + resources: {} + +data: + replicas: 1 + image: {} + # override: true + # pullPolicy: IfNotPresent + # repository: influxdb + # nodeSelector: {} + # tolerations: [] + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: influxdb.influxdata.com/component + operator: In + values: + - data + topologyKey: kubernetes.io/hostname + # podAnnotations: {} + # + # podSecurityContext: {} + # fsGroup: 2000 + # + # This allows you to run the pods as a non-privileged user; set to the desired uid + # securityContext: {} + # runAsUser: 2000 + # runAsGroup: 2000 + # capabilities: + # drop: + # - ALL + # + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + # + # These are the commands that will be run before influxdb is started + # preruncmds: + # - cmd: ls -l + # description: We want to see what's in the directory + # - cmd: stat $HOME/somefile + # description: And we run a second command + # + service: + ## Specify a service type + ## ClusterIP is default + ## ref: http://kubernetes.io/docs/user-guide/services/ + type: ClusterIP + # loadBalancerIP: "" + # externalIPs: [] + # externalTrafficPolicy: "" + # nodePort: 30091 + ## Add annotations to service + # annotations: {} + # InfluxDB ingress configuration. + ingress: + enabled: false + hostname: "" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + className: "nginx" + path: /influxdb-enterprise-data(/|$)(.*) + ## Persist data to a persistent volume + persistence: + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + ## influxdb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + # annotations: + # accessMode: ReadWriteOnce + # size: 8Gi + config: + data: + trace-logging-enabled: true + wal-fsync-delay: "100ms" + cache-max-memory-size: 0 + antiEntropy: + enabled: true + http: + flux-enabled: true + auth-enabled: true + cluster: + max-concurrent-queries: 1000 + query-timeout: "300s" + log-queries-after: "15s" + hintedHandoff: + max-size: 107374182400 + continuousQueries: + enabled: false + logging: + level: "debug" + ## Additional data container environment variables e.g.: + ## INFLUXDB_HTTP_FLUX_ENABLED: "true" + env: {} + resources: {} diff --git a/applications/sasquatch/charts/kafdrop/README.md b/applications/sasquatch/charts/kafdrop/README.md index dea43e6e00..9ccdfd6655 100644 --- a/applications/sasquatch/charts/kafdrop/README.md +++ b/applications/sasquatch/charts/kafdrop/README.md @@ -16,7 +16,7 @@ A subchart to deploy the Kafdrop UI for Sasquatch. | host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. | -| image.tag | string | `"3.31.0"` | Kafdrop image version.
| +| image.tag | string | `"4.0.1"` | Kafdrop image version. | | ingress.annotations | object | `{}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/kafdrop/values.yaml b/applications/sasquatch/charts/kafdrop/values.yaml index 41fc5e93c6..6ca6ecdb13 100644 --- a/applications/sasquatch/charts/kafdrop/values.yaml +++ b/applications/sasquatch/charts/kafdrop/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafdrop image version. - tag: 3.31.0 + tag: 4.0.1 kafka: # -- Bootstrap list of Kafka host/port pairs diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index c9ff922025..c5b9da41fc 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -21,12 +21,12 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | -| influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | -| influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | -| influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. | -| influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | -| influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | -| influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. | +| influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. | +| influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. | +| influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. | +| influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. | | influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | | influxdbSink.tasksMax | int | `1` | Maximum number of tasks to run the connector. | | influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time; if not specified, `sys_time()` is used.
| diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml b/applications/sasquatch/charts/kafka-connect-manager/values.yaml index e508350f03..2c534cd62d 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml @@ -34,17 +34,17 @@ influxdbSink: excludedTopicsRegex: "" # -- Connector instances to deploy. connectors: - test: + example: # -- Whether this connector instance is deployed. enabled: false # -- Whether to deploy a repairer connector in addition to the original connector instance. repairerConnector: false # -- Regex to select topics from Kafka. - topicsRegex: "source.lsst.sal.Test" + topicsRegex: "example.topic" # -- Fields in the Avro payload that are treated as InfluxDB tags. tags: "" # -- Remove prefix from topic name. - removePrefix: "source." + removePrefix: "" # The s3Sink connector assumes Parquet format with Snappy compression # and a time based partitioner. diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index acd9f5a244..6093a92723 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch. | heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| image.tag | string | `"7.4.1"` | Kafka REST proxy image tag. | +| image.tag | string | `"7.5.3"` | Kafka REST proxy image tag. | | ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml index 67305a2e9d..11512234fa 100644 --- a/applications/sasquatch/charts/rest-proxy/values.yaml +++ b/applications/sasquatch/charts/rest-proxy/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafka REST proxy image tag. - tag: 7.4.1 + tag: 7.5.3 service: # -- Kafka REST proxy service port diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 73fe1951e8..c05c81afd5 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -7,30 +7,41 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | Key | Type | Default | Description | |-----|------|---------|-------------| | cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | -| connect.enabled | bool | `true` | Enable Kafka Connect. | +| cluster.releaseLabel | string | `"site-prom"` | Site wide label required for gathering Prometheus metrics if they are enabled. | +| connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | +| connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | +| connect.enabled | bool | `false` | Enable Kafka Connect. 
| | connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | -| kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. | -| kafka.config."log.retention.hours" | int | `72` | Number of days for a topic's data to be retained. | +| kafka.config."log.retention.bytes" | string | `"350000000000"` | How much disk space Kafka will ensure is available, set to 70% of the data partition size. | +| kafka.config."log.retention.hours" | int | `48` | Number of hours for a topic's data to be retained. | | kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka. | -| kafka.config."offsets.retention.minutes" | int | `4320` | Number of minutes for a consumer group's offsets to be retained. | +| kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be retained. | | kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. | | kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. | +| kafka.disruption_tolerance | int | `0` | Number of down brokers that the system can tolerate. | | kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. | | kafka.externalListener.bootstrap.host | string | `""` | Name used for TLS hostname verification. | | kafka.externalListener.bootstrap.loadBalancerIP | string | `""` | The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the loadBalancerIP when a load balancer is created. This field is ignored if the cloud provider does not support the feature. Once the IP address is provisioned, this option makes it possible to pin the IP address. We can request the same IP next time it is provisioned. This is important because it lets us configure a DNS record, associating a hostname with that pinned IP address. | | kafka.externalListener.brokers | list | `[]` | Brokers configuration. host is used in the brokers' advertised.brokers configuration and for TLS hostname verification. The format is a list of maps. | | kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. | -| kafka.listeners.external.enabled | bool | `true` | Whether external listener is enabled. | -| kafka.listeners.plain.enabled | bool | `true` | Whether internal plaintext listener is enabled. | -| kafka.listeners.tls.enabled | bool | `true` | Whether internal TLS listener is enabled. | +| kafka.listeners.external.enabled | bool | `false` | Whether external listener is enabled.
| +| kafka.listeners.plain.enabled | bool | `false` | Whether internal plaintext listener is enabled. | +| kafka.listeners.tls.enabled | bool | `false` | Whether internal TLS listener is enabled. | +| kafka.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | | kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. | +| kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging for pod | +| kafkaExporter.enabled | bool | `false` | Enable Kafka exporter | +| kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | +| kafkaExporter.logging | string | `"info"` | Logging level | +| kafkaExporter.resources | object | `{}` | Resource specification for Kafka exporter | +| kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | | mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | | mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. | | mirrormaker2.replication.policy.separator | string | "" | Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. | @@ -44,13 +55,14 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | registry.ingress.hostname | string | `""` | Hostname for the Schema Registry. | | registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | -| users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). | -| users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager | -| users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing | -| users.replicator.enabled | bool | `false` | Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) | -| users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) | -| users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. | +| users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). 
| +| users.kafkaConnectManager.enabled | bool | `false` | Enable user kafka-connect-manager | +| users.promptProcessing.enabled | bool | `false` | Enable user prompt-processing | +| users.replicator.enabled | bool | `false` | Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) | +| users.telegraf.enabled | bool | `false` | Enable user telegraf (deployed by parent Sasquatch chart) | +| users.tsSalKafka.enabled | bool | `false` | Enable user ts-salkafka, used at the telescope environments | | zookeeper.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["zookeeper"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Zookeeper pod assignment. | +| zookeeper.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | | zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml index c825464d75..1a5d3e51c8 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml @@ -29,8 +29,9 @@ spec: config.storage.replication.factor: -1 offset.storage.replication.factor: -1 status.storage.replication.factor: -1 - key.converter: io.confluent.connect.avro.AvroConverter - key.converter.schemas.enable: true + {{- range $key, $value := .Values.connect.config }} + {{ $key }}: {{ $value }} + {{- end }} key.converter.schema.registry.url: http://sasquatch-schema-registry.sasquatch:8081 value.converter: io.confluent.connect.avro.AvroConverter value.converter.schemas.enable: true diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml new file mode 100644 index 0000000000..e1090ccdf6 --- /dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml @@ -0,0 +1,154 @@ +{{- if .Values.kafka.metricsConfig.enabled }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: kafka-metrics + labels: + app: sasquatch-kafka-metrics +data: + kafka-metrics-config.yml: | + # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics + lowercaseOutputName: true + rules: + # Special cases and very specific rules + - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + topic: "$4" + partition: "$5" + - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + broker: "$4:$5" + - pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections + name: kafka_server_$1_connections_tls_info + type: GAUGE + labels: + cipher: "$2" + protocol: "$3" + listener: "$4" + networkProcessor: "$5" + - pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections + name: kafka_server_$1_connections_software + type: GAUGE + labels: + clientSoftwareName: "$2" + clientSoftwareVersion: "$3" + listener: "$4" + networkProcessor: "$5" + - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):" + name: kafka_server_$1_$4 + type: GAUGE + labels: + listener: "$2" + networkProcessor: "$3" + - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+) + name: kafka_server_$1_$4 + type: GAUGE + labels: + listener: "$2" + networkProcessor: "$3" + # Some percent metrics use MeanRate attribute + # Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate + - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate + name: kafka_$1_$2_$3_percent + type: GAUGE + # Generic gauges for percents + - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + labels: + "$4": "$5" + # Generic per-second counters with 0-2 key/value pairs + - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count + name: kafka_$1_$2_$3_total + type: COUNTER + # Generic gauges with 0-2 key/value pairs + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value + name: kafka_$1_$2_$3 + type: GAUGE + # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's. + # Note that these are missing the '_sum' metric! + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + quantile: "0.$8" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + quantile: "0.$6" + - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count + name: kafka_$1_$2_$3_count + type: COUNTER + - pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + quantile: "0.$4" + # KRaft mode: uncomment the following lines to export KRaft related metrics + # KRaft overall related metrics + # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics + #- pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):" + # name: kafka_server_raftmetrics_$1 + # type: COUNTER + #- pattern: "kafka.server<type=raft-metrics><>(.+):" + # name: kafka_server_raftmetrics_$1 + # type: GAUGE + # KRaft "low level" channels related metrics + # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics + #- pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):" + # name: kafka_server_raftchannelmetrics_$1 + # type: COUNTER + #- pattern: "kafka.server<type=raft-channel-metrics><>(.+):" + # name: kafka_server_raftchannelmetrics_$1 + # type: GAUGE + # Broker metrics related to fetching metadata topic records in KRaft mode + #- pattern: "kafka.server<type=broker-metadata-metrics><>(.+):" + # name: kafka_server_brokermetadatametrics_$1 + # type: GAUGE +{{- end}} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index a5dbdbfa1a..fab4289e5a 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -92,12 +92,20 @@ spec: config: offsets.topic.replication.factor: {{ .Values.kafka.replicas }} transaction.state.log.replication.factor: {{ .Values.kafka.replicas }} - transaction.state.log.min.isr: {{ .Values.kafka.replicas }} + transaction.state.log.min.isr: {{ sub .Values.kafka.replicas
.Values.kafka.disruption_tolerance }} default.replication.factor: {{ .Values.kafka.replicas }} - min.insync.replicas: {{ .Values.kafka.replicas }} + min.insync.replicas: {{ sub .Values.kafka.replicas .Values.kafka.disruption_tolerance }} {{- range $key, $value := .Values.kafka.config }} {{ $key }}: {{ $value }} {{- end }} + {{- if .Values.kafka.metricsConfig.enabled }} + metricsConfig: + type: jmxPrometheusExporter + valueFrom: + configMapKeyRef: + name: kafka-metrics + key: kafka-metrics-config.yml + {{- end }} storage: type: jbod volumes: @@ -113,6 +121,14 @@ spec: {{- end}} deleteClaim: false zookeeper: + {{- if .Values.zookeeper.metricsConfig.enabled }} + metricsConfig: + type: jmxPrometheusExporter + valueFrom: + configMapKeyRef: + name: zookeeper-metrics + key: zookeeper-metrics-config.yml + {{- end }} template: persistentVolumeClaim: metadata: @@ -139,3 +155,14 @@ spec: entityOperator: topicOperator: {} userOperator: {} + {{- if .Values.kafkaExporter.enabled }} + kafkaExporter: + topicRegex: {{ .Values.kafkaExporter.topicRegex }} + groupRegex: {{ .Values.kafkaExporter.groupRegex }} + logging: {{ .Values.kafkaExporter.logging }} + enableSaramaLogging: {{ .Values.kafkaExporter.enableSaramaLogging }} + {{- with .Values.kafkaExporter.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml index 39ae427957..26ff37d788 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml @@ -57,7 +57,7 @@ spec: # Policy to define the remote topic naming convention. # The default is to preserve topic names in the target cluster. # To add the source cluster alias as a prefix to the topic name, use replication.policy.separator="." and replication.policy.class="org.apache.kafka.connect.mirror.DefaultReplicationPolicy" - replication.policy.separator: {{ default "" .Values.mirrormaker2.replication.policy.separator }} + replication.policy.separator: {{ default "." 
.Values.mirrormaker2.replication.policy.separator }} replication.policy.class: {{ default "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" .Values.mirrormaker2.replication.policy.class }} # Handling high volumes of messages # By increasing the batch size, produce requests are delayed and more messages are @@ -75,8 +75,8 @@ spec: producer.max.request.size: 10485760 producer.buffer.memory: 10485760 # Increase request timeout - producer.request.timeout.ms: 120000 - consumer.request.timeout.ms: 120000 + producer.request.timeout.ms: 240000 + consumer.request.timeout.ms: 240000 heartbeatConnector: config: heartbeats.topic.replication.factor: 3 diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml new file mode 100644 index 0000000000..51d87d0aa6 --- /dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml @@ -0,0 +1,108 @@ +{{- if .Values.kafka.metricsConfig.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: cluster-operator-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchLabels: + strimzi.io/kind: cluster-operator + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: http +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: entity-operator-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: entity-operator + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: healthcheck +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: bridge-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchLabels: + strimzi.io/kind: KafkaBridge + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: rest-api +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: kafka-resources-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchExpressions: + - key: "strimzi.io/kind" + operator: In + values: ["Kafka", "KafkaConnect", "KafkaMirrorMaker", "KafkaMirrorMaker2"] + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: tcp-prometheus + relabelings: + - separator: ; + regex: __meta_kubernetes_pod_label_(strimzi_io_.+) + replacement: $1 + action: labelmap + - sourceLabels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + targetLabel: namespace + replacement: $1 + action: replace + - sourceLabels: [__meta_kubernetes_pod_name] + separator: ; + regex: (.*) + targetLabel: kubernetes_pod_name + replacement: $1 + action: replace + - sourceLabels: [__meta_kubernetes_pod_node_name] + separator: ; + regex: (.*) + targetLabel: node_name + replacement: $1 + action: replace + - sourceLabels: [__meta_kubernetes_pod_host_ip] + separator: ; + regex: (.*) + targetLabel: node_ip + replacement: $1 + action: replace +{{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml new file mode 100644 index 0000000000..8fb8f8a17a --- 
/dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml @@ -0,0 +1,40 @@ +{{- if .Values.zookeeper.metricsConfig.enabled }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: zookeeper-metrics + labels: + app: sasquatch-zookeeper-metrics +data: + zookeeper-metrics-config.yml: | + # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics + lowercaseOutputName: true + rules: + # replicated Zookeeper + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)" + name: "zookeeper_$2" + type: GAUGE + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)" + name: "zookeeper_$3" + type: GAUGE + labels: + replicaId: "$2" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)" + name: "zookeeper_$4" + type: COUNTER + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)" + name: "zookeeper_$4" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)" + name: "zookeeper_$4_$5" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" +{{- end}} diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 82eeae277e..062a868242 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -2,12 +2,16 @@ cluster: # -- Name used for the Kafka cluster, and used by Strimzi for many annotations. name: sasquatch + # -- Site wide label required for gathering Prometheus metrics if they are enabled. + releaseLabel: "site-prom" kafka: # -- Version of Kafka to deploy. version: "3.5.1" # -- Number of Kafka broker replicas to run. replicas: 3 + # -- Number of down brokers that the system can tolerate. + disruption_tolerance: 0 storage: # -- Size of the backing storage disk for each of the Kafka brokers. size: 500Gi @@ -15,11 +19,11 @@ kafka: storageClassName: "" config: # -- Number of minutes for a consumer group's offsets to be retained. - offsets.retention.minutes: 4320 + offsets.retention.minutes: 2880 # -- Number of hours for a topic's data to be retained. - log.retention.hours: 72 - # -- Maximum retained number of bytes for a topic's data. - log.retention.bytes: "429496729600" + log.retention.hours: 48 + # -- How much disk space Kafka will ensure is available, set to 70% of the data partition size. + log.retention.bytes: "350000000000" # -- The largest record batch size allowed by Kafka. message.max.bytes: 10485760 # -- The number of bytes of messages to attempt to fetch for each partition. @@ -27,18 +31,22 @@ kafka: # -- Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. replica.lag.time.max.ms: 120000 + metricsConfig: + # -- Whether metric configuration is enabled. + enabled: false + listeners: plain: # -- Whether internal plaintext listener is enabled. - enabled: true + enabled: false tls: # -- Whether internal TLS listener is enabled. - enabled: true + enabled: false external: # -- Whether external listener is enabled. - enabled: true + enabled: false externalListener: tls: @@ -89,6 +97,20 @@ kafka: # -- Tolerations for Kafka broker pod assignment.
tolerations: [] +kafkaExporter: + # -- Enable Kafka exporter + enabled: false + # -- Kafka topics to monitor + topicRegex: ".*" + # -- Consumer groups to monitor + groupRegex: ".*" + # -- Logging level + logging: info + # -- Enable Sarama logging for pod + enableSaramaLogging: false + # -- Resource specification for Kafka exporter + resources: {} + zookeeper: # -- Number of Zookeeper replicas to run. replicas: 3 @@ -98,6 +120,10 @@ zookeeper: # -- Name of a StorageClass to use when requesting persistent volumes. storageClassName: "" + metricsConfig: + # -- Whether metric configuration is enabled. + enabled: false + # -- Affinity for Zookeeper pod assignment. affinity: podAntiAffinity: @@ -115,11 +141,16 @@ zookeeper: connect: # -- Enable Kafka Connect. - enabled: true + enabled: false # -- Custom strimzi-kafka image with connector plugins used by sasquatch. image: ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655 # -- Number of Kafka Connect replicas to run. replicas: 3 + config: + # -- Set the converter for the message key + key.converter: io.confluent.connect.avro.AvroConverter + # -- Enable converted schemas for the message key + key.converter.schemas.enable: true registry: ingress: @@ -139,28 +170,28 @@ superusers: users: replicator: - # -- Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) + # -- Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) enabled: false tsSalKafka: - # -- Enable user ts-salkafka. - enabled: true + # -- Enable user ts-salkafka, used at the telescope environments + enabled: false kafdrop: # -- Enable user Kafdrop (deployed by parent Sasquatch chart). - enabled: true + enabled: false telegraf: # -- Enable user telegraf (deployed by parent Sasquatch chart) - enabled: true + enabled: false promptProcessing: # -- Enable user prompt-processing - enabled: true + enabled: false kafkaConnectManager: # -- Enable user kafka-connect-manager - enabled: true + enabled: false mirrormaker2: # -- Enable replication in the target (passive) cluster. diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index 50cff0795c..2f8f981d08 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -13,21 +13,17 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | env[0].name | string | `"TELEGRAF_PASSWORD"` | | | env[0].valueFrom.secretKeyRef.key | string | `"telegraf-password"` | Telegraf KafkaUser password. | | env[0].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| env[1].name | string | `"INFLUXDB_TOKEN"` | | -| env[1].valueFrom.secretKeyRef.key | string | `"admin-token"` | InfluxDB v2 admin token. 
| +| env[1].name | string | `"INFLUXDB_USER"` | | +| env[1].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | | env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| env[2].name | string | `"INFLUXDB_USER"` | | -| env[2].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | +| env[2].name | string | `"INFLUXDB_PASSWORD"` | | +| env[2].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | env[2].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| env[3].name | string | `"INFLUXDB_PASSWORD"` | | -| env[3].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | -| env[3].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | image.pullPolicy | string | IfNotPresent | Image pull policy. | -| image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. | -| image.tag | string | `"avrounions"` | Telegraf image tag. | +| image.repo | string | `"quay.io/influxdb/telegraf-nightly"` | Telegraf image repository. | +| image.tag | string | `"latest"` | Telegraf image tag. | | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. | -| influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. | | kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | | kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 1c261665e5..fafa0c8741 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -4,9 +4,9 @@ enabled: false image: # -- Telegraf image repository. - repo: "lsstsqre/telegraf" + repo: "quay.io/influxdb/telegraf-nightly" # -- Telegraf image tag. - tag: "avrounions" + tag: "latest" # -- Image pull policy. # @default -- IfNotPresent pullPolicy: "Always" @@ -31,12 +31,6 @@ env: name: sasquatch # -- Telegraf KafkaUser password. key: telegraf-password - - name: INFLUXDB_TOKEN - valueFrom: - secretKeyRef: - name: sasquatch - # -- InfluxDB v2 admin token. - key: admin-token - name: INFLUXDB_USER valueFrom: secretKeyRef: @@ -122,10 +116,6 @@ influxdb: # -- Name of the InfluxDB v1 database to write to. database: "telegraf-kafka-consumer-v1" -influxdb2: - # -- Name of the InfluxDB v2 bucket to write to. - bucket: "telegraf-kafka-consumer" - # -- Kubernetes resources requests and limits. resources: {} diff --git a/applications/sasquatch/secrets-idfint.yaml b/applications/sasquatch/secrets-idfint.yaml new file mode 100644 index 0000000000..08fc85c129 --- /dev/null +++ b/applications/sasquatch/secrets-idfint.yaml @@ -0,0 +1,16 @@ +"kafka-connect-manager-password": + description: "kafka-connect-manager KafkaUser password." +"prompt-processing-password": + description: "prompt-processing KafkaUser password." +"rest-proxy-password": + description: "rest-proxy-password KafkaUser password." 
+"rest-proxy-sasl-jass-config": + description: "rest-proxy-sasl-jass-config for connection with the Kafka broker." +"sasquatch-test-kafka-properties": + description: "sasquatch-test properties file for connection with the Kafka broker." +"sasquatch-test-password": + description: "sasquatch-test KafkaUser password." +"telegraf-password": + description: "Telegraf KafkaUser password." +"ts-salkafka-password": + description: "ts-salkafka KafkaUser password." diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 92684a3c2b..cb879f3170 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -1,66 +1,71 @@ GENERIC_CLIENT_ID: description: >- - ? + Chronograf client ID for OIDC authentication with Gafaelfawr. value: chronograf-client-id GENERIC_CLIENT_SECRET: description: >- - ? + Chronograf client secret for OIDC authentication with Gafaelfawr. generate: type: password TOKEN_SECRET: description: >- - ? - generate: - type: password -admin-password: - description: >- - ? - generate: - type: password -admin-token: - description: >- - ? + Chronograf token secret for OIDC authentication with Gafaelfawr. generate: type: password influxdb-password: description: >- - ? + InfluxDB admin password. generate: type: password influxdb-user: description: >- - ? + InfluxDB admin user. value: admin kafdrop-kafka-properties: description: >- - ? + Kafdrop properties file for connection with the Kafka broker. + if: kafdrop.enabled kafdrop-password: description: >- - ? + Kafdrop KafkaUser password. + if: kafdrop.enabled kafka-connect-manager-password: description: >- - ? + kafka-connect-manager Kafka user password. + if: strimzi-kafka.users.kafkaConnectManager.enabled prompt-processing-password: description: >- - ? + prompt-processing KafkaUser password. + if: strimzi-kafka.users.promptProcessing.enabled replicator-password: description: >- - ? + replicator KafkaUser password. + if: strimzi-kafka.users.replicator.enabled rest-proxy-password: description: >- - ? + rest-proxy-password KafkaUser password. + if: rest-proxy.enabled rest-proxy-sasl-jass-config: description: >- - ? + rest-proxy-sasl-jass-config for connection with the Kafka broker. + if: rest-proxy.enabled sasquatch-test-kafka-properties: description: >- - ? + sasquatch-test properties file for connection with the Kafka broker. + if: strimzi-kafka.kafka.listeners.plain.enabled sasquatch-test-password: description: >- - ? + sasquatch-test KafkaUser password. + if: strimzi-kafka.kafka.listeners.plain.enabled telegraf-password: description: >- - ? + Telegraf KafkaUser password. + if: telegraf-kafka-consumer.enabled ts-salkafka-password: description: >- - ? + ts-salkafka KafkaUser password. + if: strimzi-kafka.users.ts-salkafka.enabled +connect-push-secret: + description: >- + Write token for pushing generated Strimzi Kafka Connect image to GitHub Container Registry. 
+ if: strimzi-kafka.connect.enabled diff --git a/applications/sasquatch/templates/bucketmapper.yaml b/applications/sasquatch/templates/bucketmapper.yaml deleted file mode 100644 index de676abaf0..0000000000 --- a/applications/sasquatch/templates/bucketmapper.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.influxdb2.enabled }} -apiVersion: batch/v1 -kind: CronJob -metadata: - name: sasquatch-bucketmapper - namespace: sasquatch -spec: - schedule: "3-59/15 * * * *" - successfulJobsHistoryLimit: 1 - jobTemplate: - spec: - template: - spec: - restartPolicy: Never - automountServiceAccountToken: false - containers: - - name: bucketmapper - image: "{{ .Values.bucketmapper.image.repository }}:{{ .Values.bucketmapper.image.tag }}" - securityContext: - allowPrivilegeEscalation: false - runAsNonRoot: true - runAsUser: 405 - runAsGroup: 100 - capabilities: - drop: - - all - readOnlyRootFilesystem: true - env: - - name: "INFLUXDB_TOKEN" - valueFrom: - secretKeyRef: - name: "sasquatch" - key: "admin-token" - - name: "INFLUXDB_ORG" - value: "default" - - name: "INFLUXDB_URL" - value: "http://sasquatch-influxdb2.sasquatch:80" - - name: "DEBUG" - value: "true" - command: [ "bucketmapper" ] -{{- end }} diff --git a/applications/sasquatch/templates/vault-secrets.yaml b/applications/sasquatch/templates/vault-secrets.yaml index d44b29b2dc..aeb2dabbe9 100644 --- a/applications/sasquatch/templates/vault-secrets.yaml +++ b/applications/sasquatch/templates/vault-secrets.yaml @@ -14,3 +14,17 @@ metadata: spec: path: "{{ .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: sasquatch-connect-push-secret + namespace: sasquatch +spec: + path: "{{ .Values.global.vaultSecretsPath }}/sasquatch" + type: kubernetes.io/dockerconfigjson + keys: + - connect-push-secret + templates: + .dockerconfigjson: >- + {% index .Secrets "connect-push-secret" %} diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index ba96c6ff6b..878a150ba8 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -1,6 +1,6 @@ strimzi-kafka: mirrormaker2: - enabled: true + enabled: false source: bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 topicsPattern: "lsst.sal.*, registry-schemas" @@ -9,10 +9,10 @@ strimzi-kafka: separator: "." class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy" sourceRegistry: - enabled: true + enabled: false schemaTopic: source.registry-schemas sourceConnect: - enabled: true + enabled: false resources: requests: cpu: 2 @@ -43,6 +43,14 @@ strimzi-kafka: users: replicator: enabled: true + tsSalKafka: + enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true influxdb: persistence: @@ -61,7 +69,7 @@ influxdb-staging: hostname: base-lsp.lsst.codes source-influxdb: - enabled: true + enabled: false persistence: storageClass: rook-ceph-block size: 10Ti @@ -133,7 +141,7 @@ kafka-connect-manager: topicsRegex: "lsst.sal.MTCamera|lsst.sal.MTHeaderService|lsst.sal.MTOODS" telegraf-kafka-consumer: - enabled: true + enabled: false kafkaConsumers: auxtel: enabled: true @@ -320,7 +328,7 @@ telegraf-kafka-consumer: # environment where data is replicated from. # We need to remove the "source." prefix from the topic name before writing to InfluxDB. 
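# For example, with removePrefix: "source." a replicated topic named
# "source.lsst.sal.ATDome" (topic name illustrative) is written to InfluxDB
# as "lsst.sal.ATDome".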
source-kafka-connect-manager: - enabled: true + enabled: false influxdbSink: connectInfluxUrl: "http://sasquatch-influxdb-staging.sasquatch:8086" connectInfluxDb: "efd" @@ -438,7 +446,7 @@ chronograf: STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/base.json source-kapacitor: - enabled: true + enabled: false persistence: storageClass: rook-ceph-block diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index a4690b321c..af03f201f4 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -16,6 +16,13 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true + registry: ingress: enabled: true @@ -36,24 +43,8 @@ influxdb: memory: 16Gi cpu: 2 -influxdb2: - enabled: true - ingress: - enabled: true - hostname: data-dev.lsst.cloud - resources: - requests: - memory: 16Gi - cpu: 2 - limits: - memory: 16Gi - cpu: 2 - - telegraf-kafka-consumer: enabled: true - image: - tag: "avrounions" kafkaConsumers: test: enabled: true diff --git a/applications/sasquatch/values-idfint.yaml b/applications/sasquatch/values-idfint.yaml index 12d9e206f3..3b8389c218 100644 --- a/applications/sasquatch/values-idfint.yaml +++ b/applications/sasquatch/values-idfint.yaml @@ -29,6 +29,12 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true influxdb: ingress: @@ -47,7 +53,7 @@ kafka-connect-manager: connectors: test: enabled: true - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" kafdrop: ingress: diff --git a/applications/sasquatch/values-roundtable-dev.yaml b/applications/sasquatch/values-roundtable-dev.yaml index 39606b67ca..de45e0b1b9 100644 --- a/applications/sasquatch/values-roundtable-dev.yaml +++ b/applications/sasquatch/values-roundtable-dev.yaml @@ -85,9 +85,6 @@ strimzi-kafka: influxdb: enabled: false -influxdb2: - enabled: false - telegraf-kafka-consumer: enabled: false diff --git a/applications/sasquatch/values-roundtable-prod.yaml b/applications/sasquatch/values-roundtable-prod.yaml index 39606b67ca..de45e0b1b9 100644 --- a/applications/sasquatch/values-roundtable-prod.yaml +++ b/applications/sasquatch/values-roundtable-prod.yaml @@ -85,9 +85,6 @@ strimzi-kafka: influxdb: enabled: false -influxdb2: - enabled: false - telegraf-kafka-consumer: enabled: false diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 3ea7944186..440218f24d 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -23,6 +23,12 @@ strimzi-kafka: enabled: true replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true influxdb: persistence: @@ -31,15 +37,13 @@ influxdb: ingress: enabled: true hostname: summit-lsp.lsst.codes - -influxdb2: - enabled: true - persistence: - storageClass: rook-ceph-block - size: 5Ti - ingress: - enabled: true - hostname: summit-lsp.lsst.codes + resources: + requests: + memory: 128Gi + cpu: 16 + limits: + memory: 128Gi + cpu: 16 kafka-connect-manager: influxdbSink: @@ -58,6 +62,7 @@ kafka-connect-manager: enabled: true repairerConnector: false topicsRegex: ".*MTMount" + tasksMax: "8" comcam: enabled: true repairerConnector: false @@ -74,6 +79,7 @@ kafka-connect-manager: enabled: true repairerConnector: false topicsRegex: ".*MTM1M3" + 
tasksMax: "8" m2: enabled: true repairerConnector: false @@ -119,7 +125,7 @@ kafka-connect-manager: topicsRegex: ".*LaserTracker" telegraf-kafka-consumer: - enabled: true + enabled: false kafkaConsumers: auxtel: enabled: true diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 7cc7292865..6f15453958 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -1,5 +1,11 @@ strimzi-kafka: + cluster: + releaseLabel: pillan-prom kafka: + disruption_tolerance: 1 + config: + auto.create.topics.enable: false + log.cleaner.min.compaction.lag.ms: 259200000 storage: storageClassName: rook-ceph-block externalListener: @@ -15,9 +21,32 @@ strimzi-kafka: host: sasquatch-tts-kafka-1.lsst.codes - loadBalancerIP: "140.252.146.47" host: sasquatch-tts-kafka-2.lsst.codes + metricsConfig: + enabled: true + kafkaExporter: + enabled: true + enableSaramaLogging: true + resources: + requests: + cpu: 200m + memory: 64Mi + limits: + cpu: 500m + memory: 128Mi zookeeper: storage: storageClassName: rook-ceph-block + metricsConfig: + enabled: true + users: + tsSalKafka: + enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true registry: ingress: enabled: true @@ -25,6 +54,10 @@ strimzi-kafka: nginx.ingress.kubernetes.io/rewrite-target: /$2 hostname: tucson-teststand.lsst.codes path: /schema-registry(/|$)(.*) + connect: + config: + key.converter: org.apache.kafka.connect.json.JsonConverter + key.converter.schemas.enable: false influxdb: persistence: @@ -33,17 +66,8 @@ influxdb: enabled: true hostname: tucson-teststand.lsst.codes -influxdb2: - enabled: true - persistence: - storageClass: rook-ceph-block - ingress: - # -- InfluxDB2 ingress configuration - enabled: true - hostname: tucson-teststand.lsst.codes - telegraf-kafka-consumer: - enabled: true + enabled: false kafkaConsumers: auxtel: enabled: true @@ -154,7 +178,7 @@ kafka-connect-manager: topicsRegex: ".*OCPS" test: enabled: true - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" pmd: enabled: true topicsRegex: ".*PMD" @@ -175,6 +199,9 @@ kafka-connect-manager: topicsRegex: ".*GCHeaderService|.*GenericCamera" kafdrop: + image: + tag: 4.0.0 + cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" ingress: enabled: true hostname: tucson-teststand.lsst.codes diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 16ecb7b735..6d7f8a3fcb 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -14,6 +14,14 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true + promptProcessing: + enabled: true influxdb: ingress: @@ -60,7 +68,7 @@ kafka-connect-manager: topicsRegex: ".*OCPS" test: enabled: false - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" pmd: enabled: false topicsRegex: ".*PMD" @@ -123,6 +131,12 @@ kafka-connect-manager: connectInfluxDb: "lsst.lf" topicsRegex: "lsst.lf.*" tags: benchmark_env,module,benchmark_type + lsstprompt: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.prompt" + topicsRegex: "lsst.prompt.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,group kafdrop: ingress: @@ -146,6 +160,7 @@ rest-proxy: - lsst.camera - lsst.verify - lsst.lf + 
- lsst.prompt chronograf: ingress: diff --git a/applications/sasquatch/values-usdfint.yaml b/applications/sasquatch/values-usdfint.yaml new file mode 100644 index 0000000000..7c874fd79e --- /dev/null +++ b/applications/sasquatch/values-usdfint.yaml @@ -0,0 +1,169 @@ +strimzi-kafka: + mirrormaker2: + enabled: false + source: + bootstrapServer: sasquatch-base-kafka-bootstrap.lsst.codes:9094 + topicsPattern: "registry-schemas, lsst.sal.*, lsst.dm.*" + resources: + requests: + cpu: 2 + memory: 4Gi + limits: + cpu: 4 + memory: 8Gi + users: + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true + +influxdb: + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + persistence: + enabled: true + size: 15Ti + +kafka-connect-manager: + influxdbSink: + # Based on the kafka producers configuration for the BTS + # https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-base-teststand.yaml + connectors: + auxtel: + enabled: false + topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" + maintel: + enabled: false + topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" + mtmount: + enabled: false + topicsRegex: ".*MTMount" + comcam: + enabled: false + topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" + eas: + enabled: false + topicsRegex: ".*DIMM|.*DSM|.*WeatherForecast|.*WeatherStation" + latiss: + enabled: false + topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" + m1m3: + enabled: false + topicsRegex: ".*MTM1M3" + m2: + enabled: false + topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" + obssys: + enabled: false + topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" + ocps: + enabled: false + topicsRegex: ".*OCPS" + test: + enabled: false + topicsRegex: "lsst.sal.Test" + pmd: + enabled: false + topicsRegex: ".*PMD" + calsys: + enabled: false + topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" + mtaircompressor: + enabled: false + topicsRegex: ".*MTAirCompressor" + authorize: + enabled: false + topicsRegex: ".*Authorize" + lasertracker: + enabled: false + topicsRegex: ".*LaserTracker" + genericcamera: + enabled: false + topicsRegex: ".*GCHeaderService|.*GenericCamera" + gis: + enabled: false + topicsRegex: ".*GIS" + lsstdm: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.dm" + topicsRegex: "lsst.dm.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + lsstdebug: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.debug" + topicsRegex: "lsst.debug.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + lsstexample: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.example" + topicsRegex: "lsst.example.*" + tags: band,instrument + lsstrubintv: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.rubintv" + topicsRegex: "lsst.rubintv.*" + tags: image_type,observation_reason,science_program,filter,disperser + lsstcamera: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.camera" + topicsRegex: "lsst.camera.*" + lsstverify: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.verify" + topicsRegex: "lsst.verify.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + lsstlf: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.lf" + topicsRegex: "lsst.lf.*" 
+ tags: benchmark_env,module,benchmark_type + +kafdrop: + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + +rest-proxy: + enabled: false + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + kafka: + topics: + - test.next-visit + topicPrefixes: + - test + - lsst.dm + - lsst.debug + - lsst.example + - lsst.rubintv + - lsst.camera + - lsst.verify + - lsst.lf + +chronograf: + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + + env: + GENERIC_NAME: "OIDC" + GENERIC_AUTH_URL: https://usdf-rsp-int.slac.stanford.edu/auth/openid/login + GENERIC_TOKEN_URL: https://usdf-rsp-int.slac.stanford.edu/auth/openid/token + USE_ID_TOKEN: 1 + JWKS_URL: https://usdf-rsp-int.slac.stanford.edu/.well-known/jwks.json + GENERIC_API_URL: https://usdf-rsp-int.slac.stanford.edu/auth/userinfo + GENERIC_SCOPES: openid + GENERIC_API_KEY: sub + PUBLIC_URL: https://usdf-rsp-int.slac.stanford.edu/ + STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/usdfint.json diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 9c425c7908..a97f0c2317 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -1,4 +1,12 @@ strimzi-kafka: + kafka: + listeners: + tls: + enabled: true + plain: + enabled: true + external: + enabled: true mirrormaker2: enabled: true source: @@ -14,25 +22,89 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true + promptProcessing: + enabled: true influxdb: ingress: enabled: true hostname: usdf-rsp.slac.stanford.edu + annotations: + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" persistence: enabled: true size: 15Ti + config: + coordinator: + query-timeout: "300s" source-influxdb: enabled: true ingress: enabled: true hostname: usdf-rsp.slac.stanford.edu + annotations: + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" persistence: enabled: true size: 15Ti + config: + coordinator: + query-timeout: "300s" + +influxdb-enterprise: + enabled: true + meta: + service: + type: LoadBalancer + annotations: + metallb.universe.tf/address-pool: sdf-services + ingress: + enabled: true + hostname: usdf-rsp.slac.stanford.edu + persistence: + # -- Enable InfluxDB Enterprise meta pod persistence + enabled: true + accessMode: ReadWriteOnce + size: 16Gi + # -- InfluxDB Enterprise meta pod resources + resources: + requests: + memory: 2Gi + cpu: 2 + limits: + memory: 4Gi + cpu: 4 + data: + replicas: 2 + ingress: + enabled: true + hostname: usdf-rsp.slac.stanford.edu + # -- Enable InfluxDB Enterprise data pod persistence + persistence: + enabled: true + accessMode: ReadWriteOnce + storageClass: zfs--rubin-efd + size: 30Ti + # -- InfluxDB Enterprise data pod resources + resources: + requests: + memory: 192Gi + cpu: 8 + limits: + memory: 192Gi + cpu: 8 kafka-connect-manager: + enabled: false influxdbSink: # Based on the kafka producers configuration for the Summit # https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-summit.yaml @@ -112,6 +184,87 @@ kafka-connect-manager: repairerConnector: false topicsRegex: ".*LaserTracker" +kafka-connect-manager-enterprise: + enabled: true + influxdbSink: + connectInfluxUrl: "http://sasquatch-influxdb-enterprise-data.sasquatch:8086" + connectInfluxDb: "efd" + 
connectors: + enterprise-auxtel: + enabled: true + repairerConnector: false + topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" + enterprise-maintel: + enabled: true + repairerConnector: false + topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" + enterprise-mtmount: + enabled: true + repairerConnector: false + topicsRegex: ".*MTMount" + tasksMax: "8" + enterprise-comcam: + enabled: true + repairerConnector: false + topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" + enterprise-eas: + enabled: true + repairerConnector: false + topicsRegex: ".*DIMM|.*DSM|.*ESS|.*HVAC|.*WeatherForecast" + enterprise-latiss: + enabled: true + repairerConnector: false + topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" + enterprise-m1m3: + enabled: true + repairerConnector: false + topicsRegex: ".*MTM1M3" + tasksMax: "8" + enterprise-m2: + enabled: true + repairerConnector: false + topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" + enterprise-obssys: + enabled: true + repairerConnector: false + topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" + enterprise-ocps: + enabled: true + repairerConnector: false + topicsRegex: ".*OCPS" + enterprise-test: + enabled: true + repairerConnector: false + topicsRegex: "lsst.sal.Test" + enterprise-pmd: + enabled: true + repairerConnector: false + topicsRegex: ".*PMD" + enterprise-calsys: + enabled: true + repairerConnector: false + topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" + enterprise-mtaircompressor: + enabled: true + repairerConnector: false + topicsRegex: ".*MTAirCompressor" + enterprise-genericcamera: + enabled: true + repairerConnector: false + topicsRegex: ".*GCHeaderService|.*GenericCamera" + enterprise-gis: + enabled: true + repairerConnector: false + topicsRegex: ".*GIS" + enterprise-mtvms: + enabled: true + repairerConnector: false + topicsRegex: ".*MTVMS" + enterprise-lasertracker: + enabled: true + repairerConnector: false + topicsRegex: ".*LaserTracker" + kafdrop: ingress: enabled: true @@ -121,7 +274,6 @@ chronograf: ingress: enabled: true hostname: usdf-rsp.slac.stanford.edu - env: GENERIC_NAME: "OIDC" GENERIC_AUTH_URL: https://usdf-rsp.slac.stanford.edu/auth/openid/login @@ -133,3 +285,6 @@ chronograf: GENERIC_API_KEY: sub PUBLIC_URL: https://usdf-rsp.slac.stanford.edu/ STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/usdfprod.json + +kapacitor: + influxURL: http://sasquatch-influxdb-enterprise-data.sasquatch:8086 diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index e79f893e22..d4e11d27cc 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -1,7 +1,17 @@ # Default values for Sasquatch. -# -- Override strimzi-kafka configuration. -strimzi-kafka: {} +# -- Override strimzi-kafka subchart configuration. +strimzi-kafka: + kafka: + listeners: + tls: + enabled: true + plain: + enabled: true + external: + enabled: true + connect: + enabled: true # -- strimzi-registry-operator configuration. 
strimzi-registry-operator: @@ -52,8 +62,8 @@ influxdb: max-row-limit: 0 coordinator: write-timeout: "1h" - max-concurrent-queries: 0 - query-timeout: "0s" + max-concurrent-queries: 1000 + query-timeout: "30s" log-queries-after: "15s" continuous_queries: enabled: false @@ -116,7 +126,7 @@ influxdb-staging: coordinator: write-timeout: "1h" max-concurrent-queries: 0 - query-timeout: "0s" + query-timeout: "60s" log-queries-after: "15s" continuous_queries: enabled: false @@ -178,8 +188,8 @@ source-influxdb: max-row-limit: 0 coordinator: write-timeout: "1h" - max-concurrent-queries: 0 - query-timeout: "0s" + max-concurrent-queries: 1000 + query-timeout: "30s" log-queries-after: "15s" continuous_queries: enabled: false @@ -199,55 +209,9 @@ source-influxdb: memory: 96Gi cpu: 8 -influxdb2: +# -- Override influxdb-enterprise configuration. +influxdb-enterprise: enabled: false - image: - tag: 2.7.1-alpine - adminUser: - # -- Admin default organization. - organization: "default" - # -- Admin default bucket. - bucket: "default" - # -- Get admin-password/admin-token keys from secret. - existingSecret: sasquatch - persistence: - # -- Enable persistent volume claim. - # By default storageClass is undefined choosing the default provisioner (standard on GKE). - enabled: true - # -- Persistent volume size. - # @default 1Ti for teststand deployments. - size: 1Ti - ingress: - # -- InfluxDB2 ingress configuration - enabled: false - hostname: "" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /api/v2/$2 - className: "nginx" - path: /influxdb2(/|$)(.*) - env: - - name: INFLUXD_STORAGE_WAL_FSYNC_DELAY - value: "100ms" - - name: INFLUXD_HTTP_IDLE_TIMEOUT - value: "0" - - name: INFLUXD_FLUX_LOG_ENABLED - value: "true" - - name: INFLUXD_LOG_LEVEL - value: "debug" - initScripts: - # -- InfluxDB2 initialization scripts - enabled: true - scripts: - init.sh: |+ - #!/bin/bash - influx bucket create --name telegraf-kafka-consumer --org default - resources: - requests: - memory: 16Gi - cpu: 8 - limits: - memory: 96Gi - cpu: 8 # -- Override kafka-connect-manager configuration. kafka-connect-manager: {} @@ -258,6 +222,10 @@ source-kafka-connect-manager: env: kafkaConnectUrl: "http://sasquatch-source-connect-api.sasquatch:8083" +# -- Override kafka-connect-manager-enterprise configuration. +kafka-connect-manager-enterprise: + enabled: false + # -- Override telegraf-kafka-consumer configuration. telegraf-kafka-consumer: {} @@ -271,7 +239,7 @@ chronograf: # -- Chronograf image tag. image: repository: "quay.io/influxdb/chronograf" - tag: 1.9.4 + tag: 1.10.2 # -- Chronograf data persistence configuration. persistence: enabled: true @@ -288,7 +256,6 @@ chronograf: env: HOST_PAGE_DISABLED: true BASE_PATH: /chronograf - CUSTOM_AUTO_REFRESH: "1s=1000" # -- Chronograf secrets, expected keys generic_client_id, generic_client_secret and token_secret. envFromSecret: "sasquatch" resources: @@ -305,7 +272,7 @@ source-kapacitor: # -- Kapacitor image tag. image: repository: kapacitor - tag: 1.7.0 + tag: 1.7.1 # -- Chronograf data persistence configuration. persistence: enabled: true @@ -331,7 +298,7 @@ kapacitor: # -- Kapacitor image tag. image: repository: kapacitor - tag: 1.7.0 + tag: 1.7.1 # -- Chronograf data persistence configuration. 
persistence: enabled: true diff --git a/applications/schedview-prenight/.helmignore b/applications/schedview-prenight/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/schedview-prenight/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/schedview-prenight/Chart.yaml b/applications/schedview-prenight/Chart.yaml new file mode 100644 index 0000000000..c55978fef1 --- /dev/null +++ b/applications/schedview-prenight/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +appVersion: v0.10.0 +description: Run the schedview pre-night briefing dashboard. +name: schedview-prenight +sources: +- https://github.com/lsst/schedview +home: https://schedview.lsst.io/ +type: application +version: 1.0.0 diff --git a/applications/schedview-prenight/README.md b/applications/schedview-prenight/README.md new file mode 100644 index 0000000000..d32b3999f1 --- /dev/null +++ b/applications/schedview-prenight/README.md @@ -0,0 +1,31 @@ +# schedview-prenight + +Run the schedview pre-night briefing dashboard. + +**Homepage:** <https://schedview.lsst.io/> + +## Source Code + +* <https://github.com/lsst/schedview> + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the schedview-prenight deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of schedview-prenight deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of schedview-prenight deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of schedview-prenight deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of schedview-prenight deployment pods | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"Always"` | Pull policy for the schedview-prenight image | +| image.repository | string | `"ghcr.io/lsst/schedview"` | Image to use in the schedview-prenight deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the schedview-prenight deployment pod | +| podAnnotations | object | `{}` | Annotations for the schedview-prenight deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the schedview-prenight deployment pod | +| tolerations | list | `[]` | Tolerations for the schedview-prenight deployment pod | diff --git a/applications/schedview-prenight/templates/_helpers.tpl b/applications/schedview-prenight/templates/_helpers.tpl new file mode 100644 index 0000000000..4a655207a6 --- /dev/null +++ b/applications/schedview-prenight/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label.
+*/}} +{{- define "schedview-prenight.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "schedview-prenight.labels" -}} +helm.sh/chart: {{ include "schedview-prenight.chart" . }} +{{ include "schedview-prenight.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "schedview-prenight.selectorLabels" -}} +app.kubernetes.io/name: "schedview-prenight" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/schedview-prenight/templates/deployment.yaml b/applications/schedview-prenight/templates/deployment.yaml new file mode 100644 index 0000000000..3bd8261d49 --- /dev/null +++ b/applications/schedview-prenight/templates/deployment.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "schedview-prenight" + labels: + {{- include "schedview-prenight.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "schedview-prenight.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "schedview-prenight.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BOKEH_ALLOW_WS_ORIGIN + value: {{ .Values.global.host }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/schedview-prenight" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: tmp + emptyDir: {} diff --git a/applications/schedview-prenight/templates/hpa.yaml b/applications/schedview-prenight/templates/hpa.yaml new file mode 100644 index 0000000000..bce3338552 --- /dev/null +++ b/applications/schedview-prenight/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "schedview-prenight" + labels: + {{- include "schedview-prenight.labels" .
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "schedview-prenight" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/schedview-prenight/templates/ingress.yaml b/applications/schedview-prenight/templates/ingress.yaml new file mode 100644 index 0000000000..8565f31302 --- /dev/null +++ b/applications/schedview-prenight/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "schedview-prenight" + labels: + {{- include "schedview-prenight.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "exec:portal" + loginRedirect: true +template: + metadata: + name: "schedview-prenight" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/schedview-prenight" + pathType: "Prefix" + backend: + service: + name: "schedview-prenight" + port: + number: 8080 diff --git a/applications/schedview-prenight/templates/networkpolicy.yaml b/applications/schedview-prenight/templates/networkpolicy.yaml new file mode 100644 index 0000000000..a576a4d494 --- /dev/null +++ b/applications/schedview-prenight/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "schedview-prenight" +spec: + podSelector: + matchLabels: + {{- include "schedview-prenight.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/schedview-prenight/templates/service.yaml b/applications/schedview-prenight/templates/service.yaml new file mode 100644 index 0000000000..2372652f57 --- /dev/null +++ b/applications/schedview-prenight/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "schedview-prenight" + labels: + {{- include "schedview-prenight.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "schedview-prenight.selectorLabels" . | nindent 4 }} diff --git a/applications/schedview-prenight/values-usdfdev.yaml b/applications/schedview-prenight/values-usdfdev.yaml new file mode 100644 index 0000000000..b030121701 --- /dev/null +++ b/applications/schedview-prenight/values-usdfdev.yaml @@ -0,0 +1,3 @@ +image: + # -- Overrides the image tag whose default is the chart appVersion. 
+ tag: "tickets-preops-4603" diff --git a/applications/schedview-prenight/values.yaml b/applications/schedview-prenight/values.yaml new file mode 100644 index 0000000000..b49993951d --- /dev/null +++ b/applications/schedview-prenight/values.yaml @@ -0,0 +1,64 @@ +# Default values for schedview-prenight. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the schedview-prenight deployment + repository: "ghcr.io/lsst/schedview" + + # -- Pull policy for the schedview-prenight image + pullPolicy: "Always" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of schedview-prenight deployment + enabled: false + + # -- Minimum number of schedview-prenight deployment pods + minReplicas: 1 + + # -- Maximum number of schedview-prenight deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of schedview-prenight deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the schedview-prenight deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the schedview-prenight deployment pod +resources: {} + +# -- Node selection rules for the schedview-prenight deployment pod +nodeSelector: {} + +# -- Tolerations for the schedview-prenight deployment pod +tolerations: [] + +# -- Affinity rules for the schedview-prenight deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/applications/schedview-snapshot/.helmignore b/applications/schedview-snapshot/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/schedview-snapshot/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml new file mode 100644 index 0000000000..b3c7307acf --- /dev/null +++ b/applications/schedview-snapshot/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +appVersion: v0.10.0 +description: Dashboard for examination of scheduler snapshots +name: schedview-snapshot +sources: +- https://github.com/lsst/schedview +home: https://schedview.lsst.io/ +type: application +version: 1.0.0 diff --git a/applications/schedview-snapshot/README.md b/applications/schedview-snapshot/README.md new file mode 100644 index 0000000000..7cee94076a --- /dev/null +++ b/applications/schedview-snapshot/README.md @@ -0,0 +1,31 @@ +# schedview-snapshot + +Dashboard for examination of scheduler snapshots + +**Homepage:** <https://schedview.lsst.io/> + +## Source Code + +* <https://github.com/lsst/schedview> + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the schedview-snapshot deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of schedview-snapshot deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of schedview-snapshot deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of schedview-snapshot deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of schedview-snapshot deployment pods | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"Always"` | Pull policy for the schedview-snapshot image | +| image.repository | string | `"ghcr.io/lsst/schedview"` | Image to use in the schedview-snapshot deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the schedview-snapshot deployment pod | +| podAnnotations | object | `{}` | Annotations for the schedview-snapshot deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the schedview-snapshot deployment pod | +| tolerations | list | `[]` | Tolerations for the schedview-snapshot deployment pod | diff --git a/applications/schedview-snapshot/templates/_helpers.tpl b/applications/schedview-snapshot/templates/_helpers.tpl new file mode 100644 index 0000000000..e489b7f79b --- /dev/null +++ b/applications/schedview-snapshot/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "schedview-snapshot.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "schedview-snapshot.labels" -}} +helm.sh/chart: {{ include "schedview-snapshot.chart" . }} +{{ include "schedview-snapshot.selectorLabels" .
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "schedview-snapshot.selectorLabels" -}} +app.kubernetes.io/name: "schedview-snapshot" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml new file mode 100644 index 0000000000..3d129a04c0 --- /dev/null +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "schedview-snapshot.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "schedview-snapshot.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BOKEH_ALLOW_WS_ORIGIN + value: {{ .Values.global.host }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/schedview-snapshot/dashboard" + port: "http" + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 15 + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp + - name: slashdatcache + mountPath: /.cache + command: + - /bin/bash + - -c + - micromamba run scheduler_dashboard --data_dir /home/mambauser/schedview/test_data + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: tmp + emptyDir: {} + - name: slashdatcache + emptyDir: {} diff --git a/applications/schedview-snapshot/templates/hpa.yaml b/applications/schedview-snapshot/templates/hpa.yaml new file mode 100644 index 0000000000..ec4a91c543 --- /dev/null +++ b/applications/schedview-snapshot/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" .
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "schedview-snapshot" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/schedview-snapshot/templates/ingress.yaml b/applications/schedview-snapshot/templates/ingress.yaml new file mode 100644 index 0000000000..b6cb8ae716 --- /dev/null +++ b/applications/schedview-snapshot/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "exec:portal" + loginRedirect: true +template: + metadata: + name: "schedview-snapshot" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/schedview-snapshot" + pathType: "Prefix" + backend: + service: + name: "schedview-snapshot" + port: + number: 8080 diff --git a/applications/schedview-snapshot/templates/networkpolicy.yaml b/applications/schedview-snapshot/templates/networkpolicy.yaml new file mode 100644 index 0000000000..20ad995598 --- /dev/null +++ b/applications/schedview-snapshot/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "schedview-snapshot" +spec: + podSelector: + matchLabels: + {{- include "schedview-snapshot.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/schedview-snapshot/templates/service.yaml b/applications/schedview-snapshot/templates/service.yaml new file mode 100644 index 0000000000..015e50343c --- /dev/null +++ b/applications/schedview-snapshot/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "schedview-snapshot.selectorLabels" . | nindent 4 }} diff --git a/applications/schedview-snapshot/values-usdfdev.yaml b/applications/schedview-snapshot/values-usdfdev.yaml new file mode 100644 index 0000000000..b030121701 --- /dev/null +++ b/applications/schedview-snapshot/values-usdfdev.yaml @@ -0,0 +1,3 @@ +image: + # -- Overrides the image tag whose default is the chart appVersion. 
+ tag: "tickets-preops-4603" diff --git a/applications/schedview-snapshot/values.yaml b/applications/schedview-snapshot/values.yaml new file mode 100644 index 0000000000..1cf7fabb8e --- /dev/null +++ b/applications/schedview-snapshot/values.yaml @@ -0,0 +1,64 @@ +# Default values for schedview-snapshot. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the schedview-snapshot deployment + repository: "ghcr.io/lsst/schedview" + + # -- Pull policy for the schedview-snapshot image + pullPolicy: "Always" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of schedview-snapshot deployment + enabled: false + + # -- Minimum number of schedview-snapshot deployment pods + minReplicas: 1 + + # -- Maximum number of schedview-snapshot deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of schedview-snapshot deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the schedview-snapshot deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the schedview-snapshot deployment pod +resources: {} + +# -- Node selection rules for the schedview-snapshot deployment pod +nodeSelector: {} + +# -- Tolerations for the schedview-snapshot deployment pod +tolerations: [] + +# -- Affinity rules for the schedview-snapshot deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/applications/semaphore/values-usdfint.yaml b/applications/semaphore/values-usdfint.yaml new file mode 100644 index 0000000000..0527b1efed --- /dev/null +++ b/applications/semaphore/values-usdfint.yaml @@ -0,0 +1,9 @@ +semaphore: + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx +config: + phalanx_env: "usdfint" + github_app_id: "337324" + enable_github_app: "True" diff --git a/applications/sherlock/values-base.yaml b/applications/sherlock/values-base.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-base.yaml +++ b/applications/sherlock/values-base.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/sherlock/values-idfdev.yaml b/applications/sherlock/values-idfdev.yaml index 09d06b446e..6e477f644e 100644 --- a/applications/sherlock/values-idfdev.yaml +++ b/applications/sherlock/values-idfdev.yaml @@ -1,9 +1,9 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" publishUrl: "https://status.lsst.codes/api/data-dev" diff --git a/applications/sherlock/values-idfint.yaml b/applications/sherlock/values-idfint.yaml index f26f30166c..eaf463d1ca 100644 --- a/applications/sherlock/values-idfint.yaml +++ b/applications/sherlock/values-idfint.yaml @@ -1,8 +1,8 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" publishUrl: 
"https://status.lsst.codes/api/data-int" diff --git a/applications/sherlock/values-idfprod.yaml b/applications/sherlock/values-idfprod.yaml index 6dc7b40cad..08f8030234 100644 --- a/applications/sherlock/values-idfprod.yaml +++ b/applications/sherlock/values-idfprod.yaml @@ -1,8 +1,8 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" publishUrl: "https://status.lsst.codes/api/data" diff --git a/applications/sherlock/values-roe.yaml b/applications/sherlock/values-roe.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-roe.yaml +++ b/applications/sherlock/values-roe.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/sherlock/values-summit.yaml b/applications/sherlock/values-summit.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-summit.yaml +++ b/applications/sherlock/values-summit.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/sherlock/values-tucson-teststand.yaml b/applications/sherlock/values-tucson-teststand.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-tucson-teststand.yaml +++ b/applications/sherlock/values-tucson-teststand.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/siav2/.helmignore b/applications/siav2/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/siav2/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/siav2/Chart.yaml b/applications/siav2/Chart.yaml new file mode 100644 index 0000000000..ea261435c0 --- /dev/null +++ b/applications/siav2/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: Simple Image Access v2 service +name: siav2 +type: application +version: 1.0.0 diff --git a/applications/siav2/README.md b/applications/siav2/README.md new file mode 100644 index 0000000000..1afe3efeeb --- /dev/null +++ b/applications/siav2/README.md @@ -0,0 +1,35 @@ +# siav2 + +Simple Image Access v2 service + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the siav2 deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of siav2 deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of siav2 deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of siav2 deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of siav2 deployment pods | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the siav2 image | +| image.repository | string | `"ghcr.io/lsst-sqre/dal-siav2"` | Image to use in the siav2 deployment | +| image.tag | string | `"0.0.4"` | Overrides the image tag whose default is the chart appVersion. | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the siav2 deployment pod | +| obsCoreTable | string | `"ivoa.ObsCore"` | ObsCore table on the TAP service to query | +| podAnnotations | object | `{}` | Annotations for the siav2 deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the siav2 deployment pod | +| tapService | string | `"tap"` | Local TAP service endpoint to query | +| tolerations | list | `[]` | Tolerations for the siav2 deployment pod | +| uws.affinity | object | `{}` | Affinity rules for the UWS database pod | +| uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | +| uws.image.repository | string | `"library/postgres"` | UWS database image to use | +| uws.image.tag | string | `"16.1"` | Tag of UWS database image to use | +| uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | +| uws.podAnnotations | object | `{}` | Annotations for the UWS database pod | +| uws.resources | object | `{"limits":{"cpu":2,"memory":"4Gi"},"requests":{"cpu":0.25,"memory":"1Gi"}}` | Resource limits and requests for the UWS database pod | +| uws.tolerations | list | `[]` | Tolerations for the UWS database pod | diff --git a/applications/siav2/templates/_helpers.tpl b/applications/siav2/templates/_helpers.tpl new file mode 100644 index 0000000000..0cadb5116e --- /dev/null +++ b/applications/siav2/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label.
+*/}} +{{- define "siav2.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "siav2.labels" -}} +helm.sh/chart: {{ include "siav2.chart" . }} +{{ include "siav2.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "siav2.selectorLabels" -}} +app.kubernetes.io/name: "siav2" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/siav2/templates/configmap.yaml b/applications/siav2/templates/configmap.yaml new file mode 100644 index 0000000000..dff5030498 --- /dev/null +++ b/applications/siav2/templates/configmap.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: siav2-config + labels: + {{- include "siav2.labels" . | nindent 4 }} +data: + cadc-registry.properties: | + ivo://ivoa.net/sso#OpenID = {{ .Values.global.baseUrl }}/auth/cadc + catalina.properties: | + # tomcat properties + tomcat.connector.connectionTimeout=20000 + tomcat.connector.keepAliveTimeout=120000 + tomcat.connector.secure=false + tomcat.connector.scheme=http + tomcat.connector.proxyName={{ .Values.global.host }} + tomcat.connector.proxyPort=8080 + + # database connection pools for uws + org.opencadc.sia2.uws.maxActive=5 + org.opencadc.sia2.uws.username=postgres + org.opencadc.sia2.uws.password= + org.opencadc.sia2.uws.url=jdbc:postgresql://siav2-uws-db/ + + # authentication provider + ca.nrc.cadc.auth.IdentityManager=org.opencadc.auth.StandardIdentityManager + #ca.nrc.cadc.auth.PrincipalExtractor.allowBasicATP=true + sia2.properties: | + # TAP service + org.opencadc.sia2.queryService = {{ .Values.global.baseUrl }}/api/{{ .Values.tapService }} + org.opencadc.sia2.table = {{ .Values.obsCoreTable }} + war-rename.conf: | + mv sia2.war api#siav2.war diff --git a/applications/siav2/templates/deployment.yaml b/applications/siav2/templates/deployment.yaml new file mode 100644 index 0000000000..823238e00e --- /dev/null +++ b/applications/siav2/templates/deployment.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "siav2" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "siav2.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "server" + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "siav2.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: "server" + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + readOnlyRootFilesystem: false + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/api/siav2/availability" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: config-volume + mountPath: /config + volumes: + - name: config-volume + configMap: + name: siav2-config + securityContext: + runAsNonRoot: true + runAsUser: 8675309 + runAsGroup: 8675309 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/siav2/templates/hpa.yaml b/applications/siav2/templates/hpa.yaml new file mode 100644 index 0000000000..b1b59e6b4f --- /dev/null +++ b/applications/siav2/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "siav2" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "siav2" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/cachemachine/templates/ingress-anonymous.yaml b/applications/siav2/templates/ingress-anonymous.yaml similarity index 52% rename from applications/cachemachine/templates/ingress-anonymous.yaml rename to applications/siav2/templates/ingress-anonymous.yaml index 4ac68ad654..6102d9611f 100644 --- a/applications/cachemachine/templates/ingress-anonymous.yaml +++ b/applications/siav2/templates/ingress-anonymous.yaml @@ -1,30 +1,30 @@ apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: - name: {{ template "cachemachine.fullname" . }}-anonymous + name: "siav2-anonymous" labels: - {{- include "cachemachine.labels" . | nindent 4 }} + {{- include "siav2.labels" . | nindent 4 }} config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: anonymous: true template: metadata: - name: {{ template "cachemachine.fullname" . }}-anonymous + name: "siav2-anonymous" + {{- with .Values.ingress.annotations }} annotations: - nginx.ingress.kubernetes.io/use-regex: "true" - {{- with .Values.ingress.anonymousAnnotations }} + nginx.ingress.kubernetes.io/ssl-redirect: "true" {{- toYaml .
| nindent 6 }} - {{- end }} + {{- end }} spec: rules: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/cachemachine/.*/(available|desired)" + - path: "/api/siav2/(availability|capabilities|swagger-ui.*)" pathType: "ImplementationSpecific" backend: service: - name: {{ template "cachemachine.fullname" . }} + name: "siav2" port: - number: 80 + number: 8080 diff --git a/applications/siav2/templates/ingress.yaml b/applications/siav2/templates/ingress.yaml new file mode 100644 index 0000000000..7f9fa4bd21 --- /dev/null +++ b/applications/siav2/templates/ingress.yaml @@ -0,0 +1,39 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "siav2-authenticated" + labels: + {{- include "siav2.labels" . | nindent 4 }} +config: + authType: "basic" + baseUrl: {{ .Values.global.baseUrl | quote }} + delegate: + internal: + scopes: + - read:tap + service: "siav2" + useAuthorization: true + loginRedirect: false + scopes: + all: + - read:image +template: + metadata: + name: "siav2-authenticated" + {{- with .Values.ingress.annotations }} + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/api/siav2" + pathType: "Prefix" + backend: + service: + name: "siav2" + port: + number: 8080 diff --git a/applications/siav2/templates/networkpolicy.yaml b/applications/siav2/templates/networkpolicy.yaml new file mode 100644 index 0000000000..8273676873 --- /dev/null +++ b/applications/siav2/templates/networkpolicy.yaml @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "siav2" +spec: + podSelector: + matchLabels: + {{- include "siav2.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "server" + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/siav2/templates/service.yaml b/applications/siav2/templates/service.yaml new file mode 100644 index 0000000000..b05d2f791b --- /dev/null +++ b/applications/siav2/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "siav2" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "siav2.selectorLabels" . | nindent 4 }} diff --git a/applications/siav2/templates/uws-configmap.yaml b/applications/siav2/templates/uws-configmap.yaml new file mode 100644 index 0000000000..4c1f532b78 --- /dev/null +++ b/applications/siav2/templates/uws-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: siav2-uws-configmap + labels: + {{- include "siav2.labels" . | nindent 4 }} +data: + uws-schema-create.sql: | + CREATE SCHEMA uws; diff --git a/applications/siav2/templates/uws-db-deployment.yaml b/applications/siav2/templates/uws-db-deployment.yaml new file mode 100644 index 0000000000..ac7a416d97 --- /dev/null +++ b/applications/siav2/templates/uws-db-deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "siav2-uws-db" + labels: + {{- include "siav2.labels" . 
| nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "siav2.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "siav2-uws-db" + template: + metadata: + {{- with .Values.uws.podAnnotations }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/uws-configmap.yaml") . | sha256sum }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "siav2.labels" . | nindent 8 }} + app.kubernetes.io/component: "siav2-uws-db" + spec: + automountServiceAccountToken: false + containers: + - name: "uws" + image: "{{ .Values.uws.image.repository }}:{{ .Values.uws.image.tag }}" + imagePullPolicy: {{ .Values.uws.image.pullPolicy | quote }} + env: + - name: POSTGRES_HOST_AUTH_METHOD + value: trust + ports: + - containerPort: 5432 + {{- with .Values.uws.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: "data" + mountPath: "/var/lib/postgresql/data" + - name: "init-scripts" + mountPath: "/docker-entrypoint-initdb.d/" + volumes: + - name: "data" + emptyDir: {} + - name: "init-scripts" + configMap: + name: siav2-uws-configmap + {{- with .Values.uws.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.uws.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.uws.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/siav2/templates/uws-db-networkpolicy.yaml b/applications/siav2/templates/uws-db-networkpolicy.yaml new file mode 100644 index 0000000000..219b8a11fe --- /dev/null +++ b/applications/siav2/templates/uws-db-networkpolicy.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "siav2-uws-db" +spec: + podSelector: + matchLabels: + {{- include "siav2.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "siav2-uws-db" + policyTypes: + - Ingress + # Deny all outbound access; PostgreSQL doesn't need to talk to anything. + - Egress + ingress: + # Allow inbound access to UWS database from the server. + - from: + - podSelector: + matchLabels: + {{- include "siav2.selectorLabels" . | nindent 14 }} + app.kubernetes.io/component: "server" + ports: + - protocol: "TCP" + port: 5432 diff --git a/applications/siav2/templates/uws-db-service.yaml b/applications/siav2/templates/uws-db-service.yaml new file mode 100644 index 0000000000..97e1550881 --- /dev/null +++ b/applications/siav2/templates/uws-db-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: "siav2-uws-db" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + ports: + - protocol: "TCP" + port: 5432 + targetPort: 5432 + selector: + {{- include "siav2.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: "siav2-uws-db" diff --git a/applications/moneypenny/templates/vault-secrets.yaml b/applications/siav2/templates/vault-secrets.yaml similarity index 78% rename from applications/moneypenny/templates/vault-secrets.yaml rename to applications/siav2/templates/vault-secrets.yaml index 3be6ea057e..df02f02b05 100644 --- a/applications/moneypenny/templates/vault-secrets.yaml +++ b/applications/siav2/templates/vault-secrets.yaml @@ -3,7 +3,7 @@ kind: VaultSecret metadata: name: pull-secret labels: - {{- include "moneypenny.labels" . | nindent 4 }} + {{- include "siav2.labels" .
| nindent 4 }} spec: path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson diff --git a/applications/siav2/values-idfdev.yaml b/applications/siav2/values-idfdev.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/siav2/values-idfint.yaml b/applications/siav2/values-idfint.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/siav2/values-idfprod.yaml b/applications/siav2/values-idfprod.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/siav2/values-usdfdev.yaml b/applications/siav2/values-usdfdev.yaml new file mode 100644 index 0000000000..995d3094f2 --- /dev/null +++ b/applications/siav2/values-usdfdev.yaml @@ -0,0 +1,2 @@ +tapService: "live" +obsCoreTable: "oga.ObsCore" diff --git a/applications/siav2/values-usdfprod.yaml b/applications/siav2/values-usdfprod.yaml new file mode 100644 index 0000000000..995d3094f2 --- /dev/null +++ b/applications/siav2/values-usdfprod.yaml @@ -0,0 +1,2 @@ +tapService: "live" +obsCoreTable: "oga.ObsCore" diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml new file mode 100644 index 0000000000..7ac091e9c8 --- /dev/null +++ b/applications/siav2/values.yaml @@ -0,0 +1,102 @@ +# Default values for siav2. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the siav2 deployment + repository: "ghcr.io/lsst-sqre/dal-siav2" + + # -- Pull policy for the siav2 image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "0.0.4" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of siav2 deployment + enabled: false + + # -- Minimum number of siav2 deployment pods + minReplicas: 1 + + # -- Maximum number of siav2 deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of siav2 deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the siav2 deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the siav2 deployment pod +resources: {} + +# -- Node selection rules for the siav2 deployment pod +nodeSelector: {} + +# -- Tolerations for the siav2 deployment pod +tolerations: [] + +# -- Affinity rules for the siav2 deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
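+# As a sketch, the values injected by Argo CD take roughly this shape
+# (hypothetical environment values, for illustration only):
+#   global:
+#     baseUrl: "https://data.example.org"
+#     host: "data.example.org"
+#     vaultSecretsPath: "secret/phalanx/example"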
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + +uws: + image: + # -- UWS database image to use + repository: "library/postgres" + + # -- Pull policy for the UWS database image + pullPolicy: "IfNotPresent" + + # -- Tag of UWS database image to use + tag: "16.1" + + # -- Resource limits and requests for the UWS database pod + resources: + requests: + cpu: 0.25 + memory: "1Gi" + limits: + cpu: 2.0 + memory: "4Gi" + + # -- Annotations for the UWS database pod + podAnnotations: {} + + # -- Node selection rules for the UWS database pod + nodeSelector: {} + + # -- Tolerations for the UWS database pod + tolerations: [] + + # -- Affinity rules for the UWS database pod + affinity: {} + +# -- Local TAP service endpoint to query +tapService: "tap" + +# -- ObsCore table on the TAP service to query +obsCoreTable: "ivoa.ObsCore" diff --git a/applications/simonyitel/Chart.yaml b/applications/simonyitel/Chart.yaml new file mode 100644 index 0000000000..7bb99438ea --- /dev/null +++ b/applications/simonyitel/Chart.yaml @@ -0,0 +1,140 @@ +apiVersion: v2 +name: simonyitel +version: 1.0.0 +description: Deployment for the Simonyi Survey Telescope CSCs +dependencies: +- name: csc_collector + version: 1.0.0 + repository: file://../../charts/csc_collector +- name: csc + alias: ccheaderservice + version: 1.0.0 + condition: ccheaderservice.enabled + repository: file://../../charts/csc +- name: csc + alias: ccoods + version: 1.0.0 + condition: ccoods.enabled + repository: file://../../charts/csc +- name: csc + alias: lasertracker1 + version: 1.0.0 + condition: lasertracker1.enabled + repository: file://../../charts/csc +- name: csc + alias: lasertracker1-sim + version: 1.0.0 + condition: lasertracker1-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtaircompressor1 + version: 1.0.0 + condition: mtaircompressor1.enabled + repository: file://../../charts/csc +- name: csc + alias: mtaircompressor1-sim + version: 1.0.0 + condition: mtaircompressor1-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtaircompressor2 + version: 1.0.0 + condition: mtaircompressor2.enabled + repository: file://../../charts/csc +- name: csc + alias: mtaircompressor2-sim + version: 1.0.0 + condition: mtaircompressor2-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtaos + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: mtcamhexapod + version: 1.0.0 + condition: mtcamhexapod.enabled + repository: file://../../charts/csc +- name: csc + alias: mtcamhexapod-sim + version: 1.0.0 + condition: mtcamhexapod-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome + version: 1.0.0 + condition: mtdome.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdome-sim + version: 1.0.0 + condition: mtdome-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtdometrajectory + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: mtheaderservice + version: 1.0.0 + condition: mtheaderservice.enabled + repository: file://../../charts/csc +- name: csc + alias: mtm1m3 + version: 1.0.0 + condition: mtm1m3.enabled + repository: file://../../charts/csc +- name: csc + alias: mtm1m3-sim + version: 1.0.0 + condition: mtm1m3-sim.enabled + repository: file://../../charts/csc +- name: csc + alias:
mtm2 + version: 1.0.0 + condition: mtm2.enabled + repository: file://../../charts/csc +- name: csc + alias: mtm2-sim + version: 1.0.0 + condition: mtm2-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtm2hexapod + version: 1.0.0 + condition: mtm2hexapod.enabled + repository: file://../../charts/csc +- name: csc + alias: mtm2hexapod-sim + version: 1.0.0 + condition: mtm2hexapod-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtmount + version: 1.0.0 + condition: mtmount.enabled + repository: file://../../charts/csc +- name: csc + alias: mtmount-sim + version: 1.0.0 + condition: mtmount-sim.enabled + repository: file://../../charts/csc +- name: csc + alias: mtoods + version: 1.0.0 + condition: mtoods.enabled + repository: file://../../charts/csc +- name: csc + alias: mtptg + version: 1.0.0 + repository: file://../../charts/csc +- name: csc + alias: mtrotator + version: 1.0.0 + condition: mtrotator.enabled + repository: file://../../charts/csc +- name: csc + alias: mtrotator-sim + version: 1.0.0 + condition: mtrotator-sim.enabled + repository: file://../../charts/csc diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md new file mode 100644 index 0000000000..2247abeba4 --- /dev/null +++ b/applications/simonyitel/README.md @@ -0,0 +1,43 @@ +# simonyitel + +Deployment for the Simonyi Survey Telescope CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| ccheaderservice.enabled | bool | `false` | Enable the CCHeaderService CSC | +| ccoods.enabled | bool | `false` | Enable the CCOODS CSC | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
|
+| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
+| global.controlSystem.appNamespace | string | Set by Argo CD | Application namespace for the control system deployment |
+| global.controlSystem.imageTag | string | Set by Argo CD | Image tag for the control system deployment |
+| global.controlSystem.kafkaBrokerAddress | string | Set by Argo CD | Kafka broker address for the control system deployment |
+| global.controlSystem.kafkaTopicReplicationFactor | string | Set by Argo CD | Kafka topic replication factor for control system topics |
+| global.controlSystem.s3EndpointUrl | string | Set by Argo CD | S3 endpoint (LFA) for the control system deployment |
+| global.controlSystem.schemaRegistryUrl | string | Set by Argo CD | Schema registry URL for the control system deployment |
+| global.controlSystem.siteTag | string | Set by Argo CD | Site tag for the control system deployment |
+| global.controlSystem.topicName | string | Set by Argo CD | Topic name tag for the control system deployment |
+| global.host | string | Set by Argo CD | Host name for ingress |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| lasertracker1-sim.enabled | bool | `false` | Enable the LaserTracker:1 simulator CSC |
+| lasertracker1.enabled | bool | `false` | Enable the LaserTracker:1 CSC |
+| mtaircompressor1-sim.enabled | bool | `false` | Enable the MTAirCompressor:1 simulator CSC |
+| mtaircompressor1.enabled | bool | `false` | Enable the MTAirCompressor:1 CSC |
+| mtaircompressor2-sim.enabled | bool | `false` | Enable the MTAirCompressor:2 simulator CSC |
+| mtaircompressor2.enabled | bool | `false` | Enable the MTAirCompressor:2 CSC |
+| mtcamhexapod-sim.enabled | bool | `false` | Enable the MTHexapod:1 simulator CSC |
+| mtcamhexapod.enabled | bool | `false` | Enable the MTHexapod:1 CSC |
+| mtdome-sim.enabled | bool | `false` | Enable the MTDome simulator CSC |
+| mtdome.enabled | bool | `false` | Enable the MTDome CSC |
+| mtheaderservice.enabled | bool | `false` | Enable the MTHeaderService CSC |
+| mtm1m3-sim.enabled | bool | `false` | Enable the MTM1M3 simulator CSC |
+| mtm1m3.enabled | bool | `false` | Enable the MTM1M3 CSC |
+| mtm2-sim.enabled | bool | `false` | Enable the MTM2 simulator CSC |
+| mtm2.enabled | bool | `false` | Enable the MTM2 CSC |
+| mtm2hexapod-sim.enabled | bool | `false` | Enable the MTHexapod:2 simulator CSC |
+| mtm2hexapod.enabled | bool | `false` | Enable the MTHexapod:2 CSC |
+| mtmount-sim.enabled | bool | `false` | Enable the MTMount simulator CSC |
+| mtmount.enabled | bool | `false` | Enable the MTMount CSC |
+| mtrotator-sim.enabled | bool | `false` | Enable the MTRotator simulator CSC |
+| mtrotator.enabled | bool | `false` | Enable the MTRotator CSC |
diff --git a/applications/simonyitel/values-tucson-teststand.yaml b/applications/simonyitel/values-tucson-teststand.yaml
new file mode 100644
index 0000000000..d8d1d03ccf
--- /dev/null
+++ b/applications/simonyitel/values-tucson-teststand.yaml
@@ -0,0 +1,231 @@
+csc_collector:
+  secrets:
+  - name: nexus3-docker
+    key: pull-secret
+    type: kubernetes.io/dockerconfigjson
+  - name: ts-salkafka
+    key: ts/software/ts-salkafka
+  - name: lfa
+    key: ts/software/lfa
+  - name: butler-secret
+    key: butler-secret
+
+ccheaderservice:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/headerservice
+    pullPolicy: Always
+  env:
+    URL_SPEC: --lfa_mode s3 --s3instance tuc
+    TSTAND_HEADERSERVICE: TUCSON
+    CAMERA: cc
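+  # envSecrets (below) appears to map secret keys onto container environment
+  # variables: _name_ is the variable seen by the CSC, _secretName_ refers to
+  # a secret listed under csc_collector.secrets, and _secretKey_ is the key
+  # within that secret.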
+  envSecrets:
+  - name: AWS_ACCESS_KEY_ID
+    secretName: lfa
+    secretKey: aws-access-key-id
+  - name: AWS_SECRET_ACCESS_KEY
+    secretName: lfa
+    secretKey: aws-secret-access-key
+  - name: MYS3_ACCESS_KEY
+    secretName: lfa
+    secretKey: aws-access-key-id
+  - name: MYS3_SECRET_KEY
+    secretName: lfa
+    secretKey: aws-secret-access-key
+
+ccoods:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/ccoods
+    pullPolicy: Always
+  env:
+    DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml
+    CTRL_OODS_CONFIG_FILE: /etc/ccoods.yaml
+  butlerSecret:
+    containerPath: &bS-cP /home/saluser/.lsst
+    dbUser: oods
+  secretPermFixer:
+  - name: butler-secret
+    containerPath: *bS-cP
+  nfsMountpoint:
+  - name: comcam-gen3-butler
+    containerPath: /repo/LSSTComCam
+    readOnly: false
+    server: comcam-archiver.tu.lsst.org
+    serverPath: /repo/LSSTComCam
+  - name: comcam-oods-data
+    containerPath: /data
+    readOnly: false
+    server: comcam-archiver.tu.lsst.org
+    serverPath: /data
+  configfile:
+    path: /etc
+    filename: ccoods.yaml
+    content: |
+      defaultInterval: &interval
+        days: 0
+        hours: 0
+        minutes: 0
+        seconds: 0
+
+      ingester:
+        imageStagingDirectory: /data/staging/comcam/oods
+        butlers:
+          - butler:
+              instrument: lsst.obs.lsst.LsstComCam
+              class:
+                import: lsst.ctrl.oods.gen3ButlerIngester
+                name: Gen3ButlerIngester
+              stagingDirectory: /data/lsstdata/TTS/comcam/oods/gen3butler/raw
+              badFileDirectory: /data/lsstdata/TTS/comcam/oods/gen3butler/badfiles
+              repoDirectory: /repo/LSSTComCam
+              collections:
+                - LSSTComCam/raw/all
+              scanInterval:
+                <<: *interval
+                hours: 1
+              filesOlderThan:
+                <<: *interval
+                days: 30
+              batchSize: 20
+        scanInterval:
+          <<: *interval
+          seconds: 2
+
+      cacheCleaner:
+        # ONLY clean out empty directories here, never files
+        clearEmptyDirectories:
+          - /data/lsstdata/TTS/comcam/oods/gen3butler/raw
+        # clean out empty directories and old files from these directories
+        clearEmptyDirectoriesAndOldFiles:
+          - /data/lsstdata/TTS/comcam/oods/gen3butler/badfiles
+          - /data/staging/comcam/oods
+          - /data/staging/comcam/forwarder
+        scanInterval:
+          <<: *interval
+          hours: 1
+        filesOlderThan:
+          <<: *interval
+          days: 31
+        directoriesEmptyForMoreThan:
+          <<: *interval
+          days: 2
+
+lasertracker1-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/lasertracker
+    pullPolicy: Always
+  env:
+    RUN_ARG: 1 --simulate 2
+
+mtaircompressor1-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtaircompressor
+    pullPolicy: Always
+  env:
+    RUN_ARG: 1 --simulate --state disabled
+
+mtaircompressor2-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtaircompressor
+    pullPolicy: Always
+  env:
+    RUN_ARG: 2 --simulate --state disabled
+
+mtaos:
+  image:
+    repository: ts-dockerhub.lsst.org/mtaos
+    pullPolicy: Always
+  butlerSecret:
+    containerPath: *bS-cP
+    dbUser: oods
+  secretPermFixer:
+  - name: butler-secret
+    containerPath: *bS-cP
+  nfsMountpoint:
+  - name: comcam-gen3-butler
+    containerPath: /repo/LSSTComCam
+    readOnly: false
+    server: comcam-archiver.tu.lsst.org
+    serverPath: /repo/LSSTComCam
+  - name: comcam-gen3-oods
+    containerPath: /data/lsstdata/TTS/comcam
+    readOnly: true
+    server: comcam-archiver.tu.lsst.org
+    serverPath: /lsstdata/TTS/comcam
+  - name: scratch
+    containerPath: /scratch
+    readOnly: false
+    server: nfs-scratch.tu.lsst.org
+    serverPath: /scratch
+
+mtcamhexapod-sim:
+  enabled: true
+  classifier: mthexapod1
+  image:
+    repository: ts-dockerhub.lsst.org/mthexapod
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate 1
+mtdome-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtdome
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate 1
+
+mtdometrajectory:
+  image:
+    repository: ts-dockerhub.lsst.org/mtdometrajectory
+    pullPolicy: Always
+
+mtm1m3-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtm1m3_sim
+    pullPolicy: Always
+
+mtm2-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/m2
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate
+
+mtm2hexapod-sim:
+  enabled: true
+  classifier: mthexapod2
+  image:
+    repository: ts-dockerhub.lsst.org/mthexapod
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate 2
+
+mtmount-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtmount
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate
+
+mtptg:
+  image:
+    repository: ts-dockerhub.lsst.org/ptkernel
+    pullPolicy: Always
+  env:
+    TELESCOPE: MT
+
+mtrotator-sim:
+  enabled: true
+  image:
+    repository: ts-dockerhub.lsst.org/mtrotator
+    pullPolicy: Always
+  env:
+    RUN_ARG: --simulate
diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml
new file mode 100644
index 0000000000..d1872c646f
--- /dev/null
+++ b/applications/simonyitel/values.yaml
@@ -0,0 +1,147 @@
+csc_collector:
+  # -- This section holds secret specifications.
+  # Each object listed can have the following attributes defined:
+  # _name_ (The name used by pods to access the secret)
+  # _key_ (The key in the vault store where the secret resides)
+  # _type_ (OPTIONAL: The secret type. Defaults to Opaque.)
+  secrets: []
+
+ccheaderservice:
+  # -- Enable the CCHeaderService CSC
+  enabled: false
+
+ccoods:
+  # -- Enable the CCOODS CSC
+  enabled: false
+
+lasertracker1:
+  # -- Enable the LaserTracker:1 CSC
+  enabled: false
+
+lasertracker1-sim:
+  # -- Enable the LaserTracker:1 simulator CSC
+  enabled: false
+
+mtaircompressor1:
+  # -- Enable the MTAirCompressor:1 CSC
+  enabled: false
+
+mtaircompressor1-sim:
+  # -- Enable the MTAirCompressor:1 simulator CSC
+  enabled: false
+
+mtaircompressor2:
+  # -- Enable the MTAirCompressor:2 CSC
+  enabled: false
+
+mtaircompressor2-sim:
+  # -- Enable the MTAirCompressor:2 simulator CSC
+  enabled: false
+
+mtcamhexapod:
+  # -- Enable the MTHexapod:1 CSC
+  enabled: false
+
+mtcamhexapod-sim:
+  # -- Enable the MTHexapod:1 simulator CSC
+  enabled: false
+
+mtdome:
+  # -- Enable the MTDome CSC
+  enabled: false
+
+mtdome-sim:
+  # -- Enable the MTDome simulator CSC
+  enabled: false
+
+mtheaderservice:
+  # -- Enable the MTHeaderService CSC
+  enabled: false
+
+mtm1m3:
+  # -- Enable the MTM1M3 CSC
+  enabled: false
+
+mtm1m3-sim:
+  # -- Enable the MTM1M3 simulator CSC
+  enabled: false
+
+mtm2:
+  # -- Enable the MTM2 CSC
+  enabled: false
+
+mtm2-sim:
+  # -- Enable the MTM2 simulator CSC
+  enabled: false
+
+mtm2hexapod:
+  # -- Enable the MTHexapod:2 CSC
+  enabled: false
+
+mtm2hexapod-sim:
+  # -- Enable the MTHexapod:2 simulator CSC
+  enabled: false
+
+mtmount:
+  # -- Enable the MTMount CSC
+  enabled: false
+
+mtmount-sim:
+  # -- Enable the MTMount simulator CSC
+  enabled: false
+
+mtrotator:
+  # -- Enable the MTRotator CSC
+  enabled: false
+
+mtrotator-sim:
+  # -- Enable the MTRotator simulator CSC
+  enabled: false
+
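+# For illustration only: an environment values file such as
+# values-tucson-teststand.yaml enables each desired CSC and points it at a
+# site-specific image, e.g.
+#
+# mtdome-sim:
+#   enabled: true
+#   image:
+#     repository: ts-dockerhub.lsst.org/mtdome
+#     pullPolicy: Always
+#   env:
+#     RUN_ARG: --simulate 1
+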
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+  # -- Base URL for the environment
+  # @default -- Set by Argo CD
+  baseUrl: ""
+
+  # -- Host name for ingress
+  # @default -- Set by Argo CD
+  host: ""
+
+  # -- Base path for Vault secrets
+  # @default -- Set by Argo CD
+  vaultSecretsPath: ""
+
+  controlSystem:
+    # -- Application namespace for the control system deployment
+    # @default -- Set by Argo CD
+    appNamespace: ""
+
+    # -- Image tag for the control system deployment
+    # @default -- Set by Argo CD
+    imageTag: ""
+
+    # -- Site tag for the control system deployment
+    # @default -- Set by Argo CD
+    siteTag: ""
+
+    # -- Topic name tag for the control system deployment
+    # @default -- Set by Argo CD
+    topicName: ""
+
+    # -- Kafka broker address for the control system deployment
+    # @default -- Set by Argo CD
+    kafkaBrokerAddress: ""
+
+    # -- Kafka topic replication factor for control system topics
+    # @default -- Set by Argo CD
+    kafkaTopicReplicationFactor: ""
+
+    # -- Schema registry URL for the control system deployment
+    # @default -- Set by Argo CD
+    schemaRegistryUrl: ""
+
+    # -- S3 endpoint (LFA) for the control system deployment
+    # @default -- Set by Argo CD
+    s3EndpointUrl: ""
diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md
index ecf1781446..1e7664aa30 100644
--- a/applications/sqlproxy-cross-project/README.md
+++ b/applications/sqlproxy-cross-project/README.md
@@ -19,7 +19,7 @@ GCP SQL Proxy as a service
 | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) |
 | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image |
 | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use |
-| image.tag | string | `"1.33.11"` | Tag of Cloud SQL Proxy image to use |
+| image.tag | string | `"1.33.16"` | Tag of Cloud SQL Proxy image to use |
 | nameOverride | string | `""` | Override the base name for resources |
 | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod |
 | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod |
diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml
index 67b1d9e1e4..40f43ce31a 100644
--- a/applications/sqlproxy-cross-project/values.yaml
+++ b/applications/sqlproxy-cross-project/values.yaml
@@ -14,7 +14,7 @@ image:
   repository: "gcr.io/cloudsql-docker/gce-proxy"

   # -- Tag of Cloud SQL Proxy image to use
-  tag: "1.33.11"
+  tag: "1.33.16"

   # -- Pull policy for the Cloud SQL Proxy image
   pullPolicy: "IfNotPresent"
diff --git a/applications/squarebot/secrets.yaml b/applications/squarebot/secrets.yaml
new file mode 100644
index 0000000000..e3a30ec367
--- /dev/null
+++ b/applications/squarebot/secrets.yaml
@@ -0,0 +1,24 @@
+SQUAREBOT_GITHUB_APP_ID:
+  description: >-
+    The ID of the GitHub App shared by all Squarebot services.
+SQUAREBOT_GITHUB_APP_PRIVATE_KEY:
+  description: >-
+    The private key for the GitHub App shared by all Squarebot services.
+  onepassword:
+    encoded: true
+SQUAREBOT_SLACK_APP_ID:
+  description: >-
+    The ID of the Slack App shared by all Squarebot services.
+SQUAREBOT_SLACK_TOKEN:
+  description: >-
+    The Slack bot user OAuth token for the Slack App shared by all Squarebot services.
+SQUAREBOT_SLACK_SIGNING:
+  description: >-
+    The signing secret for all webhook payloads from Slack.
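+# A note on the schema (an assumption, based on its usage in this repository):
+# the onepassword.encoded flag appears to mark values that are stored
+# base64-encoded in 1Password, while plain string secrets omit it.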
+ca.crt:
+  description: >-
+    The cluster CA certificate for the Kubernetes cluster. This is available
+    on the Kafka resource in the sasquatch application under the
+    ``status.listeners[].certificate`` field.
+  onepassword:
+    encoded: true
diff --git a/applications/squarebot/values-roundtable-prod.yaml b/applications/squarebot/values-roundtable-prod.yaml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/applications/squareone/values-usdfint.yaml b/applications/squareone/values-usdfint.yaml
new file mode 100644
index 0000000000..143b9b6ff0
--- /dev/null
+++ b/applications/squareone/values-usdfint.yaml
@@ -0,0 +1,5 @@
+replicaCount: 3
+config:
+  siteName: "Rubin Science Platform"
+  semaphoreUrl: "https://usdf-rsp-int.slac.stanford.edu/semaphore"
+  timesSquareUrl: "https://usdf-rsp-int.slac.stanford.edu/times-square/api"
diff --git a/applications/ssotap/Chart.yaml b/applications/ssotap/Chart.yaml
index ce8558f9e4..c525c47d3e 100644
--- a/applications/ssotap/Chart.yaml
+++ b/applications/ssotap/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: ssotap
 version: 1.0.0
-description: IVOA TAP service
+description: IVOA TAP service for Solar System Objects
 sources:
 - https://github.com/lsst-sqre/tap-postgres
 - https://github.com/opencadc/tap
@@ -9,4 +9,4 @@ sources:
 dependencies:
 - name: cadc-tap
   version: 1.0.0
-  repository: "file://../../charts/cadc-tap/"
+  repository: "file://../../charts/cadc-tap"
diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md
index f51056c0b6..52a4c9e4a8 100644
--- a/applications/ssotap/README.md
+++ b/applications/ssotap/README.md
@@ -1,6 +1,6 @@
 # ssotap

-IVOA TAP service
+IVOA TAP service for Solar System Objects

 ## Source Code
diff --git a/applications/ssotap/secrets.yaml b/applications/ssotap/secrets.yaml
index 4280c602a3..8f50e59791 100644
--- a/applications/ssotap/secrets.yaml
+++ b/applications/ssotap/secrets.yaml
@@ -2,3 +2,7 @@
   description: >-
     Google service account credentials used to write async job output to
     Google Cloud Storage.
+pgpassword:
+  description: >-
+    Password for the external PostgreSQL server that contains the Solar System
+    Objects data.
diff --git a/applications/ssotap/values-usdfint.yaml b/applications/ssotap/values-usdfint.yaml new file mode 100644 index 0000000000..7ed15d6fb4 --- /dev/null +++ b/applications/ssotap/values-usdfint.yaml @@ -0,0 +1,4 @@ +cadc-tap: + tapSchema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-sso" diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index 622d4a01d2..6acbfae9af 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -1,11 +1,11 @@ apiVersion: v2 name: strimzi type: application -version: 0.1.0 +version: 1.0.0 description: Strimzi Kafka Operator home: https://strimzi.io -appVersion: "0.26.0" +appVersion: "0.39.0" dependencies: - name: strimzi-kafka-operator - version: "0.37.0" + version: "0.39.0" repository: https://strimzi.io/charts/ diff --git a/applications/strimzi/README.md b/applications/strimzi/README.md new file mode 100644 index 0000000000..b7b3698bc4 --- /dev/null +++ b/applications/strimzi/README.md @@ -0,0 +1,6 @@ +# strimzi + +Strimzi Kafka Operator + +**Homepage:** + diff --git a/applications/strimzi/values-idfint.yaml b/applications/strimzi/values-idfint.yaml index 01bf88743b..1abe0d7c86 100644 --- a/applications/strimzi/values-idfint.yaml +++ b/applications/strimzi/values-idfint.yaml @@ -6,5 +6,4 @@ strimzi-kafka-operator: memory: "512Mi" watchNamespaces: - "sasquatch" - - "alert-stream-broker" logLevel: "INFO" diff --git a/applications/strimzi/values-usdfint.yaml b/applications/strimzi/values-usdfint.yaml new file mode 100644 index 0000000000..1abe0d7c86 --- /dev/null +++ b/applications/strimzi/values-usdfint.yaml @@ -0,0 +1,9 @@ +strimzi-kafka-operator: + resources: + limits: + memory: "1Gi" + requests: + memory: "512Mi" + watchNamespaces: + - "sasquatch" + logLevel: "INFO" diff --git a/applications/strimzi/values.yaml b/applications/strimzi/values.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/tap/Chart.yaml b/applications/tap/Chart.yaml index a5f99cd97a..fa6f85e365 100644 --- a/applications/tap/Chart.yaml +++ b/applications/tap/Chart.yaml @@ -9,4 +9,4 @@ sources: dependencies: - name: cadc-tap version: 1.0.0 - repository: "file://../../charts/cadc-tap/" + repository: "file://../../charts/cadc-tap" diff --git a/applications/tap/values-idfdev.yaml b/applications/tap/values-idfdev.yaml index 2f972d8040..fa5f854eae 100644 --- a/applications/tap/values-idfdev.yaml +++ b/applications/tap/values-idfdev.yaml @@ -6,3 +6,4 @@ cadc-tap: config: qserv: host: "10.136.1.211:4040" + # Change to 134.79.23.209:4040 to point to USDF qserv diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml deleted file mode 100644 index 7cac2030b8..0000000000 --- a/applications/tap/values-minikube.yaml +++ /dev/null @@ -1,27 +0,0 @@ -cadc-tap: - tapSchema: - image: - repository: "lsstsqre/tap-schema-idfprod-tap" - - mockdb: - enabled: true - - config: - jvmMaxHeapSize: 4G - - resources: - requests: - cpu: 0 - memory: 0 - limits: - cpu: 0 - memory: 0 - - uws: - resources: - requests: - cpu: 0 - memory: 0 - limits: - cpu: 0 - memory: 0 diff --git a/applications/tap/values-usdfint.yaml b/applications/tap/values-usdfint.yaml new file mode 100644 index 0000000000..9627e80cf9 --- /dev/null +++ b/applications/tap/values-usdfint.yaml @@ -0,0 +1,12 @@ +cadc-tap: + tapSchema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-tap" + + config: + qserv: + host: "172.24.49.51:4040" + + gcsBucket: "rubin:rubin-qserv" + gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" + 
gcsBucketType: "S3" diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 6ec2b496c5..f9fc08d575 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.16 + version: 1.1.22 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 08654acf39..2a62ef164f 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: telegraf -version: 1.0.1 +version: 1.0.0 description: Application telemetry collection service home: https://www.influxdata.com/time-series-platform/telegraf/ sources: @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.34 + version: 1.8.40 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | diff --git a/applications/telegraf/README.md b/applications/telegraf/README.md index d54785fc49..dc680e46ce 100644 --- a/applications/telegraf/README.md +++ b/applications/telegraf/README.md @@ -16,7 +16,7 @@ Application telemetry collection service | global.enabledServices | string | Set by Argo CD | services enabled in this RSP instance | | global.host | string | Set by Argo CD | Host name for instance identification | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| prometheus_config | object | `{"argocd":{"application_controller":"http://argocd-application-controller-metrics.argocd.svc:8082/metrics","notifications_controller":"http://argocd-notifications-controller-metrics.argocd.svc:9001/metrics","redis":"http://argocd-redis-metrics.argocd.svc:9121/metrics","repo_server":"http://argocd-repo-server-metrics.argocd.svc:8084/metrics","server":"http://argocd-server-metrics.argocd.svc:8083/metrics"},"ingress-nginx":{"controller":"http://ingress-nginx-controller-metrics.ingress-nginx:10254/metrics"},"nublado2":{"hub":"http://hub.nublado2:8081/metrics"}}` | Use prometheus_config to specify all the services in the RSP that expose prometheus endpoints. A better option, eventually, will be to use telegraf-operator and capture these as pod annotations. | +| prometheus_config | object | `{"argocd":{"application_controller":"http://argocd-application-controller-metrics.argocd.svc:8082/metrics","notifications_controller":"http://argocd-notifications-controller-metrics.argocd.svc:9001/metrics","redis":"http://argocd-redis-metrics.argocd.svc:9121/metrics","repo_server":"http://argocd-repo-server-metrics.argocd.svc:8084/metrics","server":"http://argocd-server-metrics.argocd.svc:8083/metrics"},"ingress-nginx":{"controller":"http://ingress-nginx-controller-metrics.ingress-nginx:10254/metrics"},"nublado":{"hub":"http://hub.nublado:8081/metrics"}}` | Use prometheus_config to specify all the services in the RSP that expose prometheus endpoints. A better option, eventually, will be to use telegraf-operator and capture these as pod annotations. 
| | telegraf.args[0] | string | `"--config"` | | | telegraf.args[1] | string | `"/etc/telegraf-generated/telegraf-generated.conf"` | | | telegraf.config.inputs | list | `[]` | | diff --git a/applications/telegraf/values.yaml b/applications/telegraf/values.yaml index 4d0a61b491..b394abc387 100644 --- a/applications/telegraf/values.yaml +++ b/applications/telegraf/values.yaml @@ -43,8 +43,8 @@ prometheus_config: redis: "http://argocd-redis-metrics.argocd.svc:9121/metrics" repo_server: "http://argocd-repo-server-metrics.argocd.svc:8084/metrics" server: "http://argocd-server-metrics.argocd.svc:8083/metrics" - nublado2: - hub: "http://hub.nublado2:8081/metrics" + nublado: + hub: "http://hub.nublado:8081/metrics" ingress-nginx: controller: "http://ingress-nginx-controller-metrics.ingress-nginx:10254/metrics" diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 018f8c902e..e42cd84c0b 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -12,7 +12,7 @@ appVersion: "0.9.2" dependencies: - name: redis - version: 1.0.8 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/times-square/README.md b/applications/times-square/README.md index c2a2dd8e41..3159971dcb 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 3bf8abb1cb..265246f8ee 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -126,7 +126,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.11" + tag: "1.33.16" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/uws/Chart.yaml b/applications/uws/Chart.yaml new file mode 100644 index 0000000000..67076c7911 --- /dev/null +++ b/applications/uws/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v2 +name: uws +version: 1.0.0 +description: Deployment for the UWS and DM OCPS CSCs +dependencies: +- name: csc_collector + version: 1.0.0 + repository: file://../../charts/csc_collector +- name: csc + alias: atocps + version: 1.0.0 + condition: atocps.enabled + repository: file://../../charts/csc +- name: csc + alias: ccocps + version: 1.0.0 + condition: ccocps.enabled + repository: file://../../charts/csc +- name: csc + alias: mtocps + version: 1.0.0 + condition: mtocps.enabled + repository: file://../../charts/csc +- name: 
uws-api-server
+  version: 1.5.0
diff --git a/applications/uws/README.md b/applications/uws/README.md
new file mode 100644
index 0000000000..d85d0198db
--- /dev/null
+++ b/applications/uws/README.md
@@ -0,0 +1,49 @@
+# uws
+
+Deployment for the UWS and DM OCPS CSCs
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| atocps.enabled | bool | `false` | Enable the OCPS:1 CSC |
+| ccocps.enabled | bool | `false` | Enable the OCPS:2 CSC |
+| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) |
+| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
+| global.controlSystem.appNamespace | string | Set by Argo CD | Application namespace for the control system deployment |
+| global.controlSystem.imageTag | string | Set by Argo CD | Image tag for the control system deployment |
+| global.controlSystem.kafkaBrokerAddress | string | Set by Argo CD | Kafka broker address for the control system deployment |
+| global.controlSystem.kafkaTopicReplicationFactor | string | Set by Argo CD | Kafka topic replication factor for control system topics |
+| global.controlSystem.s3EndpointUrl | string | Set by Argo CD | S3 endpoint (LFA) for the control system deployment |
+| global.controlSystem.schemaRegistryUrl | string | Set by Argo CD | Schema registry URL for the control system deployment |
+| global.controlSystem.siteTag | string | Set by Argo CD | Site tag for the control system deployment |
+| global.controlSystem.topicName | string | Set by Argo CD | Topic name tag for the control system deployment |
+| global.host | string | Set by Argo CD | Host name for ingress |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| mtocps.enabled | bool | `false` | Enable the OCPS:3 CSC |
+| uws-api-server.basePath | string | `"uws-server"` | The base path for the client ingress |
+| uws-api-server.butlerPg | object | `{}` | Configuration for Postgres-backed butlers. The object must have the following attributes defined: _secretKey_ (A label that points to the VaultSecret for the postgres credentials) _containerPath_ (The directory location in the container for the Butler secret) _dbUser_ (The database user name for butler access) |
+| uws-api-server.client.enabled | bool | `false` | Turn on the UWS client system if desired |
+| uws-api-server.createNamespace | bool | `false` | Temporary flag to have the service deploy its own namespace. This avoids disrupting other sites. |
+| uws-api-server.hostname | string | `""` | Hostname for the client ingress |
+| uws-api-server.image.repository | string | `"lsstdm/uws-api-server"` | The Docker registry name of the UWS server container image |
+| uws-api-server.image.tag | string | `"latest"` | The tag of the UWS server container image |
+| uws-api-server.job.image.repository | string | `"lsstsqre/centos"` | The Docker registry name of the UWS job container image |
+| uws-api-server.job.image.tag | string | `"d_latest"` | The tag of the UWS job container image |
+| uws-api-server.job.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS job container |
+| uws-api-server.job.securityContext.runAsGroup | int | `202` | Set the GID for the UWS job container entrypoint |
+| uws-api-server.job.securityContext.runAsUser | int | `1000` | Set the UID for the UWS job container entrypoint |
+| uws-api-server.logLevel | string | `"WARNING"` | Log level of the server. Set to "DEBUG" for highest verbosity |
+| uws-api-server.replicaCount | int | `1` | Set the replica count for the UWS server |
+| uws-api-server.server.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS server container |
+| uws-api-server.server.securityContext.runAsGroup | int | `202` | Set the GID for the UWS server container entrypoint |
+| uws-api-server.server.securityContext.runAsUser | int | `1000` | Set the UID for the UWS server container entrypoint |
+| uws-api-server.targetCluster | string | `""` | Target Kubernetes cluster |
+| uws-api-server.vaultPathPrefix | string | `""` | Site-specific Vault path for secrets. |
+| uws-api-server.volumes | list | `[]` | Central data volumes to be mounted in job containers. Each object listed can have the following attributes defined: _name_ (A label identifier for the data volume mount) _server_ (The hostname for the NFS server with the data volume mount) _claimName_ (The PVC claim name for the data volume mount) _mountPath_ (The mount path in the server container for the data volume mount) _exportPath_ (The export path on the NFS server for the data volume mount) _subPath_ (A possible sub path for the data volume mount) _readOnly_ (Flag to mark the data volume mount as read only or read/write) |
+| uws-api-server.workingVolume.claimName | string | `""` | The PVC claim name for the working volume |
+| uws-api-server.workingVolume.exportPath | string | `""` | The export path on the NFS server for the working volume |
+| uws-api-server.workingVolume.mountPath | string | `"/uws"` | The mount path in the server container for the working volume |
+| uws-api-server.workingVolume.name | string | `"job-files"` | A label identifier for the working volume |
+| uws-api-server.workingVolume.server | string | `""` | The hostname for the NFS server with the working volume |
+| uws-api-server.workingVolume.subPath | string | `""` | A possible sub path for the working volume mount |
diff --git a/applications/uws/charts/uws-api-server/Chart.yaml b/applications/uws/charts/uws-api-server/Chart.yaml
new file mode 100644
index 0000000000..c8b882a7fe
--- /dev/null
+++ b/applications/uws/charts/uws-api-server/Chart.yaml
@@ -0,0 +1,9 @@
+apiVersion: v2
+name: uws-api-server
+version: 1.5.0
+description: Helm chart for deploying the Universal Worker Service API Server
+maintainers:
+  - name: Kian-Tat Lim
+    email: ktl@slac.stanford.edu
+  - name: Michael Reuter
+    email: mareuter@lsst.org
diff --git a/applications/uws/charts/uws-api-server/README.md
b/applications/uws/charts/uws-api-server/README.md
new file mode 100644
index 0000000000..e176c46975
--- /dev/null
+++ b/applications/uws/charts/uws-api-server/README.md
@@ -0,0 +1,34 @@
+# uws-api-server
+
+Helm chart for deploying the Universal Worker Service API Server
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| basePath | string | `"uws-server"` | The base path for the client ingress |
+| butlerPg | object | `{}` | Configuration for Postgres-backed butlers. The object must have the following attributes defined: _secretKey_ (A label that points to the VaultSecret for the postgres credentials) _containerPath_ (The directory location in the container for the Butler secret) _dbUser_ (The database user name for butler access) |
+| client.enabled | bool | `false` | Turn on the UWS client system if desired |
+| createNamespace | bool | `false` | Temporary flag to have the service deploy its own namespace. This avoids disrupting other sites. |
+| hostname | string | `""` | Hostname for the client ingress |
+| image.repository | string | `"lsstdm/uws-api-server"` | The Docker registry name of the UWS server container image |
+| image.tag | string | `"latest"` | The tag of the UWS server container image |
+| job.image.repository | string | `"lsstsqre/centos"` | The Docker registry name of the UWS job container image |
+| job.image.tag | string | `"d_latest"` | The tag of the UWS job container image |
+| job.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS job container |
+| job.securityContext.runAsGroup | int | `202` | Set the GID for the UWS job container entrypoint |
+| job.securityContext.runAsUser | int | `1000` | Set the UID for the UWS job container entrypoint |
+| logLevel | string | `"WARNING"` | Log level of the server. Set to "DEBUG" for highest verbosity |
+| replicaCount | int | `1` | Set the replica count for the UWS server |
+| server.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS server container |
+| server.securityContext.runAsGroup | int | `202` | Set the GID for the UWS server container entrypoint |
+| server.securityContext.runAsUser | int | `1000` | Set the UID for the UWS server container entrypoint |
+| targetCluster | string | `""` | Target Kubernetes cluster |
+| vaultPathPrefix | string | `""` | Site-specific Vault path for secrets. |
+| volumes | list | `[]` | Central data volumes to be mounted in job containers.
Each object listed can have the following attributes defined: _name_ (A label identifier for the data volume mount) _server_ (The hostname for the NFS server with the data volume mount) _claimName_ (The PVC claim name for the data volume mount) _mountPath_ (The mount path in the server container for the data volume mount) _exportPath_ (The export path on the NFS server for the data volume mount) _subPath_ (A possible sub path for the data volume mount) _readOnly_ (Flag to mark the data volume mount as read only or read/write) | +| workingVolume.claimName | string | `""` | The PVC claim name for the working volume | +| workingVolume.exportPath | string | `""` | The export path on the NFS server for the working volume | +| workingVolume.mountPath | string | `"/uws"` | The mount path in the server container for the working volume | +| workingVolume.name | string | `"job-files"` | A label identifier for the working volume | +| workingVolume.server | string | `""` | The hostname for the NFS server with the working volume | +| workingVolume.subPath | string | `""` | A possible sub path for the working volume mount | diff --git a/applications/nublado2/templates/_helpers.tpl b/applications/uws/charts/uws-api-server/templates/_helpers.tpl similarity index 68% rename from applications/nublado2/templates/_helpers.tpl rename to applications/uws/charts/uws-api-server/templates/_helpers.tpl index 7b318e97f0..16ab573354 100644 --- a/applications/nublado2/templates/_helpers.tpl +++ b/applications/uws/charts/uws-api-server/templates/_helpers.tpl @@ -2,7 +2,7 @@ {{/* Expand the name of the chart. */}} -{{- define "nublado2.name" -}} +{{- define "uws-api-server.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} {{- end -}} @@ -11,7 +11,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} -{{- define "nublado2.fullname" -}} +{{- define "uws-api-server.fullname" -}} {{- if .Values.fullnameOverride -}} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} @@ -27,30 +27,19 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "nublado2.chart" -}} +{{- define "uws-api-server.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} {{/* Common labels */}} -{{- define "nublado2.labels" -}} -app.kubernetes.io/name: {{ include "nublado2.name" . }} -helm.sh/chart: {{ include "nublado2.chart" . }} +{{- define "uws-api-server.labels" -}} +app.kubernetes.io/name: {{ include "uws-api-server.name" . }} +helm.sh/chart: {{ include "uws-api-server.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "nublado2.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "nublado2.fullname" .) 
.Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/applications/uws/charts/uws-api-server/templates/configmap.yaml b/applications/uws/charts/uws-api-server/templates/configmap.yaml new file mode 100644 index 0000000000..b147ff8d8a --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/configmap.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-configmap +data: + config: | + workingVolume: + {{- toYaml .Values.workingVolume | nindent 6 }} + volumes: + {{- toYaml .Values.volumes | nindent 6 }} + server: + service: {{ .Release.Name }}-server + port: 8080 + protocol: "http" + basePath: "/api/v1" + logLevel: "{{ .Values.logLevel }}" + job: + image: + repository: "{{ .Values.job.image.repository }}" + tag: "{{ .Values.job.image.tag }}" + securityContext: + {{- toYaml .Values.job.securityContext | nindent 8 }} + {{- if .Values.butlerPg }} + butlerPg: + containerPath: "{{ .Values.butlerPg.containerPath }}" + dbUser: "{{ .Values.butlerPg.dbUser }}" + {{- end }} diff --git a/applications/uws/charts/uws-api-server/templates/deployment-client.yaml b/applications/uws/charts/uws-api-server/templates/deployment-client.yaml new file mode 100644 index 0000000000..0785af1199 --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/deployment-client.yaml @@ -0,0 +1,35 @@ +{{- if .Values.client.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-client + labels: + app: {{ .Release.Name }}-client + chart: {{ template "uws-api-server.chart" . }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Release.Name }}-client + template: + metadata: + labels: + app: {{ .Release.Name }}-client + spec: + volumes: + # Server configuration + - name: config + configMap: + name: {{ .Release.Name }}-configmap + containers: + - name: uws-api-client + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: Always + command: ["/bin/bash", "-c", "sleep 1000d"] + volumeMounts: + # Server configuration + - name: config + subPath: config + mountPath: /etc/config/uws.yaml +{{- end }} diff --git a/applications/uws/charts/uws-api-server/templates/deployment-server.yaml b/applications/uws/charts/uws-api-server/templates/deployment-server.yaml new file mode 100644 index 0000000000..02f3c8556d --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/deployment-server.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-server + labels: + app: {{ .Release.Name }}-server + chart: {{ template "uws-api-server.chart" . 
}}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      app: {{ .Release.Name }}-server
+  template:
+    metadata:
+      labels:
+        app: {{ .Release.Name }}-server
+    spec:
+      securityContext:
+        {{- toYaml .Values.server.securityContext | nindent 8 }}
+      serviceAccountName: {{ .Release.Name }}-job-manager
+      volumes:
+        # Server configuration
+        - name: config
+          configMap:
+            name: {{ .Release.Name }}-configmap
+        # Volume to host job data
+        - name: "{{ .Values.workingVolume.name }}"
+          persistentVolumeClaim:
+            claimName: "{{ .Values.workingVolume.claimName }}"
+        {{- range .Values.volumes }}
+        - name: "{{ .name }}"
+          persistentVolumeClaim:
+            claimName: "{{ .claimName }}"
+        {{- end }}
+      containers:
+        - name: uws-api-server
+          image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
+          imagePullPolicy: Always
+          command: ["/bin/bash", "-c", "cd server && python3 server.py"]
+          ports:
+            - containerPort: 8080
+          volumeMounts:
+            # Working directory for job data
+            - name: "{{ .Values.workingVolume.name }}"
+              mountPath: "{{ .Values.workingVolume.mountPath }}"
+              subPath: "{{ .Values.workingVolume.subPath }}"
+            # Server configuration
+            - name: config
+              subPath: config
+              mountPath: /etc/config/uws.yaml
+            # Shared data volumes (environment-specific)
+            {{- range .Values.volumes }}
+            - name: "{{ .name }}"
+              mountPath: "{{ .mountPath }}"
+              subPath: "{{ .subPath }}"
+              readOnly: {{ .readOnly }}
+            {{- end }}
diff --git a/applications/uws/charts/uws-api-server/templates/ingress.yaml b/applications/uws/charts/uws-api-server/templates/ingress.yaml
new file mode 100644
index 0000000000..bb2e67a650
--- /dev/null
+++ b/applications/uws/charts/uws-api-server/templates/ingress.yaml
@@ -0,0 +1,24 @@
+---
+kind: Ingress
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: {{ template "uws-api-server.fullname" $ }}-ingress
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/use-regex: 'true'
+    nginx.ingress.kubernetes.io/auth-type: basic
+    nginx.ingress.kubernetes.io/auth-secret: uws-server-basic-auth
+    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required'
+spec:
+  rules:
+    - host: {{ .Values.hostname }}
+      http:
+        paths:
+          - path: /{{ .Values.basePath }}(/|$)(.*)
+            pathType: "ImplementationSpecific"
+            backend:
+              service:
+                name: {{ .Release.Name }}-server
+                port:
+                  number: 80
diff --git a/applications/uws/charts/uws-api-server/templates/rbac.yaml b/applications/uws/charts/uws-api-server/templates/rbac.yaml
new file mode 100644
index 0000000000..e2248ca9dd
--- /dev/null
+++ b/applications/uws/charts/uws-api-server/templates/rbac.yaml
@@ -0,0 +1,33 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Release.Name }}-job-manager
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ .Release.Name }}-job-manager-role
+rules:
+  - apiGroups: ["batch"]
+    resources: ["jobs", "jobs/status"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+  # ConfigMaps live in the core API group, not "batch", so they need a
+  # separate rule.
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "watch", "create", "delete"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ .Release.Name }}-job-manager-rolebinding
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-job-manager-role
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: {{ .Release.Name }}-job-manager
diff --git a/applications/uws/charts/uws-api-server/templates/service.yaml b/applications/uws/charts/uws-api-server/templates/service.yaml
new file mode 100644
index 0000000000..d59529eadb
--- /dev/null
+++
b/applications/uws/charts/uws-api-server/templates/service.yaml @@ -0,0 +1,13 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ .Release.Name }}-server + labels: + app: {{ .Release.Name }}-server + chart: {{ template "uws-api-server.chart" . }} +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + app: {{ .Release.Name }}-server diff --git a/applications/uws/charts/uws-api-server/templates/vault-secrets.yaml b/applications/uws/charts/uws-api-server/templates/vault-secrets.yaml new file mode 100644 index 0000000000..475ccec3fa --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/vault-secrets.yaml @@ -0,0 +1,13 @@ +{{- if .Values.butlerPg }} +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "uws-api-server.fullname" . }}-butler-secret + namespace: uws + labels: + app.kubernetes.io/name: {{ include "uws-api-server.name" . }} +spec: + path: {{ required "vaultPathPrefix must be set" .Values.vaultPathPrefix }}/{{ required "butlerPg.secretKey must be set" .Values.butlerPg.secretKey }} + type: Opaque +{{- end }} diff --git a/applications/uws/charts/uws-api-server/templates/volumes.yaml b/applications/uws/charts/uws-api-server/templates/volumes.yaml new file mode 100644 index 0000000000..8c12257f3d --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/volumes.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "uws-api-server.fullname" $ }}-uws-server-pv + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} + name: {{ template "uws-api-server.chart" $ }}-uws-server +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + server: {{ .Values.workingVolume.server }} + path: {{ .Values.workingVolume.exportPath }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.workingVolume.claimName }} + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} +spec: + resources: + requests: + storage: 1Gi + accessModes: + - ReadWriteMany + storageClassName: "" + selector: + matchLabels: + name: {{ template "uws-api-server.chart" $ }}-uws-server + +{{- range .Values.volumes }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "uws-api-server.fullname" $ }}-{{ .name }}-pv + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} + name: {{ template "uws-api-server.chart" $ }}-{{ .name }} +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + server: {{ .server }} + path: {{ .exportPath }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .claimName }} + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} +spec: + resources: + requests: + storage: 1Gi + accessModes: + - ReadWriteMany + storageClassName: "" + selector: + matchLabels: + name: {{ template "uws-api-server.chart" $ }}-{{ .name }} +{{- end }} diff --git a/applications/uws/charts/uws-api-server/values.yaml b/applications/uws/charts/uws-api-server/values.yaml new file mode 100644 index 0000000000..fc61e04fbc --- /dev/null +++ b/applications/uws/charts/uws-api-server/values.yaml @@ -0,0 +1,73 @@ +# -- Set the replica count for the UWS server +replicaCount: 1 +image: + # -- The Docker registry name of the UWS server container image + repository: lsstdm/uws-api-server + # -- The tag of the UWS server container 
image
+  tag: latest
+# -- Target Kubernetes cluster
+targetCluster: ""
+# -- Hostname for the client ingress
+hostname: ""
+# -- The base path for the client ingress
+basePath: "uws-server"
+# -- Log level of the server. Set to "DEBUG" for highest verbosity
+logLevel: "WARNING"
+# -- Site-specific Vault path for secrets.
+vaultPathPrefix: ""
+server:
+  securityContext:
+    # -- Set the UID for the UWS server container entrypoint
+    runAsUser: 1000
+    # -- Set the GID for the UWS server container entrypoint
+    runAsGroup: 202
+    # -- Set the filesystem GID for the mounted volumes in the UWS server container
+    fsGroup: 202
+client:
+  # -- Turn on the UWS client system if desired
+  enabled: false
+job:
+  image:
+    # -- The Docker registry name of the UWS job container image
+    repository: "lsstsqre/centos"
+    # -- The tag of the UWS job container image
+    tag: "d_latest"
+  securityContext:
+    # -- Set the UID for the UWS job container entrypoint
+    runAsUser: 1000
+    # -- Set the GID for the UWS job container entrypoint
+    runAsGroup: 202
+    # -- Set the filesystem GID for the mounted volumes in the UWS job container
+    fsGroup: 202
+# -- Configuration for Postgres-backed butlers.
+# The object must have the following attributes defined:
+# _secretKey_ (A label that points to the VaultSecret for the postgres credentials)
+# _containerPath_ (The directory location in the container for the Butler secret)
+# _dbUser_ (The database user name for butler access)
+butlerPg: {}
+workingVolume:
+  # -- A label identifier for the working volume
+  name: job-files
+  # -- The hostname for the NFS server with the working volume
+  server: ""
+  # -- The export path on the NFS server for the working volume
+  exportPath: ""
+  # -- The PVC claim name for the working volume
+  claimName: ""
+  # -- The mount path in the server container for the working volume
+  mountPath: "/uws"
+  # -- A possible sub path for the working volume mount
+  subPath: ""
+# -- Central data volumes to be mounted in job containers.
+# Each object listed can have the following attributes defined:
+# _name_ (A label identifier for the data volume mount)
+# _server_ (The hostname for the NFS server with the data volume mount)
+# _claimName_ (The PVC claim name for the data volume mount)
+# _mountPath_ (The mount path in the server container for the data volume mount)
+# _exportPath_ (The export path on the NFS server for the data volume mount)
+# _subPath_ (A possible sub path for the data volume mount)
+# _readOnly_ (Flag to mark the data volume mount as read only or read/write)
+volumes: []
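+# An illustrative volumes entry (hostnames and paths here are hypothetical;
+# the uws application's values-tucson-teststand.yaml has real ones):
+#
+# volumes:
+#   - name: project
+#     server: nfs-project.example.org
+#     claimName: project-pvc
+#     mountPath: "/project"
+#     exportPath: "/project"
+#     subPath: ""
+#     readOnly: false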
+# -- Temporary flag to have the service deploy its own namespace.
+# This avoids disrupting other sites.
+createNamespace: false
diff --git a/applications/uws/values-tucson-teststand.yaml b/applications/uws/values-tucson-teststand.yaml
new file mode 100644
index 0000000000..211ceec91f
--- /dev/null
+++ b/applications/uws/values-tucson-teststand.yaml
@@ -0,0 +1,87 @@
+uws-api-server:
+  targetCluster: "tucson-teststand"
+  hostname: tucson-teststand.lsst.codes
+  image:
+    tag: latest
+  logLevel: INFO
+  vaultPathPrefix: secret/k8s_operator/tucson-teststand.lsst.codes
+  butlerPg:
+    secretKey: butler-secret
+    containerPath: /home/lsst/.lsst
+    dbUser: oods
+  workingVolume:
+    name: job-files
+    server: nfs-scratch.tu.lsst.org
+    exportPath: "/scratch"
+    claimName: uws-server-pvc
+    mountPath: "/uws"
+    subPath: "uws"
+  volumes:
+  - name: project
+    server: nfs-project.tu.lsst.org
+    claimName: project-pvc
+    mountPath: "/project"
+    exportPath: "/project"
+    subPath: ""
+    readOnly: false
+  - name: home
+    server: nfs-jhome.tu.lsst.org
+    claimName: home-pvc
+    mountPath: "/jhome"
+    exportPath: "/jhome"
+    subPath: ""
+    readOnly: false
+  - name: repo-latiss
+    server: nfs-auxtel.tu.lsst.org
+    claimName: repo-latiss-pvc
+    mountPath: "/repo/LATISS"
+    exportPath: "/auxtel/repo/LATISS"
+    subPath: ""
+    readOnly: false
+  - name: repo-comcam
+    server: comcam-archiver.tu.lsst.org
+    claimName: repo-comcam-pvc
+    mountPath: "/repo/LSSTComCam"
+    exportPath: "/repo/LSSTComCam"
+    subPath: ""
+    readOnly: false
+  - name: data-auxtel
+    server: nfs-auxtel.tu.lsst.org
+    claimName: data-auxtel-pvc
+    mountPath: "/data/lsstdata/TTS/auxtel"
+    exportPath: "/auxtel/lsstdata/TTS/auxtel"
+    subPath: ""
+    readOnly: true
+  - name: data-comcam
+    server: comcam-archiver.tu.lsst.org
+    claimName: data-comcam-pvc
+    mountPath: "/data/lsstdata/TTS/comcam"
+    exportPath: "/lsstdata/TTS/comcam"
+    subPath: ""
+    readOnly: true
+
+csc_collector:
+  secrets:
+  - name: nexus3-docker
+    key: pull-secret
+    type: kubernetes.io/dockerconfigjson
+  - name: ts-salkafka
+    key: ts/software/ts-salkafka
+
+atocps:
+  enabled: true
+  classifier: ocps1
+  image:
+    repository: ts-dockerhub.lsst.org/dmocps
+    pullPolicy: Always
+  env:
+    RUN_ARG: 1
+
+ccocps:
+  enabled: true
+  classifier: ocps2
+  image:
+    repository: ts-dockerhub.lsst.org/dmocps
+    pullPolicy: Always
+  env:
+    RUN_ARG: 2
diff --git a/applications/uws/values.yaml b/applications/uws/values.yaml
new file mode 100644
index 0000000000..13b3083d23
--- /dev/null
+++ b/applications/uws/values.yaml
@@ -0,0 +1,67 @@
+csc_collector:
+  # -- This section holds secret specifications.
+  # Each object listed can have the following attributes defined:
+  # _name_ (The name used by pods to access the secret)
+  # _key_ (The key in the vault store where the secret resides)
+  # _type_ (OPTIONAL: The secret type. Defaults to Opaque.)
+  secrets: []
+
+atocps:
+  # -- Enable the OCPS:1 CSC
+  enabled: false
+
+ccocps:
+  # -- Enable the OCPS:2 CSC
+  enabled: false
+
+mtocps:
+  # -- Enable the OCPS:3 CSC
+  enabled: false
+
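+# For illustration only: an environment values file such as
+# values-tucson-teststand.yaml enables an OCPS CSC like this:
+#
+# atocps:
+#   enabled: true
+#   classifier: ocps1
+#   image:
+#     repository: ts-dockerhub.lsst.org/dmocps
+#     pullPolicy: Always
+#   env:
+#     RUN_ARG: 1
+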
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+  # -- Base URL for the environment
+  # @default -- Set by Argo CD
+  baseUrl: ""
+
+  # -- Host name for ingress
+  # @default -- Set by Argo CD
+  host: ""
+
+  # -- Base path for Vault secrets
+  # @default -- Set by Argo CD
+  vaultSecretsPath: ""
+
+  controlSystem:
+    # -- Application namespace for the control system deployment
+    # @default -- Set by Argo CD
+    appNamespace: ""
+
+    # -- Image tag for the control system deployment
+    # @default -- Set by Argo CD
+    imageTag: ""
+
+    # -- Site tag for the control system deployment
+    # @default -- Set by Argo CD
+    siteTag: ""
+
+    # -- Topic name tag for the control system deployment
+    # @default -- Set by Argo CD
+    topicName: ""
+
+    # -- Kafka broker address for the control system deployment
+    # @default -- Set by Argo CD
+    kafkaBrokerAddress: ""
+
+    # -- Kafka topic replication factor for control system topics
+    # @default -- Set by Argo CD
+    kafkaTopicReplicationFactor: ""
+
+    # -- Schema registry URL for the control system deployment
+    # @default -- Set by Argo CD
+    schemaRegistryUrl: ""
+
+    # -- S3 endpoint (LFA) for the control system deployment
+    # @default -- Set by Argo CD
+    s3EndpointUrl: ""
diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml
index b85db7d0ca..2dd13545d4 100644
--- a/applications/vault-secrets-operator/Chart.yaml
+++ b/applications/vault-secrets-operator/Chart.yaml
@@ -5,7 +5,7 @@ sources:
 - https://github.com/ricoberger/vault-secrets-operator
 dependencies:
 - name: vault-secrets-operator
-  version: 2.5.1
+  version: 2.5.6
   repository: https://ricoberger.github.io/helm-charts/
 annotations:
   phalanx.lsst.io/docs: |
diff --git a/applications/vault-secrets-operator/values-idfint.yaml b/applications/vault-secrets-operator/values-idfint.yaml
index e69de29bb2..1e40e6f933 100644
--- a/applications/vault-secrets-operator/values-idfint.yaml
+++ b/applications/vault-secrets-operator/values-idfint.yaml
@@ -0,0 +1,14 @@
+vault-secrets-operator:
+  environmentVars:
+    - name: VAULT_ROLE_ID
+      valueFrom:
+        secretKeyRef:
+          name: vault-credentials
+          key: VAULT_ROLE_ID
+    - name: VAULT_SECRET_ID
+      valueFrom:
+        secretKeyRef:
+          name: vault-credentials
+          key: VAULT_SECRET_ID
+  vault:
+    authMethod: approle
diff --git a/applications/vault-secrets-operator/values-idfprod.yaml b/applications/vault-secrets-operator/values-idfprod.yaml
index e69de29bb2..1e40e6f933 100644
--- a/applications/vault-secrets-operator/values-idfprod.yaml
+++ b/applications/vault-secrets-operator/values-idfprod.yaml
@@ -0,0 +1,14 @@
+vault-secrets-operator:
+  environmentVars:
+    - name: VAULT_ROLE_ID
+      valueFrom:
+        secretKeyRef:
+          name: vault-credentials
+          key: VAULT_ROLE_ID
+    - name: VAULT_SECRET_ID
+      valueFrom:
+        secretKeyRef:
+          name: vault-credentials
+          key: VAULT_SECRET_ID
+  vault:
+    authMethod: approle
diff --git a/applications/vault-secrets-operator/values-minikube.yaml b/applications/vault-secrets-operator/values-minikube.yaml
index e69de29bb2..1e40e6f933 100644
--- a/applications/vault-secrets-operator/values-minikube.yaml
+++ b/applications/vault-secrets-operator/values-minikube.yaml
@@ -0,0 +1,14 @@
+vault-secrets-operator:
+  environmentVars:
+    - name: VAULT_ROLE_ID
+      valueFrom:
+        secretKeyRef:
+          name: vault-credentials
+          key: VAULT_ROLE_ID
+    - name: VAULT_SECRET_ID
+      valueFrom:
+        secretKeyRef:
+          name: vault-credentials
+          key: VAULT_SECRET_ID
+  vault:
+    authMethod: approle
diff --git a/applications/vault-secrets-operator/values-roundtable-dev.yaml b/applications/vault-secrets-operator/values-roundtable-dev.yaml
index e69de29bb2..1e40e6f933 100644 --- a/applications/vault-secrets-operator/values-roundtable-dev.yaml +++ b/applications/vault-secrets-operator/values-roundtable-dev.yaml @@ -0,0 +1,14 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_SECRET_ID + vault: + authMethod: approle diff --git a/applications/vault-secrets-operator/values-roundtable-prod.yaml b/applications/vault-secrets-operator/values-roundtable-prod.yaml index e69de29bb2..1e40e6f933 100644 --- a/applications/vault-secrets-operator/values-roundtable-prod.yaml +++ b/applications/vault-secrets-operator/values-roundtable-prod.yaml @@ -0,0 +1,14 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_SECRET_ID + vault: + authMethod: approle diff --git a/applications/vault-secrets-operator/values-usdfint.yaml b/applications/vault-secrets-operator/values-usdfint.yaml new file mode 100644 index 0000000000..bfb0f3700f --- /dev/null +++ b/applications/vault-secrets-operator/values-usdfint.yaml @@ -0,0 +1,22 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_AUTH_METHOD + value: approle + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_SECRET_ID + - name: VAULT_TOKEN_MAX_TTL + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_TOKEN_MAX_TTL + vault: + address: "https://vault.slac.stanford.edu" + authMethod: approle diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index df72bdf189..0b5d2f7802 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -8,7 +8,7 @@ appVersion: 1.0.0 dependencies: - name: redis - version: 1.0.8 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index b0e8907118..3f919e8c27 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/datalinker/secrets-idfdev.yaml b/applications/vo-cutouts/secrets-idfint.yaml similarity 
index 100% rename from applications/datalinker/secrets-idfdev.yaml rename to applications/vo-cutouts/secrets-idfint.yaml diff --git a/applications/vo-cutouts/secrets-idfprod.yaml b/applications/vo-cutouts/secrets-idfprod.yaml new file mode 100644 index 0000000000..57998942f8 --- /dev/null +++ b/applications/vo-cutouts/secrets-idfprod.yaml @@ -0,0 +1,20 @@ +aws-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +google-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +postgres-credentials: + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index 9ee9b42d2e..3ec5120b43 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.11" + tag: "1.33.16" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/charts/README.md b/charts/README.md index 3a868d1d92..af2a8f66c6 100644 --- a/charts/README.md +++ b/charts/README.md @@ -12,7 +12,7 @@ To use a chart in this directory, use a dependency stanza similar to the followi dependencies: - name: cadc-tap version: 1.0.0 - repository: "file://../../charts/cadc-tap/" + repository: "file://../../charts/cadc-tap" ``` If a Helm chart should be usable independently of Phalanx and warrants a separate existence with its own version number, that chart should instead go into the [charts](https://github.com/lsst-sqre/charts) repository. diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index e0cc953908..0a2111b8fd 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -14,7 +14,7 @@ IVOA TAP service |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the TAP pod | | config.backend | string | None, must be set to "pg" or "qserv" | What type of backend are we connecting to? 
|
-| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"` | Datalink payload URL |
+| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/2.1.6/datalink-snippets.zip"` | Datalink payload URL |
 | config.gcsBucket | string | The common GCS bucket | Name of GCS bucket in which to store results |
 | config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) |
 | config.gcsBucketUrl | string | The common GCS bucket | Base URL for results stored in GCS bucket |
@@ -52,11 +52,11 @@ IVOA TAP service
 | nodeSelector | object | `{}` | Node selector rules for the TAP pod |
 | podAnnotations | object | `{}` | Annotations for the TAP pod |
 | replicaCount | int | `1` | Number of pods to start |
-| resources | object | `{"limits":{"cpu":8,"memory":"32G"},"requests":{"cpu":2,"memory":"2G"}}` | Resource limits and requests for the TAP pod |
+| resources | object | `{"limits":{"cpu":8,"memory":"32Gi"},"requests":{"cpu":2,"memory":"2Gi"}}` | Resource limits and requests for the TAP pod |
 | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod |
 | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image |
 | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to use. This must be overridden by each environment with the TAP schema for that environment. |
-| tapSchema.image.tag | string | `"2.1.1"` | Tag of TAP schema image |
+| tapSchema.image.tag | string | `"2.1.6"` | Tag of TAP schema image |
 | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod |
 | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod |
 | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod |
@@ -68,6 +68,6 @@ IVOA TAP service
 | uws.image.tag | string | Version of QServ TAP image | Tag of UWS database image to use |
 | uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod |
 | uws.podAnnotations | object | `{}` | Annotations for the UWS database pod |
-| uws.resources | object | `{"limits":{"cpu":2,"memory":"4G"},"requests":{"cpu":0.25,"memory":"1G"}}` | Resource limits and requests for the UWS database pod |
+| uws.resources | object | `{"limits":{"cpu":2,"memory":"4Gi"},"requests":{"cpu":0.25,"memory":"1Gi"}}` | Resource limits and requests for the UWS database pod |
 | uws.tolerations | list | `[]` | Tolerations for the UWS database pod |
 | vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) |
diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml
index f843ccb299..9af72fe1f0 100644
--- a/charts/cadc-tap/values.yaml
+++ b/charts/cadc-tap/values.yaml
@@ -29,10 +29,10 @@ ingress:
 resources:
   requests:
     cpu: 2.0
-    memory: "2G"
+    memory: "2Gi"
   limits:
     cpu: 8.0
-    memory: "32G"
+    memory: "32Gi"
 
 # -- Annotations for the TAP pod
 podAnnotations: {}
@@ -77,7 +77,7 @@ config:
     # -- Tag of tap image to use
     # @default -- Latest release
-    tag: "1.13.0"
+    tag: "1.14.1"
 
   qserv:
     # -- QServ hostname:port to connect to
@@ -93,13 +93,13 @@ config:
     # -- Tag of tap image to use
     # @default -- Latest release
-    tag: "2.1.0"
+    tag: "2.1.1"
 
   # -- Address to a MySQL database containing TAP schema data
   tapSchemaAddress: "cadc-tap-schema-db:3306"
 
   # -- Datalink payload URL
-  datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"
"https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip" + datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/2.1.6/datalink-snippets.zip" # -- Name of GCS bucket in which to store results # @default -- The common GCS bucket @@ -165,7 +165,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.1.1" + tag: "2.1.6" # -- Resource limits and requests for the TAP schema database pod resources: {} @@ -192,16 +192,16 @@ uws: # -- Tag of UWS database image to use # @default -- Version of QServ TAP image - tag: "2.1.0" + tag: "2.1.1" # -- Resource limits and requests for the UWS database pod resources: requests: cpu: 0.25 - memory: "1G" + memory: "1Gi" limits: cpu: 2.0 - memory: "4G" + memory: "4Gi" # -- Annotations for the UWS databse pod podAnnotations: {} diff --git a/charts/csc/Chart.yaml b/charts/csc/Chart.yaml new file mode 100644 index 0000000000..6ddcdf43e1 --- /dev/null +++ b/charts/csc/Chart.yaml @@ -0,0 +1,4 @@ +name: csc +apiVersion: v2 +version: 1.0.0 +description: A Helm chart for deploying the Control System CSCs. diff --git a/charts/csc/README.md b/charts/csc/README.md new file mode 100644 index 0000000000..fb4504e095 --- /dev/null +++ b/charts/csc/README.md @@ -0,0 +1,32 @@ +# csc + +A Helm chart for deploying the Control System CSCs. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | This specifies the scheduling constraints of the pod | +| annotations | object | `{}` | This allows the specification of pod annotations | +| butlerSecret | object | `{}` | This key allows for specification of Butler secret information. **NOTE**: You must fill out a _secretPermFixer_ entry in addition. If this section is used, it must contain the following attributes: _containerPath_ (The directory location for the Butler secret), _dbUser_ (The username for the Butler backend database) | +| configfile | object | `{}` | This key allows specification of a YAML configuration file If this section is used, it must contain the following attributes defined: _path_ (The container path for the configuration file), _filename_ (The configuration file name), _content_ (The YAML content for the configuration file) | +| enabled | bool | `false` | Flag to enable the given CSC application | +| entrypoint | string | `nil` | This key allows specification of a script to override the entrypoint | +| env | object | `{}` | This is the namespace in which the CSC will be placed | +| envSecrets | list | `[]` | This section holds specifications for secret injection. If this section is used, each object listed must have the following attributes defined: _name_ (The label for the secret), _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), _secretKey_ (The key in the vault store containing the necessary secret) | +| image.pullPolicy | string | `"IfNotPresent"` | The policy to apply when pulling an image for deployment | +| image.repository | string | `"lsstts/test"` | The Docker registry name of the container image to use for the CSC | +| image.tag | string | `nil` | The tag of the container image to use for the CSC | +| imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. 
If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) |
+| isPrimary | bool | `true` | This marks the CSC as the primary object to sync upon system starts. This is set to false when two CSCs of the same flavor are deployed (one real, one simulator) to mark the simulator so it can be filtered out for automatic syncing. |
+| nameOverride | string | `""` | Provide an alternate name for the application |
+| nfsMountpoint | list | `[]` | This section holds the information necessary to create a NFS mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _containerPath_ (The path inside the container to mount), _readOnly_ (This sets if the NFS mount is read only or read/write), _server_ (The hostname of the NFS server), _serverPath_ (The path exported by the NFS server) |
+| nodeSelector | object | `{}` | This allows the specification of using specific nodes to run the pod |
+| pvcMountpoint | list | `[]` | This section holds the information necessary to create a volume mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _path_ (The path inside the container to mount), _accessMode_ (This sets the required access mode for the volume mount), _claimSize_ (The requested physical disk space size for the volume mount), _storageClass_ (The Kubernetes provided storage class), _ids.uid_ (OPTIONAL: An alternative UID for mounting), _ids.gid_ (OPTIONAL: An alternative GID for mounting) |
+| resources | object | `{}` | This allows the specification of resources (CPU, memory) required to run the container |
+| secretPermFixer | list | `[]` | This section sets the optional use of an init container for fixing permissions on secret files. If this section is used, each object listed can have the necessary attributes specified: _name_ (The label used for the init container) _containerPath_ (The path in the container where the secret files will be stored) _secretName_ (OPTIONAL: The secret name if different from _name_) _specialInstructions_ (OPTIONAL: This allows for optional instructions to be used when fixing permissions) |
+| securityContext | object | `{}` | This key allows for the specification of a pod security context for volumes. If this section is used, it must contain the following attributes: _user_ (The user id for the volumes) _group_ (The group id for the volumes) _fsGroup_ (OPTIONAL: A special supplemental group that applies to all containers in a pod) |
+| service.port | int | `nil` | The port number to use for the Service. |
+| service.type | string | `nil` | The Service type for the application. This is either ClusterIP (internal access) or LoadBalancer (external access) |
+| service.use | bool | `false` | This sets the use of a Service API for the application |
+| tolerations | list | `[]` | This specifies the tolerations of the pod for any system taints |
diff --git a/charts/csc/templates/_helpers.tpl b/charts/csc/templates/_helpers.tpl
new file mode 100644
index 0000000000..4a3024ae08
--- /dev/null
+++ b/charts/csc/templates/_helpers.tpl
@@ -0,0 +1,77 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "chart.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "chart.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- include "chart.name" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "chart.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create the CSC name by removing sim tag.
+*/}}
+{{- define "csc.name" -}}
+{{- $name := or .Values.classifier .Chart.Name -}}
+{{- if contains "sim" $name -}}
+{{- $name | splitList "-" | first -}}
+{{- else -}}
+{{- $name -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the CSC class name by removing sim tag and index.
+*/}}
+{{- define "csc.class" -}}
+{{- $protectedApps := list "mtm2" "mtm1m3" -}}
+{{- $name := or .Values.classifier .Chart.Name -}}
+{{- if contains "sim" $name -}}
+{{- $name = $name | splitList "-" | first -}}
+{{- end -}}
+{{- $checkForIndex := list -}}
+{{- if not (has $name $protectedApps) -}}
+{{- $checkForIndex = regexFindAll "[0-9]+$" $name -1 -}}
+{{- end -}}
+{{- if $checkForIndex -}}
+{{- $index := first $checkForIndex -}}
+{{- $name = regexReplaceAll $index $name "" -}}
+{{- end -}}
+{{- $name -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "csc.labels" -}}
+helm.sh/chart: {{ .Chart.Name }}
+{{ include "csc.selectorLabels" . }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "csc.selectorLabels" -}}
+csc: {{ include "chart.name" . }}
+csc-name: {{ include "csc.name" . }}
+csc-class: {{ include "csc.class" . }}
+csc-is-primary: {{ .Values.isPrimary | quote }}
+{{- end -}}
diff --git a/charts/csc/templates/configfile-configmap.yaml b/charts/csc/templates/configfile-configmap.yaml
new file mode 100644
index 0000000000..b0fab92aa6
--- /dev/null
+++ b/charts/csc/templates/configfile-configmap.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.configfile }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "chart.name" . }}-configfile
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+data:
+  {{ .Values.configfile.filename }}:
+{{ .Values.configfile.content | toYaml | indent 4 }}
+{{- end }}
+
diff --git a/charts/csc/templates/entrypoint-configmap.yaml b/charts/csc/templates/entrypoint-configmap.yaml
new file mode 100644
index 0000000000..bbd37daad2
--- /dev/null
+++ b/charts/csc/templates/entrypoint-configmap.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.entrypoint }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "chart.name" . }}-entrypoint
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+data:
+  .startup.sh:
+{{ .Values.entrypoint | toYaml | indent 4 }}
+{{- end }}
diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml
new file mode 100644
index 0000000000..1ea87b9a9b
--- /dev/null
+++ b/charts/csc/templates/job.yaml
@@ -0,0 +1,183 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ include "chart.name" . }}
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+  labels:
+    {{- include "csc.labels" . | nindent 4 }}
+  {{- with .Values.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  completions: 1
+  template:
+    metadata:
+      labels:
+        {{- include "csc.selectorLabels" . | nindent 8 }}
+      {{- with .Values.annotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+      containers:
+        - name: {{ include "csc.class" . }}
+          {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystem.imageTag }}
+          image: "{{ .Values.image.repository }}:{{ $imageTag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          stdin: true
+          tty: true
+          envFrom:
+            - configMapRef:
+                name: csc-env-config
+          env:
+            - name: LSST_KAFKA_SECURITY_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: ts-salkafka
+                  key: ts-salkafka-password
+          {{- if or (or .Values.env .Values.envSecrets) .Values.butlerSecret }}
+          {{- range $env_var, $env_value := .Values.env }}
+            - name: {{ $env_var }}
+              value: {{ $env_value | quote }}
+          {{- end }}
+          {{- range $env := .Values.envSecrets }}
+            - name: {{ $env.name }}
+              valueFrom:
+                secretKeyRef:
+                  name: {{ $env.secretName }}
+                  key: {{ $env.secretKey }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.butlerSecret }}
+            - name: PGPASSFILE
+              value: "{{ .Values.butlerSecret.containerPath }}/postgres-credentials.txt"
+            - name: PGUSER
+              value: {{ .Values.butlerSecret.dbUser | quote }}
+          {{- end }}
+          volumeMounts:
+          {{- if .Values.entrypoint }}
+            - name: entrypoint
+              mountPath: /home/saluser/.startup.sh
+              subPath: .startup.sh
+          {{- end }}
+          {{- if .Values.configfile }}
+            - name: configfile
+              mountPath: {{ .Values.configfile.path }}/{{ .Values.configfile.filename }}
+              subPath: {{ .Values.configfile.filename }}
+          {{- end }}
+          {{- if .Values.secretPermFixer }}
+          {{- range $values := .Values.secretPermFixer }}
+            - name: {{ include "chart.name" $ }}-{{ $values.name }}
+              mountPath: {{ $values.containerPath }}
+          {{- end }}
+          {{- end }}
+          {{- if .Values.pvcMountpoint }}
+          {{- range $values := .Values.pvcMountpoint }}
+            - name: {{ $values.name }}
+              mountPath: {{ $values.path }}
+          {{- end}}
+          {{- end}}
+          {{- if .Values.nfsMountpoint }}
+          {{- range $values := .Values.nfsMountpoint }}
+            - name: {{ $values.name }}
+              mountPath: {{ $values.containerPath }}
+              readOnly: {{ $values.readOnly }}
+          {{- end}}
+          {{- end}}
+          {{- with .Values.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+      {{- if .Values.secretPermFixer }}
+      initContainers:
+        {{- range $values := .Values.secretPermFixer }}
+        - name: {{ include "chart.name" $ }}-{{ $values.name }}-perm-fixer
+          image: "alpine:latest"
+          command:
+            - "/bin/ash"
+            - "-c"
+            - |
+              cp -RL /secrets-raw/* /secrets
+              chown 73006:73006 /secrets/*
+              chmod 0600 /secrets/*
+              {{- if $values.specialInstructions }}
+              {{- toYaml $values.specialInstructions | nindent 14 }}
+              {{- end }}
+          volumeMounts:
+            - name: {{ include "chart.name" $ }}-raw-{{ $values.name }}
+              mountPath: /secrets-raw
+              readOnly: true
+            - name: {{ include "chart.name" $ }}-{{ $values.name }}
+              mountPath: /secrets
+        {{- end }}
+      {{- end }}
+      {{- if .Values.securityContext }}
+      securityContext:
+        runAsUser: {{ .Values.securityContext.user }}
+        runAsGroup: {{ .Values.securityContext.group }}
+        {{- if .Values.securityContext.fsGroup }}
+        fsGroup: {{ .Values.securityContext.fsGroup }}
+        {{- end}}
+      {{- end }}
+      volumes:
+      {{- if .Values.entrypoint }}
+        - name: entrypoint
+          configMap:
+            name: {{ include "chart.name" $ }}-entrypoint
+            defaultMode: 0755
+            items:
+              - key: .startup.sh
+                path: .startup.sh
+      {{- end }}
+      {{- if .Values.configfile }}
+        - name: configfile
+          configMap:
+            name: {{ include "chart.name" $ }}-configfile
+            items:
+              - key: {{ .Values.configfile.filename }}
+                path: {{ .Values.configfile.filename }}
+      {{- end }}
+      {{- if .Values.secretPermFixer }}
+      {{- range $values := .Values.secretPermFixer }}
+        - name: {{ include "chart.name" $ }}-{{ $values.name }}
+          emptyDir: {}
+        - name: {{ include "chart.name" $ }}-raw-{{ $values.name }}
+          secret:
+            secretName: {{ or $values.secretName $values.name }}
+            defaultMode: 0600
+      {{- end }}
+      {{- end }}
+      {{- if .Values.pvcMountpoint }}
+      {{- range $values := .Values.pvcMountpoint }}
+        - name: {{ $values.name }}
+          persistentVolumeClaim:
+            claimName: {{ include "chart.name" $ }}-{{ $values.name }}-pvc
+      {{- end }}
+      {{- end }}
+      {{- if .Values.nfsMountpoint }}
+      {{- range $values := .Values.nfsMountpoint }}
+        - name: {{ $values.name }}
+          nfs:
+            path: {{ $values.serverPath }}
+            readOnly: {{ $values.readOnly }}
+            server: {{ $values.server }}
+      {{- end }}
+      {{- end }}
+      restartPolicy: Never
+      imagePullSecrets:
+        - name: nexus3-docker
+      {{- with $.Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with $.Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/charts/csc/templates/mountpoint-pvc.yaml b/charts/csc/templates/mountpoint-pvc.yaml
new file mode 100644
index 0000000000..4e6be7427f
--- /dev/null
+++ b/charts/csc/templates/mountpoint-pvc.yaml
@@ -0,0 +1,26 @@
+{{- if .Values.pvcMountpoint }}
+{{- range $values := .Values.pvcMountpoint }}
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: {{ include "chart.name" $ }}-{{ $values.name }}-pvc
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+  {{- if $values.ids }}
+  annotations:
+    {{- if $values.ids.uid }}
+    pv.beta.kubernetes.io/uid: "{{ $values.ids.uid }}"
+    {{- end }}
+    {{- if $values.ids.gid }}
+    pv.beta.kubernetes.io/gid: "{{ $values.ids.gid }}"
+    {{- end }}
+  {{- end }}
+spec:
+  accessModes:
+    - {{ $values.accessMode | quote }}
+  resources:
+    requests:
+      storage: {{ $values.claimSize }}
+  storageClassName: {{ $values.storageClass }}
+{{- end }}
+{{- end }}
diff --git a/charts/csc/templates/service.yaml b/charts/csc/templates/service.yaml
new file mode 100644
index 0000000000..9e79ab0a13
--- /dev/null
+++ b/charts/csc/templates/service.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.service.use }}
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    csc: {{ include "csc.name" . }}
+  name: {{ include "chart.name" . }}-service
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+spec:
+  {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }}
+  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - port: {{ .Values.service.port }}
+  selector:
+    csc: {{ include "csc.name" . }}
+  type: {{ .Values.service.type }}
+{{- end }}
diff --git a/charts/csc/values.yaml b/charts/csc/values.yaml
new file mode 100644
index 0000000000..6916aa3ec5
--- /dev/null
+++ b/charts/csc/values.yaml
@@ -0,0 +1,91 @@
+# -- Flag to enable the given CSC application
+enabled: false
+# -- Provide an alternate name for the application
+nameOverride: ""
+# -- This marks the CSC as the primary object to sync upon system starts.
+# This is set to false when two CSCs of the same flavor are deployed (one
+# real, one simulator) to mark the simulator so it can be filtered out for
+# automatic syncing.
+isPrimary: true
+image:
+  # -- The Docker registry name of the container image to use for the CSC
+  repository: lsstts/test
+  # -- The tag of the container image to use for the CSC
+  tag:
+  # -- The policy to apply when pulling an image for deployment
+  pullPolicy: IfNotPresent
+# -- The list of pull secrets needed for the images.
+# If this section is used, each object listed can have the following attributes defined:
+# _name_ (The label identifying the pull-secret to use)
+imagePullSecrets: []
+# -- This section holds environment variable key/value pairs to inject into the CSC container
+env: {}
+# -- This section holds specifications for secret injection.
+# If this section is used, each object listed must have the following attributes defined:
+# _name_ (The label for the secret),
+# _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name),
+# _secretKey_ (The key in the vault store containing the necessary secret)
+envSecrets: []
+# -- This key allows specification of a script to override the entrypoint
+entrypoint:
+# -- This key allows specification of a YAML configuration file.
+# If this section is used, it must contain the following attributes defined:
+# _path_ (The container path for the configuration file),
+# _filename_ (The configuration file name),
+# _content_ (The YAML content for the configuration file)
+configfile: {}
+# -- This key allows for specification of Butler secret information.
+# **NOTE**: You must fill out a _secretPermFixer_ entry in addition.
+# If this section is used, it must contain the following attributes:
+# _containerPath_ (The directory location for the Butler secret),
+# _dbUser_ (The username for the Butler backend database)
+butlerSecret: {}
+# -- This key allows for the specification of a pod security context for volumes.
+# If this section is used, it must contain the following attributes:
+# _user_ (The user id for the volumes)
+# _group_ (The group id for the volumes)
+# _fsGroup_ (OPTIONAL: A special supplemental group that applies to all containers in a pod)
+securityContext: {}
+# -- This section holds the information necessary to create a volume mount for the container.
+# If this section is used, each object listed can have the following attributes defined:
+# _name_ (A label identifier for the mountpoint),
+# _path_ (The path inside the container to mount),
+# _accessMode_ (This sets the required access mode for the volume mount),
+# _claimSize_ (The requested physical disk space size for the volume mount),
+# _storageClass_ (The Kubernetes provided storage class),
+# _ids.uid_ (OPTIONAL: An alternative UID for mounting),
+# _ids.gid_ (OPTIONAL: An alternative GID for mounting)
+pvcMountpoint: []
+# -- This section holds the information necessary to create a NFS mount for the container.
+# If this section is used, each object listed can have the following attributes defined:
+# _name_ (A label identifier for the mountpoint),
+# _containerPath_ (The path inside the container to mount),
+# _readOnly_ (This sets if the NFS mount is read only or read/write),
+# _server_ (The hostname of the NFS server),
+# _serverPath_ (The path exported by the NFS server)
+nfsMountpoint: []
+# -- This allows the specification of pod annotations
+annotations: {}
+# -- This section sets the optional use of an init container for fixing permissions on secret files.
+# If this section is used, each object listed can have the necessary attributes specified:
+# _name_ (The label used for the init container)
+# _containerPath_ (The path in the container where the secret files will be stored)
+# _secretName_ (OPTIONAL: The secret name if different from _name_)
+# _specialInstructions_ (OPTIONAL: This allows for optional instructions to be used when fixing permissions)
+secretPermFixer: []
+service:
+  # -- (bool) This sets the use of a Service API for the application
+  use: false
+  # -- (int) The port number to use for the Service.
+  port:
+  # -- (string) The Service type for the application.
+  # This is either ClusterIP (internal access) or LoadBalancer (external access)
+  type:
+# -- This allows the specification of resources (CPU, memory) required to run the container
+resources: {}
+# -- This allows the specification of using specific nodes to run the pod
+nodeSelector: {}
+# -- This specifies the tolerations of the pod for any system taints
+tolerations: []
+# -- This specifies the scheduling constraints of the pod
+affinity: {}
diff --git a/charts/csc_collector/Chart.yaml b/charts/csc_collector/Chart.yaml
new file mode 100644
index 0000000000..a58aa429b0
--- /dev/null
+++ b/charts/csc_collector/Chart.yaml
@@ -0,0 +1,4 @@
+name: csc_collector
+apiVersion: v2
+version: 1.0.0
+description: A Helm chart providing shared information for Control System CSCs.
diff --git a/charts/csc_collector/README.md b/charts/csc_collector/README.md
new file mode 100644
index 0000000000..c78a46d05a
--- /dev/null
+++ b/charts/csc_collector/README.md
@@ -0,0 +1,11 @@
+# csc_collector
+
+A Helm chart providing shared information for Control System CSCs.
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| namespace | string | `""` | Namespace for shared CSC resources. |
+| secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.)
|
+| siteTag | string | `""` | The site-specific name used for handling configurable CSCs |
diff --git a/charts/csc_collector/templates/configmap-env.yaml b/charts/csc_collector/templates/configmap-env.yaml
new file mode 100644
index 0000000000..fa9324a5ea
--- /dev/null
+++ b/charts/csc_collector/templates/configmap-env.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: csc-env-config
+data:
+  LSST_SITE: {{ $.Values.global.controlSystem.siteTag }}
+  LSST_TOPIC_SUBNAME: {{ $.Values.global.controlSystem.topicName }}
+  LSST_KAFKA_BROKER_ADDR: {{ $.Values.global.controlSystem.kafkaBrokerAddress }}
+  LSST_KAFKA_REPLICATION_FACTOR: {{ $.Values.global.controlSystem.kafkaTopicReplicationFactor | quote }}
+  LSST_KAFKA_SECURITY_USERNAME: ts-salkafka
+  LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystem.schemaRegistryUrl }}
+  S3_ENDPOINT_URL: {{ $.Values.global.controlSystem.s3EndpointUrl }}
diff --git a/charts/csc_collector/templates/vault-secret.yaml b/charts/csc_collector/templates/vault-secret.yaml
new file mode 100644
index 0000000000..949a3016a8
--- /dev/null
+++ b/charts/csc_collector/templates/vault-secret.yaml
@@ -0,0 +1,11 @@
+{{- range $secret := .Values.secrets }}
+---
+apiVersion: ricoberger.de/v1alpha1
+kind: VaultSecret
+metadata:
+  name: {{ $secret.name }}
+  namespace: {{ $.Values.global.controlSystem.appNamespace }}
+spec:
+  path: {{ $.Values.global.vaultSecretsPath }}/{{ $secret.key }}
+  type: {{ default "Opaque" $secret.type }}
+{{- end }}
diff --git a/charts/csc_collector/values.yaml b/charts/csc_collector/values.yaml
new file mode 100644
index 0000000000..1df70eb976
--- /dev/null
+++ b/charts/csc_collector/values.yaml
@@ -0,0 +1,12 @@
+# -- Namespace for shared CSC resources.
+namespace: ""
+
+# -- The site-specific name used for handling configurable CSCs
+siteTag: ""
+
+# -- This section holds secret specifications.
+# Each object listed can have the following attributes defined:
+# _name_ (The name used by pods to access the secret)
+# _key_ (The key in the vault store where the secret resides)
+# _type_ (OPTIONAL: The secret type. Defaults to Opaque.)
+secrets: []
diff --git a/charts/prompt-proto-service/Chart.yaml b/charts/prompt-proto-service/Chart.yaml
index 5234d5ddc3..fe2ce5f751 100644
--- a/charts/prompt-proto-service/Chart.yaml
+++ b/charts/prompt-proto-service/Chart.yaml
@@ -4,11 +4,14 @@ version: 1.0.0
 appVersion: "0.1.0"
 description: Event-driven processing of camera images
 type: application
-home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst
+home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst
 sources:
-  - https://github.com/lsst-dm/prompt_prototype
+  - https://github.com/lsst-dm/prompt_processing
 annotations:
   phalanx.lsst.io/docs: |
     - id: "DMTN-219"
       title: "Proposal and Prototype for Prompt Processing"
      url: "https://dmtn-219.lsst.io/"
+    - id: "DMTN-260"
+      title: "Failure Modes and Error Handling for Prompt Processing"
+      url: "https://dmtn-260.lsst.io/"
diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md
index 8b00851b06..8730d06c0e 100644
--- a/charts/prompt-proto-service/README.md
+++ b/charts/prompt-proto-service/README.md
@@ -2,11 +2,11 @@
 
 Event-driven processing of camera images
 
-**Homepage:** <https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst>
+**Homepage:** <https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst>
 
 ## Source Code
 
-* <https://github.com/lsst-dm/prompt_prototype>
+* <https://github.com/lsst-dm/prompt_processing>
 
 ## Values
 
@@ -14,37 +14,31 @@ Event-driven processing of camera images
 |-----|------|---------|-------------|
 | additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s).
Any volumes required by other config options are automatically handled by the Helm chart. | | affinity | object | `{}` | | -| apdb.db | string | `""` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | apdb.namespace | string | `""` | Database namespace for the APDB | | apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| apdb.user | string | `""` | Database user for the APDB (deprecated for apdb.url) | | containerConcurrency | int | `1` | | | fullnameOverride | string | `"prompt-proto-service"` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | imagePullSecrets | list | `[]` | | | instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | | instrument.name | string | None, must be set | The "short" name of the instrument | -| instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | instrument.skymap | string | `""` | Skymap to use with the instrument | | knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. 
| Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | | | podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | | s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index 4b0a01bf7c..bd6e141981 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -54,41 +54,17 @@ spec: - name: IMAGE_TIMEOUT value: {{ .Values.imageNotifications.imageTimeout | quote }} - name: PGUSER - value: rubin + value: {{ .Values.instrument.calibRepoPguser }} - name: CALIB_REPO value: {{ .Values.instrument.calibRepo }} - name: LSST_DISABLE_BUCKET_VALIDATION value: {{ .Values.s3.disableBucketValidation | quote }} - - name: IP_APDB # TODO: remove on DM-40839 - # Need explicit port for make_pgpass.py - value: {{ .Values.apdb.ip }} - - name: DB_APDB # TODO: remove on DM-40839 - value: {{ .Values.apdb.db }} - - name: USER_APDB # TODO: remove on DM-40839 - value: {{ .Values.apdb.user }} - name: URL_APDB value: {{ .Values.apdb.url }} - name: NAMESPACE_APDB value: {{ .Values.apdb.namespace }} - - name: IP_REGISTRY # TODO: remove on DM-40839 - # Need explicit port for make_pgpass.py - value: {{ .Values.registry.ip }} - - name: DB_REGISTRY # TODO: remove on DM-40839 - value: {{ .Values.registry.db }} - - name: USER_REGISTRY # TODO: remove on DM-40839 - value: {{ .Values.registry.user }} - name: KAFKA_CLUSTER value: {{ .Values.imageNotifications.kafkaClusterAddress }} - - name: PSQL_REGISTRY_PASS # TODO: remove on DM-40839 - valueFrom: - secretKeyRef: - name: {{ template "prompt-proto-service.fullname" . }}-secret - key: registry_password - - name: PSQL_APDB_PASS # TODO: remove on DM-40839 - valueFrom: - secretKeyRef: - name: {{ template "prompt-proto-service.fullname" . 
}}-secret - key: apdb_password - name: S3_ENDPOINT_URL value: {{ .Values.s3.endpointUrl }} {{- if .Values.s3.auth_env }} diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 808b1f4f85..96fc9cf489 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -13,7 +13,7 @@ podAnnotations: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent @@ -25,7 +25,7 @@ instrument: # @default -- None, must be set name: "" # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -55,37 +55,21 @@ imageNotifications: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). - imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: "" # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: "" # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: "" registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: "" # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: "" # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). -# @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. +# @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/ct.yaml b/ct.yaml deleted file mode 100644 index 794eff7ca4..0000000000 --- a/ct.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Configuration for helm/cert-testing, run via chart-testing-action in -# GitHub Actions. https://github.com/helm/chart-testing#configuration -# -# Intended to be run after tests/expand-services, which generates the -# services-expanded directory of charts for every combination of -# environment and base chart. 
- -chart-dirs: - - "applications-expanded" - - "environments" -check-version-increment: false -validate-maintainers: false -validate-chart-schema: false diff --git a/docs/_rst_epilog.rst b/docs/_rst_epilog.rst index c795acdce4..05b41ccd02 100644 --- a/docs/_rst_epilog.rst +++ b/docs/_rst_epilog.rst @@ -34,7 +34,7 @@ .. _Pod: https://kubernetes.io/docs/concepts/workloads/pods/ .. _pre-commit: https://pre-commit.com .. _Roundtable: https://roundtable.lsst.io/ -.. _Ruff: https://beta.ruff.rs/docs/ +.. _Ruff: https://docs.astral.sh/ruff/ .. _Safir: https://safir.lsst.io/ .. _Secret: https://kubernetes.io/docs/concepts/configuration/secret/ .. _semantic versioning: https://semver.org/ diff --git a/docs/about/contributing-docs.rst b/docs/about/contributing-docs.rst index 7edac8fb5f..f326f261e8 100644 --- a/docs/about/contributing-docs.rst +++ b/docs/about/contributing-docs.rst @@ -19,7 +19,7 @@ Use the tox_ ``docs`` environment for compiling the documentation: .. code-block:: bash - tox -e docs + tox run -e docs The built documentation is located in the ``docs/_build/html`` directory. @@ -36,7 +36,7 @@ Links in the documentation are validated in the GitHub Actions workflow, but you .. code-block:: bash - tox -e docs-linkcheck + tox run -e docs-linkcheck Submitting a pull request and sharing documentation drafts ========================================================== diff --git a/docs/about/local-environment-setup.rst b/docs/about/local-environment-setup.rst index 7b566b282e..144bbce630 100644 --- a/docs/about/local-environment-setup.rst +++ b/docs/about/local-environment-setup.rst @@ -23,7 +23,7 @@ You will likely need to make changes to Phalanx and create pull requests, so you Members of the `lsst-sqre/phalanx`_ repository on GitHub can clone the repository directly and create a ticket branch, per the `Data Management workflow guide`_. -Otherwise, fork lsst-sqre/phalanx `following GitHub's guide `__. +Otherwise, fork lsst-sqre/phalanx `following GitHub's guide `__. .. _about-venv: @@ -79,9 +79,7 @@ Install helm Some Phalanx commands require Helm (v3 or later) to be available on your PATH. Any version of Helm after v3 should be okay. -You therefore must have it installed on your PATH. - -See the `Helm installation guide `__ for more details. +See the `Helm installation guide `__ for installation instructions. If you don't want to (or don't have access to) install helm globally on your system, you can put the binary in the :file:`bin` directory of the virtual environment you created in :ref:`about-venv`. diff --git a/docs/about/repository.rst b/docs/about/repository.rst index 251aafc696..216795a1a6 100644 --- a/docs/about/repository.rst +++ b/docs/about/repository.rst @@ -2,7 +2,7 @@ Phalanx Git repository structure ################################ -Phalanx is an open source Git repository hosted on `GitHub `__. +Phalanx is an open source Git repository hosted at https://github.com/lsst-sqre/phalanx. This page provides an overview of this repository's structure, for both application developers and environment administrators alike. For background on Phalanx and its technologies, see :doc:`introduction` first. @@ -59,11 +59,26 @@ installer directory :bdg-link-primary-line:`Browse installer/ on GitHub ` This directory contains a script named `install.sh `__. -The arguments to this are the name of the environment, the FQDN, and the read key for Vault (see :ref:`secrets` for more details on Vault). 
+The arguments to this are the name of the environment, the Vault RoleID, and the Vault SecretID (see :ref:`secrets` for more details on Vault).
 This installer script is the entry point for setting up a new environment.
 It can also be run on an existing environment to update it.
 See the :ref:`environment bootstrapping documentation ` for details.
 
+charts directory
+----------------
+
+:bdg-link-primary-line:`Browse charts/ on GitHub `
+
+This directory contains Helm charts shared by multiple Phalanx applications that are not generally useful enough to warrant separate publication in a proper Helm chart repository.
+
+In some cases, several Phalanx applications should use common Helm templates to avoid duplication.
+The best way to do this within Helm is to use a subchart.
+This can be done by publishing a separate Helm chart in https://github.com/lsst-sqre/charts, but publication as a Helm chart implies that the chart may be useful outside of Phalanx.
+Sometimes these shared subcharts are merely artifacts of code organization and deduplication within Phalanx, and should not have an independent existence outside of Phalanx.
+In those cases, they're maintained in the :file:`charts` directory.
+
+See :doc:`/developers/shared-charts` for details.
+
 docs directory
 --------------
@@ -103,7 +118,7 @@ The default branch is ``main``.
 This default branch is considered the source of truth for fully synchronized Phalanx environments.
 Updates to Phalanx are introduced as pull requests on GitHub.
-Repository members create branches directly in the `GitHub lsst-sqre/phalanx repository `__ (see the `Data Management workflow guide`_)
+Repository members create branches directly in https://github.com/lsst-sqre/phalanx (see the `Data Management workflow guide`_).
 External collaborators should fork Phalanx and create pull requests.
 
 It is possible (particularly in non-production environments) to deploy applications from branches of Phalanx, which is useful for debugging new and updating applications before updating the ``main`` branch.
diff --git a/docs/admin/audit-secrets.rst b/docs/admin/audit-secrets.rst
index 7d61c4d2d7..1e73b5c8c1 100644
--- a/docs/admin/audit-secrets.rst
+++ b/docs/admin/audit-secrets.rst
@@ -9,6 +9,7 @@ To check that all of the necessary secrets for an environment named ``
 
 The ``VAULT_TOKEN`` environment variable must be set to the Vault write token for this environment (or a read token; this command will not make any changes).
+For SQuaRE-managed environments, you can get the write token from the ``Phalanx Vault write tokens`` item in the SQuaRE 1Password vault.
 
 The output of the command will be a report of any inconsistencies or problems found in the Vault secrets for this environment.
 No output indicates no problems.
diff --git a/docs/admin/index.rst b/docs/admin/index.rst
index 96326b4fc9..c5f6ae4d70 100644
--- a/docs/admin/index.rst
+++ b/docs/admin/index.rst
@@ -36,7 +36,9 @@ Administrators operate infrastructure, manage secrets, and are involved in the d
    :caption: Infrastructure
    :maxdepth: 2
 
+   infrastructure/google/index
    infrastructure/filestore/index
+   infrastructure/kubernetes-node-status-max-images
 
 ..
toctree:: :caption: Reference diff --git a/docs/admin/infrastructure/google/credentials.rst b/docs/admin/infrastructure/google/credentials.rst new file mode 100644 index 0000000000..e8cda7125b --- /dev/null +++ b/docs/admin/infrastructure/google/credentials.rst @@ -0,0 +1,34 @@ +################################## +Getting GKE Kubernetes credentials +################################## + +To use the standard Kubernetes administrative command :command:`kubectl` or other commands built on the same protocol (such as Helm_ or the Phalanx installer), you must have authentication credentials stored for the target Kubernetes cluster. +Google provides a mechanism to obtain those credentials using the :command:`gcloud` command: + +#. Ensure you have a Google account with access to the Google Cloud Platform project where your target Kubernetes cluster is running. + For Phalanx environments run by SQuaRE, this access must be via an ``lsst.cloud`` Google account that is used only for Rubin activities. + If you do not already have such an account or permissions and need administrative access to a Phalanx environment maintained by SQuaRE, contact SQuaRE for access. + +#. `Install gcloud `__ on the system on which you want to run privileged Kubernetes commands. + +#. `Initialize gcloud `__. + You will need to have access to the Google Cloud Platform project where your target Kubernetes cluster is running. + + If you have access to multiple Google Cloud Platform projects, you will be asked to select one as your default project. + You may wish to choose the project for the Phalanx environment you use most often. + You can find the project ID of a Phalanx project hosted on GKE in its :doc:`environments page `. + +#. `Install kubectl and the GKE auth plugin `__. + As part of that installation, you will run the :command:`gcloud` command that obtains credentials usable by :command:`kubectl` and other privileged Kubernetes commands. + +The final step has an example :command:`gcloud` command, but it assumes that you are getting credentials for your default project. +Rubin uses multiple Google Cloud Platform projects for different environments, so you may have to provide the project ID as well. +For the full command to run, see the bottom of the relevant :doc:`environments page `. + +Once you have followed this process on a system, the credentials will remain valid unless the Kubernetes control plane credentials are rotated. + +.. note:: + + The Kubernetes control plane credentials eventually expire and have to periodically be rotated. + If the control plane credentials of the Kubernetes cluster are rotated, you will have to re-run the :command:`gcloud` command to refresh your credentials. + If you discover that your credentials are no longer working, try that command and see if the problem persists. diff --git a/docs/admin/infrastructure/google/index.rst b/docs/admin/infrastructure/google/index.rst new file mode 100644 index 0000000000..8555e0e5b5 --- /dev/null +++ b/docs/admin/infrastructure/google/index.rst @@ -0,0 +1,13 @@ +############################## +Using Google Kubernetes Engine +############################## + +Google Kubernetes Engine (GKE) is the Google Cloud Platform (GCP) implementation of Kubernetes. +It is an excellent hosting platform for Phalanx environments. + +This page collects advice and supplemental documentation for Phalanx administrators of environments hosted on GKE. + +.. 
toctree::
+
+   credentials
+   terraform
diff --git a/docs/admin/infrastructure/google/terraform.rst b/docs/admin/infrastructure/google/terraform.rst
new file mode 100644
index 0000000000..71a68c9283
--- /dev/null
+++ b/docs/admin/infrastructure/google/terraform.rst
@@ -0,0 +1,9 @@
+#####################################
+Managing GCP resources with Terraform
+#####################################
+
+All SQuaRE-managed Google Cloud Platform projects use Terraform to manage all GCP resources outside of Kubernetes.
+These include Cloud SQL databases used by Phalanx applications, Google Firestore for UID and GID assignment, service accounts used with workload identity for authenticated access to Google services, and so forth.
+
+The Terraform configuration for all SQuaRE-managed projects and most other Rubin Observatory GCP projects is maintained in https://github.com/lsst/idf_deploy.
+Changes to this repository are automatically applied to the relevant Google Cloud Platform project when the pull request has been reviewed and merged.
diff --git a/docs/admin/infrastructure/kubernetes-node-status-max-images.rst b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst
new file mode 100644
index 0000000000..c6e77fbbd8
--- /dev/null
+++ b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst
@@ -0,0 +1,50 @@
+##########################################################
+Kubernetes kubelet nodeStatusMaxImages setting for Nublado
+##########################################################
+
+The image prepuller in the :px-app:`nublado` application requires Kubernetes to keep track of a number of images and ensure each of those images is present on every node.
+This is required in order to provide a pleasant user experience, because the ``sciplat-lab`` images are large and typically take 3-5 minutes to pull and unpack when they are not already present on a node.
+The default Kubernetes settings can in some circumstances result in the :px-app:`nublado` application failing to display images in its spawner menu, as well as the image prepuller running continuously.
+
+Setting nodeStatusMaxImages
+===========================
+
+The solution, described here, is to set the ``nodeStatusMaxImages`` setting in the Kubernetes cluster's `kubelet config`_.
+
+.. _`kubelet config`: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/
+
+The recommended remediation is to disable each node's cap on ``nodeStatusMaxImages`` by setting its value to ``-1`` in the node's kubelet configuration file.
+Typically this file is found at ``/var/lib/kubelet/config``.
+However, your Kubernetes deployment may have relocated it, you may be using a drop-in configuration directory, or you may be managing it with some other automation tool.
+
+After editing the configuration file, you must then restart kubelet on each node.
+
+Background
+==========
+
+The fundamental problem is that the Kubernetes setting ``nodeStatusMaxImages`` is set to 50 by default.
+The only way to retrieve a list of which images are present on a node is to query the node and look through its ``status.images`` information.
+
+In general, the Nublado prepulling strategy relies on the supposition that prepulled images are the freshest; that, eventually, people will stop using old images; and that, finally, when the disk pressure garbage collection threshold is exceeded, the images that have not been used in the longest time will be purged.
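+
+As a quick check of whether this cap is in play on a given cluster, the following sketch counts the images each node actually reports.
+It assumes :command:`kubectl` access to the cluster and an installed :command:`jq`, neither of which is otherwise required by this page.
+
+.. code-block:: bash
+
+   # Count the images each node reports in .status.images.
+   # A node pinned at exactly 50 suggests the default nodeStatusMaxImages cap.
+   for node in $(kubectl get nodes -o name); do
+       count=$(kubectl get "$node" -o json | jq '.status.images | length')
+       echo "$node reports $count images"
+   done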
+ +When the ephemeral storage is sized such that there is not room for very many ``sciplat-lab`` images (but enough space to hold at least the full set that should be prepulled), and when ``sciplat-lab`` is the most common image found on nodes, this generally just works with no further attention: the menu stays populated with the current images, and since they are prepulled, they spawn quickly when selected. +Disk pressure cleans up outdated images, and everything works as it should. + +However, if the node has a large amount of ephemeral storage, and/or there is much non-Lab application use on the node, this can cause a problem for the prepuller: it is entirely possible for images that are indeed present on the node to not be in the first fifty images in the image list, and therefore not to be found when the prepuller determines which images need prepulling. + +This has two consequences: first, the prepuller will constantly schedule prepulls of images it believes are missing, because even though the image is already resident on the node, the prepuller does not, and cannot, know that. +Second, these images, because the prepuller incorrectly believes they are not resident on all nodes, will not be visible in the JupyterHub spawner menu, although they will be available from the dropdown list. + +Fortunately, there is a simple fix: increase the kubelet ``nodeStatusMaxImages`` setting. The default value of 50 should either be increased to something large enough that it's implausible that that many images would fit into ephemeral storage, or set to ``-1`` to remove the cap entirely. While disabling the cap could, in theory, make node status extremely large (which is the reason the cap exists in the first place), in practice it has never proven problematic in a Phalanx deployment. Those deployments have had at most hundreds, rather than thousands or millions, of container images on any given node, so the size of the status document has always remained modest. + +Should you go the route of choosing a larger positive value for ``nodeStatusMaxImages``, a reasonable rule of thumb is to pick a number one-third of the size of each node's ephemeral storage in gigabytes. Thus, if you had a terabyte of ephemeral storage, a ``nodeStatusMaxImages`` of ``350`` would be a good starting guess. This value is also dependent on how broadly mixed your workload is, and how large the images for the other aspects of your workload are, which is why disabling the cap entirely is the initial recommendation. + +Pruning cached images +===================== + +If you cannot change the behavior of the Kubernetes node API, you may need to trim the node image cache so that the total number of images is under 50. +If you have direct administrative access to the Kubernetes node, you can do that with the following steps: + +#. Download `purge `__. + +#. Run it on each node, using an account allowed to use the Docker socket (thus, probably in group ``docker``). + You may want to run it with ``-x`` first to see what it's going to do. + If you want output during the actual run, run it with ``-v``. + +Unfortunately, this will only temporarily solve the problem, so you will either need to do this repeatedly or find a way to change the API configuration to return more cached images.
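+If you do control the kubelet configuration, a minimal sketch of the change described above (assuming the node's kubelet reads a YAML ``KubeletConfiguration`` file, for example at :file:`/var/lib/kubelet/config`):
+
+.. code-block:: yaml
+
+   apiVersion: kubelet.config.k8s.io/v1beta1
+   kind: KubeletConfiguration
+   # Disable the cap on images reported in node status (default is 50).
+   nodeStatusMaxImages: -1
+
+Remember to restart kubelet on each node after making this change.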
diff --git a/docs/admin/installation.rst index 23f5cf01d2..c84a256e21 100644 --- a/docs/admin/installation.rst +++ b/docs/admin/installation.rst @@ -8,6 +8,9 @@ An environment has a hostname, Vault server and path to its secrets, and a set o Before starting this process, you should set up the required secrets for your new environment. See :doc:`secrets-setup` for details. +If you are setting up an environment that will be running a 1Password Connect server for itself, you will need to take special bootstrapping steps. +See :px-app-bootstrap:`onepassword-connect` for more information. + Creating an environment ======================= @@ -20,7 +23,9 @@ To create a new Phalanx environment, take the following steps: #. Create a new :file:`values-{environment}.yaml` file in `environments `__. Start with a template copied from an existing environment that's similar to the new environment. Edit it so that ``name``, ``fqdn``, ``vaultUrl``, and ``vaultPathPrefix`` at the top match your new environment. + You may omit ``vaultUrl`` for SQuaRE-managed environments. See :doc:`secrets-setup` for more information about the latter two settings and additional settings you may need. + If the environment will be hosted on Google Kubernetes Engine, also fill out ``gcp.projectId``, ``gcp.region``, and ``gcp.clusterName`` with metadata about where the environment will run. Enable the applications this environment should include. #. Decide on your approach to TLS certificates. @@ -41,8 +46,8 @@ To create a new Phalanx environment, take the following steps: The following applications have special bootstrapping considerations: - :px-app-bootstrap:`argocd` - - :px-app-bootstrap:`cachemachine` - :px-app-bootstrap:`gafaelfawr` + - :px-app-bootstrap:`nublado` - :px-app-bootstrap:`portal` - :px-app-bootstrap:`squareone` @@ -55,19 +60,25 @@ Installing Phalanx Once you have defined a Phalanx environment, follow these steps to install it. These can be run repeatedly to reinstall Phalanx over an existing deployment. -.. warning:: +#. Create a Vault AppRole that will be used by Vault Secrets Operator. - The installer has not been updated to work with the new secrets management system yet, and the way it initializes Vault Secrets Operator is incorrect for the new system and will not work. - This is currently being worked on, but in the meantime you will have to make changes to the installation script to use :command:`phalanx vault create-read-approle --as-secret vault-credentials` and skip the attempt to create a Vault read token secret obtained from 1Password. - Hopefully this will be fixed shortly. + .. prompt:: bash -.. rst-class:: open + phalanx vault create-read-approle <environment> -#. Create a virtual environment with the tools you will need from the installer's `requirements.txt `__. + Be aware that this will invalidate any existing AppRole for that environment. #. Run the installer script at `installer/install.sh `__. + + .. prompt:: bash + + installer/install.sh <environment> <vault-role-id> <vault-secret-id> + + ``<vault-role-id>`` and ``<vault-secret-id>`` are the Role ID and Secret ID of the Vault AppRole created in the previous step. + Debug any problems. The most common sources of problems are errors or missing configuration in the :file:`values-{environment}.yaml` files you created for each application. + You can safely run the installer repeatedly as you debug and fix issues. #. If the installation is using a dynamically-assigned IP address, while the installer is running, wait until the ingress-nginx-controller service comes up and has an external IP address.
Then, set the A record for your endpoint to that address (or set an A record with that IP address for the ingress and a CNAME from the endpoint to the A record). diff --git a/docs/admin/migrating-secrets.rst b/docs/admin/migrating-secrets.rst index 027f76c657..8ad7029bf7 100644 --- a/docs/admin/migrating-secrets.rst +++ b/docs/admin/migrating-secrets.rst @@ -5,7 +5,8 @@ Migrating to the new secrets management system We introduced a new command-line-driven secrets management system for Phalanx environments in September of 2023. This page documents how to migrate to the new system from the older scripts in :file:`installer`. -These instructions assume that, if you are using 1Password for static secrets, you have already set up a 1Password vault and corresponding :px-app:`1Password Connect server ` for this environment, but that vault may be empty. +These instructions assume that, if you are using 1Password for static secrets, you have already set up a 1Password vault and enabled the :px-app:`1Password Connect server ` for this environment. +If you have not yet done this, see :doc:`/applications/onepassword-connect/add-new-environment`. In all :command:`phalanx` commands listed below, replace ```` with the short identifier of your environment. @@ -51,7 +52,7 @@ The new secret management system uses Vault AppRoles instead, which are the reco If you are using some other Vault server with its own path conventions, you can skip this step, although it is easier to do the migration if you can set up the new secrets in a new Vault path without having to change the old Vault path. #. Set the ``VAULT_TOKEN`` environment variable to a token with access to create new AppRoles and tokens and to list token accessors and secret IDs. - If you are using the SQuaRE Vault server, use the admin token. + If you are using the SQuaRE Vault server, use the admin token from the ``Phalanx Vault admin credentials`` 1Password item in the SQuaRE 1Password vault. This environment variable will be used for multiple following commands. You will be told when you can clear it again. @@ -84,7 +85,7 @@ The new secret management system uses Vault AppRoles instead, which are the reco The new token will be printed to standard output along with some metadata about it. - For SQuaRE-managed environments, save that token in the ``SQuaRE`` 1Password vault (**not** the vault for the RSP environment) in the item named ``RSP Vault write tokens``. + For SQuaRE-managed environments, save that token in the ``SQuaRE`` 1Password vault (**not** the vault for the RSP environment) in the item named ``Phalanx Vault write tokens``. Add a key for the short environment identifier and set the value to the newly-created write token. Don't forget to mark it as a password using the icon on the right. Then, add a key under the :guilabel:`Accessors` heading for the environment and set the value to the token accessor. @@ -100,6 +101,7 @@ The new secret management system uses Vault AppRoles instead, which are the reco phalanx vault audit This command will print diagnostics if it finds any problems. + You will still need ``VAULT_TOKEN`` set to a privileged token to run this command. Update secrets ============== @@ -155,7 +157,7 @@ Update secrets Replace ```` with the value of ``vaultPathPrefix`` in :file:`environments/values-{environment}.yaml` for your environment. #. If you are using 1Password as the source for static secrets, set ``OP_CONNECT_TOKEN`` to the 1Password Connect token for this environment. 
- For SQuaRE-managed environments, this can be found in the :guilabel:`RSP 1Password tokens` item in the :guilabel:`SQuaRE` 1Password vault. + For SQuaRE-managed environments, this can be found in the ``RSP 1Password tokens`` item in the SQuaRE 1Password vault. #. Check what secrets are missing or incorrect and fix them. @@ -193,6 +195,16 @@ Switch to the new secrets tree If you are using a static secrets file, add the ``--secrets`` flag pointing to that file. This will fix any secrets that are missing or incorrect in Vault. +#. Some Phalanx applications need to know whether the old or new secrets layout is in use. + On your working branch, add the necessary settings for those applications to their :file:`values-{environment}.yaml` files for your environment. + Applications to review: + + - :px-app:`datalinker` (``config.separateSecrets``) + - :px-app:`nublado` (``secrets.templateSecrets``) + - :px-app:`obsloctap` (``config.separateSecrets``) + - :px-app:`plot-navigator` (``config.separateSecrets``) + - :px-app:`production-tools` (``config.separateSecrets``) + #. You're now ready to test the new secrets tree. You can do this on a branch that contains the changes you made above. diff --git a/docs/admin/requirements.rst b/docs/admin/requirements.rst index ac8dbb9325..4324e9cade 100644 --- a/docs/admin/requirements.rst +++ b/docs/admin/requirements.rst @@ -7,7 +7,7 @@ In order to install a Phalanx environment, the following prerequisites must be i Deployment environment ====================== -Phalanx can only be installed in environments that meet the following reuqirements: +Phalanx can only be installed in environments that meet the following requirements: - Phalanx is a Kubernetes deployment platform that installs within a Kubernetes cluster. The oldest version of Kubernetes known to work is 1.23. diff --git a/docs/admin/secrets-setup.rst b/docs/admin/secrets-setup.rst index 3124c345f6..34d147a4f0 100644 --- a/docs/admin/secrets-setup.rst +++ b/docs/admin/secrets-setup.rst @@ -7,6 +7,9 @@ Phalanx does, however, come with tools to manage one specific approach to using This document explains the basic structure of how secrets must be stored in Vault, describes the tools for managing that structure, and describes the optional tools for managing Vault authentication credentials and paths for one specific Vault design. +If you are setting up an environment that will be running a 1Password Connect server for itself, you will need to take special bootstrapping steps. +See :px-app-bootstrap:`onepassword-connect` for more information. + .. note:: We are in the middle of a migration from an old secrets management system that sometimes used multiple secrets per application and sometimes pointed multiple applications at the same secret, to a new system that always uses one secret per application. @@ -29,7 +32,7 @@ The name of each secret other than ``pull-secret`` matches the name of the appli So, for example, all secrets for Gafaelfawr for a given environment may be stored as key/value pairs in the secret named :samp:`secrets/phalanx/{environment}/gafaelfawr`. This path is configured for each environment via the ``vaultPathPrefix`` setting in the environment :file:`values-{environment}.yaml` file. -The URL to the Vault server is set via the ``vaultUrl`` setting in the same file. +The URL to the Vault server is set via the ``vaultUrl`` setting in the same file and defaults to the SQuaRE-run Vault server. 
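+As an illustration, the relevant settings at the top of a hypothetical :file:`environments/values-{environment}.yaml` might look like this (all values here are made up):
+
+.. code-block:: yaml
+
+   name: example
+   fqdn: example.example.org
+   # vaultUrl may be omitted for SQuaRE-managed environments, in which
+   # case it defaults to the SQuaRE-run Vault server.
+   vaultUrl: "https://vault.example.org"
+   vaultPathPrefix: secrets/phalanx/example
+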
Vault credentials ================= @@ -40,12 +43,6 @@ This approach is being replaced with a `Vault AppRole`_ that has read access to .. _Vault AppRole: https://developer.hashicorp.com/vault/docs/auth/approle -.. warning:: - - The current Phalanx installer only supports Vault read tokens, not Vault AppRoles. - Support for Vault AppRoles will be added in the future. - In the meantime, the Vault bootstrapping process in `install.sh `__ will need to be modified when installing environments that use Vault AppRoles. - Phalanx does not strictly require either of those approaches; any authentication approach that `Vault Secrets Operator`_ supports may be used as long as :px-app:`vault-secrets-operator` is configured accordingly for that environment. However, the standard installation process only supports AppRoles, and tooling is provided to manage those roles. @@ -78,6 +75,7 @@ This normally requires a Vault admin or provisioner token or some equivalent. The output includes the new Vault token, which you should save somewhere secure where you store other secrets. (The running Phalanx environment does not need and should not have access to this token.) You will later set the environment variable ``VAULT_TOKEN`` to this token when running other :command:`phalanx` commands. + For SQuaRE-managed environments, always update the ``Phalanx Vault write tokens`` 1Password item in the SQuaRE 1Password vault after running this command. :samp:`phalanx vault audit {environment}` Check the authentication credentials created by the previous two commands in the given environment for any misconfiguration. @@ -139,6 +137,9 @@ Static secrets from 1Password Static secrets may be stored in a 1Password vault. In this case, each application with static secrets should have an entry in this 1Password vault. +The 1Password vault must be served by a 1Password Connect server so that the Phalanx tooling can access the secrets. +See :px-app:`onepassword-connect` for more details on how this is done. + Application secrets ^^^^^^^^^^^^^^^^^^^ @@ -173,13 +174,11 @@ This will be transformed into a Vault entry in the correct format for generating Configuring 1Password support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In :file:`values-{environment}.yaml` for your environment, in the Phalanx :file:`environments` directory, add the setting ``onePasswordConnectServer``, setting it to the URL of the `1Password Connect`_ server for that 1Password vault. - -When running :command:`phalanx secrets` to sync or audit secrets, you will need to set ``OP_CONNECT_TOKEN`` to a read token for that 1Password Connect server. +For an environment to use 1Password as a static secrets source, there must be a 1Password Connect server that serves the secrets for that environment from a 1Password vault. +See :doc:`/applications/onepassword-connect/add-new-environment` for details on how to enable a new 1Password Connect server for your environment using Phalanx. -Phalanx can manage your 1Password Connect server as well, but it should run in a separate cluster than the environment that it provides secrets for. -SQuaRE-run environments use 1Password Connect servers running in the Roundtable clusters. -See :px-app:`onepassword-connect-dev` for details on how to set up a new 1Password Connect server using Phalanx. +When running :command:`phalanx secrets` to sync or audit secrets, you will need to set ``OP_CONNECT_TOKEN`` to the read token for that 1Password Connect server. 
+For SQuaRE-run environments, you can get that secret from the 1Password item ``RSP 1Password tokens`` in the SQuaRE 1Password vault. Static secrets from Vault ------------------------- diff --git a/docs/admin/sync-secrets.rst b/docs/admin/sync-secrets.rst index c8546b4a1c..e22badb784 100644 --- a/docs/admin/sync-secrets.rst +++ b/docs/admin/sync-secrets.rst @@ -12,7 +12,10 @@ To populate Vault with all of the necessary secrets for an environment named ``< phalanx secrets sync The ``VAULT_TOKEN`` environment variable must be set to the Vault write token for this environment. +For SQuaRE-managed environments, you can get the write token from the ``Phalanx Vault write tokens`` item in the SQuaRE 1Password vault. + Add the ``--secrets`` command-line option or set ``OP_CONNECT_TOKEN`` if needed for your choice of a :ref:`static secrets source `. +For SQuaRE-managed deployments, the 1Password token for ``OP_CONNECT_TOKEN`` comes from the ``Phalanx 1Password tokens`` item in the SQuaRE 1Password vault. This must be done before installing a Phalanx environment for the first time. It can then be run again whenever the secrets for that environment change. diff --git a/docs/admin/troubleshooting.rst b/docs/admin/troubleshooting.rst index 1ee51aedeb..5383f06074 100644 --- a/docs/admin/troubleshooting.rst +++ b/docs/admin/troubleshooting.rst @@ -18,38 +18,14 @@ When this happens, you may need to recreate the persistent volume. **Solution:** :ref:`recreate-postgres-pvc` -Spawner menu missing images, cachemachine stuck pulling the same image -====================================================================== +Spawner menu missing images, Nublado stuck pulling the same image +================================================================= **Symptoms:** When a user goes to the spawner page for the Notebook Aspect, the expected menu of images is not available. -Instead, the menu is either empty or missing the right number of images of different classes. -The cachemachine application is continuously creating a ``DaemonSet`` for the same image without apparent forward progress. -Querying the cachemachine ``/available`` API shows either nothing in ``images`` or not everything that was expected. +Instead, the menu is missing one or more images. +The same image or set of images is pulled again on each prepull pass that the Nublado controller attempts. -**Cause:** Cachemachine is responsible for generating the menu used for spawning new JupyterLab instances. -The list of available images is pulled from the list of images that are already cached on every non-cordoned node to ensure that spawning will be quick. -If the desired types of images are not present on each node, cachemachine will create a ``DaemonSet`` for that image to attempt to start a pod using that image on every node, which will cache it. -If this fails to change the reported images available on each node, it will keep retrying. - -The most common cause of this problem is a Kubernetes limitation. -By default, the Kubernetes list node API only returns the "first" (which usually means oldest) 50 cached images. -If more than 50 images are cached, images may go missing from that list even though they are cached, leading cachemachine to think they aren't cached and omitting them from the spawner menu. - -**Solution:** :doc:`/applications/cachemachine/pruning` - -If this doesn't work, another possibility is that there is a node that cachemachine thinks is available for JupyterLab images but which is not eligible for its ``DaemonSet``.
-This would be a bug in cachemachine, which should ignore cordoned nodes, but it's possible there is a new iteration of node state or a new rule for where ``DaemonSets`` are allowed to run that it does not know about. - -Spawning a notebook fails with a pending error -============================================== - -**Symptoms:** When a user tries to spawn a new notebook, the spawn fails with an error saying that the user's lab is already pending spawn or is pending deletion. - -**Cause:** If the spawning of the lab fails or if the deletion of a lab fails, sometimes JupyterHub can give up on making further progress but still remember that the lab is supposedly still running. -In this case, JupyterHub may not recover without assistance. -You may need to delete the record for the affected user, and also make sure the user's lab namespace (visible in Argo CD under the ``nublado-users`` application) has been deleted. - -**Solution:** :ref:`nublado2-clear-session-database` +**Solution:** :doc:`infrastructure/kubernetes-node-status-max-images` User gets permission denied from applications ============================================= @@ -76,16 +52,6 @@ If you need to do something that spans users or should create root-owned files, **Solution:** :doc:`infrastructure/filestore/privileged-access` -User pods don't spawn, reporting "permission denied" from Moneypenny -==================================================================== - -**Symptoms:** A user pod fails to spawn, and the error message says that Moneypenny did not have permission to execute. - -**Cause:** The ``gafaelfawr-token`` VaultSecret in the ``nublado2`` namespace is out of date. -This happened because the ``gafaelfawr-redis`` pod restarted and either it lacked persistent storage (at the T&S sites, as of July 2022), or because that storage had been lost. - -**Solution:** :doc:`/applications/gafaelfawr/recreate-token` - Login fails with "bad verification code" error ============================================== diff --git a/docs/applications/_summary.rst.jinja b/docs/applications/_summary.rst.jinja index 96ba49b3d8..5fcced1f2f 100644 --- a/docs/applications/_summary.rst.jinja +++ b/docs/applications/_summary.rst.jinja @@ -3,45 +3,44 @@ * - View on GitHub - :bdg-link-primary-line:`applications/{{ app.name }} ` :bdg-link-primary-line:`Application template ` - {% if app.homepage %} + {%- if app.homepage %} * - Homepage - {{ app.homepage }} - {% endif %} - {% if app.source_urls %} + {%- endif %} + {%- if app.source_urls %} * - Source - {% if app.source_urls|length == 1 %} + {%- if app.source_urls|length == 1 %} - {{ app.source_urls[0] }} - {% else %} + {%- else %} - - {{ app.source_urls[0] }} - {% for source_url in app.source_urls[1:] %} + {%- for source_url in app.source_urls[1:] %} - {{ source_url }} - {% endfor %} - {% endif %} - {% endif %} - {% if app.doc_links %} + {%- endfor %} + {%- endif %} + {%- endif %} + {%- if app.doc_links %} * - Related docs - {% if app.doc_links|length == 1 %} + {%- if app.doc_links|length == 1 %} - {{ app.doc_links[0].to_rst() }} - {% else %} + {%- else %} - - {{ app.doc_links[0].to_rst() }} - {% for doc_link in app.doc_links[1:] %} + {%- for doc_link in app.doc_links[1:] %} - {{ doc_link.to_rst() }} - {% endfor %} - {% endif %} - {% endif %} + {%- endfor %} + {%- endif %} + {%- endif %} * - Type - Helm_ * - Namespace - {{ app.namespace }} * - Environments - .. 
list-table:: - {% for env_name in app.active_environments %} * - :px-env:`{{ env_name }}` - `values `__ - {% if envs[env_name].argocd_url %} + {%- if envs[env_name].argocd_url %} - `Argo CD <{{ envs[env_name].argocd_url }}/applications/{{ app.name }}>`__ - {% else %} + {%- else %} - - {% endif %} - {% endfor %} + {%- endif %} + {%- endfor %} diff --git a/docs/applications/auxtel/index.rst b/docs/applications/auxtel/index.rst new file mode 100644 index 0000000000..44fe7c6b0a --- /dev/null +++ b/docs/applications/auxtel/index.rst @@ -0,0 +1,18 @@ +.. px-app:: auxtel + +###################################################### +auxtel — Auxiliary Telescope Control System Components +###################################################### + +The auxtel application houses the CSCs associated with the Auxiliary Telescope. Simulation environments use simulators for all CSCs except the ATAOS, ATDomeTrajectory, ATHeaderService, ATOODS and ATPtg. Those environments also contain a simulator for the low-level controller of the ATHexapod. + +.. jinja:: auxtel + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/auxtel/values.md b/docs/applications/auxtel/values.md new file mode 100644 index 0000000000..f038df7882 --- /dev/null +++ b/docs/applications/auxtel/values.md @@ -0,0 +1,12 @@ +```{px-app-values} auxtel +``` + +# AuxTel Helm values reference + +Helm values reference table for the {px-app}`auxtel` application. + +```{include} ../../../applications/auxtel/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/butler/index.rst b/docs/applications/butler/index.rst new file mode 100644 index 0000000000..d050a3b5f0 --- /dev/null +++ b/docs/applications/butler/index.rst @@ -0,0 +1,23 @@ +.. px-app:: butler + +################################################### +butler — Server for Butler data abstraction service +################################################### + +The Butler server provides a web service for querying the LSST data release +products and retrieving the associated images and data files. It is intended +to become the primary backend for the `Butler python library +`_ for community science use cases. + +This service is in early development and currently considered experimental. + +.. jinja:: butler + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/butler/values.md b/docs/applications/butler/values.md new file mode 100644 index 0000000000..ba64489d92 --- /dev/null +++ b/docs/applications/butler/values.md @@ -0,0 +1,12 @@ +```{px-app-values} butler +``` + +# butler Helm values reference + +Helm values reference table for the {px-app}`butler` application. + +```{include} ../../../applications/butler/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/cachemachine/bootstrap.rst b/docs/applications/cachemachine/bootstrap.rst deleted file mode 100644 index 1807462ebd..0000000000 --- a/docs/applications/cachemachine/bootstrap.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. px-app-bootstrap:: cachemachine - -########################## -Bootstrapping cachemachine -########################## - -By default, cachemachine doesn't do any prepulling and doesn't provide a useful menu for Notebook Aspect spawning. -As part of bootstrapping a new environment, you will want to configure it to prepull appropriate images. 
- -For deployments on Google Kubernetes Engine, you will want to use Google Artifact Repository (GAR) as the source of images. -See :doc:`gar` for basic information and instructions on how to configure workload identity. - -For Telescope and Site deployments that need special images and image cycle configuration, start from the `summit configuration `__. -Consult with Telescope and Site to determine the correct recommended tag and cycle number. - -For other deployments that use the normal Rubin Notebook Aspect images, a reasonable starting configuration for cachemachine is: - -.. code-block:: yaml - - autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } - -This prepulls the latest release, the latest two weeklies, and the latest three dailies, as well as the image tagged ``recommended``. diff --git a/docs/applications/cachemachine/gar.rst b/docs/applications/cachemachine/gar.rst deleted file mode 100644 index 5ba3ad269c..0000000000 --- a/docs/applications/cachemachine/gar.rst +++ /dev/null @@ -1,62 +0,0 @@ -################################################ -Google Cloud Artifact Registry (GAR) integration -################################################ - -Cachemachine optionally supports using the Google Cloud Artifact Registry (GAR) API to list images rather than the Docker API. - -This allows workload identity credentials to be used instead of Docker credentials when the images are stored in GAR. -Docker client authentication with GAR is cumbersome because a JSON token is used for authentication, and that token contains special characters that make it difficult to pass between multiple secret engine layers. - -Using the GAR API directly also avoids the need to build a cache of hashes to resolve tags to images. -The Docker API returns a list of images with a single tag, which requires constructing a cache of known hashes to determine which tags are alternate names for images that have already been seen. -The GAR API returns a list of images with all tags for that image, avoiding this problem. - -Container Image Streaming -========================= - -`Container Image Streaming `__ is used by cachemachine to decrease the time for the image pull time. -It's also used when an image isn't cached, which makes it practical to use uncached images. -With normal Docker image retrieval, using an uncached image can result in a five-minute wait and an almost-certain timeout. - -The ``sciplatlab`` images are 4GB. -Image pull time for those images decreased from 4 minutes to 30 seconds using image streaming. - -Image streaming is per project by enabling the ``containerfilesystem.googleapis.com`` API. -This was enabled via Terraform for the Interim Data Facility environments. - -Workload Identity -================= - -`Workload Identity `__ is used by Cachemachine to authenticate to the GAR API. -Workload Identity allows Kubernetes service accounts to impersonate Google Cloud Platform (GCP) Service Accounts to authenticate to GCP services. -Workload Identity is enabled on all of the Rubin Science Platform (RSP) Google Kuberentes Engine (GKE) Clusters. - -The binding between the Kubernetes and the GCP service account is done through IAM permissions deployed via Terraform. 
-The following Kubernetes annotation must be added to the Kubernetes ``ServiceAccount`` object as deployed via Phalanx to bind that service account to the GCP service account. - -.. code-block:: yaml - - serviceAccount: - annotations: { - iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-dev-7696.iam.gserviceaccount.com - } - -To troubleshoot or validate Workload Identity, a test pod can be provisioned using `these instructions `__. - -Validating operations -===================== - -To validate cachemachine is running, check the status page at ``https://data-dev.lsst.cloud/cachemachine/jupyter``. -(Replace ``data-dev`` with the appropriate environment.) -Check the ``common_cache`` key for cached images, and see if ``images_to_cache`` is blank or only showing new images that are in the process of being downloaded. - -Future work -=========== - -- Cachemachine and Nublado both default to configuring an image pull secret when spawning pods. - This value is not used by GAR. - In GKE, the nodes default to using the built-in service account to pull images. - This means we can drop the ``pull-secret`` secret and its configuration when GAR is in use. - -- Image streaming is currently a per-region setting. - If GKE clustes are deployed outside of ``us-central1`` in the future, a GAR repository should be created for that region to stream images. diff --git a/docs/applications/cachemachine/index.rst b/docs/applications/cachemachine/index.rst deleted file mode 100644 index 72c228b3f4..0000000000 --- a/docs/applications/cachemachine/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. px-app:: cachemachine - -######################################### -cachemachine — JupyterLab image prepuller -######################################### - -The Docker images used for lab pods run by the Notebook Aspect are quite large, since they contain the full Rubin Observatory software stack. -If the image is not already cached on a Kubernetes node, starting a lab pod can take as long as five minutes and may exceed the timeout allowed by JupyterHub. - -Cachemachine is an image prepulling service designed to avoid this problem by ensuring every node in the Science Platform Kubernetes cluster has the most frequently used lab images cached. -It is also responsible for reporting the available images to :doc:`Nublado <../nublado2/index>`, used to generate the menu of images when the user creates a new lab pod. - -.. jinja:: cachemachine - :file: applications/_summary.rst.jinja - -Guides -====== - -.. toctree:: - - bootstrap - pruning - gar - values diff --git a/docs/applications/cachemachine/pruning.rst b/docs/applications/cachemachine/pruning.rst deleted file mode 100644 index b7f2829f81..0000000000 --- a/docs/applications/cachemachine/pruning.rst +++ /dev/null @@ -1,18 +0,0 @@ -############# -Image pruning -############# - -If the list of cached images on nodes gets excessively long, Kubernetes may stop updating its list of cached images. -The usual symptom is that the Notebook Aspect spawner menu of available images will be empty or missing expected images. - -This is a limitation of the Kubernetes node API. -By default, `only 50 images on a node will be shown `__. -You can work around this, if you control the Kubernetes installation, by adding ``--node-status-max-images=-1`` on the kubelet command line, or by setting ``nodeStatusMaxImages`` to ``-1`` in the kubelet configuration file. - -If you cannot change that setting, you will need to trim the node image cache so that the total number of images is under 50. 
- -#. Download `purge `__. - -#. Run it on each node, using an account allowed to use the Docker socket (thus, probably in group ``docker``). - You may want to run it with ``-x`` first to see what it's going to do. - If you want output during the actual run, run it with ``-v``. diff --git a/docs/applications/cachemachine/values.md b/docs/applications/cachemachine/values.md deleted file mode 100644 index f15bba3c17..0000000000 --- a/docs/applications/cachemachine/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} cachemachine -``` - -# Cachemachine Helm values reference - -Helm values reference table for the {px-app}`cachemachine` application. - -```{include} ../../../applications/cachemachine/README.md ---- -start-after: "## Values" ---- -``` diff --git a/docs/applications/calsys/index.rst b/docs/applications/calsys/index.rst new file mode 100644 index 0000000000..7a2c26ff97 --- /dev/null +++ b/docs/applications/calsys/index.rst @@ -0,0 +1,18 @@ +.. px-app:: calsys + +###################################################### +calsys — Calibration Systems Control System Components +###################################################### + +The calsys application houses CSCs associated with calibration systems across both the Simonyi Survey Telescope and the Auxiliary Telescope. It also contains the simulation generic camera systems (GenericCamera:1 and GCHeaderService:1). Simulation environments currently do not have any systems besides the aforementioned simulation generic camera. + +.. jinja:: calsys + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/calsys/values.md b/docs/applications/calsys/values.md new file mode 100644 index 0000000000..c7f9cb8582 --- /dev/null +++ b/docs/applications/calsys/values.md @@ -0,0 +1,12 @@ +```{px-app-values} calsys +``` + +# CalSys Helm values reference + +Helm values reference table for the {px-app}`calsys` application. + +```{include} ../../../applications/calsys/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/control-system-test/index.rst b/docs/applications/control-system-test/index.rst new file mode 100644 index 0000000000..0afb4d752a --- /dev/null +++ b/docs/applications/control-system-test/index.rst @@ -0,0 +1,18 @@ +.. px-app:: control-system-test + +################################################################### +control-system-test — Systems for Testing Control System Components +################################################################### + +The control-system-test application houses a CSC (Test:42) and the control system integration testing infrastructure. These systems are meant for testing the control system for cycle upgrades and other potentially disruptive software changes. + +.. jinja:: control-system-test + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/control-system-test/values.md b/docs/applications/control-system-test/values.md new file mode 100644 index 0000000000..ae16e7ad84 --- /dev/null +++ b/docs/applications/control-system-test/values.md @@ -0,0 +1,12 @@ +```{px-app-values} control-system-test +``` + +# Control-System-Test Helm values reference + +Helm values reference table for the {px-app}`control-system-test` application.
+ +```{include} ../../../applications/control-system-test/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/eas/index.rst b/docs/applications/eas/index.rst new file mode 100644 index 0000000000..b8558ffc68 --- /dev/null +++ b/docs/applications/eas/index.rst @@ -0,0 +1,18 @@ +.. px-app:: eas + +############################################################## +eas — Environmental Awareness System Control System Components +############################################################## + +The eas application houses CSCs associated with the Environmental Awareness System. Simulation environments use simulators for all CSCs except the WeatherForecast CSC. + +.. jinja:: eas + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/eas/values.md b/docs/applications/eas/values.md new file mode 100644 index 0000000000..bc1a032bec --- /dev/null +++ b/docs/applications/eas/values.md @@ -0,0 +1,12 @@ +```{px-app-values} eas +``` + +# EAS Helm values reference + +Helm values reference table for the {px-app}`eas` application. + +```{include} ../../../applications/eas/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/filestore-backup/index.rst b/docs/applications/filestore-backup/index.rst new file mode 100644 index 0000000000..b34d94924b --- /dev/null +++ b/docs/applications/filestore-backup/index.rst @@ -0,0 +1,18 @@ +.. px-app:: filestore-backup + +############################################################## +filestore-backup — Create and purge Google Filestore backups +############################################################## + +The filestore-backup application manages backing up Google Filestore shares and purging old backups. + +.. jinja:: filestore-backup + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/filestore-backup/values.md b/docs/applications/filestore-backup/values.md new file mode 100644 index 0000000000..5c9df838a4 --- /dev/null +++ b/docs/applications/filestore-backup/values.md @@ -0,0 +1,12 @@ +```{px-app-values} filestore-backup +``` + +# Filestore-backup Helm values reference + +Helm values reference table for the {px-app}`filestore-backup` application. + +```{include} ../../../applications/filestore-backup/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/gafaelfawr/recreate-token.rst b/docs/applications/gafaelfawr/recreate-token.rst index 21da258661..bc7a5de2e1 100644 --- a/docs/applications/gafaelfawr/recreate-token.rst +++ b/docs/applications/gafaelfawr/recreate-token.rst @@ -6,18 +6,17 @@ Where possible, we use persistent storage for Gafaelfawr's Redis database so tha However, if that persistent storage is deleted for some reason, or if Gafaelfawr is not configured to use persistent storage, all tokens will be invalidated. When this happens, depending on the order of restart, the ``gafaelfawr-tokens`` pod that is responsible for maintaining service tokens in the cluster may take up to 30 minutes to realize those tokens are no longer valid. -This will primarily affect the Notebook Aspect, which will be unable to authenticate to moneypenny and thus will not be able to spawn pods. -The result will be a "permission denied" error from moneypenny.
+This will primarily affect the Notebook Aspect, which will be unable to authenticate to the Nublado controller and thus will not be able to spawn pods. -Gafaelfawr will automatically fix this problem after 30 minutes, but unfortunately the JupyterHub component of ``nublado2`` currently loads its token on startup and doesn't pick up changes. +Gafaelfawr will automatically fix this problem after 30 minutes, but unfortunately the JupyterHub component of ``nublado`` currently loads its token on startup and doesn't pick up changes. The easiest way to fix this problem is to force revalidation of all of the Gafaelfawr service tokens. To do that: #. Force a restart of the ``gafaelfawr-tokens`` deployment in the ``gafaelfawr`` namespace. - This will recreate the secret in ``nublado2``. + This will recreate any token secrets that are not valid. -#. Force a restart of the ``hub`` deployment in ``nublado2``. +#. Force a restart of the ``hub`` deployment in ``nublado``. This will restart the hub with the new, correct token. Be aware that when the Redis storage is wiped, all user tokens will also be invalidated. diff --git a/docs/applications/index.rst b/docs/applications/index.rst index e307a23c4a..5a3d024fb6 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -23,19 +23,19 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde :maxdepth: 1 :caption: Rubin Science Platform - cachemachine/index + butler/index datalinker/index + filestore-backup/index hips/index linters/index livetap/index mobu/index - moneypenny/index noteburst/index nublado/index - nublado2/index portal/index semaphore/index sherlock/index + siav2/index sqlproxy-cross-project/index squareone/index ssotap/index @@ -50,12 +50,15 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde argo-workflows/index alert-stream-broker/index exposurelog/index + jira-data-proxy/index narrativelog/index obsloctap/index plot-navigator/index production-tools/index rubintv/index sasquatch/index + schedview-prenight/index + schedview-snapshot/index strimzi/index strimzi-access-operator/index telegraf/index @@ -68,7 +71,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde giftless/index kubernetes-replicator/index monitoring/index - onepassword-connect-dev/index + onepassword-connect/index ook/index squarebot/index @@ -81,3 +84,16 @@ prompt-proto-service-latiss/index prompt-proto-service-lsstcam/index prompt-proto-service-lsstcomcam/index + +.. toctree:: + :maxdepth: 1 + :caption: Rubin Observatory Control System + + auxtel/index + calsys/index + control-system-test/index + eas/index + love/index + obssys/index + simonyitel/index + uws/index diff --git a/docs/applications/jira-data-proxy/index.rst b/docs/applications/jira-data-proxy/index.rst new file mode 100644 index 0000000000..5228342a03 --- /dev/null +++ b/docs/applications/jira-data-proxy/index.rst @@ -0,0 +1,20 @@ +.. px-app:: jira-data-proxy + +########################################################### +jira-data-proxy — Jira API read-only proxy for Times Square +########################################################### + +jira-data-proxy provides read-only access to the Rubin Jira API. +This app is built for Times Square so that notebooks can access the Jira API without external credentials. +It implements only GET endpoints. + +..
jinja:: jira-data-proxy + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/jira-data-proxy/values.md b/docs/applications/jira-data-proxy/values.md new file mode 100644 index 0000000000..dfedf06ddc --- /dev/null +++ b/docs/applications/jira-data-proxy/values.md @@ -0,0 +1,12 @@ +```{px-app-values} jira-data-proxy +``` + +# jira-data-proxy Helm values reference + +Helm values reference table for the {px-app}`jira-data-proxy` application. + +```{include} ../../../applications/jira-data-proxy/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/livetap/index.rst b/docs/applications/livetap/index.rst index ce1e83c8b6..1e9da2a7e5 100644 --- a/docs/applications/livetap/index.rst +++ b/docs/applications/livetap/index.rst @@ -5,11 +5,11 @@ livetap — IVOA livetap Table Access Protocol ############################################ LIVETAP (Live Obscore Table Access Protocol) is an IVOA_ service that provides access to the live obscore table which is hosted on postgres. -On the Rubin Science Platform, it is provided by `tap-postgres `__, which is derived from the `CADC TAP service `__. +On the Rubin Science Platform, it is provided by https://github.com/lsst-sqre/tap-postgres, which is derived from the `CADC TAP service `__. This service provides access to the ObsCore tables that are created and served by the butler and updated live. The TAP data itself, apart from schema queries, comes from Postgres. -The TAP schema is provided by images built from the `sdm_schemas `__ repository. +The TAP schema is provided by images built from https://github.com/lsst/sdm_schemas. .. jinja:: tap :file: applications/_summary.rst.jinja diff --git a/docs/applications/love/index.rst b/docs/applications/love/index.rst new file mode 100644 index 0000000000..1041b545c8 --- /dev/null +++ b/docs/applications/love/index.rst @@ -0,0 +1,18 @@ +.. px-app:: love + +############################################### +love — LSST Observers Visualization Environment +############################################### + +The love application houses all of the systems that make up the LSST Observers Visualization Environment. It consists of a visualization front-end, a set of managers to coordinate information exchange, producers that gather the topic traffic from each CSC, a commander to allow control of CSCs, and various support applications. + +.. jinja:: love + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/love/values.md b/docs/applications/love/values.md new file mode 100644 index 0000000000..84ad08c4a3 --- /dev/null +++ b/docs/applications/love/values.md @@ -0,0 +1,12 @@ +```{px-app-values} love +``` + +# LOVE Helm values reference + +Helm values reference table for the {px-app}`love` application. + +```{include} ../../../applications/love/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/moneypenny/index.rst b/docs/applications/moneypenny/index.rst deleted file mode 100644 index fcc7b01682..0000000000 --- a/docs/applications/moneypenny/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. px-app:: moneypenny - -############################## -moneypenny — User provisioning -############################## - -Moneypenny is responsible for provisioning new users of the Notebook Aspect of a Science Platform installation.
-It is invoked by :px-app:`nublado2` whenever a user pod is spawned and decides whether provisioning is required. -If so, it does so before the lab spawn, usually by spawning a privileged pod. - -A typical example of the type of provisioning it does is creating the user's home directory, with appropriate ownership and permissions, in an NFS file store. - -.. jinja:: moneypenny - :file: applications/_summary.rst.jinja - -Guides -====== - -.. toctree:: - :maxdepth: 1 - - values diff --git a/docs/applications/moneypenny/values.md b/docs/applications/moneypenny/values.md deleted file mode 100644 index 91608bc501..0000000000 --- a/docs/applications/moneypenny/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} moneypenny -``` - -# moneypenny Helm values reference - -Helm values reference table for the {px-app}`moneypenny` application. - -```{include} ../../../applications/moneypenny/README.md ---- -start-after: "## Values" ---- -``` diff --git a/docs/applications/nublado/bootstrap.rst b/docs/applications/nublado/bootstrap.rst index f5fb068164..f09b520353 100644 --- a/docs/applications/nublado/bootstrap.rst +++ b/docs/applications/nublado/bootstrap.rst @@ -4,12 +4,33 @@ Bootstrapping Nublado ##################### -The JupyterLab Controller needs to know where the NFS server that provides persistent space (e.g. home directories, scratch, datasets) can be found. Ensure the correct definitions are in place in the configuration. +For details on how to write the Nublado configuration, see the `Nublado administrator documentation `__. + +GKE deployments +=============== + +When deploying Nublado on Google Kubernetes Engine, using Google Artifact Registry as the image source is strongly recommended. +This will result in better image selection menus, allow use of container streaming for faster start-up times, and avoid the need to maintain a pull secret. + +For setup instructions for using GAR with Nublado, see `Set up Google Artifact Registry in the Nublado documentation `__. +For more details about the benefits of using GAR, see the `relevant Nublado documentation page `__. Telescope and Site deployments ============================== -For Telescope and Site deployments that require instrument control, make sure you have any Multus network definitions you need in the ``values-.yaml``. +Image cycles +------------ + +Telescope and Site deployments have to limit the available images to only images that implement the current XML API. +This is done with a cycle restriction on which images are eligible for spawning. +Failing to set the cycle correctly can cause serious issues with the instrument control plane. + +For details on how to configure the cycle, see `image cycles in the Nublado documentation `__. + +Networking +---------- + +For Telescope and Site deployments that require instrument control, make sure you have any Multus network definitions you need in the :file:`values-{environment}.yaml`. This will look something like: .. code-block:: yaml diff --git a/docs/applications/nublado/index.rst b/docs/applications/nublado/index.rst index 580649eb8e..3c80a30934 100644 --- a/docs/applications/nublado/index.rst +++ b/docs/applications/nublado/index.rst @@ -1,12 +1,14 @@ .. 
px-app:: nublado -############################################ +####################################### nublado — JupyterHub/JupyterLab for RSP -############################################ +####################################### -The ``nublado`` service is an installation of a Rubin Observatory flavor of `Zero to JupyterHub `__ with some additional resources. This is currently the third version of ``nublado``. -The JupyterHub component provides the Notebook Aspect of the Rubin Science Platform, but replaces the KubeSpawner with a REST client to the JupyterLab Controller. -The JupyterLab Controller component not only provides user lab pod management, but also subsumes the functions formerly provided by the ``cachemachine`` and ``moneypenny`` applications. That is, in addition to creating and destroying user pods and namespaces, it handles filesystem provisioning for users, and manages prepulls of cached images to local nodes. +The ``nublado`` application provides a JupyterHub and JupyterLab interface for Rubin Science Platform users. +It also deploys a Kubernetes controller that, besides creating user lab pods, prepulls lab images and can provide per-user WebDAV file servers. + +The JupyterHub component and its proxy are deployed via `Zero to JupyterHub `__ with a custom configuration. +Alongside it, the Nublado controller is deployed by the same application as a separate FastAPI service. .. jinja:: nublado :file: applications/_summary.rst.jinja diff --git a/docs/applications/nublado/troubleshoot.rst b/docs/applications/nublado/troubleshoot.rst index eeae4ddf3a..88b940740d 100644 --- a/docs/applications/nublado/troubleshoot.rst +++ b/docs/applications/nublado/troubleshoot.rst @@ -1,34 +1,32 @@ .. px-app-troubleshooting:: nublado ####################### -Troubleshooting nublado +Troubleshooting Nublado ####################### -.. _nublado-clear-session-database: - -Clear session database entry -============================ +Check image prepulling status +============================= -Sometimes JupyterHub and its session database will get into an inconsistent state where it thinks a pod is already running but cannot shut it down. -The typical symptom of this is that spawns for that user fail with an error saying that the user's lab is already pending spawn or pending deletion, but the user cannot connect to their pod. +Nublado will attempt to prepull all configured images to each node that it believes is allowed to run Nublado lab images. +To see the status of that prepulling, go to the ``/nublado/spawner/v1/prepulls`` route of the relevant environment. -Recovery may require manually clearing the user's entry in the session database as follows: +In the resulting JSON document, ``config`` shows the current operative configuration, ``images`` shows the prepull status of the various images, and ``nodes`` shows the prepull status by node. -#. Remove the user's lab namespace, if it exists. - -#. Remove the user from the session database. - First, connect to the database: +.. _nublado-clear-session-database: - .. code-block:: shell +Clear session database entry +============================ - pod=$(kubectl get pods -n postgres | grep postgres | awk '{print $1}') - kubectl exec -it -n postgres ${pod} -- psql -U jovyan jupyterhub +Historically, we sometimes saw JupyterHub get into an inconsistent state where it thought a pod was already running and couldn't be shut down.
+We haven't seen this problem since switching to the Nublado controller, but it may still be possible for the JupyterHub session database to get out of sync. - Then, at the PostgreSQL prompt: +If JupyterHub keeps telling a user that their lab is already spawning or shutting down, but doesn't allow them to connect to the lab or shut it down, following the instructions on `deleting a user session `__ may fix the problem. - .. code-block:: sql +If it does, investigate how JupyterHub was able to get stuck. +This indicates some sort of bug in Nublado. - delete from users where name='' +Prepuller is running continuously and/or expected menu items are missing +======================================================================== -In some cases you may also need to remove the user from the spawner table. -To do this, run ``select * from spawners`` and find the pod with the user's name in it, and then delete that row. +The kubelet configuration setting ``nodeStatusMaxImages`` should be increased or disabled. +See :doc:`/admin/infrastructure/kubernetes-node-status-max-images`. diff --git a/docs/applications/nublado/updating-recommended.rst b/docs/applications/nublado/updating-recommended.rst index 09c6da77af..eb6eb8c829 100644 --- a/docs/applications/nublado/updating-recommended.rst +++ b/docs/applications/nublado/updating-recommended.rst @@ -12,7 +12,7 @@ Tagging a new container version When a new version has been approved (after passing through its prior QA and sign-off gates), the ``recommended`` tag must be updated to point to the new version. -To do this, run the GitHub retag workflow for the `sciplat-lab `__ repository, as follows: +To do this, run the GitHub retag workflow for the https://github.com/lsst-sqre/sciplat-lab repository, as follows: #. Go to `the retag workflow page `__. #. Click :guilabel:`Run workflow`. @@ -38,7 +38,7 @@ If you do not find it, then that environment is currently using ``recommended`` Set this key (creating it if necessary) to whatever string represents the correct recommended-by-default image for that instance. For instance, for a Telescope and Site environment, this will likely look something like ``recommended_c0032``. -Create a pull request against `Phalanx `__ that updates the tag. +Create a pull request against https://github.com/lsst-sqre/phalanx that updates the tag. Once this change is merged, sync the nublado application (using Argo CD) in the affected environments. You do not have to wait for a maintenance window to do this, since the change is low risk, although it will result in a very brief outage for Notebook Aspect lab spawning while the JupyterLab Controller is restarted. diff --git a/docs/applications/nublado/upgrade.rst b/docs/applications/nublado/upgrade.rst index 504325c706..10c5d6c666 100644 --- a/docs/applications/nublado/upgrade.rst +++ b/docs/applications/nublado/upgrade.rst @@ -8,8 +8,9 @@ Most of the time, upgrading Nublado can be done simply by syncing the applicatio There will be a brief outage for spawning new pods, but users with existing pods should be able to continue working. Occasionally, new versions of JupyterHub will require a schema update. -We do not routinely enable automatic schema updates currently, so JupyterHub will refuse to start if a database schema update is required. -To enable schema updates, add: +Automatic schema updates are off by default, so JupyterHub will refuse to start if a database schema update is required.
diff --git a/docs/applications/nublado/upgrade.rst b/docs/applications/nublado/upgrade.rst index 504325c706..10c5d6c666 100644 --- a/docs/applications/nublado/upgrade.rst +++ b/docs/applications/nublado/upgrade.rst @@ -8,8 +8,9 @@ Most of the time, upgrading Nublado can be done simply by syncing the applicatio There will be a brief outage for spawning new pods, but users with existing pods should be able to continue working. Occasionally, new versions of JupyterHub will require a schema update. -We do not routinely enable automatic schema updates currently, so JupyterHub will refuse to start if a database schema update is required. -To enable schema updates, add: +Automatic schema updates are off by default, so JupyterHub will refuse to start if a database schema update is required. + +To enable schema updates, add the following to :file:`values-{environment}.yaml` for the ``nublado`` application: .. code-block:: yaml @@ -18,7 +19,10 @@ To enable schema updates, add: db: upgrade: true -(The ``jupyterhub`` and ``hub`` keys probably already exist in the ``values-.yaml`` file, so just add the ``db.upgrade`` setting in the correct spot.) -Then, JupyterHub will automatically upgrade its database when the new version starts. -You can then remove this configuration again if you're worried about automatic updates misbehaving later. -Alternatively, if there's a schema update, it's probably a pretty major upgrade to JupyterHub, and it may be a better idea to shut down the Hub, remove all user namespaces, and then connect to the database and drop all tables; when the Hub is restarted, the correct schema will be generated. Obviously this will boot all users from the running system, but that may be appropriate for major upgrades. +(The ``jupyterhub`` and ``hub`` keys probably already exist, so just add the ``db.upgrade`` setting in the correct spot.) +JupyterHub will then automatically upgrade its database when it is restarted running the new version. + +You can then remove this configuration afterwards if you're worried about applying a schema update without being aware that you're doing so. + +Alternatively, for major upgrades to JupyterHub, you can choose to start from an empty database. +To do this, follow the instructions in the `Nublado documentation on wiping the database `__. diff --git a/docs/applications/nublado2/bootstrap.rst b/docs/applications/nublado2/bootstrap.rst deleted file mode 100644 index 0401e7d390..0000000000 --- a/docs/applications/nublado2/bootstrap.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. px-app-bootstrap:: nublado2 - -##################### -Bootstrapping Nublado -##################### - -Nublado and :px-app:`moneypenny` need to know where the NFS server that provides user home space is. -Nublado also requires other persistent storage space. -Ensure the correct definitions are in place in their configuration. - -Telescope and Site deployments -============================== - -For Telescope and Site deployments that require instrument control, make sure you have any Multus network definitions you need in the ``values-.yaml``. -This will look something like: - -.. code-block:: yaml - - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/macvlan-conf" - initContainers: - - name: "multus-init" - image: "lsstit/ddsnet4u:latest" - securityContext: - privileged: true - -It's possible to list multiple Multus network names separated by commas in the annotation string. -Experimentally, it appears that the interfaces will appear in the order specified. - -The ``initContainers`` entry should be inserted verbatim. -It creates a privileged container that bridges user pods to the specified networks before releasing control to the user's lab. diff --git a/docs/applications/nublado2/index.rst b/docs/applications/nublado2/index.rst deleted file mode 100644 index b08ecb8e17..0000000000 --- a/docs/applications/nublado2/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. px-app:: nublado2 - -############################# -nublado2 — JupyterHub for RSP -############################# - -The ``nublado2`` service is an installation of a Rubin Observatory flavor of `Zero to JupyterHub `__ with some additional resources. -It provides the Notebook Aspect of the Rubin Science Platform. - -.. jinja:: nublado2 - :file: applications/_summary.rst.jinja - -Guides -====== - -.. 
toctree:: - :maxdepth: 2 - - bootstrap - upgrade - troubleshoot - values diff --git a/docs/applications/nublado2/troubleshoot.rst b/docs/applications/nublado2/troubleshoot.rst deleted file mode 100644 index c5223be9c1..0000000000 --- a/docs/applications/nublado2/troubleshoot.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. px-app-troubleshooting:: nublado2 - -######################## -Troubleshooting nublado2 -######################## - -.. _nublado2-clear-session-database: - -Clear session database entry -============================ - -Sometimes JupyterHub and its session database will get into an inconsistent state where it thinks a pod is already running but cannot shut it down. -The typical symptom of this is that spawns for that user fail with an error saying that the user's lab is already pending spawn or pending deletion, but the user cannot connect to their pod. - -Recovery may require manually clearing the user's entry in the session database as follows: - -#. Remove the user's lab namespace, if it exists. - -#. Remove the user from the session database. - First, connect to the database: - - .. code-block:: shell - - pod=$(kubectl get pods -n postgres | grep postgres | awk '{print $1}') - kubectl exec -it -n postgres ${pod} -- psql -U jovyan jupyterhub - - Then, at the PostgreSQL prompt: - - .. code-block:: sql - - delete from users where name='' - -In some cases you may also need to remove the user from the spawner table. -To do this, run ``select * from spawners`` and find the pod with the user's name in it, and then delete that row. diff --git a/docs/applications/nublado2/upgrade.rst b/docs/applications/nublado2/upgrade.rst deleted file mode 100644 index 9d24ba3bce..0000000000 --- a/docs/applications/nublado2/upgrade.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. px-app-upgrade:: nublado2 - -################# -Upgrading Nublado -################# - -Most of the time, upgrading Nublado can be done simply by syncing the application in Argo CD. -There will be a brief outage for spawning new pods, but users with existing pods should be able to continue working. - -Occasionally, new versions of JupyterHub will require a schema update. -We do not routinely enable automatic schema updates currently, so JupyterHub will refuse to start if a database schema update is required. -To enable schema updates, add: - -.. code-block:: yaml - - jupyterhub: - hub: - db: - upgrade: true - -(The ``jupyterhub`` and ``hub`` keys probably already exist in the ``values-.yaml`` file, so just add the ``db.upgrade`` setting in the correct spot.) -Then, JupyterHub will automatically upgrade its database when the new version starts. -You can then remove this configuration again if you're worried about automatic updates misbehaving later. diff --git a/docs/applications/nublado2/values.md b/docs/applications/nublado2/values.md deleted file mode 100644 index 5a1b65d74a..0000000000 --- a/docs/applications/nublado2/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} nublado2 -``` - -# nublado2 Helm values reference - -Helm values reference table for the {px-app}`nublado2` application. - -```{include} ../../../applications/nublado2/README.md ---- -start-after: "## Values" ---- -``` diff --git a/docs/applications/obssys/index.rst b/docs/applications/obssys/index.rst new file mode 100644 index 0000000000..f82f6cc578 --- /dev/null +++ b/docs/applications/obssys/index.rst @@ -0,0 +1,18 @@ +.. 
px-app:: obssys + +###################################################### +obssys — Observatory Systems Control System Components +###################################################### + +The obssys application houses the CSCs associated with high-level Observatory control and monitoring. All CSCs within this group use real applications (no simulators) across all environments. + +.. jinja:: obssys + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/obssys/values.md b/docs/applications/obssys/values.md new file mode 100644 index 0000000000..71ac5786bc --- /dev/null +++ b/docs/applications/obssys/values.md @@ -0,0 +1,12 @@ +```{px-app-values} obssys +``` + +# ObsSys Helm values reference + +Helm values reference table for the {px-app}`obssys` application. + +```{include} ../../../applications/obssys/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/onepassword-connect-dev/index.rst b/docs/applications/onepassword-connect-dev/index.rst deleted file mode 100644 index 4961a87c67..0000000000 --- a/docs/applications/onepassword-connect-dev/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. px-app:: onepassword-connect-dev - -#################################################### -onepassword-connect-dev — 1Password API server (dev) -#################################################### - -1Password Connect provides API access to a 1Password vault. -It is used to provide the API for Phalanx integration with 1Password as a source of static secrets. - -Each instance of the upstream 1Password Connect chart provides an API server for a single 1Password vault. -We want to use one vault per SQuaRE-managed Phalanx environment to ensure isolation of secrets between environments. -The Phalanx onepassword-connect applications therefore instantiate the upstream chart multiple times, one for each vault we are providing access to. - -Unfortunately, because dependencies and their aliases can't be conditional on :file:`values.yaml` settings, that means the set of 1Password Connect servers deployed on roundtable-dev have to be a separate application from the ones deployed on roundtable. -This application is the roundtable-dev set of 1Password Connect API servers. -These provide access to the vaults for development and test environments. - -.. jinja:: onepassword-connect-dev - :file: applications/_summary.rst.jinja - -Guides -====== - -.. toctree:: - :maxdepth: 1 - - values diff --git a/docs/applications/onepassword-connect-dev/values.md b/docs/applications/onepassword-connect-dev/values.md deleted file mode 100644 index fee0c6172a..0000000000 --- a/docs/applications/onepassword-connect-dev/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} onepassword-connect-dev -``` - -# onepassword-connect-dev Helm values reference - -Helm values reference table for the {px-app}`onepassword-connect-dev` application. 
- -```{include} ../../../applications/onepassword-connect-dev/README.md ---- start-after: "## Values" ---- ``` \ No newline at end of file diff --git a/docs/applications/onepassword-connect/add-new-connect-server.rst b/docs/applications/onepassword-connect/add-new-connect-server.rst new file mode 100644 index 0000000000..42ce34a191 --- /dev/null +++ b/docs/applications/onepassword-connect/add-new-connect-server.rst @@ -0,0 +1,94 @@ +################################## +Add a new 1Password Connect server +################################## + +This document describes how to set up a new 1Password Connect server to provide static secrets for one or more Phalanx environments. +See :ref:`admin-static-secrets` for more background. + +SQuaRE-run Phalanx environments already have 1Password Connect servers set up. +The one in the :px-env:`roundtable-dev` environment serves the vaults for development environments, and one in the :px-env:`roundtable-prod` environment serves the vaults for production environments. + +When following these instructions, you will be creating a new `Secrets Automation workflow `__. +You will need to have permissions to create that workflow for the vault for your environment. + +Create the workflow +=================== + +In the following steps, you will create a 1Password Secrets Automation workflow for the 1Password vault for your environment, and save the necessary secrets to another 1Password vault. + +#. Log on to the 1Password UI via a web browser. + +#. Click on :menuselection:`Integrations` in the right sidebar under **LSST IT**. + +#. Click on the :guilabel:`Directory` tab at the top of the screen. + +#. Under :guilabel:`Infrastructure Secrets Management` click on :guilabel:`Other`. + +#. Click on :guilabel:`Create a Connect server`. + +#. Under :guilabel:`Environment Name`, enter :samp:`RSP {environment}` where *environment* is the Phalanx environment in which this 1Password Connect server will be running (**not** the vaults that it will serve). + Then, click :guilabel:`Choose Vaults` and select the vaults that should be accessible through this 1Password Connect server. + Click :guilabel:`Add Environment` to continue. + +#. Next, 1Password wants you to create an access token for at least one environment. + This is the token that will be used by the Phalanx command-line tool to access secrets for that environment. + It will have access to one and only one 1Password vault. + + Under :guilabel:`Token Name`, enter the name of the environment the token should have access to. + Leave :guilabel:`Expires After` set to ``Never``. + Click :guilabel:`Choose Vaults` and choose the vault corresponding to that environment. + Click :guilabel:`Issue Token` to continue. + +#. Next to the credentials file, click :guilabel:`Save in 1Password`, change the title to :samp:`1Password Connect credentials ({environment})` (with *environment* set to the environment in which the 1Password Connect server will be running), select the ``SQuaRE`` vault, and click :guilabel:`Save`. + Then, next to the access token, click the clipboard icon to copy that token to the clipboard. + +#. Click :guilabel:`View Details` to continue. + Go back to home by clicking on the icon on the upper left. + +#. Go to the SQuaRE vault, find the item ``RSP 1Password tokens``, and edit it. + Add the token to that item as another key/value pair, where the key is the short name of the environment. + Mark the value as a password. + +#. 
Confirm that the new ``1Password Connect credentials`` item created two steps previously exists. + You will need this when creating the 1Password Connect server. + You can download it to your local system now if you wish. + +Create the Phalanx configuration +================================ + +In the following steps, you'll deploy the new 1Password Connect server. + +#. Download the file in the :samp:`1Password Connect credentials ({environment})` item in the SQuaRE vault. + It will be named :file:`1password-credentials.json`. + +#. Encode the contents of that file in base64. + + .. tab-set:: + + .. tab-item:: Linux + + .. prompt:: bash + + base64 -w0 < 1password-credentials.json; echo '' + + .. tab-item:: macOS + + .. prompt:: bash + + base64 -i 1password-credentials.json; echo '' + + This is the static secret required by the 1Password Connect server. + +#. If you are following this process, you are presumably using 1Password to manage your static secrets. + Go to the 1Password vault for the environment where the 1Password Connect server will be running. + Create a new application secret item for the application ``onepassword-connect`` (see :ref:`dev-add-onepassword` for more details), and add a key named ``op-session`` whose value is the base64-encoded 1Password credentials. + +#. Synchronize secrets for that environment following the instructions in :doc:`/admin/sync-secrets`. + +.. note:: + + That final step assumes that the 1Password Connect server for the environment where you're deploying a new 1Password Connect server is running elsewhere. + In some cases, such as for the SQuaRE :px-env:`roundtable-prod ` and :px-env:`roundtable-dev ` environments, the 1Password Connect server for that environment runs in the environment itself. + + In this case, you won't be able to use :command:`phalanx secrets sync` because the 1Password Connect server it wants to use is the one you're trying to install. + Instead, follow the :px-app-bootstrap:`bootstrapping instructions for onepassword-connect `. diff --git a/docs/applications/onepassword-connect/add-new-environment.rst b/docs/applications/onepassword-connect/add-new-environment.rst new file mode 100644 index 0000000000..6547bcfb97 --- /dev/null +++ b/docs/applications/onepassword-connect/add-new-environment.rst @@ -0,0 +1,76 @@ +############################################## +Enable 1Password Connect for a new environment +############################################## + +SQuaRE-managed Phalanx deployments keep their static secrets in 1Password. +This means that each Phalanx environment run by SQuaRE needs to have a corresponding 1Password vault, and a 1Password Connect server that provides access to that vault. +One 1Password Connect server can provide access to multiple vaults using multiple separate tokens, each of which is scoped to only one vault. + +SQuaRE runs two 1Password Connect servers, one in the :px-env:`roundtable-dev ` environment for development environments and one in the :px-env:`roundtable-prod ` environment for production environments. + +This document describes how to enable the 1Password Connect server to serve the vault for a new environment. + +.. note:: + + These instructions only apply to SQuaRE-managed Phalanx environments. + You can use them as a model for how to use 1Password as a static secrets source with a different 1Password account, but some modifications will be required. + +.. 
_onepassword-add-prerequisites: + +Prerequisites +============= + +Every environment must have a separate 1Password vault in the **LSST IT** 1Password account. +The vault for the environment should be named ``RSP <fqdn>`` where ``<fqdn>`` is the top-level FQDN for that environment. +(In hindsight the vaults should be named after the short environment names used in Phalanx, but sadly that's not what we did.) + +When following these instructions, you will be modifying a `Secrets Automation workflow `__. +You will need to have permissions to modify the workflow for the 1Password Connect server that will be serving your environment. + +Process ======== + +In the following steps, you'll change the permissions of the 1Password Connect server to add the new 1Password vault for your environment and create a new token with access to that vault. + +#. Log on to the 1Password UI via a web browser. + +#. Click on :menuselection:`Integrations` in the right sidebar under **LSST IT**. + +#. Click on the Secrets Management workflow for the 1Password Connect server that will be serving this environment. + +#. Next to :guilabel:`Vaults`, click on :guilabel:`Manage`. + Select the vault for the environment that you're adding, in addition to the existing vaults. + Click :guilabel:`Update Vaults`. + +#. Next to :guilabel:`Access Tokens`, click on :guilabel:`New Token`. + +#. Under :guilabel:`Environment Name`, enter the same name as the 1Password vault name for your environment. + Then, click :guilabel:`Choose Vaults` and select the corresponding vault (and only that one). + Click :guilabel:`Issue Token` to continue. + +#. Next to the access token, click on the clipboard icon to copy the token to the clipboard. + Then, click on :guilabel:`View Details` to continue. + +#. Go back to home by clicking on the icon on the upper left. + Go to the SQuaRE vault, find the ``RSP 1Password tokens`` item, and edit it. + Add the token to that item as another key/value pair, where the key is the short name of the environment. + Mark the value as a password. + +#. Modify :file:`environments/values-{environment}.yaml` to add the configuration for the 1Password Connect server: + + .. code-block:: yaml + + onepassword: + connectUrl: "https://roundtable-dev.lsst.cloud/1password" + vaultTitle: "RSP <fqdn>" + + The ``connectUrl`` will be either ``https://roundtable-dev.lsst.cloud/1password`` (development environments) or ``https://roundtable.lsst.cloud/1password`` (production environments) for SQuaRE-run environments. + ``vaultTitle`` should be set to the name of the 1Password vault for the environment (see :ref:`onepassword-add-prerequisites`). + +Next steps +========== + +You have now confirmed that 1Password is set up for your environment. + +- If you are migrating from the old secrets management system, perform the other steps now: :doc:`/admin/migrating-secrets` +- If you are setting up a new environment, start populating the 1Password vault with static secrets for the applications running in that environment: :doc:`/developers/update-a-onepassword-secret` diff --git a/docs/applications/onepassword-connect/bootstrap.rst b/docs/applications/onepassword-connect/bootstrap.rst new file mode 100644 index 0000000000..fa951dff6f --- /dev/null +++ b/docs/applications/onepassword-connect/bootstrap.rst @@ -0,0 +1,25 @@ +.. 
px-app-bootstrap:: onepassword-connect + +############################### +Bootstrapping 1Password Connect +############################### + +When :ref:`installing a new environment `, one of the steps is to :doc:`synchronize secrets for that environment `. +However, when 1Password is used as the source for static secrets, this requires a running 1Password Connect server and a token to connect to that server. +Bootstrapping an environment with this property therefore requires a different process to break this cycle. + +The recommended process of bootstrapping this type of environment is: + +#. In :file:`environments/values-{environment}.yaml`, enable only the minimum required applications plus ``onepassword-connect``. + Leave everything else disabled to start. + +#. Follow the normal secrets setup for the environment using :ref:`a YAML file for static secrets `. + Fill in the ``onepassword-connect`` secret with the base64-encoded credentials file obtained from :doc:`add-new-connect-server`. + +#. Install the environment using the :doc:`normal instructions `. + +#. Now that you have a running 1Password Connect server, take the secrets from your static secrets YAML file and :ref:`populate your 1Password vault with those secrets `. + +#. Set the ``OP_CONNECT_TOKEN`` environment variable to the token for this environment and :doc:`sync secrets again ` using 1Password (see the sketch after this list). + +#. Now, enable the rest of the applications you want to run in this environment and finish :doc:`secrets setup ` and :doc:`installation `.
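+
+A sketch of the ``OP_CONNECT_TOKEN`` step above (the token value and environment name are placeholders; :command:`phalanx secrets sync` is the command referenced in these instructions):
+
+.. code-block:: shell
+
+   # Point the Phalanx CLI at this environment's 1Password Connect token,
+   # then re-sync secrets from 1Password. Both values are placeholders.
+   export OP_CONNECT_TOKEN="<token for this environment>"
+   phalanx secrets sync <environment>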
diff --git a/docs/applications/onepassword-connect/index.rst b/docs/applications/onepassword-connect/index.rst new file mode 100644 index 0000000000..1caa0b1d6e --- /dev/null +++ b/docs/applications/onepassword-connect/index.rst @@ -0,0 +1,26 @@ +.. px-app:: onepassword-connect + +########################################## +onepassword-connect — 1Password API server +########################################## + +1Password Connect provides API access to a 1Password vault. +It is used to provide the API for Phalanx integration with 1Password as a source of static secrets. + +Each 1Password Connect server can serve multiple 1Password vaults. +For SQuaRE-managed environments, we run two 1Password Connect servers, one for development environments and one for production environments. +Each environment gets its own 1Password Connect token that can only see secrets in its own 1Password Connect vault. + +.. jinja:: onepassword-connect + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + bootstrap + add-new-environment + add-new-connect-server + values diff --git a/docs/applications/onepassword-connect/values.md b/docs/applications/onepassword-connect/values.md new file mode 100644 index 0000000000..459c0f97de --- /dev/null +++ b/docs/applications/onepassword-connect/values.md @@ -0,0 +1,12 @@ +```{px-app-values} onepassword-connect +``` + +# onepassword-connect Helm values reference + +Helm values reference table for the {px-app}`onepassword-connect` application. + +```{include} ../../../applications/onepassword-connect/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/portal/index.rst b/docs/applications/portal/index.rst index 31e2c7214b..b2f588decf 100644 --- a/docs/applications/portal/index.rst +++ b/docs/applications/portal/index.rst @@ -5,7 +5,7 @@ portal — Firefly-based RSP Portal ################################# The Portal Aspect of the Rubin Science Platform, powered by Firefly. -This provides a graphical user interface for astronomical data exploration and also provides a data viewer that can be used within the Notebook Aspect (:px-app:`nublado2`). +This provides a graphical user interface for astronomical data exploration and also provides a data viewer that can be used within the Notebook Aspect (:px-app:`nublado`). .. jinja:: portal :file: applications/_summary.rst.jinja diff --git a/docs/applications/schedview-prenight/index.rst b/docs/applications/schedview-prenight/index.rst new file mode 100644 index 0000000000..d0e5496f5a --- /dev/null +++ b/docs/applications/schedview-prenight/index.rst @@ -0,0 +1,21 @@ +.. px-app:: schedview-prenight + +#################################################################### +schedview-prenight — Run the schedview pre-night briefing dashboard. +#################################################################### + +schedview's pre-night dashboard is a web application for examination of +Rubin Observatory/LSST scheduler simulation data for one night. It is intended +to be used to understand what the scheduler is likely to do before a night of +observing, both to check for problems and to set expectations. + +.. jinja:: schedview-prenight + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/schedview-prenight/values.md b/docs/applications/schedview-prenight/values.md new file mode 100644 index 0000000000..cc7da75492 --- /dev/null +++ b/docs/applications/schedview-prenight/values.md @@ -0,0 +1,12 @@ +```{px-app-values} schedview-prenight +``` + +# schedview-prenight Helm values reference + +Helm values reference table for the {px-app}`schedview-prenight` application. + +```{include} ../../../applications/schedview-prenight/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/schedview-snapshot/index.rst b/docs/applications/schedview-snapshot/index.rst new file mode 100644 index 0000000000..79ed7a6bc1 --- /dev/null +++ b/docs/applications/schedview-snapshot/index.rst @@ -0,0 +1,20 @@ +.. px-app:: schedview-snapshot + +###################################################################### +schedview-snapshot — Dashboard for examination of scheduler snapshots. +###################################################################### + +schedview's snapshot dashboard is a web application for examination of +Rubin Observatory/LSST scheduler snapshots, as stored (for example) during +observing. + +.. jinja:: schedview-snapshot + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/schedview-snapshot/values.md b/docs/applications/schedview-snapshot/values.md new file mode 100644 index 0000000000..ebf761876b --- /dev/null +++ b/docs/applications/schedview-snapshot/values.md @@ -0,0 +1,12 @@ +```{px-app-values} schedview-snapshot +``` + +# schedview-snapshot Helm values reference + +Helm values reference table for the {px-app}`schedview-snapshot` application.
+ +```{include} ../../../applications/schedview-snapshot/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/semaphore/index.rst b/docs/applications/semaphore/index.rst index 438a37e47a..05755a6773 100644 --- a/docs/applications/semaphore/index.rst +++ b/docs/applications/semaphore/index.rst @@ -7,7 +7,7 @@ semaphore — User notification Semaphore is the user notification and messaging service for the Rubin Science Platform. UI applications like :px-app:`squareone` can display messages from Semaphore's API. -Edit broadcast messages for SQuaRE-managed environments at `lsst-sqre/rsp_broadcast `__. +Edit broadcast messages for SQuaRE-managed environments at https://github.com/lsst-sqre/rsp_broadcast. .. jinja:: semaphore :file: applications/_summary.rst.jinja diff --git a/docs/applications/siav2/index.rst b/docs/applications/siav2/index.rst new file mode 100644 index 0000000000..2621b3d49b --- /dev/null +++ b/docs/applications/siav2/index.rst @@ -0,0 +1,16 @@ +.. px-app:: siav2 + +###################################### +siav2 — Simple Image Access v2 service +###################################### + +.. jinja:: siav2 + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/siav2/values.md b/docs/applications/siav2/values.md new file mode 100644 index 0000000000..b5230c4985 --- /dev/null +++ b/docs/applications/siav2/values.md @@ -0,0 +1,12 @@ +```{px-app-values} siav2 +``` + +# siav2 Helm values reference + +Helm values reference table for the {px-app}`siav2` application. + +```{include} ../../../applications/siav2/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/simonyitel/index.rst b/docs/applications/simonyitel/index.rst new file mode 100644 index 0000000000..3bdbe4fc12 --- /dev/null +++ b/docs/applications/simonyitel/index.rst @@ -0,0 +1,18 @@ +.. px-app:: simonyitel + +######################################################## +simonyitel — Simonyi Telescope Control System Components +######################################################## + +The simonyitel application houses all the CSCs associated with the Simonyi Survey Telescope. Simulation environments use simulators except for the CCHeaderService, CCOODS, MTAOS, MTDomeTrajectory and MTPtg CSCs. + +.. jinja:: simonyitel + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/simonyitel/values.md b/docs/applications/simonyitel/values.md new file mode 100644 index 0000000000..6347b6bd5e --- /dev/null +++ b/docs/applications/simonyitel/values.md @@ -0,0 +1,12 @@ +```{px-app-values} simonyitel +``` + +# SimonyiTel Helm values reference + +Helm values reference table for the {px-app}`simonyitel` application. + +```{include} ../../../applications/simonyitel/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/ssotap/index.rst b/docs/applications/ssotap/index.rst index 9b4e1c2dac..a0e1262494 100644 --- a/docs/applications/ssotap/index.rst +++ b/docs/applications/ssotap/index.rst @@ -5,11 +5,11 @@ ssotap — IVOA DP03 Solar System Table Access Protocol ##################################################### SSOTAP (SSO Table Access Protocol) is an IVOA_ service that provides access to the ObsCore table which is hosted on postgres. 
-On the Rubin Science Platform, it is provided by `tap-postgres `__, which is derived from the `CADC TAP service `__. +On the Rubin Science Platform, it is provided by https://github.com/lsst-sqre/tap-postgres, which is derived from the `CADC TAP service `__. This service provides access to the Solar System tables that are created and served by the butler. The TAP data itself, apart from schema queries, comes from Postgres. -The TAP schema is provided by images built from the `sdm_schemas `__ repository. +The TAP schema is provided by images built from https://github.com/lsst/sdm_schemas. .. jinja:: tap :file: applications/_summary.rst.jinja diff --git a/docs/applications/tap/index.rst b/docs/applications/tap/index.rst index 73cd30d3b2..b289b88788 100644 --- a/docs/applications/tap/index.rst +++ b/docs/applications/tap/index.rst @@ -5,11 +5,11 @@ tap — IVOA Table Access Protocol ################################ TAP_ (Table Access Protocol) is an IVOA_ service that provides access to general table data, including astronomical catalogs. -On the Rubin Science Platform, it is provided by `lsst-tap-service `__, which is derived from the `CADC TAP service `__. +On the Rubin Science Platform, it is provided by https://github.com/lsst-sqre/lsst-tap-service, which is derived from the `CADC TAP service `__. The same service provides both TAP and ObsTAP_ schemas. The TAP data itself, apart from schema queries, comes from Qserv. -The TAP schema is provided by images built from the `sdm_schemas `__ repository. +The TAP schema is provided by images built from https://github.com/lsst/sdm_schemas. .. jinja:: tap :file: applications/_summary.rst.jinja diff --git a/docs/applications/uws/index.rst b/docs/applications/uws/index.rst new file mode 100644 index 0000000000..e23e1df5bb --- /dev/null +++ b/docs/applications/uws/index.rst @@ -0,0 +1,18 @@ +.. px-app:: uws + +####################################### +uws — Universal Worker Service for OCPS +####################################### + +The uws application houses services and CSCs associated with the Universal Worker System. The UWS consists of a server that accepts requests to run DM-specific jobs, such as calibrations. The application also contains the OCPS CSCs that are associated with each camera. Simulation environments do not use simulators for these CSCs. + +.. jinja:: uws + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/uws/values.md b/docs/applications/uws/values.md new file mode 100644 index 0000000000..a5ceb06fa2 --- /dev/null +++ b/docs/applications/uws/values.md @@ -0,0 +1,12 @@ +```{px-app-values} uws +``` + +# UWS Helm values reference + +Helm values reference table for the {px-app}`uws` application. + +```{include} ../../../applications/uws/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/developers/define-secrets.rst b/docs/developers/define-secrets.rst index f342d86ec1..45d1a530e7 100644 --- a/docs/developers/define-secrets.rst +++ b/docs/developers/define-secrets.rst @@ -215,9 +215,19 @@ Newlines will be converted to spaces when pasting the secret value. If newlines need to be preserved, be sure to mark the secret with ``onepassword.encoded`` set to ``true`` in :file:`secrets.yaml`, and then encode the secret in base64 before pasting it into 1Password. To encode the secret, save it to a file with the correct newlines, and then use a command such as: -.. prompt:: bash +.. 
tab-set:: - base64 -w0 < /path/to/secret; echo '' + .. tab-item:: Linux + + .. prompt:: bash + + base64 -w0 < /path/to/secret; echo '' + + .. tab-item:: macOS + + .. prompt:: bash + + base64 -i /path/to/secret; echo '' This will generate a base64-encoded version of the secret on one line, suitable for cutting and pasting into the 1Password field. diff --git a/docs/developers/index.rst b/docs/developers/index.rst index 991327cce5..bb8d0bbf7e 100644 --- a/docs/developers/index.rst +++ b/docs/developers/index.rst @@ -24,6 +24,7 @@ Individual applications are documented in the :doc:`/applications/index` section write-a-helm-chart add-external-chart + shared-charts define-secrets add-application diff --git a/docs/developers/shared-charts.rst b/docs/developers/shared-charts.rst new file mode 100644 index 0000000000..a0f4edd0f9 --- /dev/null +++ b/docs/developers/shared-charts.rst @@ -0,0 +1,65 @@ +###################################### +Sharing subcharts between applications +###################################### + +In some cases, you may want to instantiate multiple Phalanx applications from mostly the same Helm chart. +For example, Phalanx contains multiple TAP server applications (:px-app:`tap`, :px-app:`ssotap`, and :px-app:`livetap`) that are all deployments of the CADC TAP server. +The Helm template resources should be shared among those applications to avoid code duplication, unnecessary maintenance overhead, and unintentional inconsistencies. + +There are two options for how to handle cases like this: + +#. Publish a generic Helm chart for the underlying service using the `charts repository `__, and then use it like any other external chart. + See :doc:`add-external-chart` for more details on how to use an external chart within Phalanx. + +#. Use a shared chart within Phalanx. + This is more appropriate if the chart is only useful inside Phalanx and doesn't make sense to publish as a stand-alone Helm chart. + The shared chart is included as a subchart in each Phalanx application that needs roughly the same resources. + +This document describes the second choice. + +Writing the shared subchart +=========================== + +Shared subcharts go into the `charts directory `__. +Each subdirectory of that directory is a Helm chart, similar to the structure of the :file:`applications` directory. +Those Helm charts should follow our normal Phalanx chart conventions from :doc:`write-a-helm-chart`. +For example, the ``version`` field of every chart should be set to ``1.0.0``, since these charts will not be published and don't need version tracking. + +Usually, the easiest way to create a shared subchart is to start by writing a regular application chart for one instance of the application following the instructions in :doc:`write-a-helm-chart`. +Then, copy that application chart into a subdirectory in the :file:`charts` directory, remove all the parts that don't make sense to share between applications, and add any additional :file:`values.yaml` settings that will be required to customize the instantiation of this chart for different applications. + +Shared charts do not have :file:`values-{environment}.yaml` files and are not aware of Phalanx environments. +Any per-environment settings must be handled in the parent charts that use this subchart and passed down as regular :file:`values.yaml` overrides.
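+
+For example, a parent application could forward an environment-specific setting to a shared subchart named ``cadc-tap`` like this (a sketch; the file name and the ``replicas`` setting are illustrative, and Helm passes values nested under a subchart's name down to that subchart):
+
+.. code-block:: yaml
+   :caption: applications/tap/values-idfdev.yaml (hypothetical)
+
+   # Values under the subchart's name are forwarded to the shared
+   # cadc-tap subchart.
+   cadc-tap:
+     replicas: 2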
+ +Shared charts do not have :file:`secrets.yaml` files. +All application secrets must be defined by the application charts in the :file:`applications` directory. +This may mean there is some duplication of secrets between applications. +This is intentional; often, one application should be the owner of those secrets and other applications should use ``copy`` directives to use the same secret value. + +Any documentation URLs such as ``home``, ``sources``, and ``phalanx.lsst.io/docs`` annotations in the shared chart will be ignored. +They can be included in the shared chart for reference, but each application will need to copy that information into its own :file:`Chart.yaml` file for it to show up in the generated Phalanx documentation. + +Using a shared subchart +======================= + +To use a shared subchart, reference it as a dependency in :file:`Chart.yaml` the way that you would use any other Helm chart as a subchart, but use a ``file:`` URL to point to the shared chart directory. +For example: + +.. code-block:: yaml + :caption: applications/tap/Chart.yaml + + dependencies: + - name: cadc-tap + version: 1.0.0 + repository: "file://../../charts/cadc-tap" + +Note the relative ``file:`` URL, which ensures the chart comes from the same checkout of Phalanx as the application chart. +The ``version`` in the dependency must always be ``1.0.0``. + +Don't forget to copy any relevant ``home``, ``sources``, or ``annotations`` settings from the shared chart into the application :file:`Chart.yaml` so that it will be included in the generated Phalanx documentation. + +Next steps +========== + +- Define the secrets needed by each application: :doc:`define-secrets` +- Add the Argo CD applications to appropriate environments: :doc:`add-application` diff --git a/docs/developers/write-a-helm-chart.rst b/docs/developers/write-a-helm-chart.rst index ebef7d96cf..641cb1bf7e 100644 --- a/docs/developers/write-a-helm-chart.rst +++ b/docs/developers/write-a-helm-chart.rst @@ -11,6 +11,9 @@ For first-party charts, the :file:`templates` directory is generally richly popu Here are instructions for writing a Helm chart for a newly-developed application. If you are using an external third-party chart to deploy part of the application, also see :doc:`add-external-chart`. +In some cases where there is a lot of internal duplication between multiple Phalanx applications, those applications should share a subchart that encapsulates that duplication. +See :doc:`shared-charts` if you think that may be the case for your application. + .. _dev-chart-starters: Start from a template @@ -250,6 +253,39 @@ For example: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" +Checking the chart +================== + +Most of the testing of your chart will have to be done by deploying it in a test Kubernetes environment. +See :doc:`add-application` for more details about how to do that. +However, you can check the chart for basic syntax and some errors in Helm templating before deploying it. + +To check your chart, run: + +.. prompt:: bash + + phalanx application lint <application> + +Replace ``<application>`` with the name of your new application. +Multiple applications may be listed to lint all of them. + +This will run :command:`helm lint` on the chart with the appropriate values files and injected settings for each environment for which it has a configuration and report any errors. +:command:`helm lint` does not check resources against their schemas, alas, but it will at least diagnose YAML and Helm templating syntax errors. + +You can limit the linting to a specific environment by specifying an environment with the ``--environment`` (or ``-e`` or ``--env``) flag.
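+
+For example, to lint a single application's chart against just one environment (the application and environment names here are illustrative):
+
+.. prompt:: bash
+
+   phalanx application lint portal -e idfdev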
+ +This lint check will also be done via GitHub Actions when you create a Phalanx PR, and the PR cannot be merged until this lint check passes. + +You can also ask for the fully-expanded Kubernetes resources that would be installed in the cluster when the chart is installed. +Do this with: + +.. prompt:: bash + + phalanx application template <application> <environment> + +Replace ``<application>`` with the name of your application and ``<environment>`` with the name of the environment for which you want to generate its resources. +This will print to standard output the expanded YAML Kubernetes resources that would be created in the cluster by this chart. + Examples ======== diff --git a/docs/documenteer.toml b/docs/documenteer.toml index 8ebf66682a..bb0dc716ef 100644 --- a/docs/documenteer.toml +++ b/docs/documenteer.toml @@ -63,6 +63,8 @@ ignore = [ '^https://usdf-prompt-processing-dev.slac.stanford.edu', '^https://usdf-rsp.slac.stanford.edu', '^https://usdf-rsp-dev.slac.stanford.edu', + '^https://usdf-rsp-int.slac.stanford.edu', '^https://usdf-tel-rsp.slac.stanford.edu', '^https://github.com/orgs/', + '^https://console.cloud.google.com/', ] diff --git a/docs/environments/_summary.rst.jinja b/docs/environments/_summary.rst.jinja index 97002cab01..9c47b905ef 100644 --- a/docs/environments/_summary.rst.jinja +++ b/docs/environments/_summary.rst.jinja @@ -4,8 +4,24 @@ - ``{{ env.name }}`` * - Root domain - `{{ env.fqdn }} `__ + * - Identity provider + - {{ env.identity_provider.value }} * - Argo CD - {% if env.argocd_url %}{{ env.argocd_url }}{% else %}N/A{% endif %} + {%- if env.gcp %} + * - Google console + - - `Log Explorer `__ + - `Google Kubernetes Engine `__ + * - Google Cloud Platform + - .. list-table:: + + * - Project ID + - {{ env.gcp.project_id }} + * - Region + - {{ env.gcp.region }} + * - Cluster name + - {{ env.gcp.cluster_name }} + {%- endif %} * - Applications - .. list-table:: @@ -25,9 +41,7 @@ - {%- endif %} {% endfor %} - * - Identity provider - - {{ env.identity_provider.value }} - {% if env.gafaelfawr_scopes %} + {%- if env.gafaelfawr_scopes %} * - Gafaelfawr groups - .. list-table:: @@ -36,18 +50,27 @@ {% for scope_groups in env.gafaelfawr_scopes %} * - ``{{ scope_groups.scope }}`` - - {{ scope_groups.groups_as_rst()[0] }} - {% if scope_groups.groups|length > 1 %} - {% for group in scope_groups.groups_as_rst()[1:] %} + {%- if scope_groups.groups|length > 1 %} + {%- for group in scope_groups.groups_as_rst()[1:] %} - {{ group }} {%- endfor %} {%- endif %} {%- endfor %} - {% endif %} - {% if env.argocd_rbac %} + {%- endif %} + {%- if env.argocd_rbac %} * - Argo CD RBAC - .. csv-table:: - {% for line in env.argocd_rbac_csv %} {{ line }} {%- endfor %} - {% endif %} + {%- endif %} +{%- if env.gcp %} + +To obtain Kubernetes admin credentials for this cluster, run: + +.. prompt:: bash + + gcloud container clusters get-credentials {{ env.gcp.cluster_name }} --project {{ env.gcp.project_id }} --region {{ env.gcp.region }} + +For details on how to set up :command:`gcloud` and the necessary plugins, see :doc:`/admin/infrastructure/google/credentials`. {%- endif %} diff --git a/docs/environments/ccin2p3/index.rst b/docs/environments/ccin2p3/index.rst index 733aa9b0ca..f48999008b 100644 --- a/docs/environments/ccin2p3/index.rst +++ b/docs/environments/ccin2p3/index.rst @@ -4,7 +4,7 @@ ccin2p3 — data-dev.lsst.eu (French Data Facility) ################################################# -``ccin2p3`` is the environment for the Rubin Science Platform at the `CC-IN2P3 `__.
+``ccin2p3`` is the environment for the Rubin Science Platform at the `CC-IN2P3 `__. .. jinja:: ccin2p3 :file: environments/_summary.rst.jinja diff --git a/docs/environments/index.rst b/docs/environments/index.rst index c3018b31d2..fdc7accfc1 100644 --- a/docs/environments/index.rst +++ b/docs/environments/index.rst @@ -24,6 +24,7 @@ To learn more about operating a Phalanx environment, see the :doc:`/admin/index` summit/index tucson-teststand/index usdfdev/index + usdfint/index usdfprod/index usdfdev-alert-stream-broker/index usdfdev-prompt-processing/index diff --git a/docs/environments/usdfint/index.rst b/docs/environments/usdfint/index.rst new file mode 100644 index 0000000000..62fb9d5713 --- /dev/null +++ b/docs/environments/usdfint/index.rst @@ -0,0 +1,11 @@ +.. px-env:: usdfint + +############################################################### +usdfint — usdf-rsp-int.slac.stanford.edu (Integration for USDF) +############################################################### + +``usdfint`` is the integration environment for the Rubin Science Platform at the United States Data Facility (USDF) hosted at SLAC. +The primary use of ``usdfint`` is for Rubin construction and operations teams to integrate applications into the Rubin Science Platform that need to run at the USDF. + +.. jinja:: usdfint + :file: environments/_summary.rst.jinja diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index 9f50157e4f..46a5e5fa2c 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -1,16 +1,156 @@ { "$defs": { + "ControlSystemConfig": { + "description": "Configuration for the Control System.", + "properties": { + "appNamespace": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set the namespace for the control system components. 
Each control system application consists of many components that need to know the namespace to which they belong.", + "title": "Application Namespace" + }, + "imageTag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image tag to use for control system images.", + "title": "Image Tag" + }, + "siteTag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tag that tells the control system component where it is running.", + "title": "Site Tag" + }, + "topicName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Kafka identifier for control system topics.", + "title": "Topic Identifier" + }, + "kafkaBrokerAddress": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Kafka broker address for the control system components.", + "title": "Kafka Broker Address" + }, + "kafkaTopicReplicationFactor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Kafka topic replication factor for control system components.", + "title": "Kafka Topic Replication Factor" + }, + "schemaRegistryUrl": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Schema Registry URL for the control system components.", + "title": "Schema Registry URL" + }, + "s3EndpointUrl": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The S3 URL for the environment-specific LFA.", + "title": "S3 Endpoint URL" + } + }, + "title": "ControlSystemConfig", + "type": "object" + }, + "GCPMetadata": { + "description": "Google Cloud Platform hosting metadata.\n\nHolds information about where in Google Cloud Platform this Phalanx\nenvironment is hosted. This supports generating documentation that\nincludes this metadata, making it easier for administrators to know what\noptions to pass to :command:`gcloud` to do things such as get Kubernetes\ncredentials.", + "properties": { + "projectId": { + "description": "Project ID of GCP project hosting this environment", + "title": "GCP project ID", + "type": "string" + }, + "region": { + "description": "GCP region in which this environment is hosted", + "title": "GCP region", + "type": "string" + }, + "clusterName": { + "description": "Name of the GKE cluster hosting this environment", + "title": "Kubernetes cluster name", + "type": "string" + } + }, + "required": [ + "projectId", + "region", + "clusterName" + ], + "title": "GCPMetadata", + "type": "object" + }, "OnepasswordConfig": { "description": "Configuration for 1Password static secrets source.", "properties": { "connectUrl": { + "description": "URL to the 1Password Connect API server", "format": "uri", "minLength": 1, - "title": "Connecturl", + "title": "1Password Connect URL", "type": "string" }, "vaultTitle": { - "title": "Vaulttitle", + "description": "Title of the 1Password vault from which to retrieve secrets", + "title": "1Password vault title", "type": "string" } }, @@ -24,16 +164,44 @@ }, "$id": "https://phalanx.lsst.io/schemas/environment.json", "additionalProperties": false, - "description": "Configuration for a Phalanx environment.\n\nThis is a model for the :file:`values-{environment}.yaml` files for each\nenvironment and is also used to validate those files. 
For the complete\nconfiguration for an environment, initialize this model with the merger of\n:file:`values.yaml` and :file:`values-{environment}.yaml`.", + "description": "Configuration for a Phalanx environment.\n\nThis is a model for the :file:`values-{environment}.yaml` files for each\nenvironment and is also used to validate those files. For the complete\nconfiguration for an environment, initialize this model with the merger of\n:file:`values.yaml` and :file:`values-{environment}.yaml`.\n\nFields listed here are not available to application linting. If the field\nvalue has to be injected during linting, the field needs to be defined in\n`EnvironmentBaseConfig` instead.", "properties": { "name": { + "description": "Name of the environment", "title": "Name", "type": "string" }, "fqdn": { - "title": "Fqdn", + "description": "Fully-qualified domain name on which the environment listens", + "title": "Domain name", "type": "string" }, + "butlerRepositoryIndex": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "URL to Butler repository index", + "title": "Butler repository index URL" + }, + "gcp": { + "anyOf": [ + { + "$ref": "#/$defs/GCPMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "If this environment is hosted on Google Cloud Platform, metadata about the hosting project, location, and other details. Used to generate additional environment documentation.", + "title": "GCP hosting metadata" + }, "onepassword": { "anyOf": [ { @@ -43,26 +211,15 @@ "type": "null" } ], - "default": null + "default": null, + "description": "Configuration for using 1Password as a static secrets source", + "title": "1Password configuration" }, "vaultUrl": { - "title": "Vaulturl", - "type": "string" - }, - "vaultPathPrefix": { - "title": "Vaultpathprefix", - "type": "string" - }, - "applications": { - "additionalProperties": { - "type": "boolean" - }, - "title": "Applications", - "type": "object" - }, - "butlerRepositoryIndex": { "anyOf": [ { + "format": "uri", + "minLength": 1, "type": "string" }, { @@ -70,19 +227,32 @@ } ], "default": null, - "title": "Butlerrepositoryindex" + "description": "URL of the Vault server. This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", + "title": "Vault server URL" + }, + "vaultPathPrefix": { + "description": "Prefix of Vault paths, including the KV v2 mount point", + "title": "Vault path prefix", + "type": "string" }, - "onepasswordUuid": { + "controlSystem": { "anyOf": [ { - "type": "string" + "$ref": "#/$defs/ControlSystemConfig" }, { "type": "null" } ], - "default": null, - "title": "Onepassworduuid" + "default": null + }, + "applications": { + "additionalProperties": { + "type": "boolean" + }, + "description": "List of applications and whether they are enabled", + "title": "Enabled applications", + "type": "object" }, "repoUrl": { "anyOf": [ @@ -94,7 +264,8 @@ } ], "default": null, - "title": "Repourl" + "description": "URL of the Git repository holding Argo CD configuration. 
This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", + "title": "URL of Git repository" }, "targetRevision": { "anyOf": [ @@ -106,13 +277,13 @@ } ], "default": null, - "title": "Targetrevision" + "description": "Branch of the Git repository holding Argo CD configuration. This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", + "title": "Git repository branch" } }, "required": [ "name", "fqdn", - "vaultUrl", "vaultPathPrefix", "applications" ], diff --git a/docs/extras/schemas/secrets.json b/docs/extras/schemas/secrets.json index d222b6c7b6..306b45a11c 100644 --- a/docs/extras/schemas/secrets.json +++ b/docs/extras/schemas/secrets.json @@ -14,9 +14,11 @@ } ], "default": null, - "description": "Rules for where the secret should be copied from" + "description": "Rules for where the secret should be copied from", + "title": "Copy rules" }, "description": { + "description": "Description of the secret", "title": "Description", "type": "string" }, @@ -33,7 +35,8 @@ } ], "default": null, - "title": "Generate" + "description": "Rules for how the secret should be generated", + "title": "Generation rules" }, "if": { "anyOf": [ @@ -54,9 +57,8 @@ "$ref": "#/$defs/SecretOnepasswordConfig" } ], - "default": { - "encoded": false - } + "description": "Configuration for how the secret is stored in 1Password", + "title": "1Password configuration" }, "value": { "anyOf": [ @@ -70,6 +72,7 @@ } ], "default": null, + "description": "Fixed value of secret", "title": "Value" } }, @@ -84,6 +87,7 @@ "description": "Possibly conditional rules for copying a secret value from another.", "properties": { "application": { + "description": "Application from which the secret should be copied", "title": "Application", "type": "string" }, @@ -101,6 +105,7 @@ "title": "Condition" }, "key": { + "description": "Secret key from which the secret should be copied", "title": "Key", "type": "string" } @@ -130,13 +135,14 @@ "title": "Condition" }, "type": { + "description": "Type of secret", "enum": [ "password", "gafaelfawr-token", "fernet-key", "rsa-private-key" ], - "title": "Type", + "title": "Secret type", "type": "string" } }, @@ -163,15 +169,17 @@ "title": "Condition" }, "source": { - "title": "Source", + "description": "Key of secret on which this secret is based. This may only be set by secrets of type `bcrypt-password-hash` or `mtime`.", + "title": "Source key", "type": "string" }, "type": { + "description": "Type of secret", "enum": [ "bcrypt-password-hash", "mtime" ], - "title": "Type", + "title": "Secret type", "type": "string" } }, @@ -187,7 +195,8 @@ "properties": { "encoded": { "default": false, - "title": "Encoded", + "description": "Whether the 1Password copy of the secret is encoded in base64. 1Password doesn't support newlines in secrets, so secrets that contain significant newlines have to be encoded when storing them in 1Password. 
This flag indicates that this has been done, and therefore when retrieving the secret from 1Password, its base64-encoding must be undone.", + "title": "Is base64-encoded", "type": "boolean" } }, diff --git a/docs/internals/api.rst b/docs/internals/api.rst index 0f400a4fd2..6166bcd1f8 100644 --- a/docs/internals/api.rst +++ b/docs/internals/api.rst @@ -46,6 +46,9 @@ This API is only intended for use within the Phalanx code itself. .. automodapi:: phalanx.services.application :include-all-objects: +.. automodapi:: phalanx.services.environment + :include-all-objects: + .. automodapi:: phalanx.services.secrets :include-all-objects: diff --git a/environments/README.md b/environments/README.md index c92b04c4a1..a55c8427aa 100644 --- a/environments/README.md +++ b/environments/README.md @@ -7,27 +7,33 @@ | applications.alert-stream-broker | bool | `false` | Enable the alert-stream-broker application | | applications.argo-workflows | bool | `false` | Enable the argo-workflows application | | applications.argocd | bool | `true` | Enable the Argo CD application. This must be enabled for all environments and is present here only because it makes parsing easier | -| applications.cachemachine | bool | `false` | Enable the cachemachine application (required by nublado2) | +| applications.auxtel | bool | `false` | Enable the auxtel control system application | +| applications.butler | bool | `false` | Enable the butler application | +| applications.calsys | bool | `false` | Enable the calsys control system application | | applications.cert-manager | bool | `true` | Enable the cert-manager application, required unless the environment makes separate arrangements to inject a current TLS certificate | +| applications.control-system-test | bool | `false` | Enable the control-system-test application | | applications.datalinker | bool | `false` | Eanble the datalinker application | +| applications.eas | bool | `false` | Enable the eas control system application | | applications.exposurelog | bool | `false` | Enable the exposurelog application | +| applications.filestore-backup | bool | `false` | Enable the filestore-backup application | | applications.gafaelfawr | bool | `true` | Enable the Gafaelfawr application. This is required by Phalanx since most other applications use `GafaelfawrIngress` | | applications.giftless | bool | `false` | Enable the giftless application | | applications.hips | bool | `false` | Enable the HiPS application | | applications.ingress-nginx | bool | `true` | Enable the ingress-nginx application. This is required for all environments, but is still configurable because currently USDF uses an unsupported configuration with ingress-nginx deployed in a different cluster. 
| +| applications.jira-data-proxy | bool | `false` | Enable the jira-data-proxy application | | applications.kubernetes-replicator | bool | `false` | Enable the kubernetes-replicator application | | applications.linters | bool | `false` | Enable the linters application | | applications.livetap | bool | `false` | Enable the livetap application | +| applications.love | bool | `false` | Enable the love control system application | | applications.mobu | bool | `false` | Enable the mobu application | -| applications.moneypenny | bool | `false` | Enable the moneypenny application (required by nublado2) | | applications.monitoring | bool | `false` | Enable the monitoring application | | applications.narrativelog | bool | `false` | Enable the narrativelog application | | applications.next-visit-fan-out | bool | `false` | Enable the next-visit-fan-out application | | applications.noteburst | bool | `false` | Enable the noteburst application (required by times-square) | | applications.nublado | bool | `false` | Enable the nublado application (v3 of the Notebook Aspect) | -| applications.nublado2 | bool | `false` | Enable the nublado2 application (v2 of the Notebook Aspect, now deprecated). This should not be used for new environments. | | applications.obsloctap | bool | `false` | Enable the obsloctap application | -| applications.onepassword-connect-dev | bool | `false` | Enable the onepassword-connect-dev application | +| applications.obssys | bool | `false` | Enable the obssys control system application | +| applications.onepassword-connect | bool | `false` | Enable the onepassword-connect application | | applications.ook | bool | `false` | Enable the ook application | | applications.plot-navigator | bool | `false` | Enable the plot-navigator application | | applications.portal | bool | `false` | Enable the portal application | @@ -39,8 +45,12 @@ | applications.prompt-proto-service-lsstcomcam | bool | `false` | Enable the prompt-proto-service-lsstcomcam application | | applications.rubintv | bool | `false` | Enable the rubintv application | | applications.sasquatch | bool | `false` | Enable the sasquatch application | +| applications.schedview-prenight | bool | `false` | Enable the schedview-prenight application | +| applications.schedview-snapshot | bool | `false` | Enable the schedview-snapshot application | | applications.semaphore | bool | `false` | Enable the semaphore application | | applications.sherlock | bool | `false` | Enable the sherlock application | +| applications.siav2 | bool | `false` | Enable the siav2 application | +| applications.simonyitel | bool | `false` | Enable the simonyitel control system application | | applications.sqlproxy-cross-project | bool | `false` | Enable the sqlproxy-cross-project application | | applications.squarebot | bool | `false` | Enable the squarebot application | | applications.squareone | bool | `false` | Enable the squareone application | @@ -51,13 +61,21 @@ | applications.telegraf | bool | `false` | Enable the telegraf application | | applications.telegraf-ds | bool | `false` | Enable the telegraf-ds application | | applications.times-square | bool | `false` | Enable the times-square application | +| applications.uws | bool | `false` | Enable the uws application. This includes the dmocps control system application. | | applications.vault-secrets-operator | bool | `true` | Enable the vault-secrets-operator application. This is required for all environments. 
| +| applications.vo-cutouts | bool | `false` | Enable the vo-cutouts application | | butlerRepositoryIndex | string | None, must be set | Butler repository index to use for this environment | +| controlSystem.appNamespace | string | None, must be set | Application namespace for the control system deployment | +| controlSystem.imageTag | string | None, must be set | Image tag for the control system deployment | +| controlSystem.kafkaBrokerAddress | string | `"sasquatch-kafka-brokers.sasquatch:9092"` | Kafka broker address for the control system deployment | +| controlSystem.kafkaTopicReplicationFactor | int | `3` | Kafka topic replication factor for control system topics | +| controlSystem.s3EndpointUrl | string | None, must be set | S3 endpoint (LFA) for the control system deployment | +| controlSystem.schemaRegistryUrl | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL for the control system deployment | +| controlSystem.siteTag | string | None, must be set | Site tag for the control system deployment | +| controlSystem.topicName | string | `"sal"` | Topic name tag for the control system deployment | | fqdn | string | None, must be set | Fully-qualified domain name where the environment is running | | name | string | None, must be set | Name of the environment | -| onepasswordUuid | string | `"dg5afgiadsffeklfr6jykqymeu"` | UUID of the 1Password item in which to find Vault tokens | | repoUrl | string | `"https://github.com/lsst-sqre/phalanx.git"` | URL of the repository for all applications | | targetRevision | string | `"main"` | Revision of repository to use for all applications | | vaultPathPrefix | string | None, must be set | Prefix for Vault secrets for this environment | -| vaultUrl | string | None, must be set | URL of Vault server for this environment | +| vaultUrl | string | `"https://vault.lsst.codes/"` | URL of Vault server for this environment | diff --git a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml new file mode 100644 index 0000000000..ef77800313 --- /dev/null +++ b/environments/templates/auxtel-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "auxtel") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: auxtel +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: auxtel + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: auxtel + server: https://kubernetes.default.svc + project: default + source: + path: applications/auxtel + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "auxtel" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: 
"global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/moneypenny-application.yaml b/environments/templates/butler-application.yaml similarity index 82% rename from environments/templates/moneypenny-application.yaml rename to environments/templates/butler-application.yaml index 539a2a0c55..e1bdb6050e 100644 --- a/environments/templates/moneypenny-application.yaml +++ b/environments/templates/butler-application.yaml @@ -1,23 +1,23 @@ -{{- if .Values.applications.moneypenny -}} +{{- if (index .Values "applications" "butler") -}} apiVersion: v1 kind: Namespace metadata: - name: "moneypenny" + name: "butler" --- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "moneypenny" + name: "butler" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "moneypenny" + namespace: "butler" server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/moneypenny" + path: "applications/butler" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} helm: @@ -31,4 +31,4 @@ spec: valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" -{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml new file mode 100644 index 0000000000..fb70b3256d --- /dev/null +++ b/environments/templates/calsys-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "calsys") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: calsys +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: calsys + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: calsys + server: https://kubernetes.default.svc + project: default + source: + path: applications/calsys + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "calsys" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml new file mode 
100644 index 0000000000..62a7799c3f --- /dev/null +++ b/environments/templates/control-system-test-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "control-system-test") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: control-system-test +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: control-system-test + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: control-system-test + server: https://kubernetes.default.svc + project: default + source: + path: applications/control-system-test + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "control-system-test" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/eas-application.yaml b/environments/templates/eas-application.yaml new file mode 100644 index 0000000000..15fe4ba6fd --- /dev/null +++ b/environments/templates/eas-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "eas") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: eas +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: eas + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: eas + server: https://kubernetes.default.svc + project: default + source: + path: applications/eas + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "eas" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | 
quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/nublado2-application.yaml b/environments/templates/filestore-backup-application.yaml similarity index 51% rename from environments/templates/nublado2-application.yaml rename to environments/templates/filestore-backup-application.yaml index fbfeea6e1c..a848dec6f6 100644 --- a/environments/templates/nublado2-application.yaml +++ b/environments/templates/filestore-backup-application.yaml @@ -1,41 +1,38 @@ -{{- if .Values.applications.nublado2 -}} +{{- if (index .Values "applications" "filestore-backup") -}} apiVersion: v1 kind: Namespace metadata: - name: "nublado2" + name: "filestore-backup" --- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "nublado2" + name: "filestore-backup" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "nublado2" + namespace: "filestore-backup" server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/nublado2" + path: "applications/filestore-backup" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} helm: - valueFiles: - - "values.yaml" - - "values-{{ .Values.name }}.yaml" parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - ignoreDifferences: - - kind: "Secret" - jsonPointers: - - "/data/hub.config.ConfigurableHTTPProxy.auth_token" - - "/data/hub.config.CryptKeeper.keys" - - "/data/hub.config.JupyterHub.cookie_secret" - - group: "apps" - kind: "Deployment" - jsonPointers: - - "/spec/template/metadata/annotations/checksum~1secret" - - "/spec/template/metadata/annotations/checksum~1auth-token" + - name: "global.gcpProjectId" + value: {{ .Values.gcp.projectId }} + - name: "global.gcpRegion" + value: {{ .Values.gcp.region }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/onepassword-connect-dev-application.yaml b/environments/templates/jira-data-proxy-application.yaml similarity index 77% rename from environments/templates/onepassword-connect-dev-application.yaml rename to environments/templates/jira-data-proxy-application.yaml index 590783e740..fddc00e132 100644 --- a/environments/templates/onepassword-connect-dev-application.yaml +++ b/environments/templates/jira-data-proxy-application.yaml @@ -1,23 +1,23 @@ -{{- if (index .Values "applications" "onepassword-connect-dev") -}} +{{- if (index .Values "applications" "jira-data-proxy") -}} apiVersion: v1 kind: Namespace metadata: - name: "onepassword-connect-dev" + name: "jira-data-proxy" --- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "onepassword-connect-dev" + name: "jira-data-proxy" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "onepassword-connect-dev" + namespace: "jira-data-proxy" server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/onepassword-connect-dev" + path: "applications/jira-data-proxy" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} helm: diff --git a/environments/templates/love-application.yaml 
b/environments/templates/love-application.yaml new file mode 100644 index 0000000000..716c4dcfc6 --- /dev/null +++ b/environments/templates/love-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "love") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: love +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: love + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: love + server: https://kubernetes.default.svc + project: default + source: + path: applications/love + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "love" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/fileservers-application.yaml b/environments/templates/nublado-fileservers-application.yaml similarity index 62% rename from environments/templates/fileservers-application.yaml rename to environments/templates/nublado-fileservers-application.yaml index 0217017542..534327cbfb 100644 --- a/environments/templates/fileservers-application.yaml +++ b/environments/templates/nublado-fileservers-application.yaml @@ -1,3 +1,8 @@ +{{/* + The namespace is fileservers even though the Argo CD application is + nublado-fileservers, since otherwise we have a conflict with the + lab namespace for a user with the username fileservers. 
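To make that collision concrete: Nublado creates per-user lab namespaces derived from the username (assumed here to use a `nublado-` prefix, which the comment implies), so a namespace of `nublado-fileservers` would be claimed by any user whose username happens to be `fileservers`. A hypothetical sketch:

```python
# Hypothetical sketch of the naming collision; the "nublado-" prefix is an
# assumption based on the comment above, not a confirmed controller detail.
def lab_namespace(username: str) -> str:
    """Namespace in which a user's lab resources are created."""
    return f"nublado-{username}"


# The lab namespace for user "fileservers" is exactly the namespace the
# Argo CD application would otherwise have used, hence the split naming.
assert lab_namespace("fileservers") == "nublado-fileservers"
```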
+*/}} {{- if .Values.applications.nublado -}} apiVersion: v1 kind: Namespace @@ -7,7 +12,7 @@ metadata: apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "fileservers" + name: "nublado-fileservers" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" @@ -17,7 +22,7 @@ spec: server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/fileservers" + path: "applications/nublado-fileservers" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} {{- end -}} diff --git a/environments/templates/nublado-users-application.yaml b/environments/templates/nublado-users-application.yaml index 4efc7793f7..c5c48ec6bc 100644 --- a/environments/templates/nublado-users-application.yaml +++ b/environments/templates/nublado-users-application.yaml @@ -1,4 +1,4 @@ -{{- if (or .Values.applications.nublado .Values.applications.nublado2) -}} +{{- if .Values.applications.nublado -}} apiVersion: argoproj.io/v1alpha1 kind: Application metadata: diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml new file mode 100644 index 0000000000..68c0717261 --- /dev/null +++ b/environments/templates/obssys-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "obssys") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: obssys +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: obssys + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: obssys + server: https://kubernetes.default.svc + project: default + source: + path: applications/obssys + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "obssys" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/ocps-uws-job-application.yaml b/environments/templates/ocps-uws-job-application.yaml new file mode 100644 index 0000000000..7df1fe253e --- /dev/null +++ b/environments/templates/ocps-uws-job-application.yaml @@ -0,0 +1,18 @@ +{{- if .Values.applications.uws -}} +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: ocps-uws-job + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: ocps-uws-job + server: https://kubernetes.default.svc + project: default + source: + path: 
applications/ocps-uws-job + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} +{{- end -}} \ No newline at end of file diff --git a/environments/templates/onepassword-connect-application.yaml b/environments/templates/onepassword-connect-application.yaml new file mode 100644 index 0000000000..c87d077f6a --- /dev/null +++ b/environments/templates/onepassword-connect-application.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "onepassword-connect") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "onepassword-connect" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "onepassword-connect" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "onepassword-connect" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/onepassword-connect" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/schedview-prenight-application.yaml b/environments/templates/schedview-prenight-application.yaml new file mode 100644 index 0000000000..1489318618 --- /dev/null +++ b/environments/templates/schedview-prenight-application.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "schedview-prenight") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "schedview-prenight" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "schedview-prenight" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "schedview-prenight" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/schedview-prenight" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/templates/schedview-snapshot-application.yaml b/environments/templates/schedview-snapshot-application.yaml new file mode 100644 index 0000000000..5f8764e9c4 --- /dev/null +++ b/environments/templates/schedview-snapshot-application.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "schedview-snapshot") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "schedview-snapshot" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "schedview-snapshot" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "schedview-snapshot" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/schedview-snapshot" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: 
"global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/templates/cachemachine-application.yaml b/environments/templates/siav2-application.yaml similarity index 81% rename from environments/templates/cachemachine-application.yaml rename to environments/templates/siav2-application.yaml index e07f2d2111..115c44c25d 100644 --- a/environments/templates/cachemachine-application.yaml +++ b/environments/templates/siav2-application.yaml @@ -1,23 +1,23 @@ -{{- if .Values.applications.cachemachine -}} +{{- if (index .Values "applications" "siav2") -}} apiVersion: v1 kind: Namespace metadata: - name: "cachemachine" + name: "siav2" --- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "cachemachine" + name: "siav2" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "cachemachine" + namespace: "siav2" server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/cachemachine" + path: "applications/siav2" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} helm: @@ -31,4 +31,4 @@ spec: valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" -{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml new file mode 100644 index 0000000000..0288bf9c86 --- /dev/null +++ b/environments/templates/simonyitel-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "simonyitel") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: simonyitel +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: simonyitel + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: simonyitel + server: https://kubernetes.default.svc + project: default + source: + path: applications/simonyitel + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "simonyitel" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/uws-application.yaml b/environments/templates/uws-application.yaml new file mode 100644 index 0000000000..8c12169502 --- /dev/null +++ 
b/environments/templates/uws-application.yaml @@ -0,0 +1,53 @@ +{{- if (index .Values "applications" "uws") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: uws +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: uws + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: uws + server: https://kubernetes.default.svc + project: default + source: + path: applications/uws + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystem.appNamespace" + value: "uws" + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/values-base.yaml b/environments/values-base.yaml index 8310f36de0..92025c349c 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -1,17 +1,14 @@ name: base fqdn: base-lsp.lsst.codes -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/base-lsp.lsst.codes applications: argo-workflows: true - cachemachine: true exposurelog: true - moneypenny: true narrativelog: true nublado: true - nublado2: true portal: true + rubintv: true sasquatch: true squareone: true strimzi: true diff --git a/environments/values-ccin2p3.yaml b/environments/values-ccin2p3.yaml index 4dad63364e..04419d41ee 100644 --- a/environments/values-ccin2p3.yaml +++ b/environments/values-ccin2p3.yaml @@ -1,13 +1,10 @@ name: ccin2p3 fqdn: data-dev.lsst.eu -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/rsp-cc applications: - cachemachine: true datalinker: true - moneypenny: true - nublado2: true + nublado: true portal: true postgres: true squareone: true diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index a403d70372..e736b9ea04 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -1,24 +1,30 @@ +name: "idfdev" +fqdn: "data-dev.lsst.cloud" butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" -fqdn: data-dev.lsst.cloud -name: idfdev +gcp: + projectId: "science-platform-dev-7696" + region: "us-central1" + clusterName: "science-platform-dev" onepassword: - connectUrl: "https://roundtable-dev.lsst.cloud/1password/idfdev" + connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP data-dev.lsst.cloud" -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/phalanx/idfdev +vaultPathPrefix: "secret/phalanx/idfdev" applications: argo-workflows: 
true + butler: true datalinker: true + filestore-backup: true hips: true + jira-data-proxy: true mobu: true noteburst: true nublado: true portal: true - postgres: true sasquatch: true semaphore: true sherlock: true + siav2: true ssotap: true squareone: true sqlproxy-cross-project: true diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index ee29239791..7d376d4133 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -1,23 +1,27 @@ +name: "idfint" +fqdn: "data-int.lsst.cloud" butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" -fqdn: data-int.lsst.cloud -name: idfint -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/k8s_operator/data-int.lsst.cloud +gcp: + projectId: "science-platform-int-dc5d" + region: "us-central1" + clusterName: "science-platform-int" +onepassword: + connectUrl: "https://roundtable.lsst.cloud/1password" + vaultTitle: "RSP data-int.lsst.cloud" +vaultPathPrefix: "secret/phalanx/idfint" applications: - alert-stream-broker: true - cachemachine: true + butler: true datalinker: true hips: true linters: true mobu: true - moneypenny: true nublado: true - nublado2: true plot-navigator: true portal: true postgres: true sasquatch: true + siav2: true ssotap: true production-tools: true semaphore: true diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 2d9137f291..f8f78da0f7 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -1,8 +1,14 @@ +name: "idfprod" +fqdn: "data.lsst.cloud" butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-repos.yaml" -fqdn: data.lsst.cloud -name: idfprod -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/k8s_operator/data.lsst.cloud +gcp: + projectId: "science-platform-stable-6994" + region: "us-central1" + clusterName: "science-platform-stable" +onepassword: + connectUrl: "https://roundtable.lsst.cloud/1password" + vaultTitle: "RSP data.lsst.cloud" +vaultPathPrefix: "secret/phalanx/idfprod" applications: datalinker: true @@ -10,9 +16,9 @@ applications: mobu: true nublado: true portal: true - postgres: true semaphore: true sherlock: true + siav2: true squareone: true ssotap: true tap: true diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index 0ee92a116b..309e43a461 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -1,15 +1,16 @@ name: minikube fqdn: minikube.lsst.codes -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/k8s_operator/minikube.lsst.codes +onepassword: + connectUrl: "https://roundtable-dev.lsst.cloud/1password" + vaultTitle: "RSP minikube.lsst.codes" +vaultPathPrefix: secret/phalanx/minikube # The primary constraint on enabling applications is the low available memory # of a GitHub Actions runner, since minikube is used for smoke testing of new -# Helm configurations. +# Helm configurations. minikube also doesn't have access to data, at least +# currently, which substantially limits the applications that can be +# meaningfully deployed. 
applications: - datalinker: true - hips: true mobu: true postgres: true squareone: true - tap: true diff --git a/environments/values-roe.yaml b/environments/values-roe.yaml index 0e20da2923..601cf9c3b6 100644 --- a/environments/values-roe.yaml +++ b/environments/values-roe.yaml @@ -4,10 +4,8 @@ vaultUrl: "https://vault.lsst.ac.uk" vaultPathPrefix: secret/k8s_operator/roe applications: - cachemachine: true mobu: true - moneypenny: true - nublado2: true + nublado: true portal: true postgres: true squareone: true diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 0b92eebd27..48709c90f2 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -1,13 +1,19 @@ -name: roundtable-dev -fqdn: roundtable-dev.lsst.cloud -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/k8s_operator/roundtable-dev.lsst.cloud +name: "roundtable-dev" +fqdn: "roundtable-dev.lsst.cloud" +gcp: + projectId: "roundtable-dev-abe2" + region: "us-central1" + clusterName: "roundtable-dev" +onepassword: + connectUrl: "https://roundtable-dev.lsst.cloud/1password" + vaultTitle: "RSP roundtable-dev.lsst.cloud" +vaultPathPrefix: "secret/phalanx/roundtable-dev" applications: giftless: true kubernetes-replicator: true monitoring: true - onepassword-connect-dev: true + onepassword-connect: true ook: true sasquatch: true squarebot: true diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index c94d92d5a0..e8af504612 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -1,12 +1,21 @@ -name: roundtable-prod -fqdn: roundtable.lsst.cloud -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/k8s_operator/roundtable.lsst.cloud +name: "roundtable-prod" +fqdn: "roundtable.lsst.cloud" +gcp: + projectId: "roundtable-prod-f6fd" + region: "us-central1" + clusterName: "roundtable-prod" +onepassword: + connectUrl: "https://roundtable.lsst.cloud/1password" + vaultTitle: "RSP roundtable.lsst.cloud" +vaultPathPrefix: "secret/phalanx/roundtable-prod" applications: + giftless: true kubernetes-replicator: true + onepassword-connect: true ook: true sasquatch: true squareone: true strimzi: true strimzi-access-operator: true + squarebot: true diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index 6ca59cf4ac..ecce933133 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -1,15 +1,11 @@ name: summit fqdn: summit-lsp.lsst.codes -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/summit-lsp.lsst.codes applications: - cachemachine: true exposurelog: true - moneypenny: true narrativelog: true nublado: true - nublado2: true portal: true postgres: true sasquatch: true diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index df9bef1bc6..ff7484903b 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -1,19 +1,28 @@ name: tucson-teststand fqdn: tucson-teststand.lsst.codes -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/tucson-teststand.lsst.codes applications: argo-workflows: true - cachemachine: true + auxtel: true + calsys: true + control-system-test: true + eas: true exposurelog: true - moneypenny: true + love: true narrativelog: true nublado: true - nublado2: true + obssys: true portal: true sasquatch: true + simonyitel: true squareone: true 
strimzi: true + uws: true telegraf: true telegraf-ds: true + +controlSystem: + imageTag: k0001 + siteTag: tucson + s3EndpointUrl: https://s3.tu.lsst.org diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 3259dc1705..c6598cc449 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -12,8 +12,11 @@ applications: alert-stream-broker: true datalinker: true + exposurelog: true + jira-data-proxy: true livetap: true mobu: true + narrativelog: true noteburst: true nublado: true obsloctap: true @@ -22,7 +25,10 @@ applications: postgres: true rubintv: true sasquatch: true + schedview-prenight: true + schedview-snapshot: true semaphore: true + siav2: true ssotap: true squareone: true strimzi: true diff --git a/environments/values-usdfint.yaml b/environments/values-usdfint.yaml new file mode 100644 index 0000000000..d0a1c79075 --- /dev/null +++ b/environments/values-usdfint.yaml @@ -0,0 +1,25 @@ +butlerRepositoryIndex: "s3://rubin-summit-users/data-repos.yaml" +fqdn: usdf-rsp-int.slac.stanford.edu +name: usdfint +vaultUrl: "https://vault.slac.stanford.edu" +vaultPathPrefix: secret/rubin/usdf-rsp-int + +applications: + # This environment uses an ingress managed in a separate Kubernetes cluster, + # despite that configuration not being officially supported by Phalanx. + cert-manager: false + ingress-nginx: false + + datalinker: true + livetap: true + mobu: true + nublado: true + plot-navigator: true + portal: true + postgres: true + sasquatch: true + semaphore: true + ssotap: true + squareone: true + strimzi: true + tap: true diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index c4919839d4..324d4682ce 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -10,18 +10,18 @@ applications: cert-manager: false ingress-nginx: false - cachemachine: true datalinker: true + exposurelog: true livetap: true mobu: true - moneypenny: true + narrativelog: true nublado: true - nublado2: true plot-navigator: true portal: true postgres: true sasquatch: true semaphore: true + siav2: true ssotap: true squareone: true strimzi: true diff --git a/environments/values.yaml b/environments/values.yaml index 4b6fe495f8..63b0540cc8 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -12,9 +12,6 @@ name: "" # @default -- None, must be set fqdn: "" -# -- UUID of the 1Password item in which to find Vault tokens -onepasswordUuid: "dg5afgiadsffeklfr6jykqymeu" - # -- URL of the repository for all applications repoUrl: https://github.com/lsst-sqre/phalanx.git @@ -22,8 +19,7 @@ repoUrl: https://github.com/lsst-sqre/phalanx.git targetRevision: "main" # -- URL of Vault server for this environment -# @default -- None, must be set -vaultUrl: "" +vaultUrl: "https://vault.lsst.codes/" # -- Prefix for Vault secrets for this environment # @default -- None, must be set @@ -40,19 +36,34 @@ applications: # environments and is present here only because it makes parsing easier argocd: true - # -- Enable the cachemachine application (required by nublado2) - cachemachine: false + # -- Enable the auxtel control system application + auxtel: false + + # -- Enable the butler application + butler: false + + # -- Enable the calsys control system application + calsys: false # -- Enable the cert-manager application, required unless the environment # makes separate arrangements to inject a current TLS certificate cert-manager: true + # -- Enable the control-system-test application + control-system-test: 
false + # -- Enable the datalinker application datalinker: false + # -- Enable the eas control system application + eas: false + # -- Enable the exposurelog application exposurelog: false + # -- Enable the filestore-backup application + filestore-backup: false # -- Enable the Gafaelfawr application. This is required by Phalanx since # most other applications use `GafaelfawrIngress` gafaelfawr: true @@ -69,6 +80,9 @@ # cluster. ingress-nginx: true + # -- Enable the jira-data-proxy application + jira-data-proxy: false + # -- Enable the kubernetes-replicator application kubernetes-replicator: false @@ -78,12 +92,12 @@ # -- Enable the livetap application livetap: false + # -- Enable the love control system application + love: false + # -- Enable the mobu application mobu: false - # -- Enable the moneypenny application (required by nublado2) - moneypenny: false - # -- Enable the monitoring application monitoring: false @@ -99,12 +113,11 @@ # -- Enable the nublado application (v3 of the Notebook Aspect) nublado: false - # -- Enable the nublado2 application (v2 of the Notebook Aspect, now - deprecated). This should not be used for new environments. - nublado2: false + # -- Enable the obssys control system application + obssys: false - # -- Enable the onepassword-connect-dev application - onepassword-connect-dev: false + # -- Enable the onepassword-connect application + onepassword-connect: false # -- Enable the ook application ook: false @@ -129,6 +142,18 @@ # -- Enable the sasquatch application sasquatch: false + # -- Enable the schedview-prenight application + schedview-prenight: false + + # -- Enable the schedview-snapshot application + schedview-snapshot: false + + # -- Enable the siav2 application + siav2: false + + # -- Enable the simonyitel control system application + simonyitel: false + # -- Enable the ssotap application ssotap: false @@ -180,9 +205,43 @@ # -- Enable the times-square application times-square: false + # -- Enable the uws application. This includes the dmocps control system + # application. + uws: false + # -- Enable the vault-secrets-operator application. This is required for all # environments. 
vault-secrets-operator: true # -- Enable the vo-cutouts application vo-cutouts: false + +# The following settings are used for the control system +controlSystem: + # -- Application namespace for the control system deployment + # @default -- None, must be set + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- None, must be set + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- None, must be set + siteTag: "" + + # -- Topic name tag for the control system deployment + topicName: sal + + # -- Kafka broker address for the control system deployment + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + + # -- Kafka topic replication factor for control system topics + kafkaTopicReplicationFactor: 3 + + # -- Schema registry URL for the control system deployment + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- None, must be set + s3EndpointUrl: "" diff --git a/installer/generate_secrets.py b/installer/generate_secrets.py deleted file mode 100755 index df5b407e17..0000000000 --- a/installer/generate_secrets.py +++ /dev/null @@ -1,564 +0,0 @@ -#!/usr/bin/env python3 -import argparse -import base64 -import json -import logging -import os -import secrets -from collections import defaultdict -from datetime import UTC, datetime -from pathlib import Path - -import bcrypt -from cryptography.fernet import Fernet -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization -from cryptography.hazmat.primitives.asymmetric import rsa -from onepasswordconnectsdk.client import new_client_from_environment - - -class SecretGenerator: - """A basic secret generator that manages a secrets directory containing - per-component secret export files from Vault, as generated by - read_secrets.sh. - - Parameters - ---------- - environment : str - The name of the environment (the environment's domain name). - regenerate : bool - If `True`, any secrets that can be generated by the SecretGenerator - will be regenerated. - """ - - def __init__(self, environment, regenerate) -> None: - self.secrets = defaultdict(dict) - self.environment = environment - self.regenerate = regenerate - - def generate(self): - """Generate secrets for each component based on the `secrets` - attribute, and regenerating secrets if applicable when the - `regenerate` attribute is `True`. - """ - self._pull_secret() - self._rsp_alerts() - self._butler_secret() - self._argo_sso_secret() - self._postgres() - self._tap() - self._nublado() - self._nublado2() - self._mobu() - self._gafaelfawr() - self._argocd() - self._portal() - self._vo_cutouts() - self._telegraf() - self._sherlock() - - self.input_field("cert-manager", "enabled", "Use cert-manager? (y/n):") - use_cert_manager = self.secrets["cert-manager"]["enabled"] - if use_cert_manager == "y": - self._cert_manager() - elif use_cert_manager == "n": - self._ingress_nginx() - else: - raise Exception( - f"Invalid cert manager enabled value {use_cert_manager}" - ) - - def load(self): - """Load the secrets files for each RSP component from the - ``secrets`` directory. - - This method parses the JSON files and persists them in the ``secrets`` - attribute, keyed by the component name. 
- """ - if Path("secrets").is_dir(): - for f in Path("secrets").iterdir(): - print(f"Loading {f}") - component = os.path.basename(f) - self.secrets[component] = json.loads(f.read_text()) - - def save(self): - """For each component, save a secret JSON file into the secrets - directory. - """ - os.makedirs("secrets", exist_ok=True) - - for k, v in self.secrets.items(): - with open(f"secrets/{k}", "w") as f: - f.write(json.dumps(v)) - - def input_field(self, component, name, description): - default = self.secrets[component].get(name, "") - prompt_string = ( - f"[{component} {name}] ({description}): [current: {default}] " - ) - input_string = input(prompt_string) - - if input_string: - self.secrets[component][name] = input_string - - def input_file(self, component, name, description): - current = self.secrets.get(component, {}).get(name, "") - print(f"[{component} {name}] ({description})") - print(f"Current contents:\n{current}") - prompt_string = "New filename with contents (empty to not change): " - fname = input(prompt_string) - - if fname: - with open(fname) as f: - self.secrets[component][name] = f.read() - - @staticmethod - def _generate_gafaelfawr_token() -> str: - key = base64.urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") - secret = base64.urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") - return f"gt-{key}.{secret}" - - def _get_current(self, component, name): - if not self._exists(component, name): - return None - - return self.secrets[component][name] - - def _set(self, component, name, new_value): - self.secrets[component][name] = new_value - - def _exists(self, component, name): - return component in self.secrets and name in self.secrets[component] - - def _set_generated(self, component, name, new_value): - if not self._exists(component, name) or self.regenerate: - self._set(component, name, new_value) - - def _tap(self): - self.input_file( - "tap", - "google_creds.json", - "file containing google service account credentials", - ) - - def _postgres(self): - self._set_generated( - "postgres", "exposurelog_password", secrets.token_hex(32) - ) - self._set_generated( - "postgres", "gafaelfawr_password", secrets.token_hex(32) - ) - self._set_generated( - "postgres", "jupyterhub_password", secrets.token_hex(32) - ) - self._set_generated("postgres", "root_password", secrets.token_hex(64)) - self._set_generated( - "postgres", "vo_cutouts_password", secrets.token_hex(32) - ) - self._set_generated( - "postgres", "narrativelog_password", secrets.token_hex(32) - ) - - def _nublado(self): - self._set_generated("nublado", "crypto_key", secrets.token_hex(32)) - self._set_generated("nublado", "proxy_token", secrets.token_hex(32)) - self._set_generated( - "nublado", "cryptkeeper_key", secrets.token_hex(32) - ) - - # Pluck the password out of the postgres portion. - db_password = self.secrets["postgres"]["jupyterhub_password"] - self.secrets["nublado"]["hub_db_password"] = db_password - - slack_webhook = self._get_current("rsp-alerts", "slack-webhook") - if slack_webhook: - self._set("nublado", "slack_webhook", slack_webhook) - - # Grab lab secrets from the Butler secret. 
- butler = self.secrets["butler-secret"].copy() - self.secrets["nublado-lab-secret"] = butler - - def _nublado2(self): - crypto_key = secrets.token_hex(32) - self._set_generated("nublado2", "crypto_key", crypto_key) - self._set_generated("nublado2", "proxy_token", secrets.token_hex(32)) - self._set_generated( - "nublado2", "cryptkeeper_key", secrets.token_hex(32) - ) - - # Pluck the password out of the postgres portion. - self.secrets["nublado2"]["hub_db_password"] = self.secrets["postgres"][ - "jupyterhub_password" - ] - - def _mobu(self): - self.input_field( - "mobu", - "ALERT_HOOK", - "Slack webhook for reporting mobu alerts. " - "Or use None for no alerting.", - ) - - def _cert_manager(self): - self.input_field( - "cert-manager", - "aws-secret-access-key", - "AWS secret access key for zone for DNS cert solver.", - ) - - def _gafaelfawr(self): - key = rsa.generate_private_key( - backend=default_backend(), public_exponent=65537, key_size=2048 - ) - - key_bytes = key.private_bytes( - serialization.Encoding.PEM, - serialization.PrivateFormat.PKCS8, - serialization.NoEncryption(), - ) - - self._set_generated( - "gafaelfawr", "bootstrap-token", self._generate_gafaelfawr_token() - ) - self._set_generated( - "gafaelfawr", "redis-password", os.urandom(32).hex() - ) - self._set_generated( - "gafaelfawr", "session-secret", Fernet.generate_key().decode() - ) - self._set_generated("gafaelfawr", "signing-key", key_bytes.decode()) - - self.input_field("gafaelfawr", "cloudsql", "Use CloudSQL? (y/n):") - use_cloudsql = self.secrets["gafaelfawr"]["cloudsql"] - if use_cloudsql == "y": - self.input_field( - "gafaelfawr", "database-password", "Database password" - ) - elif use_cloudsql == "n": - # Pluck the password out of the postgres portion. - db_pass = self.secrets["postgres"]["gafaelfawr_password"] - self._set("gafaelfawr", "database-password", db_pass) - else: - raise Exception( - f"Invalid gafaelfawr cloudsql value {use_cloudsql}" - ) - - self.input_field("gafaelfawr", "ldap", "Use LDAP? 
(y/n):") - use_ldap = self.secrets["gafaelfawr"]["ldap"] - if use_ldap == "y": - self.input_field("gafaelfawr", "ldap-password", "LDAP password") - - self.input_field("gafaelfawr", "auth_type", "Use cilogon or github?") - auth_type = self.secrets["gafaelfawr"]["auth_type"] - if auth_type == "cilogon": - self.input_field( - "gafaelfawr", "cilogon-client-secret", "CILogon client secret" - ) - use_ldap = self.secrets["gafaelfawr"]["ldap"] - if use_ldap == "y": - self.input_field( - "gafaelfawr", "ldap-secret", "LDAP simple bind password" - ) - elif auth_type == "github": - self.input_field( - "gafaelfawr", "github-client-secret", "GitHub client secret" - ) - elif auth_type == "oidc": - self.input_field( - "gafaelfawr", - "oidc-client-secret", - "OpenID Connect client secret", - ) - if use_ldap == "y": - self.input_field( - "gafaelfawr", "ldap-secret", "LDAP simple bind password" - ) - else: - raise Exception(f"Invalid auth provider {auth_type}") - - slack_webhook = self._get_current("rsp-alerts", "slack-webhook") - if slack_webhook: - self._set("gafaelfawr", "slack-webhook", slack_webhook) - - def _pull_secret(self): - self.input_file( - "pull-secret", - ".dockerconfigjson", - ".docker/config.json to pull images", - ) - - def _butler_secret(self): - self.input_file( - "butler-secret", - "aws-credentials.ini", - "AWS credentials for butler", - ) - self.input_file( - "butler-secret", - "butler-gcs-idf-creds.json", - "Google credentials for butler", - ) - self.input_file( - "butler-secret", - "postgres-credentials.txt", - "Postgres credentials for butler", - ) - - def _argo_sso_secret(self): - # We aren't currently using this, but might as well generate it - # against the day we do. - self._set_generated( - "argo-sso-secret", "client-id", "argo-workflows-sso" - ) - self._set_generated( - "argo-sso-secret", "client-secret", secrets.token_hex(16) - ) - - def _ingress_nginx(self): - self.input_file("ingress-nginx", "tls.key", "Certificate private key") - self.input_file("ingress-nginx", "tls.crt", "Certificate chain") - - def _argocd(self): - current_pw = self._get_current( - "installer", "argocd.admin.plaintext_password" - ) - - self.input_field( - "installer", - "argocd.admin.plaintext_password", - "Admin password for ArgoCD?", - ) - new_pw = self.secrets["installer"]["argocd.admin.plaintext_password"] - - if current_pw != new_pw or self.regenerate: - h = bcrypt.hashpw( - new_pw.encode("ascii"), bcrypt.gensalt(rounds=15) - ).decode("ascii") - now_time = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - - self._set("argocd", "admin.password", h) - self._set("argocd", "admin.passwordMtime", now_time) - - self.input_field( - "argocd", - "dex.clientSecret", - "OAuth client secret for ArgoCD (either GitHub or Google)?", - ) - - self._set_generated( - "argocd", "server.secretkey", secrets.token_hex(16) - ) - - def _telegraf(self): - self.input_field( - "telegraf", - "influx-token", - "Token for communicating with monitoring InfluxDB2 instance", - ) - self._set("telegraf", "org-id", "square") - - def _portal(self): - pw = secrets.token_hex(32) - self._set_generated("portal", "ADMIN_PASSWORD", pw) - - def _vo_cutouts(self): - self._set_generated( - "vo-cutouts", "redis-password", os.urandom(32).hex() - ) - - self.input_field("vo-cutouts", "cloudsql", "Use CloudSQL? 
(y/n):") - use_cloudsql = self.secrets["vo-cutouts"]["cloudsql"] - if use_cloudsql == "y": - self.input_field( - "vo-cutouts", "database-password", "Database password" - ) - elif use_cloudsql == "n": - # Pluck the password out of the postgres portion. - db_pass = self.secrets["postgres"]["vo_cutouts_password"] - self._set("vo-cutouts", "database-password", db_pass) - else: - raise Exception( - f"Invalid vo-cutouts cloudsql value {use_cloudsql}" - ) - - aws = self.secrets["butler-secret"]["aws-credentials.ini"] - self._set("vo-cutouts", "aws-credentials", aws) - google = self.secrets["butler-secret"]["butler-gcs-idf-creds.json"] - self._set("vo-cutouts", "google-credentials", google) - postgres = self.secrets["butler-secret"]["postgres-credentials.txt"] - self._set("vo-cutouts", "postgres-credentials", postgres) - - def _sherlock(self): - """This secret is for sherlock to push status to status.lsst.codes.""" - publish_key = secrets.token_hex(32) - self._set_generated("sherlock", "publish_key", publish_key) - - def _rsp_alerts(self): - """Shared secrets for alerting.""" - self.input_field( - "rsp-alerts", "slack-webhook", "Slack webhook for alerts" - ) - - def _narrativelog(self): - """Give narrativelog its own secret for externalization.""" - db_pass = self.secrets["postgres"]["narrativelog_password"] - self._set("narrativelog", "database-password", db_pass) - - def _exposurelog(self): - """Give exposurelog its own secret for externalization.""" - db_pass = self.secrets["postgres"]["exposurelog_password"] - self._set("exposureloglog", "database-password", db_pass) - - -class OnePasswordSecretGenerator(SecretGenerator): - """A secret generator that syncs 1Password secrets into a secrets directory - containing per-component secret export files from Vault (as generated - by read_secrets.sh). - - Parameters - ---------- - environment : str - The name of the environment (the environment's domain name). - regenerate : bool - If `True`, any secrets that can be generated by the SecretGenerator - will be regenerated. - """ - - def __init__(self, environment, regenerate) -> None: - super().__init__(environment, regenerate) - self.op_secrets = {} - self.op = new_client_from_environment() - self.parse_vault() - - def parse_vault(self): - """Parse the 1Password vault and store secrets applicable to this - environment in the `op_secrets` attribute. - - This method is called automatically when initializing a - `OnePasswordSecretGenerator`. 
- """ - vault = self.op.get_vault_by_title("RSP-Vault") - items = self.op.get_items(vault.id) - - for item_summary in items: - key = None - secret_notes = None - secret_password = None - environments = [] - item = self.op.get_item(item_summary.id, vault.id) - - logging.debug(f"Looking at {item.id}") - - for field in item.fields: - if field.label == "generate_secrets_key": - if key is None: - key = field.value - else: - msg = "Found two generate_secrets_keys for {key}" - raise Exception(msg) - elif field.label == "environment": - environments.append(field.value) - elif field.label == "notesPlain": - secret_notes = field.value - elif field.purpose == "PASSWORD": - secret_password = field.value - - if not key: - continue - - secret_value = secret_notes or secret_password - - if not secret_value: - logging.error("No value found for %s", item.title) - continue - - logging.debug("Environments are %s for %s", environments, item.id) - - if self.environment in environments: - self.op_secrets[key] = secret_value - logging.debug("Storing %s (matching environment)", item.id) - elif not environments and key not in self.op_secrets: - self.op_secrets[key] = secret_value - logging.debug("Storing %s (applicable to all envs)", item.id) - else: - logging.debug("Ignoring %s", item.id) - - def input_field(self, component, name, description): - """Query for a secret's value from 1Password (`op_secrets` attribute). - - This method overrides `SecretGenerator.input_field`, which prompts - a user interactively. - """ - key = f"{component} {name}" - if key not in self.op_secrets: - raise Exception(f"Did not find entry in 1Password for {key}") - - self.secrets[component][name] = self.op_secrets[key] - - def input_file(self, component, name, description): - """Query for a secret file from 1Password (`op_secrets` attribute). - - This method overrides `SecretGenerator.input_file`, which prompts - a user interactively. - """ - return self.input_field(component, name, description) - - def generate(self): - """Generate secrets, updating the `secrets` attribute. - - This method first runs `SecretGenerator.generate`, and then - automatically generates secrets for any additional components - that were identified in 1Password. - - If a secret appears already, it is overridden with the value in - 1Password. 
- """ - super().generate() - - for composite_key, _secret_value in self.op_secrets.items(): - item_component, item_name = composite_key.split() - # Special case for components that may not be present in every - # environment, but nonetheless might be 1Password secrets (see - # conditional in SecretGenerator.generate) - if item_component in {"ingress-nginx", "cert-manager"}: - continue - - logging.debug( - "Updating component: %s/%s", item_component, item_name - ) - self.input_field(item_component, item_name, "") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="generate_secrets") - parser.add_argument( - "--op", - default=False, - action="store_true", - help="Load secrets from 1Password", - ) - parser.add_argument( - "--verbose", default=False, action="store_true", help="Verbose logging" - ) - parser.add_argument( - "--regenerate", - default=False, - action="store_true", - help="Regenerate random secrets", - ) - parser.add_argument("environment", help="Environment to generate") - args = parser.parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig() - - if args.op: - sg = OnePasswordSecretGenerator(args.environment, args.regenerate) - else: - sg = SecretGenerator(args.environment, args.regenerate) - - sg.load() - sg.generate() - sg.save() diff --git a/installer/install.sh b/installer/install.sh index 8919cef2c1..d3cbe8ee90 100755 --- a/installer/install.sh +++ b/installer/install.sh @@ -1,36 +1,39 @@ #!/bin/bash -e -USAGE="Usage: ./install.sh ENVIRONMENT VAULT_TOKEN [VAULT_TOKEN_LEASE_DURATION]" +USAGE="Usage: ./install.sh ENVIRONMENT VAULT_ROLE_ID VAULT_SECRET_ID" ENVIRONMENT=${1:?$USAGE} -export VAULT_TOKEN=${2:?$USAGE} -export VAULT_TOKEN_LEASE_DURATION=${4:-31536000} -export VAULT_ADDR=`yq -r .vaultUrl ../environments/values-$ENVIRONMENT.yaml` -VAULT_PATH_PREFIX=`yq -r .vaultPathPrefix ../environments/values-$ENVIRONMENT.yaml` -ARGOCD_PASSWORD=`vault kv get --field=argocd.admin.plaintext_password $VAULT_PATH_PREFIX/installer` +config="../environments/values-${ENVIRONMENT}.yaml" +VAULT_ROLE_ID=${2:?$USAGE} +VAULT_SECRET_ID=${3:?$USAGE} -GIT_URL=`git config --get remote.origin.url` +echo "Getting Git branch and remote information..." +GIT_URL=$(git config --get remote.origin.url) # Github runs in a detached head state, but sets GITHUB_REF, # extract the branch from it. If we're there, use that branch. # git branch --show-current will return empty in deatached head. -GIT_BRANCH=${GITHUB_HEAD_REF:-`git branch --show-current`} +GIT_BRANCH=${GITHUB_HEAD_REF:-$(git branch --show-current)} -echo "Set VAULT_TOKEN in a secret for vault-secrets-operator..." +echo "Logging on to Vault..." +if grep '^vaultUrl:' "$config" >/dev/null; then + export VAULT_ADDR=$(yq -r .vaultUrl "$config") +else + export VAULT_ADDR=$(yq -r .vaultUrl ../environments/values.yaml) +fi +export VAULT_TOKEN=$(vault write auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID" | grep 'token ' | awk '{ print $2 }') +VAULT_PATH_PREFIX=$(yq -r .vaultPathPrefix "$config") +ARGOCD_PASSWORD=$(vault kv get --field=admin.plaintext_password $VAULT_PATH_PREFIX/argocd) + +echo "Putting Vault credentials in a secret for vault-secrets-operator..." # The namespace may not exist already, but don't error if it does. 
+ +echo "Putting Vault credentials in a secret for vault-secrets-operator..." # The namespace may not exist already, but don't error if it does. kubectl create ns vault-secrets-operator || true -kubectl create secret generic vault-secrets-operator \ +kubectl create secret generic vault-credentials \ --namespace vault-secrets-operator \ - --from-literal=VAULT_TOKEN=$VAULT_TOKEN \ - --from-literal=VAULT_TOKEN_LEASE_DURATION=$VAULT_TOKEN_LEASE_DURATION \ + --from-literal=VAULT_ROLE_ID=$VAULT_ROLE_ID \ + --from-literal=VAULT_SECRET_ID=$VAULT_SECRET_ID \ --dry-run=client -o yaml | kubectl apply -f - -echo "Set up docker pull secret for vault-secrets-operator..." -vault kv get --field=.dockerconfigjson $VAULT_PATH_PREFIX/pull-secret > docker-creds -kubectl create secret generic pull-secret -n vault-secrets-operator \ - --from-file=.dockerconfigjson=docker-creds \ - --type=kubernetes.io/dockerconfigjson \ - --dry-run=client -o yaml | kubectl apply -f - - -echo "Update / install vault-secrets-operator..." -# ArgoCD depends on pull-secret, which depends on vault-secrets-operator. +# Argo CD depends on a Vault-created secret for its credentials, so +# vault-secrets-operator has to be installed first. +echo "Updating or installing vault-secrets-operator..." helm dependency update ../applications/vault-secrets-operator helm upgrade vault-secrets-operator ../applications/vault-secrets-operator \ --install \ @@ -42,7 +45,7 @@ helm upgrade vault-secrets-operator ../applications/vault-secrets-operator \ --timeout 5m \ --wait -echo "Update / install argocd using helm..." +echo "Updating or installing Argo CD using Helm..." helm dependency update ../applications/argocd helm upgrade argocd ../applications/argocd \ --install \ @@ -54,7 +57,7 @@ helm upgrade argocd ../applications/argocd \ --timeout 5m \ --wait -echo "Login to argocd..." +echo "Logging in to Argo CD..." argocd login \ --plaintext \ --port-forward \ @@ -62,7 +65,7 @@ argocd login \ --username admin \ --password $ARGOCD_PASSWORD -echo "Creating top level application" +echo "Creating the top-level Argo CD application..." argocd app create science-platform \ --repo $GIT_URL \ --path environments --dest-namespace default \ @@ -76,52 +79,50 @@ argocd app create science-platform \ --values values.yaml \ --values values-$ENVIRONMENT.yaml +echo "Syncing the top-level Argo CD application..." argocd app sync science-platform \ --port-forward \ --port-forward-namespace argocd -echo "Syncing critical early applications" -if [ $(yq -r '.applications."ingress-nginx"' ../environments/values-$ENVIRONMENT.yaml) != "false" ]; -then +if [ $(yq -r '.applications."ingress-nginx"' "$config") != "false" ]; then echo "Syncing ingress-nginx..." argocd app sync ingress-nginx \ --port-forward \ --port-forward-namespace argocd fi -# Wait for the cert-manager's webhook to finish deploying by running -# kubectl, argocd's sync doesn't seem to wait for this to finish. -if [ $(yq -r '.applications."cert-manager"' ../environments/values-$ENVIRONMENT.yaml) != "false" ]; -then +if [ $(yq -r '.applications."cert-manager"' "$config") != "false" ]; then echo "Syncing cert-manager..." argocd app sync cert-manager \ --port-forward \ --port-forward-namespace argocd && \ - kubectl -n cert-manager rollout status deploy/cert-manager-webhook + + # Wait for the cert-manager's webhook to finish deploying by running + # kubectl, argocd's sync doesn't seem to wait for this to finish.
+ kubectl -n cert-manager rollout status deploy/cert-manager-webhook fi -if [ $(yq -r .applications.postgres ../environments/values-$ENVIRONMENT.yaml) == "true" ]; -then +if [ $(yq -r .applications.postgres "$config") == "true" ]; then echo "Syncing postgres..." argocd app sync postgres \ --port-forward \ --port-forward-namespace argocd fi -if [ $(yq -r .applications.gafaelfawr ../environments/values-$ENVIRONMENT.yaml) != "false" ]; -then +if [ $(yq -r .applications.gafaelfawr "$config") != "false" ]; then echo "Syncing gafaelfawr..." argocd app sync gafaelfawr \ --port-forward \ --port-forward-namespace argocd fi -echo "Sync remaining science platform apps" +echo "Syncing remaining applications..." argocd app sync -l "argocd.argoproj.io/instance=science-platform" \ --port-forward \ --port-forward-namespace argocd +echo '' echo "You can now check on your argo cd installation by running:" echo "kubectl port-forward service/argocd-server -n argocd 8080:443" echo "For the ArgoCD admin password:" -echo "vault kv get --field=argocd.admin.plaintext_password $VAULT_PATH_PREFIX/installer" +echo "vault kv get --field=admin.plaintext_password $VAULT_PATH_PREFIX/argocd" diff --git a/installer/read_secrets.sh b/installer/read_secrets.sh deleted file mode 100755 index 3a5e2f3a2e..0000000000 --- a/installer/read_secrets.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -e - -ENVIRONMENT=${1:?"Usage: read_secrets.sh ENVIRONMENT"} - -mkdir -p secrets - -COMPONENTS=`vault kv list --format=yaml secret/k8s_operator/$ENVIRONMENT | yq -r '.[]'` -for SECRET in $COMPONENTS -do - if [ $SECRET != "efd/" ] && [ $SECRET != "ts/" ]; then - vault kv get --field=data --format=json secret/k8s_operator/$ENVIRONMENT/$SECRET > secrets/$SECRET - fi -done diff --git a/installer/requirements.txt b/installer/requirements.txt deleted file mode 100644 index 73e8efa191..0000000000 --- a/installer/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -bcrypt -cryptography -onepasswordconnectsdk -pyyaml -yq diff --git a/installer/update_all_secrets.sh b/installer/update_all_secrets.sh deleted file mode 100755 index 65a8f5abd5..0000000000 --- a/installer/update_all_secrets.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -ex -./update_secrets.sh minikube.lsst.codes -./update_secrets.sh base-lsp.lsst.codes -./update_secrets.sh summit-lsp.lsst.codes -./update_secrets.sh tucson-teststand.lsst.codes -./update_secrets.sh data.lsst.cloud -./update_secrets.sh data-int.lsst.cloud -./update_secrets.sh data-dev.lsst.cloud -./update_secrets.sh roe -./update_secrets.sh roundtable-dev.lsst.cloud -./update_secrets.sh roundtable.lsst.cloud diff --git a/installer/update_secrets.sh b/installer/update_secrets.sh deleted file mode 100755 index 3f02056d7c..0000000000 --- a/installer/update_secrets.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -e -ENVIRONMENT=$1 - -export OP_CONNECT_HOST=https://roundtable.lsst.codes/1password -export VAULT_DOC_UUID=`yq -r .onepasswordUuid ../environments/values.yaml` -export VAULT_ADDR=https://vault.lsst.codes -export VAULT_TOKEN=`./vault_key.py $ENVIRONMENT write` - -if [ -z "$OP_CONNECT_TOKEN" ]; then - echo 'OP_CONNECT_TOKEN must be set to a 1Password Connect token' >&2 - exit 1 -fi - -echo "Clear out any existing secrets" -rm -rf secrets - -echo "Reading current secrets from vault" -./read_secrets.sh $ENVIRONMENT - -echo "Generating missing secrets with values from 1Password" -./generate_secrets.py $ENVIRONMENT --op - -echo "Writing secrets to vault" -./write_secrets.sh $ENVIRONMENT diff --git a/installer/vault_key.py 
b/installer/vault_key.py deleted file mode 100755 index 6e60759ae8..0000000000 --- a/installer/vault_key.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python3 -import argparse -import json -import os - -from onepasswordconnectsdk import new_client_from_environment - - -class VaultKeyRetriever: - def __init__(self) -> None: - self.op = new_client_from_environment() - vault_keys = self.op.get_item( - os.environ["VAULT_DOC_UUID"], "RSP-Vault" - ) - for field in vault_keys.fields: - if field.label == "notesPlain": - vault_keys_json = field.value - break - self.vault_keys = json.loads(vault_keys_json) - - def retrieve_key(self, environment, key_type): - env_key = f"k8s_operator/{environment}" - for e in self.vault_keys: - if env_key in e: - return e[env_key][key_type]["id"] - return None - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="fetch the vault key for an environment" - ) - parser.add_argument( - "environment", help="Environment name to retrieve key for" - ) - parser.add_argument( - "key_type", choices=["read", "write"], help="Which key to retrieve" - ) - args = parser.parse_args() - - vkr = VaultKeyRetriever() - print(vkr.retrieve_key(args.environment, args.key_type)) diff --git a/installer/write_secrets.sh b/installer/write_secrets.sh deleted file mode 100755 index 6497dd6c42..0000000000 --- a/installer/write_secrets.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -x - -ENVIRONMENT=${1:?"Usage: write_secrets.sh ENVIRONMENT"} - -# This is a bit tricky. This makes the path different for -# $SECRET, which ends up getting passed into vault and making -# the keys. -cd secrets - -for SECRET in * -do - vault kv put secret/k8s_operator/$ENVIRONMENT/$SECRET @$SECRET -done diff --git a/pyproject.toml b/pyproject.toml index eb71c49f1a..9b7834e8eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,13 +15,14 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Intended Audience :: Developers", "Operating System :: POSIX", ] requires-python = ">=3.11" [project.scripts] -expand-charts = "phalanx.testing.expandcharts:main" phalanx = "phalanx.cli:main" [project.urls] @@ -139,16 +140,21 @@ ignore = [ "D104", # don't see the point of documenting every package "D105", # our style doesn't require docstrings for magic methods "D106", # Pydantic uses a nested Config class that doesn't warrant docs + "D205", # our documentation style allows a folded first line "EM101", # justification (duplicate string in traceback) is silly "EM102", # justification (duplicate string in traceback) is silly "FBT003", # positional booleans are normal for Pydantic field defaults + "FIX002", # point of a TODO comment is that we're not ready to fix it "G004", # forbidding logging f-strings is appealing, but not our style "PD011", # false positive with non-NumPY code that uses .values + "RET505", # disagree that omitting else always makes code more readable + "PLR0911", # often many returns is clearer and simpler style "PLR0913", # factory pattern uses constructors with many arguments "PLR2004", # too aggressive about magic values - "RET505", # disagree that omitting else always makes code more readable + "PLW0603", # yes global is discouraged but if needed, it's needed "S105", # good idea but too many false positives on non-passwords "S106", # good idea but too many false positives on non-passwords + "S107", # good idea 
but too many false positives on non-passwords "S603", # impossible to write subprocess code without triggering this "S607", # searching for executables on PATH is often correct "SIM102", # sometimes the formatting of nested if statements is clearer @@ -159,14 +165,32 @@ ignore = [ "TCH003", # we decided to not maintain separate TYPE_CHECKING blocks "TID252", # if we're going to use relative imports, use them always "TRY003", # good general advice but lint is way too aggressive - - # Phalanx-specific exclusions. - "T201", # print makes sense to use because Phalanx is interactive + "TRY301", # sometimes raising exceptions inside try is the best flow + + # The following settings should be disabled when using ruff format + # per https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules + "W191", + "E111", + "E114", + "E117", + "D206", + "D300", + "Q000", + "Q001", + "Q002", + "Q003", + "COM812", + "COM819", + "ISC001", + "ISC002", ] select = ["ALL"] -target-version = "py311" +target-version = "py312" [tool.ruff.per-file-ignores] +"src/phalanx/**" = [ + "T201", # print makes sense to use because Phalanx is interactive +] "tests/**" = [ "D103", # tests don't need docstrings "PLR0915", # tests are allowed to be long, sometimes that's convenient @@ -204,11 +228,5 @@ builtins-ignorelist = [ fixture-parentheses = false mark-parentheses = false -[tool.ruff.pep8-naming] -classmethod-decorators = [ - "pydantic.root_validator", - "pydantic.validator", -] - [tool.ruff.pydocstyle] convention = "numpy" diff --git a/requirements/dev.in b/requirements/dev.in index 3571b429c5..6a1b0b9aa5 100644 --- a/requirements/dev.in +++ b/requirements/dev.in @@ -13,6 +13,7 @@ mypy pre-commit pytest pytest-cov +pytest-pretty ruff types-PyYAML diff --git a/requirements/dev.txt b/requirements/dev.txt index 54f1f1b7cb..089700b15f 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -4,19 +4,19 @@ # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/dev.txt requirements/dev.in # -alabaster==0.7.13 \ - --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \ - --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2 +alabaster==0.7.16 \ + --hash=sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65 \ + --hash=sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92 # via sphinx -annotated-types==0.5.0 \ - --hash=sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802 \ - --hash=sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd +annotated-types==0.6.0 \ + --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ + --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d # via # -c requirements/main.txt # pydantic -attrs==23.1.0 \ - --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ - --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 +attrs==23.2.0 \ + --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ + --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 # via # jsonschema # referencing @@ -24,17 +24,17 @@ autodoc-pydantic==2.0.1 \ --hash=sha256:7a125a4ff18e4903e27be71e4ddb3269380860eacab4a584d6cc2e212fa96991 \ --hash=sha256:d3c302fdb6d37edb5b721f0f540252fa79cea7018bc1a9a85bf70f33a68b0ce4 # via -r requirements/dev.in -babel==2.12.1 \ - --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \ - 
--hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455 +babel==2.14.0 \ + --hash=sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363 \ + --hash=sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287 # via sphinx -beautifulsoup4==4.12.2 \ - --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ - --hash=sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a +beautifulsoup4==4.12.3 \ + --hash=sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051 \ + --hash=sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed # via pydata-sphinx-theme -certifi==2023.7.22 \ - --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ - --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 +certifi==2023.11.17 \ + --hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ + --hash=sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474 # via # -c requirements/main.txt # requests @@ -42,82 +42,97 @@ cfgv==3.4.0 \ --hash=sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9 \ --hash=sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560 # via pre-commit -charset-normalizer==3.2.0 \ - --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \ - --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \ - --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \ - --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \ - --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \ - --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \ - --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \ - --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \ - --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \ - --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \ - --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \ - --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \ - --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \ - --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \ - --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \ - --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \ - --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \ - --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \ - --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \ - --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \ - --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \ - --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \ - --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \ - --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \ - --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \ - --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \ - 
--hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \ - --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \ - --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \ - --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \ - --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \ - --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \ - --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \ - --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \ - --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \ - --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \ - --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \ - --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \ - --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \ - --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \ - --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \ - --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \ - --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \ - --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \ - --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \ - --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \ - --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \ - --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \ - --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \ - --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \ - --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \ - --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \ - --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \ - --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \ - --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \ - --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \ - --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \ - --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \ - --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \ - --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \ - --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \ - --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \ - --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \ - --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \ - --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \ - --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \ - --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \ - --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \ - 
--hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \ - --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \ - --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \ - --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \ - --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \ - --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \ - --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa +charset-normalizer==3.3.2 \ + --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ + --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ + --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ + --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ + --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ + --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ + --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ + --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ + --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ + --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ + --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ + --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ + --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ + --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ + --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ + --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ + --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ + --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ + --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ + --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ + --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ + --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ + --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ + --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ + --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ + --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ + --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ + --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ + --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ + --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ + --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ + --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ + --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ + --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ + --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ + 
--hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ + --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ + --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ + --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ + --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ + --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ + --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ + --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ + --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ + --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ + --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ + --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ + --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ + --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ + --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ + --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ + --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ + --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ + --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ + --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ + --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ + --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ + --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ + --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ + --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ + --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ + --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ + --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ + --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ + --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ + --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ + --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ + --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ + --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ + --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ + --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ + --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ + --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ + --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ + --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ + --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ + --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ + 
--hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ + --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ + --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ + --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ + --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ + --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ + --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ + --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ + --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ + --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ + --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ + --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ + --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 # via # -c requirements/main.txt # requests @@ -128,192 +143,102 @@ click==8.1.7 \ # -c requirements/main.txt # documenteer # sphinx-click -contourpy==1.1.1 \ - --hash=sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6 \ - --hash=sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33 \ - --hash=sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8 \ - --hash=sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d \ - --hash=sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d \ - --hash=sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c \ - --hash=sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf \ - --hash=sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e \ - --hash=sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e \ - --hash=sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163 \ - --hash=sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532 \ - --hash=sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2 \ - --hash=sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8 \ - --hash=sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1 \ - --hash=sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b \ - --hash=sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9 \ - --hash=sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916 \ - --hash=sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23 \ - --hash=sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb \ - --hash=sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a \ - --hash=sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e \ - --hash=sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442 \ - --hash=sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684 \ - --hash=sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34 \ - --hash=sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d \ - --hash=sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d \ - --hash=sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9 \ - 
--hash=sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45 \ - --hash=sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718 \ - --hash=sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab \ - --hash=sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3 \ - --hash=sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae \ - --hash=sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb \ - --hash=sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5 \ - --hash=sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba \ - --hash=sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0 \ - --hash=sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217 \ - --hash=sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887 \ - --hash=sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887 \ - --hash=sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62 \ - --hash=sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431 \ - --hash=sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b \ - --hash=sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce \ - --hash=sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b \ - --hash=sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f \ - --hash=sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85 \ - --hash=sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e \ - --hash=sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7 \ - --hash=sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251 \ - --hash=sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970 \ - --hash=sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0 \ - --hash=sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7 - # via matplotlib -coverage[toml]==7.3.1 \ - --hash=sha256:025ded371f1ca280c035d91b43252adbb04d2aea4c7105252d3cbc227f03b375 \ - --hash=sha256:04312b036580ec505f2b77cbbdfb15137d5efdfade09156961f5277149f5e344 \ - --hash=sha256:0575c37e207bb9b98b6cf72fdaaa18ac909fb3d153083400c2d48e2e6d28bd8e \ - --hash=sha256:07d156269718670d00a3b06db2288b48527fc5f36859425ff7cec07c6b367745 \ - --hash=sha256:1f111a7d85658ea52ffad7084088277135ec5f368457275fc57f11cebb15607f \ - --hash=sha256:220eb51f5fb38dfdb7e5d54284ca4d0cd70ddac047d750111a68ab1798945194 \ - --hash=sha256:229c0dd2ccf956bf5aeede7e3131ca48b65beacde2029f0361b54bf93d36f45a \ - --hash=sha256:245c5a99254e83875c7fed8b8b2536f040997a9b76ac4c1da5bff398c06e860f \ - --hash=sha256:2829c65c8faaf55b868ed7af3c7477b76b1c6ebeee99a28f59a2cb5907a45760 \ - --hash=sha256:4aba512a15a3e1e4fdbfed2f5392ec221434a614cc68100ca99dcad7af29f3f8 \ - --hash=sha256:4c96dd7798d83b960afc6c1feb9e5af537fc4908852ef025600374ff1a017392 \ - --hash=sha256:50dd1e2dd13dbbd856ffef69196781edff26c800a74f070d3b3e3389cab2600d \ - --hash=sha256:5289490dd1c3bb86de4730a92261ae66ea8d44b79ed3cc26464f4c2cde581fbc \ - --hash=sha256:53669b79f3d599da95a0afbef039ac0fadbb236532feb042c534fbb81b1a4e40 \ - --hash=sha256:553d7094cb27db58ea91332e8b5681bac107e7242c23f7629ab1316ee73c4981 \ - --hash=sha256:586649ada7cf139445da386ab6f8ef00e6172f11a939fc3b2b7e7c9082052fa0 \ - --hash=sha256:5ae4c6da8b3d123500f9525b50bf0168023313963e0e2e814badf9000dd6ef92 \ - 
--hash=sha256:5b4ee7080878077af0afa7238df1b967f00dc10763f6e1b66f5cced4abebb0a3 \ - --hash=sha256:5d991e13ad2ed3aced177f524e4d670f304c8233edad3210e02c465351f785a0 \ - --hash=sha256:614f1f98b84eb256e4f35e726bfe5ca82349f8dfa576faabf8a49ca09e630086 \ - --hash=sha256:636a8ac0b044cfeccae76a36f3b18264edcc810a76a49884b96dd744613ec0b7 \ - --hash=sha256:6407424621f40205bbe6325686417e5e552f6b2dba3535dd1f90afc88a61d465 \ - --hash=sha256:6bc6f3f4692d806831c136c5acad5ccedd0262aa44c087c46b7101c77e139140 \ - --hash=sha256:6cb7fe1581deb67b782c153136541e20901aa312ceedaf1467dcb35255787952 \ - --hash=sha256:74bb470399dc1989b535cb41f5ca7ab2af561e40def22d7e188e0a445e7639e3 \ - --hash=sha256:75c8f0df9dfd8ff745bccff75867d63ef336e57cc22b2908ee725cc552689ec8 \ - --hash=sha256:770f143980cc16eb601ccfd571846e89a5fe4c03b4193f2e485268f224ab602f \ - --hash=sha256:7eb0b188f30e41ddd659a529e385470aa6782f3b412f860ce22b2491c89b8593 \ - --hash=sha256:7eb3cd48d54b9bd0e73026dedce44773214064be93611deab0b6a43158c3d5a0 \ - --hash=sha256:87d38444efffd5b056fcc026c1e8d862191881143c3aa80bb11fcf9dca9ae204 \ - --hash=sha256:8a07b692129b8a14ad7a37941a3029c291254feb7a4237f245cfae2de78de037 \ - --hash=sha256:966f10df9b2b2115da87f50f6a248e313c72a668248be1b9060ce935c871f276 \ - --hash=sha256:a6191b3a6ad3e09b6cfd75b45c6aeeffe7e3b0ad46b268345d159b8df8d835f9 \ - --hash=sha256:aab8e9464c00da5cb9c536150b7fbcd8850d376d1151741dd0d16dfe1ba4fd26 \ - --hash=sha256:ac3c5b7e75acac31e490b7851595212ed951889918d398b7afa12736c85e13ce \ - --hash=sha256:ac9ad38204887349853d7c313f53a7b1c210ce138c73859e925bc4e5d8fc18e7 \ - --hash=sha256:b9c0c19f70d30219113b18fe07e372b244fb2a773d4afde29d5a2f7930765136 \ - --hash=sha256:c397c70cd20f6df7d2a52283857af622d5f23300c4ca8e5bd8c7a543825baa5a \ - --hash=sha256:c6601a60318f9c3945be6ea0f2a80571f4299b6801716f8a6e4846892737ebe4 \ - --hash=sha256:c6f55d38818ca9596dc9019eae19a47410d5322408140d9a0076001a3dcb938c \ - --hash=sha256:ca70466ca3a17460e8fc9cea7123c8cbef5ada4be3140a1ef8f7b63f2f37108f \ - --hash=sha256:ca833941ec701fda15414be400c3259479bfde7ae6d806b69e63b3dc423b1832 \ - --hash=sha256:cd0f7429ecfd1ff597389907045ff209c8fdb5b013d38cfa7c60728cb484b6e3 \ - --hash=sha256:cd694e19c031733e446c8024dedd12a00cda87e1c10bd7b8539a87963685e969 \ - --hash=sha256:cdd088c00c39a27cfa5329349cc763a48761fdc785879220d54eb785c8a38520 \ - --hash=sha256:de30c1aa80f30af0f6b2058a91505ea6e36d6535d437520067f525f7df123887 \ - --hash=sha256:defbbb51121189722420a208957e26e49809feafca6afeef325df66c39c4fdb3 \ - --hash=sha256:f09195dda68d94a53123883de75bb97b0e35f5f6f9f3aa5bf6e496da718f0cb6 \ - --hash=sha256:f12d8b11a54f32688b165fd1a788c408f927b0960984b899be7e4c190ae758f1 \ - --hash=sha256:f1a317fdf5c122ad642db8a97964733ab7c3cf6009e1a8ae8821089993f175ff \ - --hash=sha256:f2781fd3cabc28278dc982a352f50c81c09a1a500cc2086dc4249853ea96b981 \ - --hash=sha256:f4f456590eefb6e1b3c9ea6328c1e9fa0f1006e7481179d749b3376fc793478e +coverage[toml]==7.4.0 \ + --hash=sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca \ + --hash=sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471 \ + --hash=sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a \ + --hash=sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058 \ + --hash=sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85 \ + --hash=sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143 \ + --hash=sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446 \ + 
--hash=sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590 \ + --hash=sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a \ + --hash=sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105 \ + --hash=sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9 \ + --hash=sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a \ + --hash=sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac \ + --hash=sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25 \ + --hash=sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2 \ + --hash=sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450 \ + --hash=sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932 \ + --hash=sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba \ + --hash=sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137 \ + --hash=sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae \ + --hash=sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614 \ + --hash=sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70 \ + --hash=sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e \ + --hash=sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505 \ + --hash=sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870 \ + --hash=sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc \ + --hash=sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451 \ + --hash=sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7 \ + --hash=sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e \ + --hash=sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566 \ + --hash=sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5 \ + --hash=sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26 \ + --hash=sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2 \ + --hash=sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42 \ + --hash=sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555 \ + --hash=sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43 \ + --hash=sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed \ + --hash=sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa \ + --hash=sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516 \ + --hash=sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952 \ + --hash=sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd \ + --hash=sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09 \ + --hash=sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c \ + --hash=sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f \ + --hash=sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6 \ + --hash=sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1 \ + --hash=sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0 \ + --hash=sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e \ + --hash=sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9 \ + 
--hash=sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9 \ + --hash=sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e \ + --hash=sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06 # via # -r requirements/dev.in # pytest-cov -cycler==0.11.0 \ - --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \ - --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f - # via matplotlib -diagrams==0.23.3 \ - --hash=sha256:543c707c36a2c896dfdf8f23e993a9c7ae48bb1a667f6baf19151eb98e57a134 \ - --hash=sha256:c497094f9d3600a94bdcfb62b6daf331d2eb7f9b355246e548dae7a4b5c97be0 +diagrams==0.23.4 \ + --hash=sha256:1ba69d98fcf8d768dbddf07d2c77aba6cc95c2e6f90f37146c04c96bc6765450 \ + --hash=sha256:b7ada0b119b5189dd021b1dc1467fad3704737452bb18b1e06d05e4d1fa48ed7 # via sphinx-diagrams -distlib==0.3.7 \ - --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ - --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 +distlib==0.3.8 \ + --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ + --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -documenteer[guide]==1.0.0a8 \ - --hash=sha256:566126dd8c798b4b4a282580b372c1b814f189f0d6a9f25b9ff1aebdd3353a0f \ - --hash=sha256:a7889bfb8f246e01ce1af508aeb477312d4885f4ecc33fdd89ddbfaeacab5208 - # via -r requirements/dev.in -docutils==0.19 \ - --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ - --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc +documenteer[guide]==1.0.1 \ + --hash=sha256:0d6bf2947456fd3456d86790874d7aab24c8f33d5b69a1f4d40bfb88a714f900 \ + --hash=sha256:d581fb54b6205daec69b4515b5a71e15781dfff1c2dd1f59fa28de1d2b2d4eb9 # via + # -r requirements/dev.in + # documenteer +docutils==0.20.1 \ + --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \ + --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b + # via + # documenteer # myst-parser # pybtex-docutils # pydata-sphinx-theme # sphinx # sphinx-click # sphinx-jinja + # sphinx-prompt # sphinxcontrib-bibtex -filelock==3.12.4 \ - --hash=sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4 \ - --hash=sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c # via virtualenv -fonttools==4.42.1 \ - --hash=sha256:0eb79a2da5eb6457a6f8ab904838454accc7d4cccdaff1fd2bd3a0679ea33d64 \ - --hash=sha256:113337c2d29665839b7d90b39f99b3cac731f72a0eda9306165a305c7c31d341 \ - --hash=sha256:12a7c247d1b946829bfa2f331107a629ea77dc5391dfd34fdcd78efa61f354ca \ - --hash=sha256:179737095eb98332a2744e8f12037b2977f22948cf23ff96656928923ddf560a \ - --hash=sha256:19b7db825c8adee96fac0692e6e1ecd858cae9affb3b4812cdb9d934a898b29e \ - --hash=sha256:37983b6bdab42c501202500a2be3a572f50d4efe3237e0686ee9d5f794d76b35 \ - --hash=sha256:3a35981d90feebeaef05e46e33e6b9e5b5e618504672ca9cd0ff96b171e4bfff \ - --hash=sha256:46a0ec8adbc6ff13494eb0c9c2e643b6f009ce7320cf640de106fb614e4d4360 \ - --hash=sha256:4aa79366e442dbca6e2c8595645a3a605d9eeabdb7a094d745ed6106816bef5d \ - --hash=sha256:515607ec756d7865f23070682622c49d922901943697871fc292277cf1e71967 \ - 
--hash=sha256:53eb5091ddc8b1199330bb7b4a8a2e7995ad5d43376cadce84523d8223ef3136 \ - --hash=sha256:5d18fc642fd0ac29236ff88ecfccff229ec0386090a839dd3f1162e9a7944a40 \ - --hash=sha256:5fb289b7a815638a7613d46bcf324c9106804725b2bb8ad913c12b6958ffc4ec \ - --hash=sha256:62f481ac772fd68901573956231aea3e4b1ad87b9b1089a61613a91e2b50bb9b \ - --hash=sha256:689508b918332fb40ce117131633647731d098b1b10d092234aa959b4251add5 \ - --hash=sha256:68a02bbe020dc22ee0540e040117535f06df9358106d3775e8817d826047f3fd \ - --hash=sha256:6ed2662a3d9c832afa36405f8748c250be94ae5dfc5283d668308391f2102861 \ - --hash=sha256:7286aed4ea271df9eab8d7a9b29e507094b51397812f7ce051ecd77915a6e26b \ - --hash=sha256:7cc7d685b8eeca7ae69dc6416833fbfea61660684b7089bca666067cb2937dcf \ - --hash=sha256:8708b98c278012ad267ee8a7433baeb809948855e81922878118464b274c909d \ - --hash=sha256:9398f244e28e0596e2ee6024f808b06060109e33ed38dcc9bded452fd9bbb853 \ - --hash=sha256:9e36344e48af3e3bde867a1ca54f97c308735dd8697005c2d24a86054a114a71 \ - --hash=sha256:a398bdadb055f8de69f62b0fc70625f7cbdab436bbb31eef5816e28cab083ee8 \ - --hash=sha256:acb47f6f8680de24c1ab65ebde39dd035768e2a9b571a07c7b8da95f6c8815fd \ - --hash=sha256:be24fcb80493b2c94eae21df70017351851652a37de514de553435b256b2f249 \ - --hash=sha256:c391cd5af88aacaf41dd7cfb96eeedfad297b5899a39e12f4c2c3706d0a3329d \ - --hash=sha256:c95b0724a6deea2c8c5d3222191783ced0a2f09bd6d33f93e563f6f1a4b3b3a4 \ - --hash=sha256:c9b1ce7a45978b821a06d375b83763b27a3a5e8a2e4570b3065abad240a18760 \ - --hash=sha256:db372213d39fa33af667c2aa586a0c1235e88e9c850f5dd5c8e1f17515861868 \ - --hash=sha256:db55cbaea02a20b49fefbd8e9d62bd481aaabe1f2301dabc575acc6b358874fa \ - --hash=sha256:ed1a13a27f59d1fc1920394a7f596792e9d546c9ca5a044419dca70c37815d7c \ - --hash=sha256:f2b82f46917d8722e6b5eafeefb4fb585d23babd15d8246c664cd88a5bddd19c \ - --hash=sha256:f2f806990160d1ce42d287aa419df3ffc42dfefe60d473695fb048355fe0c6a0 \ - --hash=sha256:f720fa82a11c0f9042376fd509b5ed88dab7e3cd602eee63a1af08883b37342b - # via matplotlib -gitdb==4.0.10 \ - --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ - --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 +gitdb==4.0.11 \ + --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ + --hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b # via # -c requirements/main.txt # gitpython -gitpython==3.1.37 \ - --hash=sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33 \ - --hash=sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54 +gitpython==3.1.41 \ + --hash=sha256:c36b6634d069b3f719610175020a9aed919421c87552185b085e04fbbdb10b7c \ + --hash=sha256:ed66e624884f76df22c8e16066d567aaa5a37d5b5fa19db2c6df6f7156db9048 # via # -c requirements/main.txt # documenteer @@ -321,13 +246,13 @@ graphviz==0.20.1 \ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8 # via diagrams -identify==2.5.29 \ - --hash=sha256:24437fbf6f4d3fe6efd0eb9d67e24dd9106db99af5ceb27996a5f7895f24bf1b \ - --hash=sha256:d43d52b86b15918c137e3a74fff5224f60385cd0e9c38e99d07c257f02f151a5 +identify==2.5.33 \ + --hash=sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d \ + --hash=sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34 # via pre-commit -idna==3.4 \ - --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ - 
--hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 +idna==3.6 \ + --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ + --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f # via # -c requirements/main.txt # requests @@ -339,9 +264,9 @@ iniconfig==2.0.0 \ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 # via pytest -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +jinja2==3.1.3 \ + --hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ + --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 # via # -c requirements/main.txt # diagrams @@ -349,120 +274,14 @@ jinja2==3.1.2 \ # sphinx # sphinx-jinja # sphinxcontrib-redoc -jsonschema==4.19.1 \ - --hash=sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e \ - --hash=sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf +jsonschema==4.21.1 \ + --hash=sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f \ + --hash=sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5 # via sphinxcontrib-redoc -jsonschema-specifications==2023.7.1 \ - --hash=sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1 \ - --hash=sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb +jsonschema-specifications==2023.12.1 \ + --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \ + --hash=sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c # via jsonschema -kiwisolver==1.4.5 \ - --hash=sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf \ - --hash=sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e \ - --hash=sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af \ - --hash=sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f \ - --hash=sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046 \ - --hash=sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3 \ - --hash=sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5 \ - --hash=sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71 \ - --hash=sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee \ - --hash=sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3 \ - --hash=sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9 \ - --hash=sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b \ - --hash=sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985 \ - --hash=sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea \ - --hash=sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16 \ - --hash=sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89 \ - --hash=sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c \ - --hash=sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9 \ - --hash=sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712 \ - --hash=sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342 \ - 
--hash=sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a \ - --hash=sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958 \ - --hash=sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d \ - --hash=sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a \ - --hash=sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130 \ - --hash=sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff \ - --hash=sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898 \ - --hash=sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b \ - --hash=sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f \ - --hash=sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265 \ - --hash=sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93 \ - --hash=sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929 \ - --hash=sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635 \ - --hash=sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709 \ - --hash=sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b \ - --hash=sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb \ - --hash=sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a \ - --hash=sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920 \ - --hash=sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e \ - --hash=sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544 \ - --hash=sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45 \ - --hash=sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390 \ - --hash=sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77 \ - --hash=sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355 \ - --hash=sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff \ - --hash=sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4 \ - --hash=sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7 \ - --hash=sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20 \ - --hash=sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c \ - --hash=sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162 \ - --hash=sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228 \ - --hash=sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437 \ - --hash=sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc \ - --hash=sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a \ - --hash=sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901 \ - --hash=sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4 \ - --hash=sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770 \ - --hash=sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525 \ - --hash=sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad \ - --hash=sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a \ - --hash=sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29 \ - --hash=sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90 \ - 
--hash=sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250 \ - --hash=sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d \ - --hash=sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3 \ - --hash=sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54 \ - --hash=sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f \ - --hash=sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1 \ - --hash=sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da \ - --hash=sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238 \ - --hash=sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa \ - --hash=sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523 \ - --hash=sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0 \ - --hash=sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205 \ - --hash=sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3 \ - --hash=sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4 \ - --hash=sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac \ - --hash=sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9 \ - --hash=sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb \ - --hash=sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced \ - --hash=sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd \ - --hash=sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0 \ - --hash=sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da \ - --hash=sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18 \ - --hash=sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9 \ - --hash=sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276 \ - --hash=sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333 \ - --hash=sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b \ - --hash=sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db \ - --hash=sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126 \ - --hash=sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9 \ - --hash=sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09 \ - --hash=sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0 \ - --hash=sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec \ - --hash=sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7 \ - --hash=sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff \ - --hash=sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9 \ - --hash=sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192 \ - --hash=sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8 \ - --hash=sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d \ - --hash=sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6 \ - --hash=sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797 \ - --hash=sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892 \ - --hash=sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f - # via matplotlib latexcodec==2.0.1 \ 
--hash=sha256:2aa2551c373261cefe2ad3a8953a6d6533e68238d180eb4bb91d7964adb3fe9a \ --hash=sha256:c277a193638dc7683c4c30f6684e3db728a06efb0dc9cf346db8bd0aa6c5d271 @@ -478,100 +297,71 @@ markdown-it-py[linkify]==3.0.0 \ # documenteer # mdit-py-plugins # myst-parser -markupsafe==2.1.3 \ - --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ - --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ - --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ - --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ - --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ - --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ - --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ - --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ - --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ - --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ - --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ - --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ - --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ - --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ - --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ - --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ - --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ - --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ - --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ - --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ - --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ - --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ - --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ - --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ - --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ - --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ - --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ - --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ - --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ - --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ - --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ - --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ - --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ - --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ - --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ - --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ - --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ - --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ - --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ - 
--hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ - --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ - --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ - --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ - --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ - --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ - --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ - --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ - --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ - --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ - --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ - --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ - --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ - --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ - --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ - --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ - --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ - --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ - --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ - --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ - --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 + # rich +markupsafe==2.1.4 \ + --hash=sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69 \ + --hash=sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0 \ + --hash=sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d \ + --hash=sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec \ + --hash=sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5 \ + --hash=sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411 \ + --hash=sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3 \ + --hash=sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74 \ + --hash=sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0 \ + --hash=sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949 \ + --hash=sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d \ + --hash=sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279 \ + --hash=sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f \ + --hash=sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6 \ + --hash=sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc \ + --hash=sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e \ + --hash=sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954 \ + --hash=sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656 \ + --hash=sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc \ + --hash=sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518 \ + --hash=sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56 \ + 
--hash=sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc \ + --hash=sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa \ + --hash=sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565 \ + --hash=sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4 \ + --hash=sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb \ + --hash=sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250 \ + --hash=sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4 \ + --hash=sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959 \ + --hash=sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc \ + --hash=sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474 \ + --hash=sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863 \ + --hash=sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8 \ + --hash=sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f \ + --hash=sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2 \ + --hash=sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e \ + --hash=sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e \ + --hash=sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb \ + --hash=sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f \ + --hash=sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a \ + --hash=sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26 \ + --hash=sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d \ + --hash=sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2 \ + --hash=sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131 \ + --hash=sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789 \ + --hash=sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6 \ + --hash=sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a \ + --hash=sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858 \ + --hash=sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e \ + --hash=sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb \ + --hash=sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e \ + --hash=sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84 \ + --hash=sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7 \ + --hash=sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea \ + --hash=sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b \ + --hash=sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6 \ + --hash=sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475 \ + --hash=sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74 \ + --hash=sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a \ + --hash=sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00 # via # -c requirements/main.txt # jinja2 -matplotlib==3.8.0 \ - --hash=sha256:061ee58facb3580cd2d046a6d227fb77e9295599c5ec6ad069f06b5821ad1cfc \ - --hash=sha256:0b11f354aae62a2aa53ec5bb09946f5f06fc41793e351a04ff60223ea9162955 \ - --hash=sha256:0d5ee602ef517a89d1f2c508ca189cfc395dd0b4a08284fb1b97a78eec354644 \ - 
--hash=sha256:0e723f5b96f3cd4aad99103dc93e9e3cdc4f18afdcc76951f4857b46f8e39d2d \ - --hash=sha256:23ed11654fc83cd6cfdf6170b453e437674a050a452133a064d47f2f1371f8d3 \ - --hash=sha256:2ea6886e93401c22e534bbfd39201ce8931b75502895cfb115cbdbbe2d31f287 \ - --hash=sha256:31e793c8bd4ea268cc5d3a695c27b30650ec35238626961d73085d5e94b6ab68 \ - --hash=sha256:36eafe2128772195b373e1242df28d1b7ec6c04c15b090b8d9e335d55a323900 \ - --hash=sha256:3cc3776836d0f4f22654a7f2d2ec2004618d5cf86b7185318381f73b80fd8a2d \ - --hash=sha256:5dc945a9cb2deb7d197ba23eb4c210e591d52d77bf0ba27c35fc82dec9fa78d4 \ - --hash=sha256:5de39dc61ca35342cf409e031f70f18219f2c48380d3886c1cf5ad9f17898e06 \ - --hash=sha256:60a6e04dfd77c0d3bcfee61c3cd335fff1b917c2f303b32524cd1235e194ef99 \ - --hash=sha256:6c49a2bd6981264bddcb8c317b6bd25febcece9e2ebfcbc34e7f4c0c867c09dc \ - --hash=sha256:6f25ffb6ad972cdffa7df8e5be4b1e3cadd2f8d43fc72085feb1518006178394 \ - --hash=sha256:7b37b74f00c4cb6af908cb9a00779d97d294e89fd2145ad43f0cdc23f635760c \ - --hash=sha256:7f54b9fb87ca5acbcdd0f286021bedc162e1425fa5555ebf3b3dfc167b955ad9 \ - --hash=sha256:87df75f528020a6299f76a1d986c0ed4406e3b2bd44bc5e306e46bca7d45e53e \ - --hash=sha256:90d74a95fe055f73a6cd737beecc1b81c26f2893b7a3751d52b53ff06ca53f36 \ - --hash=sha256:a33bd3045c7452ca1fa65676d88ba940867880e13e2546abb143035fa9072a9d \ - --hash=sha256:c3499c312f5def8f362a2bf761d04fa2d452b333f3a9a3f58805273719bf20d9 \ - --hash=sha256:c4940bad88a932ddc69734274f6fb047207e008389489f2b6f77d9ca485f0e7a \ - --hash=sha256:d670b9348e712ec176de225d425f150dc8e37b13010d85233c539b547da0be39 \ - --hash=sha256:dae97fdd6996b3a25da8ee43e3fc734fff502f396801063c6b76c20b56683196 \ - --hash=sha256:dd386c80a98b5f51571b9484bf6c6976de383cd2a8cd972b6a9562d85c6d2087 \ - --hash=sha256:df8505e1c19d5c2c26aff3497a7cbd3ccfc2e97043d1e4db3e76afa399164b69 \ - --hash=sha256:eee482731c8c17d86d9ddb5194d38621f9b0f0d53c99006275a12523ab021732 \ - --hash=sha256:f691b4ef47c7384d0936b2e8ebdeb5d526c81d004ad9403dfb9d4c76b9979a93 \ - --hash=sha256:f8b5a1bf27d078453aa7b5b27f52580e16360d02df6d3dc9504f3d2ce11f6309 - # via sphinxext-opengraph mdit-py-plugins==0.4.0 \ --hash=sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9 \ --hash=sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b @@ -580,34 +370,34 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.5.1 \ - --hash=sha256:159aa9acb16086b79bbb0016145034a1a05360626046a929f84579ce1666b315 \ - --hash=sha256:258b22210a4a258ccd077426c7a181d789d1121aca6db73a83f79372f5569ae0 \ - --hash=sha256:26f71b535dfc158a71264e6dc805a9f8d2e60b67215ca0bfa26e2e1aa4d4d373 \ - --hash=sha256:26fb32e4d4afa205b24bf645eddfbb36a1e17e995c5c99d6d00edb24b693406a \ - --hash=sha256:2fc3a600f749b1008cc75e02b6fb3d4db8dbcca2d733030fe7a3b3502902f161 \ - --hash=sha256:32cb59609b0534f0bd67faebb6e022fe534bdb0e2ecab4290d683d248be1b275 \ - --hash=sha256:330857f9507c24de5c5724235e66858f8364a0693894342485e543f5b07c8693 \ - --hash=sha256:361da43c4f5a96173220eb53340ace68cda81845cd88218f8862dfb0adc8cddb \ - --hash=sha256:4a465ea2ca12804d5b34bb056be3a29dc47aea5973b892d0417c6a10a40b2d65 \ - --hash=sha256:51cb1323064b1099e177098cb939eab2da42fea5d818d40113957ec954fc85f4 \ - --hash=sha256:57b10c56016adce71fba6bc6e9fd45d8083f74361f629390c556738565af8eeb \ - --hash=sha256:596fae69f2bfcb7305808c75c00f81fe2829b6236eadda536f00610ac5ec2243 \ - 
--hash=sha256:5d627124700b92b6bbaa99f27cbe615c8ea7b3402960f6372ea7d65faf376c14 \ - --hash=sha256:6ac9c21bfe7bc9f7f1b6fae441746e6a106e48fc9de530dea29e8cd37a2c0cc4 \ - --hash=sha256:82cb6193de9bbb3844bab4c7cf80e6227d5225cc7625b068a06d005d861ad5f1 \ - --hash=sha256:8f772942d372c8cbac575be99f9cc9d9fb3bd95c8bc2de6c01411e2c84ebca8a \ - --hash=sha256:9fece120dbb041771a63eb95e4896791386fe287fefb2837258925b8326d6160 \ - --hash=sha256:a156e6390944c265eb56afa67c74c0636f10283429171018446b732f1a05af25 \ - --hash=sha256:a9ec1f695f0c25986e6f7f8778e5ce61659063268836a38c951200c57479cc12 \ - --hash=sha256:abed92d9c8f08643c7d831300b739562b0a6c9fcb028d211134fc9ab20ccad5d \ - --hash=sha256:b031b9601f1060bf1281feab89697324726ba0c0bae9d7cd7ab4b690940f0b92 \ - --hash=sha256:c543214ffdd422623e9fedd0869166c2f16affe4ba37463975043ef7d2ea8770 \ - --hash=sha256:d28ddc3e3dfeab553e743e532fb95b4e6afad51d4706dd22f28e1e5e664828d2 \ - --hash=sha256:f33592ddf9655a4894aef22d134de7393e95fcbdc2d15c1ab65828eee5c66c70 \ - --hash=sha256:f6b0e77db9ff4fda74de7df13f30016a0a663928d669c9f2c057048ba44f09bb \ - --hash=sha256:f757063a83970d67c444f6e01d9550a7402322af3557ce7630d3c957386fa8f5 \ - --hash=sha256:ff0cedc84184115202475bbb46dd99f8dcb87fe24d5d0ddfc0fe6b8575c88d2f +mypy==1.8.0 \ + --hash=sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6 \ + --hash=sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d \ + --hash=sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02 \ + --hash=sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d \ + --hash=sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3 \ + --hash=sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3 \ + --hash=sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3 \ + --hash=sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66 \ + --hash=sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259 \ + --hash=sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835 \ + --hash=sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd \ + --hash=sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d \ + --hash=sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8 \ + --hash=sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07 \ + --hash=sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b \ + --hash=sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e \ + --hash=sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6 \ + --hash=sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae \ + --hash=sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9 \ + --hash=sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d \ + --hash=sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a \ + --hash=sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592 \ + --hash=sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218 \ + --hash=sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817 \ + --hash=sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4 \ + --hash=sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410 \ + --hash=sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55 # via -r requirements/dev.in mypy-extensions==1.0.0 \ 
--hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -621,117 +411,24 @@ nodeenv==1.8.0 \ --hash=sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2 \ --hash=sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec # via pre-commit -numpy==1.26.0 \ - --hash=sha256:020cdbee66ed46b671429c7265cf00d8ac91c046901c55684954c3958525dab2 \ - --hash=sha256:0621f7daf973d34d18b4e4bafb210bbaf1ef5e0100b5fa750bd9cde84c7ac292 \ - --hash=sha256:0792824ce2f7ea0c82ed2e4fecc29bb86bee0567a080dacaf2e0a01fe7654369 \ - --hash=sha256:09aaee96c2cbdea95de76ecb8a586cb687d281c881f5f17bfc0fb7f5890f6b91 \ - --hash=sha256:166b36197e9debc4e384e9c652ba60c0bacc216d0fc89e78f973a9760b503388 \ - --hash=sha256:186ba67fad3c60dbe8a3abff3b67a91351100f2661c8e2a80364ae6279720299 \ - --hash=sha256:306545e234503a24fe9ae95ebf84d25cba1fdc27db971aa2d9f1ab6bba19a9dd \ - --hash=sha256:436c8e9a4bdeeee84e3e59614d38c3dbd3235838a877af8c211cfcac8a80b8d3 \ - --hash=sha256:4a873a8180479bc829313e8d9798d5234dfacfc2e8a7ac188418189bb8eafbd2 \ - --hash=sha256:4acc65dd65da28060e206c8f27a573455ed724e6179941edb19f97e58161bb69 \ - --hash=sha256:51be5f8c349fdd1a5568e72713a21f518e7d6707bcf8503b528b88d33b57dc68 \ - --hash=sha256:546b7dd7e22f3c6861463bebb000646fa730e55df5ee4a0224408b5694cc6148 \ - --hash=sha256:5671338034b820c8d58c81ad1dafc0ed5a00771a82fccc71d6438df00302094b \ - --hash=sha256:637c58b468a69869258b8ae26f4a4c6ff8abffd4a8334c830ffb63e0feefe99a \ - --hash=sha256:767254ad364991ccfc4d81b8152912e53e103ec192d1bb4ea6b1f5a7117040be \ - --hash=sha256:7d484292eaeb3e84a51432a94f53578689ffdea3f90e10c8b203a99be5af57d8 \ - --hash=sha256:7f6bad22a791226d0a5c7c27a80a20e11cfe09ad5ef9084d4d3fc4a299cca505 \ - --hash=sha256:86f737708b366c36b76e953c46ba5827d8c27b7a8c9d0f471810728e5a2fe57c \ - --hash=sha256:8c6adc33561bd1d46f81131d5352348350fc23df4d742bb246cdfca606ea1208 \ - --hash=sha256:914b28d3215e0c721dc75db3ad6d62f51f630cb0c277e6b3bcb39519bed10bd8 \ - --hash=sha256:b44e6a09afc12952a7d2a58ca0a2429ee0d49a4f89d83a0a11052da696440e49 \ - --hash=sha256:bb0d9a1aaf5f1cb7967320e80690a1d7ff69f1d47ebc5a9bea013e3a21faec95 \ - --hash=sha256:c0b45c8b65b79337dee5134d038346d30e109e9e2e9d43464a2970e5c0e93229 \ - --hash=sha256:c2e698cb0c6dda9372ea98a0344245ee65bdc1c9dd939cceed6bb91256837896 \ - --hash=sha256:c78a22e95182fb2e7874712433eaa610478a3caf86f28c621708d35fa4fd6e7f \ - --hash=sha256:e062aa24638bb5018b7841977c360d2f5917268d125c833a686b7cbabbec496c \ - --hash=sha256:e5e18e5b14a7560d8acf1c596688f4dfd19b4f2945b245a71e5af4ddb7422feb \ - --hash=sha256:eae430ecf5794cb7ae7fa3808740b015aa80747e5266153128ef055975a72b99 \ - --hash=sha256:ee84ca3c58fe48b8ddafdeb1db87388dce2c3c3f701bf447b05e4cfcc3679112 \ - --hash=sha256:f042f66d0b4ae6d48e70e28d487376204d3cbf43b84c03bac57e28dac6151581 \ - --hash=sha256:f8db2f125746e44dce707dd44d4f4efeea8d7e2b43aace3f8d1f235cfa2733dd \ - --hash=sha256:f93fc78fe8bf15afe2b8d6b6499f1c73953169fad1e9a8dd086cdff3190e7fdf - # via - # contourpy - # matplotlib -packaging==23.1 \ - --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \ - --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f +packaging==23.2 \ + --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ + --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 # via - # matplotlib # pydata-sphinx-theme # pytest # sphinx -pillow==10.0.1 \ - --hash=sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff \ - 
--hash=sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f \ - --hash=sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21 \ - --hash=sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635 \ - --hash=sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a \ - --hash=sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f \ - --hash=sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1 \ - --hash=sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d \ - --hash=sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db \ - --hash=sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849 \ - --hash=sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7 \ - --hash=sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876 \ - --hash=sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3 \ - --hash=sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317 \ - --hash=sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91 \ - --hash=sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d \ - --hash=sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b \ - --hash=sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd \ - --hash=sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed \ - --hash=sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500 \ - --hash=sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7 \ - --hash=sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a \ - --hash=sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a \ - --hash=sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0 \ - --hash=sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf \ - --hash=sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f \ - --hash=sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1 \ - --hash=sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088 \ - --hash=sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971 \ - --hash=sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a \ - --hash=sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205 \ - --hash=sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54 \ - --hash=sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08 \ - --hash=sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21 \ - --hash=sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d \ - --hash=sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08 \ - --hash=sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e \ - --hash=sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf \ - --hash=sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b \ - --hash=sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145 \ - --hash=sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2 \ - --hash=sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d \ - --hash=sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d \ - 
--hash=sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf \ - --hash=sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad \ - --hash=sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d \ - --hash=sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1 \ - --hash=sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4 \ - --hash=sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2 \ - --hash=sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19 \ - --hash=sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37 \ - --hash=sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4 \ - --hash=sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68 \ - --hash=sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1 - # via matplotlib -platformdirs==3.10.0 \ - --hash=sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d \ - --hash=sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d +platformdirs==4.1.0 \ + --hash=sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380 \ + --hash=sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420 # via virtualenv pluggy==1.3.0 \ --hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \ --hash=sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7 # via pytest -pre-commit==3.4.0 \ - --hash=sha256:6bbd5129a64cad4c0dfaeeb12cd8f7ea7e15b77028d985341478c8af3c759522 \ - --hash=sha256:96d529a951f8b677f730a7212442027e8ba53f9b04d217c4c67dc56c393ad945 +pre-commit==3.6.0 \ + --hash=sha256:c255039ef399049a5544b6ce13d135caba8f2c28c3b4033277a788f434308376 \ + --hash=sha256:d30bad9abf165f7785c15a21a1f46da7d0677cb00ee7ff4c579fd38922efe15d # via -r requirements/dev.in pybtex==0.24.0 \ --hash=sha256:818eae35b61733e5c007c3fcd2cfb75ed1bc8b4173c1f70b56cc4c0802d34755 \ @@ -743,159 +440,157 @@ pybtex-docutils==1.0.3 \ --hash=sha256:3a7ebdf92b593e00e8c1c538aa9a20bca5d92d84231124715acc964d51d93c6b \ --hash=sha256:8fd290d2ae48e32fcb54d86b0efb8d573198653c7e2447d5bec5847095f430b9 # via sphinxcontrib-bibtex -pydantic==2.3.0 \ - --hash=sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d \ - --hash=sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81 +pydantic==2.5.3 \ + --hash=sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a \ + --hash=sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4 # via # -c requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.6.3 \ - --hash=sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3 \ - --hash=sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6 \ - --hash=sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418 \ - --hash=sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7 \ - --hash=sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc \ - --hash=sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5 \ - --hash=sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7 \ - --hash=sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f \ - --hash=sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48 \ - --hash=sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad \ - 
--hash=sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef \ - --hash=sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9 \ - --hash=sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58 \ - --hash=sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da \ - --hash=sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149 \ - --hash=sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b \ - --hash=sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881 \ - --hash=sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456 \ - --hash=sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98 \ - --hash=sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e \ - --hash=sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c \ - --hash=sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e \ - --hash=sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb \ - --hash=sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862 \ - --hash=sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728 \ - --hash=sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6 \ - --hash=sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf \ - --hash=sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e \ - --hash=sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd \ - --hash=sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8 \ - --hash=sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987 \ - --hash=sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a \ - --hash=sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2 \ - --hash=sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784 \ - --hash=sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b \ - --hash=sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309 \ - --hash=sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7 \ - --hash=sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413 \ - --hash=sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2 \ - --hash=sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f \ - --hash=sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6 \ - --hash=sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b \ - --hash=sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3 \ - --hash=sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7 \ - --hash=sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d \ - --hash=sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378 \ - --hash=sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8 \ - --hash=sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe \ - --hash=sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7 \ - --hash=sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973 \ - --hash=sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad \ - --hash=sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34 \ - 
--hash=sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb \ - --hash=sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c \ - --hash=sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465 \ - --hash=sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5 \ - --hash=sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588 \ - --hash=sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950 \ - --hash=sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70 \ - --hash=sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32 \ - --hash=sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7 \ - --hash=sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec \ - --hash=sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67 \ - --hash=sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645 \ - --hash=sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db \ - --hash=sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7 \ - --hash=sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170 \ - --hash=sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17 \ - --hash=sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb \ - --hash=sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c \ - --hash=sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819 \ - --hash=sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b \ - --hash=sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d \ - --hash=sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a \ - --hash=sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525 \ - --hash=sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1 \ - --hash=sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76 \ - --hash=sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60 \ - --hash=sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b \ - --hash=sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42 \ - --hash=sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd \ - --hash=sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014 \ - --hash=sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d \ - --hash=sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a \ - --hash=sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa \ - --hash=sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f \ - --hash=sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26 \ - --hash=sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a \ - --hash=sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64 \ - --hash=sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5 \ - --hash=sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057 \ - --hash=sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50 \ - --hash=sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b \ - --hash=sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483 \ - 
--hash=sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b \ - --hash=sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c \ - --hash=sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9 \ - --hash=sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698 \ - --hash=sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362 \ - --hash=sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49 \ - --hash=sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282 \ - --hash=sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0 \ - --hash=sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a \ - --hash=sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b \ - --hash=sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1 \ - --hash=sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa +pydantic-core==2.14.6 \ + --hash=sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556 \ + --hash=sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e \ + --hash=sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411 \ + --hash=sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245 \ + --hash=sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c \ + --hash=sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66 \ + --hash=sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd \ + --hash=sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d \ + --hash=sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b \ + --hash=sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06 \ + --hash=sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948 \ + --hash=sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341 \ + --hash=sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0 \ + --hash=sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f \ + --hash=sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a \ + --hash=sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2 \ + --hash=sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51 \ + --hash=sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80 \ + --hash=sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8 \ + --hash=sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d \ + --hash=sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8 \ + --hash=sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb \ + --hash=sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590 \ + --hash=sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87 \ + --hash=sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534 \ + --hash=sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b \ + --hash=sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145 \ + --hash=sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba \ + --hash=sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b \ + --hash=sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2 \ + 
--hash=sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e \ + --hash=sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052 \ + --hash=sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622 \ + --hash=sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab \ + --hash=sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b \ + --hash=sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66 \ + --hash=sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e \ + --hash=sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4 \ + --hash=sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e \ + --hash=sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec \ + --hash=sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c \ + --hash=sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed \ + --hash=sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937 \ + --hash=sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f \ + --hash=sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9 \ + --hash=sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4 \ + --hash=sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96 \ + --hash=sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277 \ + --hash=sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23 \ + --hash=sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7 \ + --hash=sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b \ + --hash=sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91 \ + --hash=sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d \ + --hash=sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e \ + --hash=sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1 \ + --hash=sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2 \ + --hash=sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160 \ + --hash=sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9 \ + --hash=sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670 \ + --hash=sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7 \ + --hash=sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c \ + --hash=sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb \ + --hash=sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42 \ + --hash=sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d \ + --hash=sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8 \ + --hash=sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1 \ + --hash=sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6 \ + --hash=sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8 \ + --hash=sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf \ + --hash=sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e \ + --hash=sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a \ + --hash=sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9 \ + 
--hash=sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1 \ + --hash=sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40 \ + --hash=sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2 \ + --hash=sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d \ + --hash=sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f \ + --hash=sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f \ + --hash=sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af \ + --hash=sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7 \ + --hash=sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda \ + --hash=sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a \ + --hash=sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95 \ + --hash=sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0 \ + --hash=sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60 \ + --hash=sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149 \ + --hash=sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975 \ + --hash=sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4 \ + --hash=sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe \ + --hash=sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94 \ + --hash=sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03 \ + --hash=sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c \ + --hash=sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b \ + --hash=sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a \ + --hash=sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24 \ + --hash=sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391 \ + --hash=sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c \ + --hash=sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab \ + --hash=sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd \ + --hash=sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786 \ + --hash=sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08 \ + --hash=sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8 \ + --hash=sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6 \ + --hash=sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0 \ + --hash=sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421 # via # -c requirements/main.txt # pydantic -pydantic-settings==2.0.3 \ - --hash=sha256:962dc3672495aad6ae96a4390fac7e593591e144625e5112d359f8f67fb75945 \ - --hash=sha256:ddd907b066622bd67603b75e2ff791875540dc485b7307c4fffc015719da8625 +pydantic-settings==2.1.0 \ + --hash=sha256:26b1492e0a24755626ac5e6d715e9077ab7ad4fb5f19a8b7ed7011d52f36141c \ + --hash=sha256:7621c0cb5d90d1140d2f0ef557bdf03573aac7035948109adf2574770b77605a # via autodoc-pydantic pydata-sphinx-theme==0.12.0 \ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ --hash=sha256:c17dbab67a3774f06f34f6378e896fcd0668cc8b5da1c1ba017e65cf1df0af58 # via documenteer -pygments==2.16.1 \ - --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \ - 
--hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29 +pygments==2.17.2 \ + --hash=sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c \ + --hash=sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367 # via # pydata-sphinx-theme + # rich # sphinx # sphinx-prompt -pyparsing==3.1.1 \ - --hash=sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb \ - --hash=sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db - # via matplotlib -pytest==7.4.2 \ - --hash=sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002 \ - --hash=sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069 +pylatexenc==2.10 \ + --hash=sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3 + # via documenteer +pytest==7.4.4 \ + --hash=sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280 \ + --hash=sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8 # via # -r requirements/dev.in # pytest-cov + # pytest-pretty pytest-cov==4.1.0 \ --hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \ --hash=sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a # via -r requirements/dev.in -python-dateutil==2.8.2 \ - --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ - --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 - # via - # -c requirements/main.txt - # matplotlib +pytest-pretty==1.2.0 \ + --hash=sha256:105a355f128e392860ad2c478ae173ff96d2f03044692f9818ff3d49205d3a60 \ + --hash=sha256:6f79122bf53864ae2951b6c9e94d7a06a87ef753476acd4588aeac018f062036 + # via -r requirements/dev.in python-dotenv==1.0.0 \ --hash=sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba \ --hash=sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a @@ -930,6 +625,7 @@ pyyaml==6.0.1 \ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ @@ -958,9 +654,9 @@ pyyaml==6.0.1 \ # pre-commit # pybtex # sphinxcontrib-redoc -referencing==0.30.2 \ - --hash=sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf \ - --hash=sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0 +referencing==0.32.1 \ + --hash=sha256:3c57da0513e9563eb7e203ebe9bb3a1b509b042016433bd1e45a2853466c3dd3 \ + --hash=sha256:7e4dc12271d8e15612bfe35792f5ea1c40970dadf8624602e33db2758f7ee554 # via # jsonschema # jsonschema-specifications @@ -971,125 +667,131 @@ requests==2.31.0 \ # -c requirements/main.txt # documenteer # sphinx -rpds-py==0.10.3 \ - --hash=sha256:015de2ce2af1586ff5dc873e804434185199a15f7d96920ce67e50604592cae9 \ - --hash=sha256:061c3ff1f51ecec256e916cf71cc01f9975af8fb3af9b94d3c0cc8702cfea637 \ - --hash=sha256:08a80cf4884920863623a9ee9a285ee04cef57ebedc1cc87b3e3e0f24c8acfe5 \ - --hash=sha256:09362f86ec201288d5687d1dc476b07bf39c08478cde837cb710b302864e7ec9 \ - 
--hash=sha256:0bb4f48bd0dd18eebe826395e6a48b7331291078a879295bae4e5d053be50d4c \ - --hash=sha256:106af1653007cc569d5fbb5f08c6648a49fe4de74c2df814e234e282ebc06957 \ - --hash=sha256:11fdd1192240dda8d6c5d18a06146e9045cb7e3ba7c06de6973000ff035df7c6 \ - --hash=sha256:16a472300bc6c83fe4c2072cc22b3972f90d718d56f241adabc7ae509f53f154 \ - --hash=sha256:176287bb998fd1e9846a9b666e240e58f8d3373e3bf87e7642f15af5405187b8 \ - --hash=sha256:177914f81f66c86c012311f8c7f46887ec375cfcfd2a2f28233a3053ac93a569 \ - --hash=sha256:177c9dd834cdf4dc39c27436ade6fdf9fe81484758885f2d616d5d03c0a83bd2 \ - --hash=sha256:187700668c018a7e76e89424b7c1042f317c8df9161f00c0c903c82b0a8cac5c \ - --hash=sha256:1d9b5ee46dcb498fa3e46d4dfabcb531e1f2e76b477e0d99ef114f17bbd38453 \ - --hash=sha256:22da15b902f9f8e267020d1c8bcfc4831ca646fecb60254f7bc71763569f56b1 \ - --hash=sha256:24cd91a03543a0f8d09cb18d1cb27df80a84b5553d2bd94cba5979ef6af5c6e7 \ - --hash=sha256:255f1a10ae39b52122cce26ce0781f7a616f502feecce9e616976f6a87992d6b \ - --hash=sha256:271c360fdc464fe6a75f13ea0c08ddf71a321f4c55fc20a3fe62ea3ef09df7d9 \ - --hash=sha256:2ed83d53a8c5902ec48b90b2ac045e28e1698c0bea9441af9409fc844dc79496 \ - --hash=sha256:2f3e1867dd574014253b4b8f01ba443b9c914e61d45f3674e452a915d6e929a3 \ - --hash=sha256:35fbd23c1c8732cde7a94abe7fb071ec173c2f58c0bd0d7e5b669fdfc80a2c7b \ - --hash=sha256:37d0c59548ae56fae01c14998918d04ee0d5d3277363c10208eef8c4e2b68ed6 \ - --hash=sha256:39d05e65f23a0fe897b6ac395f2a8d48c56ac0f583f5d663e0afec1da89b95da \ - --hash=sha256:3ad59efe24a4d54c2742929001f2d02803aafc15d6d781c21379e3f7f66ec842 \ - --hash=sha256:3aed39db2f0ace76faa94f465d4234aac72e2f32b009f15da6492a561b3bbebd \ - --hash=sha256:3bbac1953c17252f9cc675bb19372444aadf0179b5df575ac4b56faaec9f6294 \ - --hash=sha256:40bc802a696887b14c002edd43c18082cb7b6f9ee8b838239b03b56574d97f71 \ - --hash=sha256:42f712b4668831c0cd85e0a5b5a308700fe068e37dcd24c0062904c4e372b093 \ - --hash=sha256:448a66b8266de0b581246ca7cd6a73b8d98d15100fb7165974535fa3b577340e \ - --hash=sha256:485301ee56ce87a51ccb182a4b180d852c5cb2b3cb3a82f7d4714b4141119d8c \ - --hash=sha256:485747ee62da83366a44fbba963c5fe017860ad408ccd6cd99aa66ea80d32b2e \ - --hash=sha256:4cf0855a842c5b5c391dd32ca273b09e86abf8367572073bd1edfc52bc44446b \ - --hash=sha256:4eca20917a06d2fca7628ef3c8b94a8c358f6b43f1a621c9815243462dcccf97 \ - --hash=sha256:4ed172d0c79f156c1b954e99c03bc2e3033c17efce8dd1a7c781bc4d5793dfac \ - --hash=sha256:5267cfda873ad62591b9332fd9472d2409f7cf02a34a9c9cb367e2c0255994bf \ - --hash=sha256:52b5cbc0469328e58180021138207e6ec91d7ca2e037d3549cc9e34e2187330a \ - --hash=sha256:53d7a3cd46cdc1689296348cb05ffd4f4280035770aee0c8ead3bbd4d6529acc \ - --hash=sha256:563646d74a4b4456d0cf3b714ca522e725243c603e8254ad85c3b59b7c0c4bf0 \ - --hash=sha256:570cc326e78ff23dec7f41487aa9c3dffd02e5ee9ab43a8f6ccc3df8f9327623 \ - --hash=sha256:5aca759ada6b1967fcfd4336dcf460d02a8a23e6abe06e90ea7881e5c22c4de6 \ - --hash=sha256:5de11c041486681ce854c814844f4ce3282b6ea1656faae19208ebe09d31c5b8 \ - --hash=sha256:5e271dd97c7bb8eefda5cca38cd0b0373a1fea50f71e8071376b46968582af9b \ - --hash=sha256:642ed0a209ced4be3a46f8cb094f2d76f1f479e2a1ceca6de6346a096cd3409d \ - --hash=sha256:6446002739ca29249f0beaaf067fcbc2b5aab4bc7ee8fb941bd194947ce19aff \ - --hash=sha256:691d50c99a937709ac4c4cd570d959a006bd6a6d970a484c84cc99543d4a5bbb \ - --hash=sha256:69b857a7d8bd4f5d6e0db4086da8c46309a26e8cefdfc778c0c5cc17d4b11e08 \ - --hash=sha256:6ac3fefb0d168c7c6cab24fdfc80ec62cd2b4dfd9e65b84bdceb1cb01d385c33 \ - 
--hash=sha256:6c9141af27a4e5819d74d67d227d5047a20fa3c7d4d9df43037a955b4c748ec5 \ - --hash=sha256:7170cbde4070dc3c77dec82abf86f3b210633d4f89550fa0ad2d4b549a05572a \ - --hash=sha256:763ad59e105fca09705d9f9b29ecffb95ecdc3b0363be3bb56081b2c6de7977a \ - --hash=sha256:77076bdc8776a2b029e1e6ffbe6d7056e35f56f5e80d9dc0bad26ad4a024a762 \ - --hash=sha256:7cd020b1fb41e3ab7716d4d2c3972d4588fdfbab9bfbbb64acc7078eccef8860 \ - --hash=sha256:821392559d37759caa67d622d0d2994c7a3f2fb29274948ac799d496d92bca73 \ - --hash=sha256:829e91f3a8574888b73e7a3feb3b1af698e717513597e23136ff4eba0bc8387a \ - --hash=sha256:850c272e0e0d1a5c5d73b1b7871b0a7c2446b304cec55ccdb3eaac0d792bb065 \ - --hash=sha256:87d9b206b1bd7a0523375dc2020a6ce88bca5330682ae2fe25e86fd5d45cea9c \ - --hash=sha256:8bd01ff4032abaed03f2db702fa9a61078bee37add0bd884a6190b05e63b028c \ - --hash=sha256:8d54bbdf5d56e2c8cf81a1857250f3ea132de77af543d0ba5dce667183b61fec \ - --hash=sha256:8efaeb08ede95066da3a3e3c420fcc0a21693fcd0c4396d0585b019613d28515 \ - --hash=sha256:8f94fdd756ba1f79f988855d948ae0bad9ddf44df296770d9a58c774cfbcca72 \ - --hash=sha256:95cde244e7195b2c07ec9b73fa4c5026d4a27233451485caa1cd0c1b55f26dbd \ - --hash=sha256:975382d9aa90dc59253d6a83a5ca72e07f4ada3ae3d6c0575ced513db322b8ec \ - --hash=sha256:9dd9d9d9e898b9d30683bdd2b6c1849449158647d1049a125879cb397ee9cd12 \ - --hash=sha256:a019a344312d0b1f429c00d49c3be62fa273d4a1094e1b224f403716b6d03be1 \ - --hash=sha256:a4d9bfda3f84fc563868fe25ca160c8ff0e69bc4443c5647f960d59400ce6557 \ - --hash=sha256:a657250807b6efd19b28f5922520ae002a54cb43c2401e6f3d0230c352564d25 \ - --hash=sha256:a771417c9c06c56c9d53d11a5b084d1de75de82978e23c544270ab25e7c066ff \ - --hash=sha256:aad6ed9e70ddfb34d849b761fb243be58c735be6a9265b9060d6ddb77751e3e8 \ - --hash=sha256:ae87137951bb3dc08c7d8bfb8988d8c119f3230731b08a71146e84aaa919a7a9 \ - --hash=sha256:af247fd4f12cca4129c1b82090244ea5a9d5bb089e9a82feb5a2f7c6a9fe181d \ - --hash=sha256:b5d4bdd697195f3876d134101c40c7d06d46c6ab25159ed5cbd44105c715278a \ - --hash=sha256:b9255e7165083de7c1d605e818025e8860636348f34a79d84ec533546064f07e \ - --hash=sha256:c22211c165166de6683de8136229721f3d5c8606cc2c3d1562da9a3a5058049c \ - --hash=sha256:c55f9821f88e8bee4b7a72c82cfb5ecd22b6aad04033334f33c329b29bfa4da0 \ - --hash=sha256:c7aed97f2e676561416c927b063802c8a6285e9b55e1b83213dfd99a8f4f9e48 \ - --hash=sha256:cd2163f42868865597d89399a01aa33b7594ce8e2c4a28503127c81a2f17784e \ - --hash=sha256:ce5e7504db95b76fc89055c7f41e367eaadef5b1d059e27e1d6eabf2b55ca314 \ - --hash=sha256:cff7351c251c7546407827b6a37bcef6416304fc54d12d44dbfecbb717064717 \ - --hash=sha256:d27aa6bbc1f33be920bb7adbb95581452cdf23005d5611b29a12bb6a3468cc95 \ - --hash=sha256:d3b52a67ac66a3a64a7e710ba629f62d1e26ca0504c29ee8cbd99b97df7079a8 \ - --hash=sha256:de61e424062173b4f70eec07e12469edde7e17fa180019a2a0d75c13a5c5dc57 \ - --hash=sha256:e10e6a1ed2b8661201e79dff5531f8ad4cdd83548a0f81c95cf79b3184b20c33 \ - --hash=sha256:e1a0ffc39f51aa5f5c22114a8f1906b3c17eba68c5babb86c5f77d8b1bba14d1 \ - --hash=sha256:e22491d25f97199fc3581ad8dd8ce198d8c8fdb8dae80dea3512e1ce6d5fa99f \ - --hash=sha256:e626b864725680cd3904414d72e7b0bd81c0e5b2b53a5b30b4273034253bb41f \ - --hash=sha256:e8c71ea77536149e36c4c784f6d420ffd20bea041e3ba21ed021cb40ce58e2c9 \ - --hash=sha256:e8d0f0eca087630d58b8c662085529781fd5dc80f0a54eda42d5c9029f812599 \ - --hash=sha256:ea65b59882d5fa8c74a23f8960db579e5e341534934f43f3b18ec1839b893e41 \ - --hash=sha256:ea93163472db26ac6043e8f7f93a05d9b59e0505c760da2a3cd22c7dd7111391 \ - 
--hash=sha256:eab75a8569a095f2ad470b342f2751d9902f7944704f0571c8af46bede438475 \ - --hash=sha256:ed8313809571a5463fd7db43aaca68ecb43ca7a58f5b23b6e6c6c5d02bdc7882 \ - --hash=sha256:ef5fddfb264e89c435be4adb3953cef5d2936fdeb4463b4161a6ba2f22e7b740 \ - --hash=sha256:ef750a20de1b65657a1425f77c525b0183eac63fe7b8f5ac0dd16f3668d3e64f \ - --hash=sha256:efb9ece97e696bb56e31166a9dd7919f8f0c6b31967b454718c6509f29ef6fee \ - --hash=sha256:f4c179a7aeae10ddf44c6bac87938134c1379c49c884529f090f9bf05566c836 \ - --hash=sha256:f602881d80ee4228a2355c68da6b296a296cd22bbb91e5418d54577bbf17fa7c \ - --hash=sha256:fc2200e79d75b5238c8d69f6a30f8284290c777039d331e7340b6c17cad24a5a \ - --hash=sha256:fcc1ebb7561a3e24a6588f7c6ded15d80aec22c66a070c757559b57b17ffd1cb +rich==13.7.0 \ + --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \ + --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235 + # via pytest-pretty +rpds-py==0.17.1 \ + --hash=sha256:01f58a7306b64e0a4fe042047dd2b7d411ee82e54240284bab63e325762c1147 \ + --hash=sha256:0210b2668f24c078307260bf88bdac9d6f1093635df5123789bfee4d8d7fc8e7 \ + --hash=sha256:02866e060219514940342a1f84303a1ef7a1dad0ac311792fbbe19b521b489d2 \ + --hash=sha256:0387ce69ba06e43df54e43968090f3626e231e4bc9150e4c3246947567695f68 \ + --hash=sha256:060f412230d5f19fc8c8b75f315931b408d8ebf56aec33ef4168d1b9e54200b1 \ + --hash=sha256:071bc28c589b86bc6351a339114fb7a029f5cddbaca34103aa573eba7b482382 \ + --hash=sha256:0bfb09bf41fe7c51413f563373e5f537eaa653d7adc4830399d4e9bdc199959d \ + --hash=sha256:10162fe3f5f47c37ebf6d8ff5a2368508fe22007e3077bf25b9c7d803454d921 \ + --hash=sha256:149c5cd24f729e3567b56e1795f74577aa3126c14c11e457bec1b1c90d212e38 \ + --hash=sha256:1701fc54460ae2e5efc1dd6350eafd7a760f516df8dbe51d4a1c79d69472fbd4 \ + --hash=sha256:1957a2ab607f9added64478a6982742eb29f109d89d065fa44e01691a20fc20a \ + --hash=sha256:1a746a6d49665058a5896000e8d9d2f1a6acba8a03b389c1e4c06e11e0b7f40d \ + --hash=sha256:1bfcad3109c1e5ba3cbe2f421614e70439f72897515a96c462ea657261b96518 \ + --hash=sha256:1d36b2b59e8cc6e576f8f7b671e32f2ff43153f0ad6d0201250a7c07f25d570e \ + --hash=sha256:1db228102ab9d1ff4c64148c96320d0be7044fa28bd865a9ce628ce98da5973d \ + --hash=sha256:1dc29db3900cb1bb40353772417800f29c3d078dbc8024fd64655a04ee3c4bdf \ + --hash=sha256:1e626b365293a2142a62b9a614e1f8e331b28f3ca57b9f05ebbf4cf2a0f0bdc5 \ + --hash=sha256:1f3c3461ebb4c4f1bbc70b15d20b565759f97a5aaf13af811fcefc892e9197ba \ + --hash=sha256:20de7b7179e2031a04042e85dc463a93a82bc177eeba5ddd13ff746325558aa6 \ + --hash=sha256:24e4900a6643f87058a27320f81336d527ccfe503984528edde4bb660c8c8d59 \ + --hash=sha256:2528ff96d09f12e638695f3a2e0c609c7b84c6df7c5ae9bfeb9252b6fa686253 \ + --hash=sha256:25f071737dae674ca8937a73d0f43f5a52e92c2d178330b4c0bb6ab05586ffa6 \ + --hash=sha256:270987bc22e7e5a962b1094953ae901395e8c1e1e83ad016c5cfcfff75a15a3f \ + --hash=sha256:292f7344a3301802e7c25c53792fae7d1593cb0e50964e7bcdcc5cf533d634e3 \ + --hash=sha256:2953937f83820376b5979318840f3ee47477d94c17b940fe31d9458d79ae7eea \ + --hash=sha256:2a792b2e1d3038daa83fa474d559acfd6dc1e3650ee93b2662ddc17dbff20ad1 \ + --hash=sha256:2a7b2f2f56a16a6d62e55354dd329d929560442bd92e87397b7a9586a32e3e76 \ + --hash=sha256:2f4eb548daf4836e3b2c662033bfbfc551db58d30fd8fe660314f86bf8510b93 \ + --hash=sha256:3664d126d3388a887db44c2e293f87d500c4184ec43d5d14d2d2babdb4c64cad \ + --hash=sha256:3677fcca7fb728c86a78660c7fb1b07b69b281964673f486ae72860e13f512ad \ + --hash=sha256:380e0df2e9d5d5d339803cfc6d183a5442ad7ab3c63c2a0982e8c824566c5ccc \ + 
--hash=sha256:3ac732390d529d8469b831949c78085b034bff67f584559340008d0f6041a049 \ + --hash=sha256:4128980a14ed805e1b91a7ed551250282a8ddf8201a4e9f8f5b7e6225f54170d \ + --hash=sha256:4341bd7579611cf50e7b20bb8c2e23512a3dc79de987a1f411cb458ab670eb90 \ + --hash=sha256:436474f17733c7dca0fbf096d36ae65277e8645039df12a0fa52445ca494729d \ + --hash=sha256:4dc889a9d8a34758d0fcc9ac86adb97bab3fb7f0c4d29794357eb147536483fd \ + --hash=sha256:4e21b76075c01d65d0f0f34302b5a7457d95721d5e0667aea65e5bb3ab415c25 \ + --hash=sha256:516fb8c77805159e97a689e2f1c80655c7658f5af601c34ffdb916605598cda2 \ + --hash=sha256:5576ee2f3a309d2bb403ec292d5958ce03953b0e57a11d224c1f134feaf8c40f \ + --hash=sha256:5a024fa96d541fd7edaa0e9d904601c6445e95a729a2900c5aec6555fe921ed6 \ + --hash=sha256:5d0e8a6434a3fbf77d11448c9c25b2f25244226cfbec1a5159947cac5b8c5fa4 \ + --hash=sha256:5e7d63ec01fe7c76c2dbb7e972fece45acbb8836e72682bde138e7e039906e2c \ + --hash=sha256:60e820ee1004327609b28db8307acc27f5f2e9a0b185b2064c5f23e815f248f8 \ + --hash=sha256:637b802f3f069a64436d432117a7e58fab414b4e27a7e81049817ae94de45d8d \ + --hash=sha256:65dcf105c1943cba45d19207ef51b8bc46d232a381e94dd38719d52d3980015b \ + --hash=sha256:698ea95a60c8b16b58be9d854c9f993c639f5c214cf9ba782eca53a8789d6b19 \ + --hash=sha256:70fcc6c2906cfa5c6a552ba7ae2ce64b6c32f437d8f3f8eea49925b278a61453 \ + --hash=sha256:720215373a280f78a1814becb1312d4e4d1077b1202a56d2b0815e95ccb99ce9 \ + --hash=sha256:7450dbd659fed6dd41d1a7d47ed767e893ba402af8ae664c157c255ec6067fde \ + --hash=sha256:7b7d9ca34542099b4e185b3c2a2b2eda2e318a7dbde0b0d83357a6d4421b5296 \ + --hash=sha256:7fbd70cb8b54fe745301921b0816c08b6d917593429dfc437fd024b5ba713c58 \ + --hash=sha256:81038ff87a4e04c22e1d81f947c6ac46f122e0c80460b9006e6517c4d842a6ec \ + --hash=sha256:810685321f4a304b2b55577c915bece4c4a06dfe38f6e62d9cc1d6ca8ee86b99 \ + --hash=sha256:82ada4a8ed9e82e443fcef87e22a3eed3654dd3adf6e3b3a0deb70f03e86142a \ + --hash=sha256:841320e1841bb53fada91c9725e766bb25009cfd4144e92298db296fb6c894fb \ + --hash=sha256:8587fd64c2a91c33cdc39d0cebdaf30e79491cc029a37fcd458ba863f8815383 \ + --hash=sha256:8ffe53e1d8ef2520ebcf0c9fec15bb721da59e8ef283b6ff3079613b1e30513d \ + --hash=sha256:9051e3d2af8f55b42061603e29e744724cb5f65b128a491446cc029b3e2ea896 \ + --hash=sha256:91e5a8200e65aaac342a791272c564dffcf1281abd635d304d6c4e6b495f29dc \ + --hash=sha256:93432e747fb07fa567ad9cc7aaadd6e29710e515aabf939dfbed8046041346c6 \ + --hash=sha256:938eab7323a736533f015e6069a7d53ef2dcc841e4e533b782c2bfb9fb12d84b \ + --hash=sha256:9584f8f52010295a4a417221861df9bea4c72d9632562b6e59b3c7b87a1522b7 \ + --hash=sha256:9737bdaa0ad33d34c0efc718741abaafce62fadae72c8b251df9b0c823c63b22 \ + --hash=sha256:99da0a4686ada4ed0f778120a0ea8d066de1a0a92ab0d13ae68492a437db78bf \ + --hash=sha256:99f567dae93e10be2daaa896e07513dd4bf9c2ecf0576e0533ac36ba3b1d5394 \ + --hash=sha256:9bdf1303df671179eaf2cb41e8515a07fc78d9d00f111eadbe3e14262f59c3d0 \ + --hash=sha256:9f0e4dc0f17dcea4ab9d13ac5c666b6b5337042b4d8f27e01b70fae41dd65c57 \ + --hash=sha256:a000133a90eea274a6f28adc3084643263b1e7c1a5a66eb0a0a7a36aa757ed74 \ + --hash=sha256:a3264e3e858de4fc601741498215835ff324ff2482fd4e4af61b46512dd7fc83 \ + --hash=sha256:a71169d505af63bb4d20d23a8fbd4c6ce272e7bce6cc31f617152aa784436f29 \ + --hash=sha256:a967dd6afda7715d911c25a6ba1517975acd8d1092b2f326718725461a3d33f9 \ + --hash=sha256:aa5bfb13f1e89151ade0eb812f7b0d7a4d643406caaad65ce1cbabe0a66d695f \ + --hash=sha256:ae35e8e6801c5ab071b992cb2da958eee76340e6926ec693b5ff7d6381441745 \ + 
--hash=sha256:b686f25377f9c006acbac63f61614416a6317133ab7fafe5de5f7dc8a06d42eb \ + --hash=sha256:b760a56e080a826c2e5af09002c1a037382ed21d03134eb6294812dda268c811 \ + --hash=sha256:b86b21b348f7e5485fae740d845c65a880f5d1eda1e063bc59bef92d1f7d0c55 \ + --hash=sha256:b9412abdf0ba70faa6e2ee6c0cc62a8defb772e78860cef419865917d86c7342 \ + --hash=sha256:bd345a13ce06e94c753dab52f8e71e5252aec1e4f8022d24d56decd31e1b9b23 \ + --hash=sha256:be22ae34d68544df293152b7e50895ba70d2a833ad9566932d750d3625918b82 \ + --hash=sha256:bf046179d011e6114daf12a534d874958b039342b347348a78b7cdf0dd9d6041 \ + --hash=sha256:c3d2010656999b63e628a3c694f23020322b4178c450dc478558a2b6ef3cb9bb \ + --hash=sha256:c64602e8be701c6cfe42064b71c84ce62ce66ddc6422c15463fd8127db3d8066 \ + --hash=sha256:d65e6b4f1443048eb7e833c2accb4fa7ee67cc7d54f31b4f0555b474758bee55 \ + --hash=sha256:d8bbd8e56f3ba25a7d0cf980fc42b34028848a53a0e36c9918550e0280b9d0b6 \ + --hash=sha256:da1ead63368c04a9bded7904757dfcae01eba0e0f9bc41d3d7f57ebf1c04015a \ + --hash=sha256:dbbb95e6fc91ea3102505d111b327004d1c4ce98d56a4a02e82cd451f9f57140 \ + --hash=sha256:dbc56680ecf585a384fbd93cd42bc82668b77cb525343170a2d86dafaed2a84b \ + --hash=sha256:df3b6f45ba4515632c5064e35ca7f31d51d13d1479673185ba8f9fefbbed58b9 \ + --hash=sha256:dfe07308b311a8293a0d5ef4e61411c5c20f682db6b5e73de6c7c8824272c256 \ + --hash=sha256:e796051f2070f47230c745d0a77a91088fbee2cc0502e9b796b9c6471983718c \ + --hash=sha256:efa767c220d94aa4ac3a6dd3aeb986e9f229eaf5bce92d8b1b3018d06bed3772 \ + --hash=sha256:f0b8bf5b8db49d8fd40f54772a1dcf262e8be0ad2ab0206b5a2ec109c176c0a4 \ + --hash=sha256:f175e95a197f6a4059b50757a3dca33b32b61691bdbd22c29e8a8d21d3914cae \ + --hash=sha256:f2f3b28b40fddcb6c1f1f6c88c6f3769cd933fa493ceb79da45968a21dccc920 \ + --hash=sha256:f6c43b6f97209e370124baf2bf40bb1e8edc25311a158867eb1c3a5d449ebc7a \ + --hash=sha256:f7f4cb1f173385e8a39c29510dd11a78bf44e360fb75610594973f5ea141028b \ + --hash=sha256:fad059a4bd14c45776600d223ec194e77db6c20255578bb5bcdd7c18fd169361 \ + --hash=sha256:ff1dcb8e8bc2261a088821b2595ef031c91d499a0c1b031c152d43fe0a6ecec8 \ + --hash=sha256:ffee088ea9b593cc6160518ba9bd319b5475e5f3e578e4552d63818773c6f56a # via # jsonschema # referencing -ruff==0.0.291 \ - --hash=sha256:13f0d88e5f367b2dc8c7d90a8afdcfff9dd7d174e324fd3ed8e0b5cb5dc9b7f6 \ - --hash=sha256:1d5f0616ae4cdc7a938b493b6a1a71c8a47d0300c0d65f6e41c281c2f7490ad3 \ - --hash=sha256:5383ba67ad360caf6060d09012f1fb2ab8bd605ab766d10ca4427a28ab106e0b \ - --hash=sha256:6ab44ea607967171e18aa5c80335237be12f3a1523375fa0cede83c5cf77feb4 \ - --hash=sha256:6c06006350c3bb689765d71f810128c9cdf4a1121fd01afc655c87bab4fb4f83 \ - --hash=sha256:87671e33175ae949702774071b35ed4937da06f11851af75cd087e1b5a488ac4 \ - --hash=sha256:8a69bfbde72db8ca1c43ee3570f59daad155196c3fbe357047cd9b77de65f15b \ - --hash=sha256:8d5b56bc3a2f83a7a1d7f4447c54d8d3db52021f726fdd55d549ca87bca5d747 \ - --hash=sha256:a04b384f2d36f00d5fb55313d52a7d66236531195ef08157a09c4728090f2ef0 \ - --hash=sha256:b09b94efdcd162fe32b472b2dd5bf1c969fcc15b8ff52f478b048f41d4590e09 \ - --hash=sha256:b3eeee1b1a45a247758ecdc3ab26c307336d157aafc61edb98b825cadb153df3 \ - --hash=sha256:b727c219b43f903875b7503a76c86237a00d1a39579bb3e21ce027eec9534051 \ - --hash=sha256:b75f5801547f79b7541d72a211949754c21dc0705c70eddf7f21c88a64de8b97 \ - --hash=sha256:b97d0d7c136a85badbc7fd8397fdbb336e9409b01c07027622f28dcd7db366f2 \ - --hash=sha256:c61109661dde9db73469d14a82b42a88c7164f731e6a3b0042e71394c1c7ceed \ - --hash=sha256:d867384a4615b7f30b223a849b52104214442b5ba79b473d7edd18da3cde22d6 \ - 
--hash=sha256:fd17220611047de247b635596e3174f3d7f2becf63bd56301fc758778df9b629 +ruff==0.1.14 \ + --hash=sha256:1c8eca1a47b4150dc0fbec7fe68fc91c695aed798532a18dbb1424e61e9b721f \ + --hash=sha256:2270504d629a0b064247983cbc495bed277f372fb9eaba41e5cf51f7ba705a6a \ + --hash=sha256:269302b31ade4cde6cf6f9dd58ea593773a37ed3f7b97e793c8594b262466b67 \ + --hash=sha256:62ce2ae46303ee896fc6811f63d6dabf8d9c389da0f3e3f2bce8bc7f15ef5488 \ + --hash=sha256:653230dd00aaf449eb5ff25d10a6e03bc3006813e2cb99799e568f55482e5cae \ + --hash=sha256:6b3dadc9522d0eccc060699a9816e8127b27addbb4697fc0c08611e4e6aeb8b5 \ + --hash=sha256:7060156ecc572b8f984fd20fd8b0fcb692dd5d837b7606e968334ab7ff0090ab \ + --hash=sha256:722bafc299145575a63bbd6b5069cb643eaa62546a5b6398f82b3e4403329cab \ + --hash=sha256:80258bb3b8909b1700610dfabef7876423eed1bc930fe177c71c414921898efa \ + --hash=sha256:87b3acc6c4e6928459ba9eb7459dd4f0c4bf266a053c863d72a44c33246bfdbf \ + --hash=sha256:96f76536df9b26622755c12ed8680f159817be2f725c17ed9305b472a757cdbb \ + --hash=sha256:a53d8e35313d7b67eb3db15a66c08434809107659226a90dcd7acb2afa55faea \ + --hash=sha256:ab3f71f64498c7241123bb5a768544cf42821d2a537f894b22457a543d3ca7a9 \ + --hash=sha256:ad3f8088b2dfd884820289a06ab718cde7d38b94972212cc4ba90d5fbc9955f3 \ + --hash=sha256:b2027dde79d217b211d725fc833e8965dc90a16d0d3213f1298f97465956661b \ + --hash=sha256:bea9be712b8f5b4ebed40e1949379cfb2a7d907f42921cf9ab3aae07e6fba9eb \ + --hash=sha256:e3d241aa61f92b0805a7082bd89a9990826448e4d0398f0e2bc8f05c75c63d99 # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -1098,7 +800,6 @@ six==1.16.0 \ # -c requirements/main.txt # latexcodec # pybtex - # python-dateutil # sphinxcontrib-redoc smmap==5.0.1 \ --hash=sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62 \ @@ -1114,9 +815,9 @@ soupsieve==2.5 \ --hash=sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690 \ --hash=sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7 # via beautifulsoup4 -sphinx==6.2.1 \ - --hash=sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b \ - --hash=sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912 +sphinx==7.2.6 \ + --hash=sha256:1e09160a40b956dc623c910118fa636da93bd3ca0b9876a7b3df90f07d691560 \ + --hash=sha256:9a5160e1ea90688d5963ba09a2dcd8bdd526620edbb65c328728f1b2228d5ab5 # via # autodoc-pydantic # documenteer @@ -1129,26 +830,22 @@ sphinx==6.2.1 \ # sphinx-design # sphinx-jinja # sphinx-prompt - # sphinxcontrib-applehelp # sphinxcontrib-bibtex - # sphinxcontrib-devhelp - # sphinxcontrib-htmlhelp # sphinxcontrib-jquery - # sphinxcontrib-qthelp # sphinxcontrib-redoc - # sphinxcontrib-serializinghtml # sphinxext-opengraph -sphinx-autodoc-typehints==1.22 \ - --hash=sha256:71fca2d5eee9b034204e4c686ab20b4d8f5eb9409396216bcae6c87c38e18ea6 \ - --hash=sha256:ef4a8b9d52de66065aa7d3adfabf5a436feb8a2eff07c2ddc31625d8807f2b69 + # sphinxext-rediraffe +sphinx-autodoc-typehints==1.25.2 \ + --hash=sha256:3cabc2537e17989b2f92e64a399425c4c8bf561ed73f087bc7414a5003616a50 \ + --hash=sha256:5ed05017d23ad4b937eab3bee9fae9ab0dd63f0b42aa360031f1fad47e47f673 # via documenteer sphinx-automodapi==0.16.0 \ --hash=sha256:68fc47064804604b90aa27c047016e86aaf970981d90a0082d5b5dd2e9d38afd \ --hash=sha256:6c673ef93066408e5ad3e2fa3533044d432a47fe6a826212b9ebf5f52a872554 # via documenteer -sphinx-click==5.0.1 \ - --hash=sha256:31836ca22f746d3c26cbfdfe0c58edf0bca5783731a0b2e25bb6d59800bb75a1 \ - 
--hash=sha256:fcc7df15e56e3ff17ebf446cdd316c2eb79580b37c49579fba11e5468802ef25 +sphinx-click==5.1.0 \ + --hash=sha256:6812c2db62d3fae71a4addbe5a8a0a16c97eb491f3cd63fe34b4ed7e07236f33 \ + --hash=sha256:ae97557a4e9ec646045089326c3b90e026c58a45e083b8f35f17d5d6558d08a0 # via -r requirements/dev.in sphinx-copybutton==0.5.2 \ --hash=sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd \ @@ -1165,25 +862,28 @@ sphinx-diagrams==0.4.0 \ sphinx-jinja==2.0.2 \ --hash=sha256:705ebeb9b7a6018ca3f93724315a7c1effa6ba3db44d630e7eaaa15e4ac081a8 \ --hash=sha256:c6232b59a894139770be1dc6d0b00a379e4288ce78157904e1f8473dea3e0718 - # via -r requirements/dev.in -sphinx-prompt==1.5.0 \ - --hash=sha256:fa4e90d8088b5a996c76087d701fc7e31175f8b9dc4aab03a507e45051067162 + # via + # -r requirements/dev.in + # documenteer +sphinx-prompt==1.8.0 \ + --hash=sha256:369ecc633f0711886f9b3a078c83264245be1adf46abeeb9b88b5519e4b51007 \ + --hash=sha256:47482f86fcec29662fdfd23e7c04ef03582714195d01f5d565403320084372ed # via documenteer -sphinxcontrib-applehelp==1.0.7 \ - --hash=sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d \ - --hash=sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa +sphinxcontrib-applehelp==1.0.8 \ + --hash=sha256:c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619 \ + --hash=sha256:cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4 # via sphinx -sphinxcontrib-bibtex==2.5.0 \ - --hash=sha256:71b42e5db0e2e284f243875326bf9936aa9a763282277d75048826fef5b00eaa \ - --hash=sha256:748f726eaca6efff7731012103417ef130ecdcc09501b4d0c54283bf5f059f76 +sphinxcontrib-bibtex==2.6.2 \ + --hash=sha256:10d45ebbb19207c5665396c9446f8012a79b8a538cb729f895b5910ab2d0b2da \ + --hash=sha256:f487af694336f28bfb7d6a17070953a7d264bec43000a2379724274f5f8d70ae # via documenteer -sphinxcontrib-devhelp==1.0.5 \ - --hash=sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212 \ - --hash=sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f +sphinxcontrib-devhelp==1.0.6 \ + --hash=sha256:6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f \ + --hash=sha256:9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3 # via sphinx -sphinxcontrib-htmlhelp==2.0.4 \ - --hash=sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a \ - --hash=sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9 +sphinxcontrib-htmlhelp==2.0.5 \ + --hash=sha256:0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015 \ + --hash=sha256:393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04 # via sphinx sphinxcontrib-jquery==4.1 \ --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \ @@ -1197,20 +897,28 @@ sphinxcontrib-mermaid==0.9.2 \ --hash=sha256:252ef13dd23164b28f16d8b0205cf184b9d8e2b714a302274d9f59eb708e77af \ --hash=sha256:6795a72037ca55e65663d2a2c1a043d636dc3d30d418e56dd6087d1459d98a5d # via documenteer -sphinxcontrib-qthelp==1.0.6 \ - --hash=sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d \ - --hash=sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4 +sphinxcontrib-qthelp==1.0.7 \ + --hash=sha256:053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6 \ + --hash=sha256:e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182 # via sphinx sphinxcontrib-redoc==1.6.0 \ --hash=sha256:e358edbe23927d36432dde748e978cf897283a331a03e93d3ef02e348dee4561 # via documenteer 
-sphinxcontrib-serializinghtml==1.1.9 \ - --hash=sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54 \ - --hash=sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1 +sphinxcontrib-serializinghtml==1.1.10 \ + --hash=sha256:326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7 \ + --hash=sha256:93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f # via sphinx -sphinxext-opengraph==0.8.2 \ - --hash=sha256:45a693b6704052c426576f0a1f630649c55b4188bc49eb63e9587e24a923db39 \ - --hash=sha256:6a05bdfe5176d9dd0a1d58a504f17118362ab976631213cd36fb44c4c40544c9 +sphinxext-opengraph==0.9.1 \ + --hash=sha256:b3b230cc6a5b5189139df937f0d9c7b23c7c204493b22646273687969dcb760e \ + --hash=sha256:dd2868a1e7c9497977fbbf44cc0844a42af39ca65fe1bb0272518af225d06fc5 + # via documenteer +sphinxext-rediraffe==0.2.7 \ + --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ + --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c + # via documenteer +tomlkit==0.12.3 \ + --hash=sha256:75baf5012d06501f07bee5bf8e801b9f343e7aac5a92581f20f80ce632e6b5a4 \ + --hash=sha256:b0a645a9156dc7cb5d3a1f0d4bab66db287fcb8e0430bdd4664a095ea16414ba # via documenteer typed-ast==1.5.5 \ --hash=sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10 \ @@ -1259,9 +967,9 @@ types-pyyaml==6.0.12.12 \ --hash=sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062 \ --hash=sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24 # via -r requirements/dev.in -typing-extensions==4.8.0 \ - --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ - --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef +typing-extensions==4.9.0 \ + --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \ + --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd # via # -c requirements/main.txt # mypy @@ -1271,19 +979,20 @@ uc-micro-py==1.0.2 \ --hash=sha256:30ae2ac9c49f39ac6dce743bd187fcd2b574b16ca095fa74cd9396795c954c54 \ --hash=sha256:8c9110c309db9d9e87302e2f4ad2c3152770930d88ab385cd544e7a7e75f3de0 # via linkify-it-py -urllib3==2.0.5 \ - --hash=sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594 \ - --hash=sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e +urllib3==2.1.0 \ + --hash=sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3 \ + --hash=sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54 # via # -c requirements/main.txt + # documenteer # requests -virtualenv==20.24.5 \ - --hash=sha256:b80039f280f4919c77b30f1c23294ae357c4c8701042086e3fc005963e4e537b \ - --hash=sha256:e8361967f6da6fbdf1426483bfe9fca8287c242ac0bc30429905721cefbff752 +virtualenv==20.25.0 \ + --hash=sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3 \ + --hash=sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b # via pre-commit # The following packages are considered to be unsafe in a requirements file: -setuptools==68.2.2 \ - --hash=sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87 \ - --hash=sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a +setuptools==69.0.3 \ + --hash=sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05 \ + --hash=sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78 # via nodeenv diff --git a/requirements/main.txt 
b/requirements/main.txt index df5e7aca87..978d4aa3d4 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -4,189 +4,197 @@ # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/main.txt requirements/main.in # -annotated-types==0.5.0 \ - --hash=sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802 \ - --hash=sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd +annotated-types==0.6.0 \ + --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ + --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d # via pydantic -anyio==3.7.1 \ - --hash=sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780 \ - --hash=sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5 +anyio==4.2.0 \ + --hash=sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee \ + --hash=sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f # via - # fastapi # httpcore # starlette -bcrypt==4.0.1 \ - --hash=sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535 \ - --hash=sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0 \ - --hash=sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410 \ - --hash=sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd \ - --hash=sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665 \ - --hash=sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab \ - --hash=sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71 \ - --hash=sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215 \ - --hash=sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b \ - --hash=sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda \ - --hash=sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9 \ - --hash=sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a \ - --hash=sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344 \ - --hash=sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f \ - --hash=sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d \ - --hash=sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c \ - --hash=sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c \ - --hash=sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2 \ - --hash=sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d \ - --hash=sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e \ - --hash=sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3 +bcrypt==4.1.2 \ + --hash=sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f \ + --hash=sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5 \ + --hash=sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb \ + --hash=sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258 \ + --hash=sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4 \ + --hash=sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc \ + --hash=sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2 \ + --hash=sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326 \ + 
--hash=sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483 \ + --hash=sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a \ + --hash=sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966 \ + --hash=sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63 \ + --hash=sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c \ + --hash=sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551 \ + --hash=sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d \ + --hash=sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e \ + --hash=sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0 \ + --hash=sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c \ + --hash=sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb \ + --hash=sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1 \ + --hash=sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42 \ + --hash=sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946 \ + --hash=sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab \ + --hash=sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1 \ + --hash=sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c \ + --hash=sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7 \ + --hash=sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369 # via -r requirements/main.in -certifi==2023.7.22 \ - --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ - --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 +certifi==2023.11.17 \ + --hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ + --hash=sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474 # via # httpcore # httpx # requests -cffi==1.15.1 \ - --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ - --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ - --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ - --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ - --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ - --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ - --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ - --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ - --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ - --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ - --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ - --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ - --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ - --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ - --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ - --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ - --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ - --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ - 
--hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ - --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ - --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ - --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ - --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ - --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ - --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ - --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ - --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ - --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ - --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ - --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ - --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ - --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ - --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ - --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ - --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ - --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ - --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ - --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ - --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ - --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ - --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ - --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ - --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ - --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ - --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ - --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ - --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ - --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ - --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ - --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ - --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ - --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ - --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ - --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ - --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ - --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ - --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ - --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ - --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ - --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ - 
--hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ - --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ - --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ - --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 +cffi==1.16.0 \ + --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ + --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ + --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ + --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ + --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ + --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ + --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ + --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ + --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ + --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ + --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ + --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ + --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ + --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ + --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ + --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ + --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ + --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ + --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ + --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ + --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ + --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ + --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ + --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ + --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ + --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ + --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ + --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ + --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ + --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ + --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ + --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ + --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ + --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ + --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ + --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ + --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ + --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ + 
--hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ + --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ + --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ + --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ + --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ + --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ + --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ + --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ + --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ + --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ + --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ + --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ + --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ + --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography -charset-normalizer==3.2.0 \ - --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \ - --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \ - --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \ - --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \ - --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \ - --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \ - --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \ - --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \ - --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \ - --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \ - --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \ - --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \ - --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \ - --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \ - --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \ - --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \ - --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \ - --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \ - --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \ - --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \ - --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \ - --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \ - --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \ - --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \ - --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \ - --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \ - --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \ - --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \ - 
--hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \ - --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \ - --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \ - --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \ - --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \ - --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \ - --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \ - --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \ - --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \ - --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \ - --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \ - --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \ - --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \ - --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \ - --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \ - --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \ - --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \ - --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \ - --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \ - --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \ - --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \ - --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \ - --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \ - --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \ - --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \ - --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \ - --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \ - --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \ - --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \ - --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \ - --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \ - --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \ - --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \ - --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \ - --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \ - --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \ - --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \ - --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \ - --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \ - --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \ - --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \ - --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \ - 
--hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \ - --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \ - --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \ - --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \ - --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa +charset-normalizer==3.3.2 \ + --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ + --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ + --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ + --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ + --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ + --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ + --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ + --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ + --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ + --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ + --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ + --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ + --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ + --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ + --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ + --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ + --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ + --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ + --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ + --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ + --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ + --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ + --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ + --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ + --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ + --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ + --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ + --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ + --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ + --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ + --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ + --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ + --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ + --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ + --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ + --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ + --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ + 
--hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ + --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ + --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ + --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ + --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ + --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ + --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ + --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ + --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ + --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ + --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ + --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ + --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ + --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ + --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ + --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ + --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ + --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ + --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ + --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ + --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ + --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ + --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ + --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ + --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ + --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ + --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ + --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ + --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ + --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ + --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ + --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ + --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ + --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ + --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ + --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ + --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ + --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ + --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ + --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ + --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ + --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ + 
--hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ + --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ + --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ + --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ + --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ + --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ + --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ + --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ + --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ + --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ + --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 # via requests click==8.1.7 \ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ @@ -194,49 +202,49 @@ click==8.1.7 \ # via # -r requirements/main.in # safir -cryptography==41.0.4 \ - --hash=sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67 \ - --hash=sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311 \ - --hash=sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8 \ - --hash=sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13 \ - --hash=sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143 \ - --hash=sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f \ - --hash=sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829 \ - --hash=sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd \ - --hash=sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397 \ - --hash=sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac \ - --hash=sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d \ - --hash=sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a \ - --hash=sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839 \ - --hash=sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e \ - --hash=sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6 \ - --hash=sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9 \ - --hash=sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860 \ - --hash=sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca \ - --hash=sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91 \ - --hash=sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d \ - --hash=sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714 \ - --hash=sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb \ - --hash=sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f +cryptography==41.0.7 \ + --hash=sha256:079b85658ea2f59c4f43b70f8119a52414cdb7be34da5d019a77bf96d473b960 \ + --hash=sha256:09616eeaef406f99046553b8a40fbf8b1e70795a91885ba4c96a70793de5504a \ + --hash=sha256:13f93ce9bea8016c253b34afc6bd6a75993e5c40672ed5405a9c832f0d4a00bc \ + --hash=sha256:37a138589b12069efb424220bf78eac59ca68b95696fc622b6ccc1c0a197204a \ + --hash=sha256:3c78451b78313fa81607fa1b3f1ae0a5ddd8014c38a02d9db0616133987b9cdf \ + --hash=sha256:43f2552a2378b44869fe8827aa19e69512e3245a219104438692385b0ee119d1 \ + 
--hash=sha256:48a0476626da912a44cc078f9893f292f0b3e4c739caf289268168d8f4702a39 \ + --hash=sha256:49f0805fc0b2ac8d4882dd52f4a3b935b210935d500b6b805f321addc8177406 \ + --hash=sha256:5429ec739a29df2e29e15d082f1d9ad683701f0ec7709ca479b3ff2708dae65a \ + --hash=sha256:5a1b41bc97f1ad230a41657d9155113c7521953869ae57ac39ac7f1bb471469a \ + --hash=sha256:68a2dec79deebc5d26d617bfdf6e8aab065a4f34934b22d3b5010df3ba36612c \ + --hash=sha256:7a698cb1dac82c35fcf8fe3417a3aaba97de16a01ac914b89a0889d364d2f6be \ + --hash=sha256:841df4caa01008bad253bce2a6f7b47f86dc9f08df4b433c404def869f590a15 \ + --hash=sha256:90452ba79b8788fa380dfb587cca692976ef4e757b194b093d845e8d99f612f2 \ + --hash=sha256:928258ba5d6f8ae644e764d0f996d61a8777559f72dfeb2eea7e2fe0ad6e782d \ + --hash=sha256:af03b32695b24d85a75d40e1ba39ffe7db7ffcb099fe507b39fd41a565f1b157 \ + --hash=sha256:b640981bf64a3e978a56167594a0e97db71c89a479da8e175d8bb5be5178c003 \ + --hash=sha256:c5ca78485a255e03c32b513f8c2bc39fedb7f5c5f8535545bdc223a03b24f248 \ + --hash=sha256:c7f3201ec47d5207841402594f1d7950879ef890c0c495052fa62f58283fde1a \ + --hash=sha256:d5ec85080cce7b0513cfd233914eb8b7bbd0633f1d1703aa28d1dd5a72f678ec \ + --hash=sha256:d6c391c021ab1f7a82da5d8d0b3cee2f4b2c455ec86c8aebbc84837a631ff309 \ + --hash=sha256:e3114da6d7f95d2dee7d3f4eec16dacff819740bbab931aff8648cb13c5ff5e7 \ + --hash=sha256:f983596065a18a2183e7f79ab3fd4c475205b839e02cbc0efbbf9666c4b3083d # via # -r requirements/main.in # pyjwt # safir -fastapi==0.103.1 \ - --hash=sha256:345844e6a82062f06a096684196aaf96c1198b25c06b72c1311b882aa2d8a35d \ - --hash=sha256:5e5f17e826dbd9e9b5a5145976c5cd90bcaa61f2bf9a69aca423f2bcebe44d83 +fastapi==0.109.0 \ + --hash=sha256:8c77515984cd8e8cfeb58364f8cc7a28f0692088475e2614f7bf03275eba9093 \ + --hash=sha256:b978095b9ee01a5cf49b19f4bc1ac9b8ca83aa076e770ef8fd9af09a2b88d191 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ --hash=sha256:9ece7d37fbceb819b80560e7ed58f936e48a65d37ec5f56db79145156b426a25 # via safir -gitdb==4.0.10 \ - --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ - --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 +gitdb==4.0.11 \ + --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ + --hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b # via gitpython -gitpython==3.1.37 \ - --hash=sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33 \ - --hash=sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54 +gitpython==3.1.41 \ + --hash=sha256:c36b6634d069b3f719610175020a9aed919421c87552185b085e04fbbdb10b7c \ + --hash=sha256:ed66e624884f76df22c8e16066d567aaa5a37d5b5fa19db2c6df6f7156db9048 # via -r requirements/main.in h11==0.14.0 \ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ @@ -252,214 +260,211 @@ httpx==0.23.3 \ # via # onepasswordconnectsdk # safir -hvac==1.2.1 \ - --hash=sha256:c786e3dfa1f35239810e5317cccadbe358f49b8c9001a1f2f68b79a250b9f8a1 \ - --hash=sha256:cb87f5724be8fd5f57507f5d5a94e6c42d2675128b460bf3186f966e07d4db78 +hvac==2.1.0 \ + --hash=sha256:73bc91e58c3fc7c6b8107cdaca9cb71fa0a893dfd80ffbc1c14e20f24c0c29d7 \ + --hash=sha256:b48bcda11a4ab0a7b6c47232c7ba7c87fda318ae2d4a7662800c465a78742894 # via -r requirements/main.in -idna==3.4 \ - --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ - --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 +idna==3.6 \ 
+ --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ + --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f # via # anyio # requests # rfc3986 -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +jinja2==3.1.3 \ + --hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ + --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 # via -r requirements/main.in -markupsafe==2.1.3 \ - --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ - --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ - --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ - --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ - --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ - --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ - --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ - --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ - --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ - --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ - --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ - --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ - --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ - --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ - --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ - --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ - --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ - --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ - --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ - --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ - --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ - --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ - --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ - --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ - --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ - --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ - --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ - --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ - --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ - --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ - --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ - --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ - --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ - --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ - --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ - 
--hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ - --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ - --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ - --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ - --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ - --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ - --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ - --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ - --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ - --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ - --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ - --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ - --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ - --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ - --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ - --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ - --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ - --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ - --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ - --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ - --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ - --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ - --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ - --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ - --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 +markupsafe==2.1.4 \ + --hash=sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69 \ + --hash=sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0 \ + --hash=sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d \ + --hash=sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec \ + --hash=sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5 \ + --hash=sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411 \ + --hash=sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3 \ + --hash=sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74 \ + --hash=sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0 \ + --hash=sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949 \ + --hash=sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d \ + --hash=sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279 \ + --hash=sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f \ + --hash=sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6 \ + --hash=sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc \ + --hash=sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e \ + --hash=sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954 \ + 
--hash=sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656 \ + --hash=sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc \ + --hash=sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518 \ + --hash=sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56 \ + --hash=sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc \ + --hash=sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa \ + --hash=sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565 \ + --hash=sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4 \ + --hash=sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb \ + --hash=sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250 \ + --hash=sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4 \ + --hash=sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959 \ + --hash=sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc \ + --hash=sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474 \ + --hash=sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863 \ + --hash=sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8 \ + --hash=sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f \ + --hash=sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2 \ + --hash=sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e \ + --hash=sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e \ + --hash=sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb \ + --hash=sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f \ + --hash=sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a \ + --hash=sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26 \ + --hash=sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d \ + --hash=sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2 \ + --hash=sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131 \ + --hash=sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789 \ + --hash=sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6 \ + --hash=sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a \ + --hash=sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858 \ + --hash=sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e \ + --hash=sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb \ + --hash=sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e \ + --hash=sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84 \ + --hash=sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7 \ + --hash=sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea \ + --hash=sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b \ + --hash=sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6 \ + --hash=sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475 \ + --hash=sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74 \ + --hash=sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a \ + 
--hash=sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00 # via jinja2 -onepasswordconnectsdk==1.4.0 \ - --hash=sha256:a8dd3aa1750ef0d5b095368287e043bc30cc90281169d6aaaebca57d6b4e6c5c \ - --hash=sha256:c01b4e5d6faf2e985654d19f34e84efacffcc3ba487bcbcec386d7f8d3e8d88e +onepasswordconnectsdk==1.4.1 \ + --hash=sha256:133defedbc4a4658f68e32865330c2d6844b132763037b984cb74aa21dd1e7f5 \ + --hash=sha256:8402b893e007d1a339bb5658b7600b32505c88234d74bfdb307e74c14e586e42 # via -r requirements/main.in pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pydantic==2.3.0 \ - --hash=sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d \ - --hash=sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81 +pydantic==2.5.3 \ + --hash=sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a \ + --hash=sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4 # via # -r requirements/main.in # fastapi # safir -pydantic-core==2.6.3 \ - --hash=sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3 \ - --hash=sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6 \ - --hash=sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418 \ - --hash=sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7 \ - --hash=sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc \ - --hash=sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5 \ - --hash=sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7 \ - --hash=sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f \ - --hash=sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48 \ - --hash=sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad \ - --hash=sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef \ - --hash=sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9 \ - --hash=sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58 \ - --hash=sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da \ - --hash=sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149 \ - --hash=sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b \ - --hash=sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881 \ - --hash=sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456 \ - --hash=sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98 \ - --hash=sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e \ - --hash=sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c \ - --hash=sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e \ - --hash=sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb \ - --hash=sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862 \ - --hash=sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728 \ - --hash=sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6 \ - --hash=sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf \ - --hash=sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e \ - --hash=sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd \ - 
--hash=sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8 \ - --hash=sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987 \ - --hash=sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a \ - --hash=sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2 \ - --hash=sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784 \ - --hash=sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b \ - --hash=sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309 \ - --hash=sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7 \ - --hash=sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413 \ - --hash=sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2 \ - --hash=sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f \ - --hash=sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6 \ - --hash=sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b \ - --hash=sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3 \ - --hash=sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7 \ - --hash=sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d \ - --hash=sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378 \ - --hash=sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8 \ - --hash=sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe \ - --hash=sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7 \ - --hash=sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973 \ - --hash=sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad \ - --hash=sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34 \ - --hash=sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb \ - --hash=sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c \ - --hash=sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465 \ - --hash=sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5 \ - --hash=sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588 \ - --hash=sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950 \ - --hash=sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70 \ - --hash=sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32 \ - --hash=sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7 \ - --hash=sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec \ - --hash=sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67 \ - --hash=sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645 \ - --hash=sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db \ - --hash=sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7 \ - --hash=sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170 \ - --hash=sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17 \ - --hash=sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb \ - --hash=sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c \ - --hash=sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819 \ - 
--hash=sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b \ - --hash=sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d \ - --hash=sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a \ - --hash=sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525 \ - --hash=sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1 \ - --hash=sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76 \ - --hash=sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60 \ - --hash=sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b \ - --hash=sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42 \ - --hash=sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd \ - --hash=sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014 \ - --hash=sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d \ - --hash=sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a \ - --hash=sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa \ - --hash=sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f \ - --hash=sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26 \ - --hash=sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a \ - --hash=sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64 \ - --hash=sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5 \ - --hash=sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057 \ - --hash=sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50 \ - --hash=sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b \ - --hash=sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483 \ - --hash=sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b \ - --hash=sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c \ - --hash=sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9 \ - --hash=sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698 \ - --hash=sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362 \ - --hash=sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49 \ - --hash=sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282 \ - --hash=sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0 \ - --hash=sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a \ - --hash=sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b \ - --hash=sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1 \ - --hash=sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa +pydantic-core==2.14.6 \ + --hash=sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556 \ + --hash=sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e \ + --hash=sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411 \ + --hash=sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245 \ + --hash=sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c \ + --hash=sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66 \ + --hash=sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd \ + 
--hash=sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d \ + --hash=sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b \ + --hash=sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06 \ + --hash=sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948 \ + --hash=sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341 \ + --hash=sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0 \ + --hash=sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f \ + --hash=sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a \ + --hash=sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2 \ + --hash=sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51 \ + --hash=sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80 \ + --hash=sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8 \ + --hash=sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d \ + --hash=sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8 \ + --hash=sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb \ + --hash=sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590 \ + --hash=sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87 \ + --hash=sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534 \ + --hash=sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b \ + --hash=sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145 \ + --hash=sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba \ + --hash=sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b \ + --hash=sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2 \ + --hash=sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e \ + --hash=sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052 \ + --hash=sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622 \ + --hash=sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab \ + --hash=sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b \ + --hash=sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66 \ + --hash=sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e \ + --hash=sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4 \ + --hash=sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e \ + --hash=sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec \ + --hash=sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c \ + --hash=sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed \ + --hash=sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937 \ + --hash=sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f \ + --hash=sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9 \ + --hash=sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4 \ + --hash=sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96 \ + --hash=sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277 \ + --hash=sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23 \ + 
--hash=sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7 \ + --hash=sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b \ + --hash=sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91 \ + --hash=sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d \ + --hash=sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e \ + --hash=sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1 \ + --hash=sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2 \ + --hash=sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160 \ + --hash=sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9 \ + --hash=sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670 \ + --hash=sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7 \ + --hash=sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c \ + --hash=sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb \ + --hash=sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42 \ + --hash=sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d \ + --hash=sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8 \ + --hash=sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1 \ + --hash=sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6 \ + --hash=sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8 \ + --hash=sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf \ + --hash=sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e \ + --hash=sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a \ + --hash=sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9 \ + --hash=sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1 \ + --hash=sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40 \ + --hash=sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2 \ + --hash=sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d \ + --hash=sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f \ + --hash=sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f \ + --hash=sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af \ + --hash=sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7 \ + --hash=sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda \ + --hash=sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a \ + --hash=sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95 \ + --hash=sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0 \ + --hash=sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60 \ + --hash=sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149 \ + --hash=sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975 \ + --hash=sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4 \ + --hash=sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe \ + --hash=sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94 \ + --hash=sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03 \ + 
--hash=sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c \ + --hash=sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b \ + --hash=sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a \ + --hash=sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24 \ + --hash=sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391 \ + --hash=sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c \ + --hash=sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab \ + --hash=sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd \ + --hash=sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786 \ + --hash=sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08 \ + --hash=sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8 \ + --hash=sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6 \ + --hash=sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0 \ + --hash=sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421 # via pydantic -pyhcl==0.4.5 \ - --hash=sha256:30ee337d330d1f90c9f5ed8f49c468f66c8e6e43192bdc7c6ece1420beb3070c \ - --hash=sha256:c47293a51ccdd25e18bb5c8c0ab0ffe355b37c87f8d6f9d3280dc41efd4740bc - # via hvac pyjwt[crypto]==2.8.0 \ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 - # via gidgethub + # via + # gidgethub + # pyjwt python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 @@ -494,6 +499,7 @@ pyyaml==6.0.1 \ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ @@ -523,10 +529,12 @@ requests==2.31.0 \ rfc3986[idna2008]==1.5.0 \ --hash=sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835 \ --hash=sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97 - # via httpx -safir==5.0.0a1 \ - --hash=sha256:149073f008a227c29e047195ecf5515c05181d6bc1cf816efd38781b7aca3e02 \ - --hash=sha256:66a72284f2d907023936bfa8e319d1da9210019d64c99c83516c4bc974cd50e8 + # via + # httpx + # rfc3986 +safir==5.2.0 \ + --hash=sha256:93636256dbeea847d63de6d3b434c952f81d6729f6541da09bfc7823d3f61806 \ + --hash=sha256:fe8f51d00449a60544f9eccdb8e603f085fdd4f641f5d5f13cef0f29393aad3f # via -r requirements/main.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -543,19 +551,19 @@ sniffio==1.3.0 \ # anyio # httpcore # httpx -starlette==0.27.0 \ - --hash=sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75 \ - --hash=sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91 +starlette==0.35.1 \ + --hash=sha256:3e2639dac3520e4f58734ed22553f950d3f3cb1001cd2eaac4d57e8cdc5f66bc \ + 
+    --hash=sha256:50bbbda9baa098e361f398fda0928062abbaf1f54f4fadcbe17c092a01eb9a25
     # via
     #   fastapi
     #   safir
-structlog==23.1.0 \
-    --hash=sha256:270d681dd7d163c11ba500bc914b2472d2b50a8ef00faa999ded5ff83a2f906b \
-    --hash=sha256:79b9e68e48b54e373441e130fa447944e6f87a05b35de23138e475c05d0f7e0e
+structlog==24.1.0 \
+    --hash=sha256:3f6efe7d25fab6e86f277713c218044669906537bb717c1807a09d46bca0714d \
+    --hash=sha256:41a09886e4d55df25bdcb9b5c9674bccfab723ff43e0a86a1b7b236be8e57b16
     # via safir
-typing-extensions==4.8.0 \
-    --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \
-    --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef
+typing-extensions==4.9.0 \
+    --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \
+    --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd
     # via
     #   fastapi
     #   pydantic
@@ -564,7 +572,7 @@ uritemplate==4.1.1 \
     --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \
     --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e
     # via gidgethub
-urllib3==2.0.5 \
-    --hash=sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594 \
-    --hash=sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e
+urllib3==2.1.0 \
+    --hash=sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3 \
+    --hash=sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54
     # via requests
diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py
index 2fc2b971b2..745b591aa7 100644
--- a/src/phalanx/cli.py
+++ b/src/phalanx/cli.py
@@ -22,9 +22,15 @@
     "help",
     "main",
     "application",
+    "application_add_helm_repos",
     "application_create",
+    "application_lint",
+    "application_lint_all",
+    "application_template",
     "environment",
+    "environment_lint",
     "environment_schema",
+    "environment_template",
     "secrets",
     "secrets_audit",
     "secrets_list",
@@ -71,24 +77,6 @@ def _is_config(path: Path) -> bool:
     return current


-def _load_static_secrets(path: Path) -> StaticSecrets:
-    """Load static secrets from a file.
-
-    Parameters
-    ----------
-    path
-        Path to the file.
-
-    Returns
-    -------
-    dict of dict
-        Map from application to secret key to
-        `~phalanx.models.secrets.StaticSecret`.
-    """
-    with path.open() as fh:
-        return StaticSecrets.model_validate(yaml.safe_load(fh))
-
-
 @click.group(context_settings={"help_option_names": ["-h", "--help"]})
 @click.version_option(message="%(version)s")
 def main() -> None:
@@ -109,6 +97,34 @@ def application() -> None:
     """Commands for Phalanx application configuration."""


+@application.command("add-helm-repos")
+@click.argument("name", required=False)
+@click.option(
+    "-c",
+    "--config",
+    type=click.Path(path_type=Path),
+    default=None,
+    help="Path to root of Phalanx configuration.",
+)
+def application_add_helm_repos(
+    name: str | None = None, *, config: Path | None
+) -> None:
+    """Configure dependency Helm repositories in Helm.
+
+    Add all third-party Helm chart repositories used by Phalanx applications
+    to the local Helm cache.
+
+    This will also be done as necessary by lint commands, so using this
+    command is not necessary. It is provided as a convenience for helping to
+    manage your local Helm configuration.
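(For context on the new command above: a minimal sketch of driving the same service call from Python, assuming the working directory is a Phalanx configuration checkout; the application name is illustrative.)

```python
from pathlib import Path

from phalanx.factory import Factory

# Equivalent of the new "application add-helm-repos" command for a single
# application: add only the Helm repositories that it depends on.
factory = Factory(Path("."))
application_service = factory.create_application_service()
added = application_service.add_helm_repositories(["gafaelfawr"], quiet=True)

# The return value reports whether anything was added; when it is False,
# the service docstring says to skip the subsequent repository update.
print("repositories added" if added else "nothing to add")
```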
+ """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + application_service.add_helm_repositories([name] if name else None) + + @application.command("create") @click.argument("name") @click.option( @@ -152,9 +168,137 @@ def application_create( raise click.UsageError("Description must start with capital letter") factory = Factory(config) application_service = factory.create_application_service() - application_service.create_application( - name, HelmStarter(starter), description - ) + application_service.create(name, HelmStarter(starter), description) + + +@application.command("lint") +@click.argument("applications", metavar="APPLICATION ...", nargs=-1) +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +@click.option( + "-e", + "--environment", + "--env", + type=str, + metavar="ENV", + default=None, + help="Only lint this environment.", +) +def application_lint( + applications: list[str], + *, + environment: str | None = None, + config: Path | None, +) -> None: + """Lint the Helm charts for applications. + + Update and download any third-party dependency charts and then lint the + Helm chart for the given applications. If no environment is specified, + each chart is linted for all environments for which it has a + configuration. + """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + if not application_service.lint(applications, environment): + sys.exit(1) + + +@application.command("lint-all") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +@click.option( + "--git", + is_flag=True, + help="Only lint applications changed relative to a Git branch.", +) +@click.option( + "--git-branch", + type=str, + metavar="BRANCH", + default="origin/main", + show_default=True, + show_envvar=True, + envvar="GITHUB_BASE_REF", + help="Base Git branch against which to compare.", +) +def application_lint_all( + *, config: Path | None, git: bool = False, git_branch: str +) -> None: + """Lint the Helm charts for every application and environment. + + Update and download any third-party dependency charts and then lint the + Helm charts for each application and environment combination. + """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + branch = git_branch if git else None + if not application_service.lint_all(only_changes_from_branch=branch): + sys.exit(1) + + +@application.command("template") +@click.argument("name") +@click.argument("environment") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def application_template( + name: str, environment: str, *, config: Path | None +) -> None: + """Expand the chart of an application for an environment. + + Print the expanded Kubernetes resources for an application as configured + for the given environment to standard output. This is intended for testing + and debugging purposes; normally, charts should be installed with Argo CD. 
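(A sketch of exercising the new lint command in-process with Click's test runner, which is how a unit test might pin down the exit-status behavior the CLI promises; the environment and application names are illustrative.)

```python
from click.testing import CliRunner

from phalanx.cli import main

# "--env" is an alias for "--environment" per the option declaration
# above; a lint failure surfaces as a nonzero exit code via sys.exit(1).
runner = CliRunner()
result = runner.invoke(
    main, ["application", "lint", "--env", "idfdev", "gafaelfawr"]
)
assert result.exit_code == 0, result.output
```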
+ """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + sys.stdout.write(application_service.template(name, environment)) + + +@application.command("update-shared-chart-version") +@click.argument("chart") +@click.argument("version") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def application_update_shared_chart_version( + chart: str, version: str, *, config: Path | None +) -> None: + """Update the version for a shared chart. + + This function updates the version of a shared chart in the Chart.yaml + file of all applications that use that shared chart. + """ + if not config: + config = _find_config() + factory = Factory(config) + storage = factory.create_config_storage() + storage.update_shared_chart_version(chart, version) @main.group() @@ -162,6 +306,32 @@ def environment() -> None: """Commands for Phalanx environment configuration.""" +@environment.command("lint") +@click.argument("environment", required=False) +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def environment_lint( + environment: str | None = None, *, config: Path | None, git: bool = False +) -> None: + """Lint the top-level Helm chart for an environment. + + Lint the parent Argo CD Helm chart that installs the Argo CD applications + for an environment. If the environment is not given, lints the + instantiation of that chart for each environment. + """ + if not config: + config = _find_config() + factory = Factory(config) + environment_service = factory.create_environment_service() + if not environment_service.lint(environment): + sys.exit(1) + + @environment.command("schema") @click.option( "-o", @@ -189,6 +359,29 @@ def environment_schema(*, output: Path | None) -> None: sys.stdout.write(json_schema) +@environment.command("template") +@click.argument("environment") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def environment_template(environment: str, *, config: Path | None) -> None: + """Expand the top-level chart for an environment. + + Print the expanded Kubernetes resources for the top-level chart configured + for the given environment. This is intended for testing and debugging + purposes; normally, charts should be installed with Argo CD. 
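(The environment commands above mirror the application ones through the new EnvironmentService; a minimal sketch, again assuming a Phalanx configuration checkout as the working directory and an illustrative environment name.)

```python
import sys
from pathlib import Path

from phalanx.factory import Factory

factory = Factory(Path("."))
environment_service = factory.create_environment_service()

# Mirror "environment lint" and "environment template" for one
# environment: lint the top-level chart, then print its resources.
if not environment_service.lint("minikube"):
    sys.exit(1)
sys.stdout.write(environment_service.template("minikube"))
```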
+ """ + if not config: + config = _find_config() + factory = Factory(config) + environment_service = factory.create_environment_service() + sys.stdout.write(environment_service.template(environment)) + + @main.group() def secrets() -> None: """Secret manipulation commands.""" @@ -224,12 +417,13 @@ def secrets_audit( """ if not config: config = _find_config() - static_secrets = None - if secrets: - static_secrets = _load_static_secrets(secrets) + static_secrets = StaticSecrets.from_path(secrets) if secrets else None factory = Factory(config) secrets_service = factory.create_secrets_service() - sys.stdout.write(secrets_service.audit(environment, static_secrets)) + report = secrets_service.audit(environment, static_secrets) + if report: + sys.stdout.write(report) + sys.exit(1) @secrets.command("list") @@ -409,9 +603,7 @@ def secrets_sync( """ if not config: config = _find_config() - static_secrets = None - if secrets: - static_secrets = _load_static_secrets(secrets) + static_secrets = StaticSecrets.from_path(secrets) if secrets else None factory = Factory(config) secrets_service = factory.create_secrets_service() secrets_service.sync( diff --git a/src/phalanx/constants.py b/src/phalanx/constants.py index 625bb64122..378be56c30 100644 --- a/src/phalanx/constants.py +++ b/src/phalanx/constants.py @@ -10,6 +10,7 @@ __all__ = [ "HELM_DOCLINK_ANNOTATION", + "ONEPASSWORD_ENCODED_WARNING", "PULL_SECRET_DESCRIPTION", "VAULT_SECRET_TEMPLATE", "VAULT_WRITE_TOKEN_LIFETIME", @@ -19,6 +20,12 @@ HELM_DOCLINK_ANNOTATION = "phalanx.lsst.io/docs" """Annotation in :file:`Chart.yaml` for application documentation links.""" +ONEPASSWORD_ENCODED_WARNING = ( + "If you store this secret in a 1Password item, encode it with base64" + " first." +) +"""Warning to add to secrets that must be encoded in 1Password.""" + PULL_SECRET_DESCRIPTION = ( "Pull secrets for Docker registries. Each key under registries is the name" " of a Docker registry that needs a pull secret. The value should have two" diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py index b9e0a78f0a..978c76488c 100644 --- a/src/phalanx/exceptions.py +++ b/src/phalanx/exceptions.py @@ -13,6 +13,9 @@ "InvalidApplicationConfigError", "InvalidEnvironmentConfigError", "InvalidSecretConfigError", + "MalformedOnepasswordSecretError", + "MissingOnepasswordSecretsError", + "NoOnepasswordConfigError", "NoOnepasswordCredentialsError", "UnknownEnvironmentError", "UnresolvedSecretsError", @@ -56,6 +59,8 @@ def __init__( args_str = " ".join(args) msg = f"helm {command} {args_str} failed with status {exc.returncode}" super().__init__(msg) + self.stdout = exc.stdout + self.stderr = exc.stderr class InvalidApplicationConfigError(Exception): @@ -116,6 +121,46 @@ def __init__(self, application: str, key: str, error: str) -> None: super().__init__(msg) +class MalformedOnepasswordSecretError(Exception): + """A secret stored in 1Password was malformed. + + The most common cause of this error is that the secret was marked as + encoded in base64 but couldn't be decoded. + + Parameters + ---------- + application + Name of the application. + key + Secret key. + error + Error message. + """ + + def __init__(self, application: str, key: str, error: str) -> None: + name = f"{application}/{key}" + msg = f"Value of secret {name} is malformed: {error}" + super().__init__(msg) + + +class MissingOnepasswordSecretsError(Exception): + """Secrets are missing from 1Password. + + Parameters + ---------- + secrets + List of strings identifying missing secrets. 
These will either be a + bare application name, indicating the entire application item is + missing from 1Password, or the application name followed by a space, + indicating the 1Password item doesn't have that field. + """ + + def __init__(self, secrets: Iterable[str]) -> None: + self.secrets = list(secrets) + msg = f'Missing 1Password items or fields: {", ".join(self.secrets)}' + super().__init__(msg) + + class NoOnepasswordConfigError(Exception): """Environment does not use 1Password.""" diff --git a/src/phalanx/factory.py b/src/phalanx/factory.py index bc681f22db..4e03ed0cf9 100644 --- a/src/phalanx/factory.py +++ b/src/phalanx/factory.py @@ -5,6 +5,7 @@ from pathlib import Path from .services.application import ApplicationService +from .services.environment import EnvironmentService from .services.secrets import SecretsService from .services.vault import VaultService from .storage.config import ConfigStorage @@ -49,6 +50,18 @@ def create_config_storage(self) -> ConfigStorage: """ return ConfigStorage(self._path) + def create_environment_service(self) -> EnvironmentService: + """Create service for manipulating Phalanx environments. + + Returns + ------- + EnvironmentService + Service for manipulating environments. + """ + config_storage = self.create_config_storage() + helm_storage = HelmStorage(config_storage) + return EnvironmentService(config_storage, helm_storage) + def create_secrets_service(self) -> SecretsService: """Create service for manipulating Phalanx secrets. diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index 57505175c8..e43640f257 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -8,6 +8,7 @@ AnyHttpUrl, BaseModel, ConfigDict, + Field, GetJsonSchemaHandler, field_validator, ) @@ -19,10 +20,12 @@ from .secrets import Secret __all__ = [ + "ControlSystemConfig", "Environment", "EnvironmentBaseConfig", "EnvironmentConfig", "EnvironmentDetails", + "GCPMetadata", "GafaelfawrGitHubGroup", "GafaelfawrGitHubTeam", "GafaelfawrScope", @@ -32,33 +35,174 @@ ] -class OnepasswordConfig(CamelCaseModel): - """Configuration for 1Password static secrets source.""" +class GCPMetadata(CamelCaseModel): + """Google Cloud Platform hosting metadata. - connect_url: AnyHttpUrl - """URL to the 1Password Connect API server.""" + Holds information about where in Google Cloud Platform this Phalanx + environment is hosted. This supports generating documentation that + includes this metadata, making it easier for administrators to know what + options to pass to :command:`gcloud` to do things such as get Kubernetes + credentials. 
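(To make the GCPMetadata docstring concrete: a sketch of validating the values-file form of this model and producing the kind of gcloud invocation it is meant to support. It assumes CamelCaseModel supplies camelCase aliases, as its name suggests; all values are illustrative.)

```python
from phalanx.models.environments import GCPMetadata

# Validate the camelCase form used in values files.
gcp = GCPMetadata.model_validate(
    {
        "projectId": "example-project",
        "region": "us-central1",
        "clusterName": "example-cluster",
    }
)

# Generated docs can then point administrators at something like:
print(
    f"gcloud container clusters get-credentials {gcp.cluster_name}"
    f" --region {gcp.region} --project {gcp.project_id}"
)
```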
+ """ - vault_title: str - """Title of the 1Password vault from which to retrieve secrets.""" + project_id: str = Field( + ..., + title="GCP project ID", + description="Project ID of GCP project hosting this environment", + ) + region: str = Field( + ..., + title="GCP region", + description="GCP region in which this environment is hosted", + ) -class EnvironmentBaseConfig(CamelCaseModel): - """Configuration common to `EnviromentConfig` and `Environment`.""" + cluster_name: str = Field( + ..., + title="Kubernetes cluster name", + description="Name of the GKE cluster hosting this environment", + ) - name: str - """Name of the environment.""" - fqdn: str - """Fully-qualified domain name.""" +class OnepasswordConfig(CamelCaseModel): + """Configuration for 1Password static secrets source.""" + + connect_url: AnyHttpUrl = Field( + ..., + title="1Password Connect URL", + description="URL to the 1Password Connect API server", + ) + + vault_title: str = Field( + ..., + title="1Password vault title", + description=( + "Title of the 1Password vault from which to retrieve secrets" + ), + ) + + +class ControlSystemConfig(CamelCaseModel): + """Configuration for the Control System.""" + + app_namespace: str | None = Field( + None, + title="Application Namespace", + description=( + "Set the namespace for the control system components. Each control" + " system application consists of many components that need to know" + " what namespace to which they belong." + ), + ) + + image_tag: str | None = Field( + None, + title="Image Tag", + description=("The image tag to use for control system images."), + ) + + site_tag: str | None = Field( + None, + title="Site Tag", + description=( + "The tag that tells the control system component where it is" + " running." + ), + ) + + topic_name: str | None = Field( + None, + title="Topic Identifier", + description="The Kafka identifier for control system topics.", + ) + + kafka_broker_address: str | None = Field( + None, + title="Kafka Broker Address", + description=( + "The Kafka broker address for the control system components." + ), + ) + + kafka_topic_replication_factor: int | None = Field( + None, + title="Kafka Topic Replication Factor", + description=( + "The Kafka topic replication factor for control system components." + ), + ) + + schema_registry_url: str | None = Field( + None, + title="Schema Registry URL", + description=( + "The Schema Registry URL for the control system components." 
+ ), + ) + + s3_endpoint_url: str | None = Field( + None, + title="S3 Endpoint URL", + description="The S3 URL for the environment specific LFA.", + ) - onepassword: OnepasswordConfig | None = None - """Configuration for using 1Password as a static secrets source.""" - vault_url: str - """URL of Vault server.""" +class EnvironmentBaseConfig(CamelCaseModel): + """Configuration common to `EnviromentConfig` and `Environment`.""" - vault_path_prefix: str - """Prefix of Vault paths, including the Kv2 mount point.""" + name: str = Field(..., title="Name", description="Name of the environment") + + fqdn: str = Field( + ..., + title="Domain name", + description=( + "Fully-qualified domain name on which the environment listens" + ), + ) + + butler_repository_index: str | None = Field( + None, + title="Butler repository index URL", + description="URL to Butler repository index", + ) + + gcp: GCPMetadata | None = Field( + None, + title="GCP hosting metadata", + description=( + "If this environment is hosted on Google Cloud Platform," + " metadata about the hosting project, location, and other details." + " Used to generate additional environment documentation." + ), + ) + + onepassword: OnepasswordConfig | None = Field( + None, + title="1Password configuration", + description=( + "Configuration for using 1Password as a static secrets source" + ), + ) + + vault_url: AnyHttpUrl | None = Field( + None, + title="Vault server URL", + description=( + "URL of the Vault server. This is required in the merged values" + " file that includes environment overrides, but the environment" + " override file doesn't need to set it, so it's marked as" + " optional for schema checking purposes to allow the override" + " file to be schema-checked independently." + ), + ) + + vault_path_prefix: str = Field( + ..., + title="Vault path prefix", + description="Prefix of Vault paths, including the KV v2 mount point", + ) + + control_system: ControlSystemConfig | None = None @field_validator("onepassword", mode="before") @classmethod @@ -123,38 +267,41 @@ class EnvironmentConfig(EnvironmentBaseConfig): environment and is also used to validate those files. For the complete configuration for an environment, initialize this model with the merger of :file:`values.yaml` and :file:`values-{environment}.yaml`. - """ - - applications: dict[str, bool] - """List of applications and whether they are enabled.""" - - butler_repository_index: str | None = None - """URL to Butler repository index.""" - onepassword_uuid: str | None = None - """UUID of 1Password item in which to find Vault tokens. - - This is used only by the old installer and will be removed once the new - secrets management and 1Password integration is deployed everywhere. + Fields listed here are not available to application linting. If the field + value has to be injected during linting, the field needs to be defined in + `EnvironmentBaseConfig` instead. """ - repo_url: str | None = None - """URL of the Git repository holding Argo CD configuration. - - This is required in the merged values file that includes environment - overrides, but the environment override file doesn't need to set it, so - it's marked as optional for schema checking purposes to allow the override - file to be schema-checked independently. - """ - - target_revision: str | None = None - """Branch of the Git repository holding Argo CD configuration. 
- - This is required in the merged values file that includes environment - overrides, but the environment override file doesn't need to set it, so - it's marked as optional for schema checking purposes to allow the override - file to be schema-checked independently. - """ + applications: dict[str, bool] = Field( + ..., + title="Enabled applications", + description="List of applications and whether they are enabled", + ) + + repo_url: str | None = Field( + None, + title="URL of Git repository", + description=( + "URL of the Git repository holding Argo CD configuration. This is" + " required in the merged values file that includes environment" + " overrides, but the environment override file doesn't need to" + " set it, so it's marked as optional for schema checking purposes" + " to allow the override file to be schema-checked independently." + ), + ) + + target_revision: str | None = Field( + None, + title="Git repository branch", + description=( + "Branch of the Git repository holding Argo CD configuration. This" + " is required in the merged values file that includes environment" + " overrides, but the environment override file doesn't need to set" + " it, so it's marked as optional for schema checking purposes to" + " allow the override file to be schema-checked independently." + ), + ) model_config = ConfigDict(extra="forbid") diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index 1581a4b783..e72d0219cd 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -7,9 +7,11 @@ from base64 import b64encode from datetime import UTC, datetime from enum import Enum -from typing import Literal, Self +from pathlib import Path +from typing import Any, Literal, Self import bcrypt +import yaml from cryptography.fernet import Fernet from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization @@ -60,11 +62,17 @@ class ConditionalMixin(BaseModel): class SecretCopyRules(BaseModel): """Rules for copying a secret value from another secret.""" - application: str - """Application from which the secret should be copied.""" + application: str = Field( + ..., + title="Application", + description="Application from which the secret should be copied", + ) - key: str - """Secret key from which the secret should be copied.""" + key: str = Field( + ..., + title="Key", + description="Secret key from which the secret should be copied", + ) model_config = ConfigDict(populate_by_name=True, extra="forbid") @@ -92,8 +100,7 @@ class SimpleSecretGenerateRules(BaseModel): SecretGenerateType.gafaelfawr_token, SecretGenerateType.fernet_key, SecretGenerateType.rsa_private_key, - ] - """Type of secret.""" + ] = Field(..., title="Secret type", description="Type of secret") model_config = ConfigDict(populate_by_name=True, extra="forbid") @@ -132,15 +139,16 @@ class SourceSecretGenerateRules(BaseModel): type: Literal[ SecretGenerateType.bcrypt_password_hash, SecretGenerateType.mtime, - ] - """Type of secret.""" - - source: str - """Key of secret on which this secret is based. + ] = Field(..., title="Secret type", description="Type of secret") - This may only be set by secrets of type ``bcrypt-password-hash`` or - ``mtime``. - """ + source: str = Field( + ..., + title="Source key", + description=( + "Key of secret on which this secret is based. This may only be" + " set by secrets of type `bcrypt-password-hash` or `mtime`." 
+ ), + ) def generate(self, source: SecretStr) -> SecretStr: match self.type: @@ -170,36 +178,49 @@ class ConditionalSourceSecretGenerateRules( class SecretOnepasswordConfig(BaseModel): """Configuration for how a static secret is stored in 1Password.""" - encoded: bool = False - """Whether the 1Password copy of the secret is encoded in base64. - - 1Password doesn't support newlines in secrets, so secrets that contain - significant newlines have to be encoded when storing them in 1Password. - This flag indicates that this has been done, and therefore when retrieving - the secret from 1Password, its base64-encoding must be undone. - """ + encoded: bool = Field( + False, + title="Is base64-encoded", + description=( + "Whether the 1Password copy of the secret is encoded in base64." + " 1Password doesn't support newlines in secrets, so secrets that" + " contain significant newlines have to be encoded when storing" + " them in 1Password. This flag indicates that this has been done," + " and therefore when retrieving the secret from 1Password, its" + " base64-encoding must be undone." + ), + ) class SecretConfig(BaseModel): """Specification for an application secret.""" - description: str - """Description of the secret.""" + description: str = Field( + ..., title="Description", description="Description of the secret" + ) copy_rules: SecretCopyRules | None = Field( None, + title="Copy rules", description="Rules for where the secret should be copied from", alias="copy", ) - generate: SecretGenerateRules | None = None - """Rules for how the secret should be generated.""" + generate: SecretGenerateRules | None = Field( + None, + title="Generation rules", + description="Rules for how the secret should be generated", + ) - onepassword: SecretOnepasswordConfig = SecretOnepasswordConfig() - """Configuration for how the secret is stored in 1Password.""" + onepassword: SecretOnepasswordConfig = Field( + default_factory=SecretOnepasswordConfig, + title="1Password configuration", + description="Configuration for how the secret is stored in 1Password", + ) - value: SecretStr | None = None - """Secret value.""" + value: SecretStr | None = Field( + None, title="Value", description="Fixed value of secret" + ) model_config = ConfigDict(populate_by_name=True, extra="forbid") @@ -209,12 +230,16 @@ class ConditionalSecretConfig(SecretConfig, ConditionalMixin): copy_rules: ConditionalSecretCopyRules | None = Field( None, + title="Copy rules", description="Rules for where the secret should be copied from", alias="copy", ) - generate: ConditionalSecretGenerateRules | None = None - """Rules for how the secret should be generated.""" + generate: ConditionalSecretGenerateRules | None = Field( + None, + title="Generation rules", + description="Rules for how the secret should be generated", + ) @model_validator(mode="after") def _validate_generate(self) -> Self: @@ -276,6 +301,7 @@ class PullSecret(BaseModel): title="Pull secret by registry", description="Pull secrets for each registry that needs one", ) + model_config = ConfigDict(extra="forbid") def to_dockerconfigjson(self) -> str: @@ -323,6 +349,15 @@ class StaticSecret(BaseModel): description="Intended for human writers and ignored by tools", ) + warning: YAMLFoldedString | None = Field( + None, + title="Warning for humans", + description=( + "Any warnings humans need to know about when filling out this" + " secret" + ), + ) + value: SecretStr | None = Field( None, title="Value of secret", @@ -357,6 +392,23 @@ class StaticSecrets(BaseModel): model_config = 
ConfigDict(populate_by_name=True, extra="forbid")
+
+    @classmethod
+    def from_path(cls, path: Path) -> Self:
+        """Load static secrets from a file on disk.
+
+        Parameters
+        ----------
+        path
+            Path to the file.
+
+        Returns
+        -------
+        StaticSecrets
+            Parsed static secrets.
+        """
+        with path.open() as fh:
+            return cls.model_validate(yaml.safe_load(fh))
+
     def for_application(self, application: str) -> dict[str, StaticSecret]:
         """Return any known secrets for an application.
@@ -372,3 +424,22 @@ def for_application(self, application: str) -> dict[str, StaticSecret]:
         application has no static secrets, returns an empty dictionary.
         """
         return self.applications.get(application, {})
+
+    def to_template(self) -> dict[str, Any]:
+        """Export the model in a suitable form for the template.
+
+        The static secrets template should always include the ``value`` field
+        even though it will be `None`, should not include ``warning`` if it is
+        unset, and should always include the `PullSecret` fields even though
+        they are defaults. The parameters to `~pydantic.BaseModel.model_dump`
+        aren't up to specifying this, hence this custom serializer.
+
+        Returns
+        -------
+        dict
+            Dictionary suitable for dumping as YAML to make a template.
+        """
+        result = self.model_dump(by_alias=True, exclude_unset=True)
+        if self.pull_secret:
+            result["pull-secret"] = self.pull_secret.model_dump()
+        return result
diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py
index b2cd629145..7de4640df0 100644
--- a/src/phalanx/services/application.py
+++ b/src/phalanx/services/application.py
@@ -2,12 +2,14 @@

 from __future__ import annotations

+from collections.abc import Iterable
 from pathlib import Path

 import jinja2
 import yaml

 from ..exceptions import ApplicationExistsError
+from ..models.environments import Environment
 from ..models.helm import HelmStarter
 from ..storage.config import ConfigStorage
 from ..storage.helm import HelmStorage
@@ -43,7 +45,46 @@ def __init__(
         autoescape=jinja2.select_autoescape(disabled_extensions=["jinja"]),
     )

-    def create_application(
+    def add_helm_repositories(
+        self, applications: Iterable[str] | None = None, *, quiet: bool = False
+    ) -> bool:
+        """Add all Helm repositories used by any application to Helm's cache.
+
+        To perform other Helm operations, such as downloading third-party
+        charts in order to run :command:`helm lint`, all third-party Helm
+        chart repositories have to be added to Helm's cache. This does that
+        for every application in the Phalanx configuration.
+
+        Consistent names for the Helm repositories are used so that this
+        command can be run repeatedly.
+
+        Parameters
+        ----------
+        applications
+            If given, only add Helm repositories required by these
+            applications.
+        quiet
+            Whether to suppress Helm's standard output.
+
+        Returns
+        -------
+        bool
+            Whether any Helm repositories were added. If there were none,
+            the caller should not call :command:`helm repo update`, because
+            it fails if there are no repositories.
+        """
+        if applications:
+            repo_urls = set()
+            for application in applications:
+                urls = self._config.get_dependency_repositories(application)
+                repo_urls.update(urls)
+        else:
+            repo_urls = self._config.get_all_dependency_repositories()
+        for url in sorted(repo_urls):
+            self._helm.repo_add(url, quiet=quiet)
+        return bool(repo_urls)
+
+    def create(
         self, name: str, starter: HelmStarter, description: str
     ) -> None:
         """Create configuration for a new application.
@@ -90,6 +131,204 @@ def create_application(
         # Add the documentation.
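The new from_path and to_template methods give static secrets files a simple round trip. A short usage sketch (the file path is illustrative):

    from pathlib import Path

    import yaml

    from phalanx.models.secrets import StaticSecrets

    secrets = StaticSecrets.from_path(Path("static-secrets.yaml"))
    for key, secret in secrets.for_application("gafaelfawr").items():
        print(key, "provided" if secret.value else "still needed")

    # Re-serialize using the template rules described in to_template.
    print(yaml.dump(secrets.to_template(), width=70))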
         self._create_application_docs(name, description)

+    def lint(self, app_names: list[str], env_name: str | None) -> bool:
+        """Lint an application with Helm.
+
+        Registers any required Helm repositories, refreshes them, downloads
+        dependencies, and runs :command:`helm lint` on the application chart,
+        configured for the given environment.
+
+        Parameters
+        ----------
+        app_names
+            Names of the applications to lint.
+        env_name
+            Name of the environment. If not given, lint all environments for
+            which this application has a configuration.
+
+        Returns
+        -------
+        bool
+            Whether linting passed.
+        """
+        if self.add_helm_repositories(app_names):
+            self._helm.repo_update()
+        environments: dict[str, Environment] = {}
+        if env_name:
+            environments[env_name] = self._config.load_environment(env_name)
+        success = True
+        for app_name in app_names:
+            self._helm.dependency_update(app_name)
+            if env_name:
+                app_envs = [env_name]
+            else:
+                app_envs = self._config.get_application_environments(app_name)
+            for env in app_envs:
+                if env not in environments:
+                    environments[env] = self._config.load_environment(env)
+                environment = environments[env]
+                values = self._build_injected_values(app_name, environment)
+                success &= self._helm.lint_application(app_name, env, values)
+        return success
+
+    def lint_all(self, *, only_changes_from_branch: str | None = None) -> bool:
+        """Lint all applications with Helm.
+
+        Registers any required Helm repositories, refreshes them, downloads
+        dependencies, and runs :command:`helm lint` on every combination of
+        application chart and configured environment.
+
+        Parameters
+        ----------
+        only_changes_from_branch
+            If given, only lint application and environment pairs that may
+            have been affected by Git changes relative to the given branch.
+            In other words, assume all application chart configurations
+            identical to the given branch are uninteresting, and only lint the
+            ones that have changed.
+
+        Returns
+        -------
+        bool
+            Whether linting passed.
+        """
+        if only_changes_from_branch:
+            branch = only_changes_from_branch
+            to_lint = self._config.get_modified_applications(branch)
+        else:
+            to_lint = self._config.list_application_environments()
+        if self.add_helm_repositories(to_lint.keys()):
+            self._helm.repo_update()
+        environments: dict[str, Environment] = {}
+        success = True
+        for app_name, app_envs in sorted(to_lint.items()):
+            if not app_envs:
+                continue
+            self._helm.dependency_update(app_name, quiet=True)
+            for env_name in app_envs:
+                if env_name in environments:
+                    environment = environments[env_name]
+                else:
+                    environment = self._config.load_environment(env_name)
+                    environments[env_name] = environment
+                values = self._build_injected_values(app_name, environment)
+                success &= self._helm.lint_application(
+                    app_name, env_name, values
+                )
+        return success
+
+    def template(self, app_name: str, env_name: str) -> str:
+        """Expand the templates of an application chart.
+
+        Run :command:`helm template` for an application chart, passing in the
+        appropriate parameters for that environment.
+
+        Parameters
+        ----------
+        app_name
+            Name of the application.
+        env_name
+            Name of the environment whose configuration should be used when
+            expanding the templates.
+
+        Returns
+        -------
+        str
+            Output from :command:`helm template`.
+
+        Raises
+        ------
+        HelmFailedError
+            Raised if Helm fails.
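A rough sketch of how these methods are driven (the factory accessor named here is an assumption for illustration; the tests later in this diff exercise the same paths through the phalanx application lint and lint-all CLI commands):

    from phalanx.factory import Factory

    factory = Factory()  # hypothetical construction; real wiring differs
    service = factory.create_application_service()  # assumed accessor name

    # Lint one application for one environment...
    ok = service.lint(["gafaelfawr"], "idfdev")
    # ...or only the application/environment pairs changed relative to main.
    ok &= service.lint_all(only_changes_from_branch="origin/main")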
+ """ + if self.add_helm_repositories([app_name], quiet=True): + self._helm.repo_update(quiet=True) + self._helm.dependency_update(app_name, quiet=True) + environment = self._config.load_environment(env_name) + values = self._build_injected_values(app_name, environment) + return self._helm.template_application(app_name, env_name, values) + + def _build_injected_values( + self, application: str, environment: Environment + ) -> dict[str, str]: + """Construct extra injected Helm values. + + To simulate the chart as it will be configured by Argo CD, we have to + add the values that are injected via the Argo CD application. + + Parameters + ---------- + application + Name of the application. + environment + Environment whose globals should be injected. + + Returns + ------- + dict of str + Dictionary of Helm settings to their (string) values. + + Notes + ----- + This is a bit of a hack, since it hard-codes the injected values + rather than reading them out of the ``Application`` object definition. + It therefore must be updated every time we inject a new type of value + into charts. + + All globals that would be injected into any chart are injected here, + even if this chart doesn't use them. That should be harmless, although + it doesn't exactly simulate what Argo CD does. + """ + enabled_apps = [a.name for a in environment.all_applications()] + values = { + "global.enabledServices": "@" + "@".join(enabled_apps), + "global.host": environment.fqdn, + "global.baseUrl": f"https://{environment.fqdn}", + "global.vaultSecretsPath": environment.vault_path_prefix, + } + if environment.gcp: + values["global.gcpProjectId"] = environment.gcp.project_id + values["global.gcpRegion"] = environment.gcp.region + if environment.butler_repository_index: + butler_index = environment.butler_repository_index + values["global.butlerRepositoryIndex"] = butler_index + + # vault-secrets-operator gets the Vault host injected into it. Use the + # existence of its subchart configuration tree as a cue to inject the + # same thing here. + if application == "vault-secrets-operator": + key = "vault-secrets-operator.vault.address" + values[key] = str(environment.vault_url) + + if environment.control_system: + extras = { + "appNamespace": environment.control_system.app_namespace, + "imageTag": environment.control_system.image_tag, + "siteTag": environment.control_system.site_tag, + "topicName": environment.control_system.topic_name, + "kafkaBrokerAddress": ( + environment.control_system.kafka_broker_address + ), + "kafkaTopicReplicationFactor": ( + str( + environment.control_system.kafka_topic_replication_factor + ) + ), + "schemaRegistryUrl": ( + environment.control_system.schema_registry_url + ), + "s3EndpointUrl": environment.control_system.s3_endpoint_url, + } + values.update( + { + f"global.controlSystem.{k}": v + for k, v in extras.items() + if v is not None + } + ) + + return values + def _create_application_template(self, name: str) -> None: """Add the ``Application`` template and environment values setting. diff --git a/src/phalanx/services/environment.py b/src/phalanx/services/environment.py new file mode 100644 index 0000000000..edf5f67d59 --- /dev/null +++ b/src/phalanx/services/environment.py @@ -0,0 +1,69 @@ +"""Service for manipulating Phalanx environments.""" + +from __future__ import annotations + +from ..storage.config import ConfigStorage +from ..storage.helm import HelmStorage + +__all__ = ["EnvironmentService"] + + +class EnvironmentService: + """Service for manipulating Phalanx environments. 
+ + Parameters + ---------- + config_storage + Storage object for the Phalanx configuration. + helm_storage + Interface to Helm actions. + """ + + def __init__( + self, config_storage: ConfigStorage, helm_storage: HelmStorage + ) -> None: + self._config = config_storage + self._helm = helm_storage + + def lint(self, environment: str | None = None) -> bool: + """Lint the Helm chart for environments. + + Parameters + ---------- + environment + If given, lint only the specified environment. + + Returns + ------- + bool + Whether linting passed. + """ + if environment: + return self._helm.lint_environment(environment) + success = True + for environment in self._config.list_environments(): + success &= self._helm.lint_environment(environment) + return success + + def template(self, environment: str) -> str: + """Expand the templates of the top-level chart. + + Run :command:`helm template` for a top-level chart, passing in the + appropriate parameters for the given environment. + + Parameters + ---------- + environment + Environment for which to expand the top-level chart. + + Returns + ------- + str + Output from :command:`helm template`. + + Raises + ------ + HelmFailedError + Raised if Helm fails. + """ + return self._helm.template_environment(environment) diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 2f0e761184..02a29ee733 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -2,6 +2,7 @@ from __future__ import annotations +import binascii from base64 import b64decode from collections import defaultdict from dataclasses import dataclass, field @@ -9,7 +10,13 @@ import yaml from pydantic import SecretStr -from ..exceptions import NoOnepasswordConfigError, UnresolvedSecretsError +from ..constants import ONEPASSWORD_ENCODED_WARNING +from ..exceptions import ( + MalformedOnepasswordSecretError, + MissingOnepasswordSecretsError, + NoOnepasswordConfigError, + UnresolvedSecretsError, +) from ..models.environments import Environment from ..models.secrets import ( PullSecret, @@ -51,7 +58,8 @@ def to_text(self) -> str: report += "Missing secrets:\n• " + secrets + "\n" if self.mismatch: secrets = "\n• ".join(sorted(self.mismatch)) - report += "Incorrect secrets:\n• " + secrets + "\n" + heading = "Secrets that do not have their expected value:" + report += f"{heading}\n• " + secrets + "\n" if self.unknown: secrets = "\n• ".join(sorted(self.unknown)) report += "Unknown secrets in Vault:\n• " + secrets + "\n" @@ -102,7 +110,11 @@ def audit( """ environment = self._config.load_environment(env_name) if not static_secrets: - static_secrets = self._get_onepassword_secrets(environment) + try: + static_secrets = self._get_onepassword_secrets(environment) + except MissingOnepasswordSecretsError as e: + heading = "Missing static secrets from 1Password:" + return f"{heading}\n• " + "\n• ".join(e.secrets) + "\n" vault_client = self._vault.get_vault_client(environment) pull_secret = static_secrets.pull_secret if static_secrets else None @@ -126,8 +138,6 @@ def audit( # Generate the textual report. return report.to_text() - # Generate the textual report. - def generate_static_template(self, env_name: str) -> str: """Generate a template for providing static secrets. @@ -147,17 +157,21 @@ def generate_static_template(self, env_name: str) -> str: YAML template the user can fill out, as a string. 
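Given the serialization rules later spelled out in StaticSecrets.to_template, the generated template looks roughly like this (application, key, and warning wording all invented for illustration; the real warning text comes from ONEPASSWORD_ENCODED_WARNING):

    illustrative_template = """\
    applications:
      someapp:
        some-key:
          description: >-
            Token used to sign internal requests.
          value: null
        some-cert:
          description: >-
            Certificate with embedded newlines.
          warning: >-
            Stored base64-encoded in 1Password.
          value: null
    pull-secret:
      registries: {}
    """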
""" environment = self._config.load_environment(env_name) + warning = ONEPASSWORD_ENCODED_WARNING template: defaultdict[str, dict[str, StaticSecret]] = defaultdict(dict) for application in environment.all_applications(): for secret in application.all_static_secrets(): - template[secret.application][secret.key] = StaticSecret( + static_secret = StaticSecret( description=YAMLFoldedString(secret.description), value=None, ) + if secret.onepassword.encoded: + static_secret.warning = YAMLFoldedString(warning) + template[secret.application][secret.key] = static_secret static_secrets = StaticSecrets( applications=template, pull_secret=PullSecret() ) - return yaml.dump(static_secrets.model_dump(by_alias=True), width=70) + return yaml.dump(static_secrets.to_template(), width=70) def get_onepassword_static_secrets(self, env_name: str) -> StaticSecrets: """Retrieve static secrets for an environment from 1Password. @@ -240,7 +254,9 @@ def sync( # Replace any Vault secrets that are incorrect. self._sync_application_secrets(vault_client, vault_secrets, resolved) + has_pull_secret = False if resolved.pull_secret and resolved.pull_secret.registries: + has_pull_secret = True pull_secret = resolved.pull_secret self._sync_pull_secret(vault_client, vault_secrets, pull_secret) @@ -250,7 +266,7 @@ def sync( vault_client, vault_secrets, resolved, - has_pull_secret=resolved.pull_secret is not None, + has_pull_secret=has_pull_secret, ) def _audit_secrets( @@ -293,7 +309,7 @@ def _audit_secrets( ] # The pull-secret has to be handled separately. - if pull_secret: + if pull_secret and pull_secret.registries: if "pull-secret" in vault_secrets: value = SecretStr(pull_secret.to_dockerconfigjson()) expected = {".dockerconfigjson": value} @@ -330,7 +346,7 @@ def _clean_vault_secrets( has_pull_secret Whether there should be a pull secret for this environment. """ - for application, values in vault_secrets.items(): + for application, values in sorted(vault_secrets.items()): if application not in resolved.applications: if application == "pull-secret" and has_pull_secret: continue @@ -346,6 +362,37 @@ def _clean_vault_secrets( for key in sorted(to_delete): print("Deleted Vault secret for", application, key) + def _decode_base64_secret( + self, application: str, key: str, value: SecretStr + ) -> SecretStr: + """Decode a secret value that was encoded in base64. + + Parameters + ---------- + application + Name of the application owning the secret, for error reporting. + key + Key of the secret, for error reporting. + value + Value of the secret. + + Returns + ------- + pydantic.SecretStr or None + Decoded value of the secret. + + Raises + ------ + MalformedOnepasswordSecretError + Raised if the secret could not be decoded. + """ + try: + secret = value.get_secret_value() + return SecretStr(b64decode(secret.encode()).decode()) + except (binascii.Error, UnicodeDecodeError) as e: + msg = "value could not be base64-decoded to a valid secret string" + raise MalformedOnepasswordSecretError(application, key, msg) from e + def _get_onepassword_secrets( self, environment: Environment ) -> StaticSecrets | None: @@ -364,6 +411,11 @@ def _get_onepassword_secrets( Raises ------ + MalformedOnepasswordSecretError + Raised if the secret could not be decoded. + MissingOnepasswordSecretsError + Raised if any of the items or fields expected to be in 1Password + are not present. NoOnepasswordCredentialsError Raised if the environment uses 1Password but no 1Password credentials were available in the environment. 
@@ -375,6 +427,8 @@ def _get_onepassword_secrets( encoded = {} for application in environment.all_applications(): static_secrets = application.all_static_secrets() + if not static_secrets: + continue query[application.name] = [s.key for s in static_secrets] encoded[application.name] = { s.key for s in static_secrets if s.onepassword.encoded @@ -386,8 +440,9 @@ def _get_onepassword_secrets( for key in secrets: secret = result.applications[app_name][key] if secret.value: - value = secret.value.get_secret_value().encode() - secret.value = SecretStr(b64decode(value).decode()) + secret.value = self._decode_base64_secret( + app_name, key, secret.value + ) return result def _resolve_secrets( diff --git a/src/phalanx/services/vault.py b/src/phalanx/services/vault.py index 9628b8992c..3acc7664aa 100644 --- a/src/phalanx/services/vault.py +++ b/src/phalanx/services/vault.py @@ -88,9 +88,10 @@ def copy_secrets(self, environment: str, old_path: str) -> None: new_vault_client = self._vault.get_vault_client(config) old_vault_client = self._vault.get_vault_client(config, old_path) secrets = old_vault_client.list_application_secrets() - for name in secrets: + for name in sorted(secrets): secret = old_vault_client.get_application_secret(name) new_vault_client.store_application_secret(name, secret) + print("Copied Vault secret for", name) def create_read_approle(self, environment: str) -> VaultAppRole: """Create a new Vault read AppRole for the given environment. diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 60b471b9f4..685802cf27 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -5,10 +5,13 @@ import re from collections import defaultdict from contextlib import suppress +from dataclasses import dataclass from pathlib import Path -from typing import Any +from typing import Any, Self import yaml +from git import Diff +from git.repo import Repo from pydantic import ValidationError from ..constants import HELM_DOCLINK_ANNOTATION @@ -67,6 +70,60 @@ def _merge_overrides( return new +@dataclass +class _ApplicationChange: + """Holds the analysis of a diff affecting a Phalanx application chart.""" + + application: str + """Name of the affected application.""" + + path: str + """Path of changed file relative to the top of the chart.""" + + is_delete: bool + """Whether this change is a file deletion.""" + + @classmethod + def from_diff(cls, diff: Diff) -> Self: + """Create a change based on a Git diff. + + Parameters + ---------- + diff + One Git diff affecting a single file. + + Returns + ------- + _ApplicationChange + Corresponding parsed change. + + Raises + ------ + ValueError + Raised if this is not a change to an application chart. + """ + full_path = diff.b_path or diff.a_path + if not full_path: + raise ValueError("Not a change to an application") + m = re.match("applications/([^/]+)/(.+)", full_path) + if not m: + raise ValueError("Not a change to an application") + return cls( + application=m.group(1), + path=m.group(2), + is_delete=diff.change_type == "D", + ) + + @property + def affects_all_envs(self) -> bool: + """Whether this change may affect any environment.""" + if self.path in ("Chart.yaml", "values.yaml"): + return True + if self.path.startswith(("crds/", "templates/")): + return True + return False + + class ConfigStorage: """Analyze Phalanx configuration and convert it to models. 
@@ -155,6 +212,24 @@ def add_application_setting(self, application: str, setting: str) -> None: new.write(setting + "\n") path_new.rename(path) + def get_all_dependency_repositories(self) -> set[str]: + """List the URLs of all referenced third-party Helm repositories. + + Returns + ------- + set of str + URLs of third-party Helm repositories referenced by some + application chart. + """ + repo_urls = set() + for app_path in (self._path / "applications").iterdir(): + chart_path = app_path / "Chart.yaml" + if not chart_path.exists(): + continue + urls = self.get_dependency_repositories(app_path.name) + repo_urls.update(urls) + return repo_urls + def get_application_chart_path(self, application: str) -> Path: """Determine the path to an application Helm chart. @@ -173,6 +248,105 @@ def get_application_chart_path(self, application: str) -> Path: """ return self._path / "applications" / application + def get_application_environments(self, application: str) -> list[str]: + """List all environments for which an application is configured. + + This is based entirely on the presence of + :file:`values-{environment}.yaml` configuration files in the + application directory, not on which environments enable the + application. This is intentional since this is used to constrain which + environments are linted, and we want to lint applications in + environments that aren't currently enabled to ensure they've not + bitrotted. + + Parameters + ---------- + application + Name of the application. + + Returns + ------- + list of str + List of environment names for which that application is + configured. + """ + path = self.get_application_chart_path(application) + return [ + v.stem.removeprefix("values-") + for v in sorted(path.glob("values-*.yaml")) + ] + + def get_dependency_repositories(self, application: str) -> set[str]: + """Return URLs for dependency Helm repositories for this application. + + Parameters + ---------- + application + Name of the application. + + Returns + ------- + set of str + URLs of Helm repositories used by dependencies of this + application's chart. + """ + path = self.get_application_chart_path(application) / "Chart.yaml" + chart = yaml.safe_load(path.read_text()) + repo_urls = set() + for dependency in chart.get("dependencies", []): + if "repository" in dependency: + repository = dependency["repository"] + if not repository.startswith("file:"): + repo_urls.add(repository) + return repo_urls + + def get_environment_chart_path(self) -> Path: + """Determine the path to the top-level environment chart. + + Returns + ------- + pathlib.Path + Path to the top-level environment chart. + """ + return self._path / "environments" + + def get_modified_applications(self, branch: str) -> dict[str, list[str]]: + """Get all modified application and environment pairs. + + Application and environment pairs that have been deleted do not count + as modified, since we don't want to attempt to lint deleted + configurations. + + Parameters + ---------- + branch + Git branch against which to compare to see what modifications + have been made. + + Returns + ------- + dict of list of str + Dictionary of all modified applications to the list of + environments configured for that application that may have been + affected. 
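The values-{environment}.yaml naming convention does double duty: it configures an application for an environment and also advertises which pairs should be linted. In miniature:

    from pathlib import Path

    # A chart directory containing values.yaml, values-idfdev.yaml, and
    # values-minikube.yaml yields ["idfdev", "minikube"].
    chart_dir = Path("applications") / "gafaelfawr"
    envs = [
        v.stem.removeprefix("values-")
        for v in sorted(chart_dir.glob("values-*.yaml"))
    ]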
+ """ + result: defaultdict[str, list[str]] = defaultdict(list) + repo = Repo(str(self._path)) + diffs = repo.head.commit.diff(branch, paths=["applications"], R=True) + for diff in diffs: + try: + change = _ApplicationChange.from_diff(diff) + except ValueError: + continue + if change.affects_all_envs: + envs = self.get_application_environments(change.application) + if envs: + result[change.application] = envs + elif not change.is_delete: + if m := re.match("values-([^.]+).yaml$", change.path): + result[change.application].append(m.group(1)) + return result + def get_starter_path(self, starter: HelmStarter) -> Path: """Determine the path to a Helm starter template. @@ -188,6 +362,45 @@ def get_starter_path(self, starter: HelmStarter) -> Path: """ return self._path / "starters" / starter.value + def list_application_environments(self) -> dict[str, list[str]]: + """List all available applications and their environments. + + Returns + ------- + dict of list of str + Dictionary of all applications to lists of environments for which + that application has a configuration. + """ + return { + a: self.get_application_environments(a) + for a in self.list_applications() + } + + def list_applications(self) -> list[str]: + """List all available applications. + + Returns + ------- + list of str + Names of all applications. + """ + path = self._path / "applications" + return sorted(v.name for v in path.iterdir() if v.is_dir()) + + def list_environments(self) -> list[str]: + """List all of the available environments. + + Returns + ------- + list of str + Names of all available environments. + """ + path = self._path / "environments" + return [ + v.stem.removeprefix("values-") + for v in sorted(path.glob("values-*.yaml")) + ] + def load_environment(self, environment_name: str) -> Environment: """Load the configuration of a Phalanx environment from disk. @@ -274,11 +487,9 @@ def load_phalanx_config(self) -> PhalanxConfig: InvalidEnvironmentConfigError Raised if the configuration for an environment is invalid. """ - environments_path = self._path / "environments" - environments = [] - for values_path in sorted(environments_path.glob("values-*.yaml")): - environment_name = values_path.stem.removeprefix("values-") - environments.append(self.load_environment_config(environment_name)) + environments = [ + self.load_environment_config(e) for e in self.list_environments() + ] # Load the configurations of all applications. all_applications: set[str] = set() @@ -322,6 +533,31 @@ def load_phalanx_config(self) -> PhalanxConfig: applications=sorted(applications.values(), key=lambda a: a.name), ) + def update_shared_chart_version(self, chart: str, version: str) -> None: + """Update the version of a shared chart across all applications. + + Parameters + ---------- + chart + The name of the chart for the version change. + version + The chart version to update. + """ + for app in self.list_applications(): + app_config = self._load_application_config(app) + is_modified = False + try: + for item in app_config.chart["dependencies"]: + if item["name"] == chart: + item["version"] = version + is_modified = True + except KeyError: + pass + if is_modified: + chart_path = self._path / "applications" / app / "Chart.yaml" + with chart_path.open("w") as fh: + yaml.safe_dump(app_config.chart, fh, sort_keys=False) + def write_application_template(self, name: str, template: str) -> None: """Write the Argo CD application template for a new application. 
@@ -533,7 +769,7 @@ def _load_application_config(self, name: str) -> ApplicationConfig: values_path = base_path / "values.yaml" if values_path.exists(): with values_path.open("r") as fh: - values = yaml.safe_load(fh) + values = yaml.safe_load(fh) or {} else: values = {} diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index a8b63d2a18..edefa3fe6b 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -3,7 +3,9 @@ from __future__ import annotations import subprocess +import sys from pathlib import Path +from urllib.parse import urlparse from ..exceptions import HelmFailedError from ..models.helm import HelmStarter @@ -47,8 +49,369 @@ def create(self, application: str, starter: HelmStarter) -> None: cwd=application_path.parent, ) - def _run_helm( + def dependency_update( + self, application: str, *, quiet: bool = False + ) -> None: + """Download chart dependencies for an application. + + Tell Helm to update any third-party chart dependencies for an + application and store them in the :file:`charts` subdirectory. This is + a prerequisite for :command:`helm lint` or :command:`helm template`. + + Assumes that remote repositories have already been refreshed with + `repo_update` and tells Helm to skip that. + + Parameters + ---------- + application + Application whose dependencies should be updated. + quiet + Whether to suppress Helm's standard output. + """ + application_path = self._config.get_application_chart_path(application) + self._run_helm( + "dependency", + "update", + "--skip-refresh", + cwd=application_path, + quiet=quiet, + ) + + def lint_application( + self, application: str, environment: str, values: dict[str, str] + ) -> bool: + """Lint an application chart with Helm. + + Assumes that :command:`helm dependency update` has already been run to + download any third-party charts. Any output is sent to standard output + and standard error, and if Helm fails, a failure message will be + printed to standard error. + + Parameters + ---------- + application + Name of the application. + environment + Name of the environment in which to lint that application chart, + used to select the :file:`values-{environment}.yaml` file to add. + values + Extra key/value pairs to set, reflecting the settings injected by + Argo CD. + + Returns + ------- + bool + Whether linting passed. + """ + application_path = self._config.get_application_chart_path(application) + + # helm lint complains about any chart without a templates directory, + # but many of our charts are wrappers around third-party charts and + # intentionally don't have such a directory. To silence the warning, + # create an empty templates directory if needed. Git ignores empty + # directories, so this is essentially a no-op in a Git checkout. + if not (application_path / "templates").exists(): + (application_path / "templates").mkdir() + + # Run helm lint with the appropriate flag for the environment in which + # the chart is being linted. 
+ set_arg = ",".join(f"{k}={v}" for k, v in values.items()) + try: + result = self._capture_helm( + "lint", + application, + "--strict", + "--values", + f"{application}/values.yaml", + "--values", + f"{application}/values-{environment}.yaml", + "--set", + set_arg, + cwd=application_path.parent, + ) + except HelmFailedError as e: + self._print_lint_output(application, environment, e.stdout) + if e.stderr: + sys.stderr.write(e.stderr) + msg = ( + f"Error: Application {application} in environment" + f" {environment} has errors\n" + ) + sys.stderr.write(msg) + return False + else: + self._print_lint_output(application, environment, result.stdout) + if result.stderr: + sys.stderr.write(result.stderr) + return True + + def lint_environment(self, environment: str) -> bool: + """Lint the top-level chart for an environment with Helm. + + Any output is sent to standard output and standard error, and if Helm + fails, a failure message will be printed to standard error. + + Parameters + ---------- + environment + Name of the environment. + + Returns + ------- + bool + Whether linting passed. + """ + path = self._config.get_environment_chart_path() + try: + result = self._capture_helm( + "lint", + path.name, + "--strict", + "--values", + f"{path.name}/values.yaml", + "--values", + f"{path.name}/values-{environment}.yaml", + cwd=path.parent, + ) + except HelmFailedError as e: + self._print_lint_output(None, environment, e.stdout) + if e.stderr: + sys.stderr.write(e.stderr) + msg = ( + f"Error: Top-level chart for environment {environment} has" + " errors\n" + ) + sys.stderr.write(msg) + return False + else: + self._print_lint_output(None, environment, result.stdout) + if result.stderr: + sys.stderr.write(result.stderr) + return True + + def repo_add(self, url: str, *, quiet: bool = False) -> None: + """Add a Helm chart repository to Helm's cache. + + Used primarily to enable Helm linting and templating, since both + require any third-party chart repositories be added first. + + Annoyingly, Helm requires you to name repositories, but chart + configurations don't include repository names. Automating adding Helm + repositories therefore requires making up a name. This uses some + arbitrary heuristics that produce consistent names and hopefully won't + produce conflicts. + + Parameters + ---------- + url + Chart repository to add. + quiet + Whether to suppress Helm's standard output. + + Raises + ------ + ValueError + Raised if the Helm repository URL is invalid. + """ + hostname = urlparse(url).hostname + if not hostname: + raise ValueError(f"Invalid Helm repository URL {url}") + if hostname.endswith("github.io"): + name = hostname.split(".", 1)[0] + elif "." in hostname: + name = hostname.split(".")[-2] + else: + name = hostname + self._run_helm("repo", "add", name, url, quiet=quiet) + + def repo_update(self, *, quiet: bool = False) -> None: + """Update Helm's cache of upstream repository indices. + + Parameters + ---------- + quiet + Whether to suppress Helm's standard output. + """ + self._run_helm("repo", "update", quiet=quiet) + + def template_application( + self, application: str, environment: str, values: dict[str, str] + ) -> str: + """Expand an application chart into its Kubernetes resources. + + Runs :command:`helm template` to expand a chart into its Kubernetes + resources for a given environment. Assumes that :command:`helm + dependency update` has already been run to download any third-party + charts. Any output to standard error is passed along. 
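The repository-naming heuristic in repo_add determines the names that later appear verbatim in the helm invocations asserted by the tests. Re-stated as a standalone function:

    from urllib.parse import urlparse

    def repo_name(url: str) -> str:
        # Mirrors the heuristic in repo_add above.
        hostname = urlparse(url).hostname or ""
        if hostname.endswith("github.io"):
            return hostname.split(".", 1)[0]
        if "." in hostname:
            return hostname.split(".")[-2]
        return hostname

    assert repo_name("https://lsst-sqre.github.io/charts/") == "lsst-sqre"
    assert repo_name("https://argoproj.github.io/argo-helm") == "argoproj"
    assert repo_name("https://charts.example.com/stable") == "example"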
+
+        Parameters
+        ----------
+        application
+            Name of the application.
+        environment
+            Name of the environment whose configuration should be used,
+            selecting the :file:`values-{environment}.yaml` file to add.
+        values
+            Extra key/value pairs to set, reflecting the settings injected by
+            Argo CD.
+
+        Returns
+        -------
+        str
+            Kubernetes resources created by the chart.
+
+        Raises
+        ------
+        HelmFailedError
+            Raised if Helm fails.
+        """
+        application_path = self._config.get_application_chart_path(application)
+        set_arg = ",".join(f"{k}={v}" for k, v in values.items())
+        try:
+            result = self._capture_helm(
+                "template",
+                application,
+                str(application_path),
+                "--include-crds",
+                "--values",
+                f"{application}/values.yaml",
+                "--values",
+                f"{application}/values-{environment}.yaml",
+                "--set",
+                set_arg,
+                cwd=application_path.parent,
+            )
+        except HelmFailedError as e:
+            if e.stderr:
+                sys.stderr.write(e.stderr)
+            raise
+        if result.stderr:
+            sys.stderr.write(result.stderr)
+        return result.stdout
+
+    def template_environment(self, environment: str) -> str:
+        """Expand the top-level chart into its Kubernetes resources.
+
+        Runs :command:`helm template` to expand the top-level chart into its
+        Kubernetes resources for a given environment. Any output to standard
+        error is passed along.
+
+        Parameters
+        ----------
+        environment
+            Name of the environment for which to expand the chart.
+
+        Returns
+        -------
+        str
+            Kubernetes resources created by the chart.
+
+        Raises
+        ------
+        HelmFailedError
+            Raised if Helm fails.
+        """
+        path = self._config.get_environment_chart_path()
+        try:
+            result = self._capture_helm(
+                "template",
+                "science-platform",
+                str(path),
+                "--include-crds",
+                "--values",
+                "environments/values.yaml",
+                "--values",
+                f"environments/values-{environment}.yaml",
+                cwd=path.parent,
+            )
+        except HelmFailedError as e:
+            if e.stderr:
+                sys.stderr.write(e.stderr)
+            raise
+        if result.stderr:
+            sys.stderr.write(result.stderr)
+        return result.stdout
+
+    def _capture_helm(
         self, command: str, *args: str, cwd: Path | None = None
+    ) -> subprocess.CompletedProcess:
+        """Run Helm, checking for errors and capturing the output.
+
+        Parameters
+        ----------
+        command
+            Helm subcommand to run.
+        *args
+            Arguments for that subcommand.
+        cwd
+            If provided, change working directories to this path before
+            running the Helm command.
+
+        Returns
+        -------
+        subprocess.CompletedProcess
+            Results of the process, containing the standard output and
+            standard error streams.
+
+        Raises
+        ------
+        HelmFailedError
+            Raised if Helm fails.
+        """
+        try:
+            result = subprocess.run(
+                ["helm", command, *args],
+                check=True,
+                cwd=cwd,
+                capture_output=True,
+                text=True,
+            )
+        except subprocess.CalledProcessError as e:
+            raise HelmFailedError(command, args, e) from e
+        return result
+
+    def _print_lint_output(
+        self, application: str | None, environment: str, output: str | None
+    ) -> None:
+        """Print filtered output from Helm's lint.
+
+        :command:`helm lint` has no apparent way to disable certain checks,
+        and there are some warnings that we will never care about. It also
+        doesn't have very useful output formatting.
+
+        Parameters
+        ----------
+        application
+            Name of the application, or `None` if linting the top-level chart.
+        environment
+            Name of the environment in which the chart was linted.
+        output
+            Raw output from :command:`helm lint`.
+ """ + if not output: + return + if application: + prelude = f"==> Linting {application} (environment {environment})" + else: + prelude = f"==> Linting top-level chart for {environment}" + for line in output.removesuffix("\n").split("\n"): + if "icon is recommended" in line: + continue + if line == "": + continue + if "1 chart(s) linted" in line: + continue + if line.startswith("==> Linting"): + print(prelude) + else: + print(line) + + def _run_helm( + self, + command: str, + *args: str, + cwd: Path | None = None, + quiet: bool = False, ) -> None: """Run Helm, checking for errors. @@ -69,7 +432,9 @@ def _run_helm( HelmFailedError Raised if Helm fails. """ + cmdline = ["helm", command, *args] + stdout = subprocess.DEVNULL if quiet else None try: - subprocess.run(["helm", command, *args], check=True, cwd=cwd) + subprocess.run(cmdline, check=True, stdout=stdout, cwd=cwd) except subprocess.CalledProcessError as e: raise HelmFailedError(command, args, e) from e diff --git a/src/phalanx/storage/onepassword.py b/src/phalanx/storage/onepassword.py index 487ba8d753..bc1e1b662a 100644 --- a/src/phalanx/storage/onepassword.py +++ b/src/phalanx/storage/onepassword.py @@ -5,10 +5,13 @@ import os from collections import defaultdict -from onepasswordconnectsdk import load_dict, new_client +from onepasswordconnectsdk import new_client from onepasswordconnectsdk.client import FailedToRetrieveItemException -from ..exceptions import NoOnepasswordCredentialsError +from ..exceptions import ( + MissingOnepasswordSecretsError, + NoOnepasswordCredentialsError, +) from ..models.environments import EnvironmentBaseConfig from ..models.secrets import PullSecret, StaticSecret, StaticSecrets @@ -55,40 +58,43 @@ def get_secrets(self, query: dict[str, list[str]]) -> StaticSecrets: dict of dict Retrieved static secrets as a dictionary of applications to secret keys to `~phalanx.models.secrets.StaticSecret` objects. + + Raises + ------ + MissingOnepasswordSecretsError + Raised if any of the items or fields expected to be in 1Password + are not present. """ - request: dict[tuple[str, str], dict[str, str]] = {} - extra = [] - for application, secrets in query.items(): - for secret in secrets: - if "." in secret: - extra.append((application, secret)) - else: - request[(application, secret)] = { - "opitem": application, - "opfield": f".{secret}", - "opvault": self._vault_id, - } - response = load_dict(self._onepassword, request) applications: defaultdict[str, dict[str, StaticSecret]] applications = defaultdict(dict) - for key, value in response.items(): - application, secret = key - applications[application][secret] = StaticSecret(value=value) - - # Separately handle the secret field names that contain periods, since - # that conflicts with the syntax used by load_dict. - for application, secret in extra: - item = self._onepassword.get_item(application, self._vault_id) - found = False - for field in item.fields: - if field.label == secret: - static_secret = StaticSecret(value=field.value) - applications[application][secret] = static_secret - found = True - break - if not found: - msg = f"Item {application} has no field {secret}" - raise FailedToRetrieveItemException(msg) + + # This method originally used the load_dict bulk query interface, but + # the onepasswordconnectsdk Python library appears to turn that into + # separate queries per item anyway, it can't handle fields whose names + # contain periods, and it means we don't know what items are missing + # for error reporting. It seems better to do the work directly. 
+ not_found = [] + for application, secrets in query.items(): + try: + item = self._onepassword.get_item(application, self._vault_id) + except FailedToRetrieveItemException: + not_found.append(application) + continue + for secret in secrets: + found = False + for field in item.fields: + if field.label == secret: + static_secret = StaticSecret(value=field.value) + applications[application][secret] = static_secret + found = True + break + if not found: + not_found.append(f"{application} {secret}") + + # If any secrets weren't found, raise an exception with the list of + # secrets that weren't found. + if not_found: + raise MissingOnepasswordSecretsError(not_found) # Return the static secrets. return StaticSecrets( diff --git a/src/phalanx/storage/vault.py b/src/phalanx/storage/vault.py index 7c81bdcb1d..974b4460cc 100644 --- a/src/phalanx/storage/vault.py +++ b/src/phalanx/storage/vault.py @@ -359,4 +359,6 @@ def get_vault_client( """ if not path_prefix: path_prefix = env.vault_path_prefix - return VaultClient(env.vault_url, path_prefix) + if not env.vault_url: + raise ValueError("vaultUrl not set for this environment") + return VaultClient(str(env.vault_url), path_prefix) diff --git a/src/phalanx/testing/expandcharts.py b/src/phalanx/testing/expandcharts.py deleted file mode 100644 index 6e6714ae96..0000000000 --- a/src/phalanx/testing/expandcharts.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Expand Helm charts for testing. - -Discover the list of supported environments, find all charts that have changed -relative to main, and then expand those charts into directories for each -chart and environment pair and a values.yaml file for that environment. - -This is a workaround for limitations in the helm/chart-testing tool, which -doesn't understand multi-environment patterns. 
-""" - -from __future__ import annotations - -import shutil -from pathlib import Path -from typing import TYPE_CHECKING - -from git import DiffIndex -from git.repo import Repo - -if TYPE_CHECKING: - from collections.abc import Sequence - - -def get_changed_charts() -> list[str]: - """Get a list of charts that have changed relative to main.""" - repo = Repo(str(Path.cwd())) - - charts = [] - for path in (Path.cwd() / "applications").iterdir(): - if (path / "Chart.yaml").exists(): - diff = repo.head.commit.diff("origin/main", paths=[str(path)]) - for change_type in DiffIndex.change_type: - changes = diff.iter_change_type( - change_type # type: ignore[arg-type] - ) - if any(changes): - print("Found changed chart", path.name) - charts.append(path.name) - break - - return charts - - -def get_environments() -> list[str]: - """Get the list of supported environments.""" - science_platform_path = Path.cwd() / "environments" - - environments = [] - for path in science_platform_path.iterdir(): - name = path.name - if not name.startswith("values-"): - continue - environment = name[len("values-") : -len(".yaml")] - print("Found environment", environment) - environments.append(environment) - - return environments - - -def expand_chart(chart: str, environments: Sequence[str]) -> None: - """Expand charts from applications into applications-expanded.""" - chart_path = Path.cwd() / "applications" / chart - expanded_path = Path.cwd() / "applications-expanded" - expanded_path.mkdir(exist_ok=True) - - if (chart_path / "values.yaml").exists(): - print("Copying simple chart", chart) - shutil.copytree(chart_path, expanded_path / chart) - else: - for environment in environments: - values_path = chart_path / f"values-{environment}.yaml" - if not values_path.exists(): - continue - print("Expanding chart", chart, "for environment", environment) - chart_expanded_path = expanded_path / f"{chart}-{environment}" - shutil.copytree(chart_path, chart_expanded_path) - shutil.copyfile(values_path, chart_expanded_path / "values.yaml") - - -def main() -> None: - """Entry point for expand-charts command.""" - expanded_path = Path.cwd() / "applications-expanded" - if expanded_path.exists(): - shutil.rmtree(expanded_path) - expanded_path.mkdir() - - charts = get_changed_charts() - environments = get_environments() - for chart in charts: - expand_chart(chart, environments) diff --git a/starters/web-service/templates/ingress.yaml b/starters/web-service/templates/ingress.yaml index eacb451a7e..ae40656ae8 100644 --- a/starters/web-service/templates/ingress.yaml +++ b/starters/web-service/templates/ingress.yaml @@ -18,7 +18,6 @@ template: {{- toYaml . 
 | nindent 6 }}
     {{- end }}
   spec:
-  ingressClassName: "nginx"
   rules:
     - host: {{ required "global.host must be set" .Values.global.host | quote }}
       http:
diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py
index 0048775d99..2e5c60225f 100644
--- a/tests/cli/application_test.py
+++ b/tests/cli/application_test.py
@@ -3,14 +3,47 @@
 from __future__ import annotations

 import shutil
+import subprocess
 from pathlib import Path
+from unittest.mock import ANY

 import yaml
+from git.repo import Repo
+from git.util import Actor

 from phalanx.factory import Factory

 from ..support.cli import run_cli
-from ..support.data import phalanx_test_path, read_output_data
+from ..support.data import (
+    phalanx_test_path,
+    read_output_data,
+    read_output_json,
+)
+from ..support.helm import MockHelm
+
+
+def test_add_helm_repos(mock_helm: MockHelm) -> None:
+    result = run_cli("application", "add-helm-repos", "argocd")
+    assert result.output == ""
+    assert result.exit_code == 0
+    assert mock_helm.call_args_list == [
+        ["repo", "add", "argoproj", "https://argoproj.github.io/argo-helm"]
+    ]
+
+    mock_helm.reset_mock()
+    result = run_cli("application", "add-helm-repos")
+    assert result.output == ""
+    assert result.exit_code == 0
+    assert mock_helm.call_args_list == [
+        ["repo", "add", "argoproj", "https://argoproj.github.io/argo-helm"],
+        [
+            "repo",
+            "add",
+            "jupyterhub",
+            "https://jupyterhub.github.io/helm-chart/",
+        ],
+        ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"],
+    ]


 def test_create(tmp_path: Path) -> None:
@@ -170,3 +203,240 @@ def test_create_prompt(tmp_path: Path) -> None:
     with (app_path / "Chart.yaml").open() as fh:
         chart = yaml.safe_load(fh)
     assert chart["description"] == "Some application"
+
+
+def test_lint(mock_helm: MockHelm) -> None:
+    def callback(*command: str) -> subprocess.CompletedProcess:
+        output = None
+        if command[0] == "lint":
+            output = (
+                "==> Linting .\n"
+                "[INFO] Chart.yaml: icon is recommended\n"
+                "\n"
+                "1 chart(s) linted, 0 chart(s) failed\n"
+            )
+        return subprocess.CompletedProcess(
+            returncode=0,
+            args=command,
+            stdout=output,
+            stderr=None,
+        )
+
+    # Lint a single application that will succeed, and check that the icon
+    # line is filtered out of the output.
+    mock_helm.set_capture_callback(callback)
+    result = run_cli("application", "lint", "gafaelfawr", "-e", "idfdev")
+    expected = "==> Linting gafaelfawr (environment idfdev)\n"
+    assert result.output == expected
+    assert result.exit_code == 0
+    set_args = read_output_json("idfdev", "lint-set-values")
+    assert mock_helm.call_args_list == [
+        ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"],
+        ["repo", "update"],
+        ["dependency", "update", "--skip-refresh"],
+        [
+            "lint",
+            "gafaelfawr",
+            "--strict",
+            "--values",
+            "gafaelfawr/values.yaml",
+            "--values",
+            "gafaelfawr/values-idfdev.yaml",
+            "--set",
+            ",".join(set_args),
+        ],
+    ]
+
+    # Lint both gafaelfawr and portal for all configured environments. We
+    # won't bother to check the --set flag again. The important part is that
+    # we call helm lint twice, but all of the setup is only called once.
+ mock_helm.reset_mock() + result = run_cli("application", "lint", "gafaelfawr", "portal") + expected += ( + "==> Linting gafaelfawr (environment minikube)\n" + "==> Linting portal (environment idfdev)\n" + ) + assert result.output == expected + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"], + ["repo", "update"], + ["dependency", "update", "--skip-refresh"], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-minikube.yaml", + "--set", + ANY, + ], + ["dependency", "update", "--skip-refresh"], + [ + "lint", + "portal", + "--strict", + "--values", + "portal/values.yaml", + "--values", + "portal/values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + ] + + def callback_error(*command: str) -> subprocess.CompletedProcess: + return subprocess.CompletedProcess( + returncode=1, + args=command, + stdout="", + stderr="Some error\n", + ) + + mock_helm.reset_mock() + mock_helm.set_capture_callback(callback_error) + result = run_cli("application", "lint", "gafaelfawr", "--env", "idfdev") + assert result.output == ( + "Some error\n" + "Error: Application gafaelfawr in environment idfdev has errors\n" + ) + assert result.exit_code == 1 + + +def test_lint_no_repos(mock_helm: MockHelm) -> None: + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "lint": + output = "==> Linting .\n" + return subprocess.CompletedProcess( + returncode=0, + args=command, + stdout=output, + stderr=None, + ) + + # Lint a single application that has no dependency charts, and make sure + # we don't try to run repo update, which may fail. + mock_helm.set_capture_callback(callback) + result = run_cli("application", "lint", "postgres", "-e", "idfdev") + expected = "==> Linting postgres (environment idfdev)\n" + assert result.output == expected + assert result.exit_code == 0 + set_args = read_output_json("idfdev", "lint-set-values") + assert mock_helm.call_args_list == [ + ["dependency", "update", "--skip-refresh"], + [ + "lint", + "postgres", + "--strict", + "--values", + "postgres/values.yaml", + "--values", + "postgres/values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + ] + + +def test_lint_all(mock_helm: MockHelm) -> None: + result = run_cli("application", "lint-all") + assert result.output == "" + assert result.exit_code == 0 + expected_calls = read_output_json("idfdev", "lint-all-calls") + assert mock_helm.call_args_list == expected_calls + + +def test_lint_all_git(tmp_path: Path, mock_helm: MockHelm) -> None: + upstream_path = tmp_path / "upstream" + shutil.copytree(str(phalanx_test_path()), str(upstream_path)) + upstream_repo = Repo.init(str(upstream_path), initial_branch="main") + upstream_repo.index.add(["applications", "environments"]) + actor = Actor("Someone", "someone@example.com") + upstream_repo.index.commit("Initial commit", author=actor, committer=actor) + change_path = tmp_path / "change" + repo = Repo.clone_from(str(upstream_path), str(change_path)) + + # Now, make a few changes that should trigger linting. 
+ # + # - argocd (only idfdev) + # - gafaelfawr (values change so all environments) + # - portal (templates deletion so all environments) + # - postgres (irrelevant change, no linting) + path = change_path / "applications" / "argocd" / "values-idfdev.yaml" + with path.open("a") as fh: + fh.write("foo: bar\n") + path = change_path / "applications" / "gafaelfawr" / "values.yaml" + with path.open("a") as fh: + fh.write("foo: bar\n") + repo.index.remove( + "applications/portal/templates/vault-secrets.yaml", working_tree=True + ) + repo.index.remove( + "applications/postgres/values-idfdev.yaml", working_tree=True + ) + repo.index.add(["applications"]) + repo.index.commit("Some changes", author=actor, committer=actor) + + # Okay, now we can run the lint and check the helm commands that were run + # against the expected output. + result = run_cli( + "application", + "lint-all", + "--git", + "--config", + str(change_path), + needs_config=False, + ) + assert result.output == "" + assert result.exit_code == 0 + expected_calls = read_output_json("idfdev", "lint-git-calls") + assert mock_helm.call_args_list == expected_calls + + +def test_template(mock_helm: MockHelm) -> None: + test_path = phalanx_test_path() + + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "template": + output = "this is some template\n" + return subprocess.CompletedProcess( + returncode=0, args=command, stdout=output, stderr=None + ) + + mock_helm.set_capture_callback(callback) + result = run_cli("application", "template", "gafaelfawr", "idfdev") + assert result.output == "this is some template\n" + assert result.exit_code == 0 + set_args = read_output_json("idfdev", "lint-set-values") + assert mock_helm.call_args_list == [ + ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"], + ["repo", "update"], + ["dependency", "update", "--skip-refresh"], + [ + "template", + "gafaelfawr", + str(test_path / "applications" / "gafaelfawr"), + "--include-crds", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + ] diff --git a/tests/cli/environment_test.py b/tests/cli/environment_test.py index 0d1daf2e93..eec241358c 100644 --- a/tests/cli/environment_test.py +++ b/tests/cli/environment_test.py @@ -2,9 +2,105 @@ from __future__ import annotations +import subprocess from pathlib import Path from ..support.cli import run_cli +from ..support.data import phalanx_test_path +from ..support.helm import MockHelm + + +def test_lint(mock_helm: MockHelm) -> None: + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "lint": + output = ( + "==> Linting .\n" + "[INFO] Chart.yaml: icon is recommended\n" + "\n" + "1 chart(s) linted, 0 chart(s) failed\n" + ) + return subprocess.CompletedProcess( + returncode=0, + args=command, + stdout=output, + stderr=None, + ) + + # Lint a single environment and check that the output is filtered. + mock_helm.set_capture_callback(callback) + result = run_cli("environment", "lint", "idfdev") + expected = "==> Linting top-level chart for idfdev\n" + assert result.output == expected + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-idfdev.yaml", + ] + ] + + # Lint all environments. 
+ mock_helm.reset_mock() + result = run_cli("environment", "lint") + expected += ( + "==> Linting top-level chart for minikube\n" + "==> Linting top-level chart for usdfdev-prompt-processing\n" + ) + assert result.output == expected + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-idfdev.yaml", + ], + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-minikube.yaml", + ], + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-usdfdev-prompt-processing.yaml", + ], + ] + + def callback_error(*command: str) -> subprocess.CompletedProcess: + return subprocess.CompletedProcess( + returncode=1, + args=command, + stdout="", + stderr="Some error\n", + ) + + # Test with an error. + mock_helm.reset_mock() + mock_helm.set_capture_callback(callback_error) + result = run_cli("environment", "lint", "idfdev") + assert result.output == ( + "Some error\n" + "Error: Top-level chart for environment idfdev has errors\n" + ) + assert result.exit_code == 1 def test_schema() -> None: @@ -18,3 +114,30 @@ def test_schema() -> None: / "environment.json" ) assert result.output == current.read_text() + + +def test_template(mock_helm: MockHelm) -> None: + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "template": + output = "this is some template\n" + return subprocess.CompletedProcess( + returncode=0, args=command, stdout=output, stderr=None + ) + + mock_helm.set_capture_callback(callback) + result = run_cli("environment", "template", "idfdev") + assert result.output == "this is some template\n" + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + [ + "template", + "science-platform", + str(phalanx_test_path() / "environments"), + "--include-crds", + "--values", + "environments/values.yaml", + "--values", + "environments/values-idfdev.yaml", + ], + ] diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index cbf764250f..4865abaddc 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -4,16 +4,18 @@ import os import re -from base64 import b64decode +from base64 import b64decode, b64encode from datetime import datetime, timedelta from pathlib import Path import bcrypt import click +import pytest import yaml from cryptography.fernet import Fernet from safir.datetime import current_datetime +from phalanx.exceptions import MalformedOnepasswordSecretError from phalanx.factory import Factory from phalanx.models.gafaelfawr import Token @@ -57,10 +59,31 @@ def test_audit(factory: Factory, mock_vault: MockVaultClient) -> None: result = run_cli( "secrets", "audit", "--secrets", str(secrets_path), "idfdev" ) - assert result.exit_code == 0 + assert result.exit_code == 1 assert result.output == read_output_data("idfdev", "secrets-audit") +def test_audit_onepassword_missing( + factory: Factory, + mock_onepassword: MockOnepasswordClient, + mock_vault: MockVaultClient, +) -> None: + """Check reporting of missing 1Password secrets.""" + phalanx_test_path() + config_storage = factory.create_config_storage() + environment = config_storage.load_environment("minikube") + assert environment.onepassword + vault_title = environment.onepassword.vault_title + mock_onepassword.create_empty_test_vault(vault_title) + 
mock_vault.load_test_data(environment.vault_path_prefix, "minikube") + + result = run_cli("secrets", "audit", "minikube") + assert result.exit_code == 1 + assert result.output == read_output_data( + "minikube", "audit-missing-output" + ) + + def test_list() -> None: result = run_cli("secrets", "list", "idfdev") assert result.exit_code == 0 @@ -224,6 +247,56 @@ def test_sync_onepassword( assert vault == pull_secret +def test_sync_onepassword_errors( + factory: Factory, + mock_onepassword: MockOnepasswordClient, + mock_vault: MockVaultClient, +) -> None: + phalanx_test_path() + config_storage = factory.create_config_storage() + environment = config_storage.load_environment("minikube") + assert environment.onepassword + vault_title = environment.onepassword.vault_title + mock_onepassword.load_test_data(vault_title, "minikube") + mock_vault.load_test_data(environment.vault_path_prefix, "minikube") + + # Find a secret that's supposed to be encoded and change it to have an + # invalid base64 string. + app_name = None + key = None + for application in environment.applications.values(): + for secret in application.secrets.values(): + if secret.onepassword.encoded: + app_name = application.name + key = secret.key + break + assert app_name + assert key + vault_id = mock_onepassword.get_vault_by_title(vault_title).id + item = mock_onepassword.get_item(app_name, vault_id) + for field in item.fields: + if field.label == key: + field.value = "invalid base64" + + # sync should throw an exception containing the application and key. + with pytest.raises(MalformedOnepasswordSecretError) as excinfo: + run_cli("secrets", "sync", "minikube") + assert app_name in str(excinfo.value) + assert key in str(excinfo.value) + + # Instead set the secret to a value that is valid base64, but of binary + # data that cannot be decoded to a string. + for field in item.fields: + if field.label == key: + field.value = b64encode("ää".encode("iso-8859-1")).decode() + + # sync should throw an exception containing the application and key. 
+ with pytest.raises(MalformedOnepasswordSecretError) as excinfo: + run_cli("secrets", "sync", "minikube") + assert app_name in str(excinfo.value) + assert key in str(excinfo.value) + + def test_sync_regenerate( factory: Factory, mock_vault: MockVaultClient ) -> None: diff --git a/tests/cli/vault_test.py b/tests/cli/vault_test.py index 9d9fe8328a..4d47b22476 100644 --- a/tests/cli/vault_test.py +++ b/tests/cli/vault_test.py @@ -107,7 +107,7 @@ def test_copy_secrets( result = run_cli("vault", "copy-secrets", "idfdev", old_path) assert result.exit_code == 0 - assert result.output == "" + assert result.output == read_output_data("idfdev", "copy-output") result = run_cli("vault", "export-secrets", "idfdev", str(tmp_path)) assert result.exit_code == 0 assert result.output == "" diff --git a/tests/config_test.py b/tests/config_test.py new file mode 100644 index 0000000000..ca5df3dba8 --- /dev/null +++ b/tests/config_test.py @@ -0,0 +1,108 @@ +"""Tests for the Phalanx configuration itself.""" + +from __future__ import annotations + +import re +from collections.abc import Iterator +from pathlib import Path +from typing import Literal + +import yaml + +from phalanx.factory import Factory + +_ALLOW_NO_SECRETS = ( + "giftless", + "linters", + "monitoring", + "next-visit-fan-out", +) +"""Temporary whitelist of applications that haven't added secrets.yaml.""" + + +def all_charts( + parent: Literal["applications", "charts"], +) -> Iterator[Path]: + """Iterate through all chart paths.""" + root_path = Path(__file__).parent.parent / parent + for candidate in root_path.iterdir(): + if not candidate.is_dir(): + continue + yield candidate + + +def test_application_version() -> None: + """All application charts should have version 1.0.0.""" + for application in all_charts("applications"): + chart = yaml.safe_load((application / "Chart.yaml").read_text()) + assert ( + chart["version"] == "1.0.0" + ), f"Chart for application {application.name} has incorrect version" + + # Check the same thing for shared charts. 
+    for shared_chart in all_charts("charts"):
+        chart = yaml.safe_load((shared_chart / "Chart.yaml").read_text())
+        assert (
+            chart["version"] == "1.0.0"
+        ), f"Shared chart {shared_chart.name} has incorrect version"
+
+
+def test_environments() -> None:
+    """Ensure applications don't have configs for unknown environments."""
+    factory = Factory(Path.cwd())
+    config_storage = factory.create_config_storage()
+    environments = set(config_storage.list_environments())
+    for app_name in config_storage.list_applications():
+        app_envs = set(config_storage.get_application_environments(app_name))
+        if not app_envs <= environments:
+            unknown = ", ".join(sorted(app_envs - environments))
+            msg = f"{app_name} configured for unknown environments: {unknown}"
+            raise AssertionError(msg)
+
+
+def test_secrets_defined() -> None:
+    """Any application with a VaultSecret should have secrets.yaml."""
+    for application in all_charts("applications"):
+        if application.name in _ALLOW_NO_SECRETS:
+            continue
+        if list(application.glob("secrets*.yaml")):
+            continue
+        template_path = application / "templates"
+        if not template_path.is_dir():
+            continue
+        for template in template_path.iterdir():
+            if not template.is_file():
+                continue
+            resources = template.read_text().split("---\n")
+            for resource in resources:
+                if "kind: VaultSecret" not in resource:
+                    continue
+                if "name: pull-secret" in resource:
+                    continue
+                msg = (
+                    f"Application {application.name} installs a VaultSecret"
+                    " resource but has no secrets.yaml configuration"
+                )
+                raise AssertionError(msg)
+
+
+def test_shared_subcharts() -> None:
+    """Check references to shared subcharts."""
+    available = [c.name for c in all_charts("charts")]
+    for application in all_charts("applications"):
+        chart = yaml.safe_load((application / "Chart.yaml").read_text())
+        dependencies = chart.get("dependencies", [])
+        for dependency in dependencies:
+            if not re.match("file:", dependency.get("repository", "")):
+                continue
+            name = application.name
+            version = dependency.get("version")
+            repository = dependency["repository"]
+            m = re.match(r"file://[.][.]/[.][.]/charts/([^/]+)$", repository)
+            assert m, f"Incorrect shared chart URL in {name}: {repository}"
+            assert (
+                m.group(1) in available
+            ), f"Missing shared chart dependency {m.group(1)} in {name}"
+            assert (
+                dependency["version"] == "1.0.0"
+            ), f"Incorrect shared chart version in {name}: {version} != 1.0.0"
diff --git a/tests/conftest.py b/tests/conftest.py
index 9b769a7a55..e6353a401e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,6 +10,7 @@
 from phalanx.factory import Factory

 from .support.data import phalanx_test_path
+from .support.helm import MockHelm, patch_helm
 from .support.onepassword import MockOnepasswordClient, patch_onepassword
 from .support.vault import MockVaultClient, patch_vault

@@ -20,6 +21,12 @@
     return Factory(phalanx_test_path())


+@pytest.fixture
+def mock_helm() -> Iterator[MockHelm]:
+    """Mock out Helm commands."""
+    yield from patch_helm()
+
+
 @pytest.fixture
 def mock_onepassword() -> Iterator[MockOnepasswordClient]:
     """Mock out the 1Password Connect client API."""
diff --git a/tests/data/input/applications/portal/templates/_helpers.tpl b/tests/data/input/applications/portal/templates/_helpers.tpl
new file mode 100644
index 0000000000..114b6681fe
--- /dev/null
+++ b/tests/data/input/applications/portal/templates/_helpers.tpl
@@ -0,0 +1,51 @@
+{{/*
+Expand the name of the chart.
+*/}} +{{- define "portal.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "portal.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "portal.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "portal.labels" -}} +helm.sh/chart: {{ include "portal.chart" . }} +{{ include "portal.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "portal.selectorLabels" -}} +app.kubernetes.io/name: {{ include "portal.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/tests/data/input/applications/portal/templates/vault-secrets.yaml b/tests/data/input/applications/portal/templates/vault-secrets.yaml new file mode 100644 index 0000000000..c3bbbb8046 --- /dev/null +++ b/tests/data/input/applications/portal/templates/vault-secrets.yaml @@ -0,0 +1,19 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ template "portal.fullname" . }}-secret + labels: + {{- include "portal.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/portal" + type: "Opaque" +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: "pull-secret" + labels: + {{- include "portal.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/pull-secret" + type: "kubernetes.io/dockerconfigjson" diff --git a/tests/data/input/environments/values-idfdev.yaml b/tests/data/input/environments/values-idfdev.yaml index 3abdd21a6f..e47fe975f8 100644 --- a/tests/data/input/environments/values-idfdev.yaml +++ b/tests/data/input/environments/values-idfdev.yaml @@ -1,5 +1,9 @@ name: idfdev fqdn: data-dev.lsst.cloud +gcp: + projectId: science-platform-dev-7696 + region: us-central1 + clusterName: science-platform vaultUrl: https://vault.lsst.codes/ vaultPathPrefix: secret/phalanx/idfdev diff --git a/tests/data/output/idfdev/copy-output b/tests/data/output/idfdev/copy-output new file mode 100644 index 0000000000..38649e2963 --- /dev/null +++ b/tests/data/output/idfdev/copy-output @@ -0,0 +1,6 @@ +Copied Vault secret for argocd +Copied Vault secret for gafaelfawr +Copied Vault secret for mobu +Copied Vault secret for nublado +Copied Vault secret for postgres +Copied Vault secret for unknown diff --git a/tests/data/output/idfdev/lint-all-calls.json b/tests/data/output/idfdev/lint-all-calls.json new file mode 100644 index 0000000000..1352268773 --- /dev/null +++ b/tests/data/output/idfdev/lint-all-calls.json @@ -0,0 +1,148 @@ +[ + [ + "repo", + "add", + "argoproj", + "https://argoproj.github.io/argo-helm" + ], + [ + "repo", + "add", + "jupyterhub", + "https://jupyterhub.github.io/helm-chart/" + ], + [ + "repo", + "add", + "lsst-sqre", + "https://lsst-sqre.github.io/charts/" + ], + [ + "repo", + "update" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "argocd", + "--strict", + "--values", + "argocd/values.yaml", + "--values", + "argocd/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ], + [ + "lint", + "argocd", + "--strict", + "--values", + "argocd/values.yaml", + "--values", + "argocd/values-minikube.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ], + [ + "lint", + "argocd", + "--strict", + "--values", + "argocd/values.yaml", + "--values", + "argocd/values-usdfdev-prompt-processing.yaml", + "--set", + "global.enabledServices=@argocd,global.host=usdf-prompt-processing-dev.slac.stanford.edu,global.baseUrl=https://usdf-prompt-processing-dev.slac.stanford.edu,global.vaultSecretsPath=secret/rubin/usdf-prompt-processing-dev" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-minikube.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ], + [ + "dependency", + 
"update", + "--skip-refresh" + ], + [ + "lint", + "nublado", + "--strict", + "--values", + "nublado/values.yaml", + "--values", + "nublado/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "portal", + "--strict", + "--values", + "portal/values.yaml", + "--values", + "portal/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "postgres", + "--strict", + "--values", + "postgres/values.yaml", + "--values", + "postgres/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ], + [ + "lint", + "postgres", + "--strict", + "--values", + "postgres/values.yaml", + "--values", + "postgres/values-minikube.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ] +] diff --git a/tests/data/output/idfdev/lint-git-calls.json b/tests/data/output/idfdev/lint-git-calls.json new file mode 100644 index 0000000000..64009f4e2e --- /dev/null +++ b/tests/data/output/idfdev/lint-git-calls.json @@ -0,0 +1,77 @@ +[ + [ + "repo", + "add", + "argoproj", + "https://argoproj.github.io/argo-helm" + ], + [ + "repo", + "add", + "lsst-sqre", + "https://lsst-sqre.github.io/charts/" + ], + [ + "repo", + "update" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "argocd", + "--strict", + "--values", + "argocd/values.yaml", + "--values", + "argocd/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-minikube.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "portal", + "--strict", + "--values", + 
"portal/values.yaml", + "--values", + "portal/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" + ] +] diff --git a/tests/data/output/idfdev/lint-set-values.json b/tests/data/output/idfdev/lint-set-values.json new file mode 100644 index 0000000000..7a42ea7ca5 --- /dev/null +++ b/tests/data/output/idfdev/lint-set-values.json @@ -0,0 +1,8 @@ +[ + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres", + "global.host=data-dev.lsst.cloud", + "global.baseUrl=https://data-dev.lsst.cloud", + "global.vaultSecretsPath=secret/phalanx/idfdev", + "global.gcpProjectId=science-platform-dev-7696", + "global.gcpRegion=us-central1" +] diff --git a/tests/data/output/idfdev/secrets-audit b/tests/data/output/idfdev/secrets-audit index 18db12fb05..baa8c920ff 100644 --- a/tests/data/output/idfdev/secrets-audit +++ b/tests/data/output/idfdev/secrets-audit @@ -9,7 +9,7 @@ Missing secrets: • nublado postgres-credentials.txt • nublado proxy_token • portal ADMIN_PASSWORD -Incorrect secrets: +Secrets that do not have their expected value: • gafaelfawr database-password • postgres nublado3_password Unknown secrets in Vault: diff --git a/tests/data/output/idfdev/static-secrets.yaml b/tests/data/output/idfdev/static-secrets.yaml index 4c12fee9c8..705a55685a 100644 --- a/tests/data/output/idfdev/static-secrets.yaml +++ b/tests/data/output/idfdev/static-secrets.yaml @@ -29,6 +29,9 @@ applications: description: >- Slack web hook to which mobu should report failures and daily status. value: null + warning: >- + If you store this secret in a 1Password item, encode it with base64 + first. app-alert-webhook: description: >- Slack web hook to which to post internal application alerts. 
This
diff --git a/tests/data/output/minikube/audit-missing-output b/tests/data/output/minikube/audit-missing-output
new file mode 100644
index 0000000000..d0ad300c7e
--- /dev/null
+++ b/tests/data/output/minikube/audit-missing-output
@@ -0,0 +1,4 @@
+Missing static secrets from 1Password:
+• argocd
+• gafaelfawr
+• mobu
diff --git a/tests/docs/applications_test.py b/tests/docs/applications_test.py
index a3ec54fc13..c2153b4c08 100644
--- a/tests/docs/applications_test.py
+++ b/tests/docs/applications_test.py
@@ -23,3 +23,26 @@
         description = m.group(1)
         m = re.match("[A-Z0-9]", description)
         assert m, f"Description must start with capital letter in {index_path}"
+
+
+def test_applications_index() -> None:
+    """Ensure all applications are mentioned in the index."""
+    doc_root = Path(__file__).parent.parent.parent / "docs" / "applications"
+    seen = set()
+    with (doc_root / "index.rst").open() as fh:
+        for line in fh:
+            if m := re.match("^ ([^/]+)/index$", line):
+                seen.add(m.group(1))
+    root_path = Path(__file__).parent.parent.parent / "applications"
+    for application in root_path.iterdir():
+        if not application.is_dir():
+            continue
+        if application.name in (
+            "nublado-fileservers",
+            "nublado-users",
+            "ocps-uws-job",
+        ):
+            continue
+        assert (
+            application.name in seen
+        ), f"{application.name} not linked in docs/applications/index.rst"
diff --git a/tests/docs/environments_test.py b/tests/docs/environments_test.py
new file mode 100644
index 0000000000..fabbb52fab
--- /dev/null
+++ b/tests/docs/environments_test.py
@@ -0,0 +1,32 @@
+"""Tests for the environment documentation pages."""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+
+
+def test_environments() -> None:
+    """Ensure all environments are documented."""
+    doc_root = Path(__file__).parent.parent.parent / "docs" / "environments"
+    seen_dir = set()
+    for environment in doc_root.iterdir():
+        if environment.is_dir():
+            seen_dir.add(environment.name)
+    seen_index = set()
+    with (doc_root / "index.rst").open() as fh:
+        for line in fh:
+            if m := re.match("^ ([^/]+)/index$", line):
+                seen_index.add(m.group(1))
+    root_path = Path(__file__).parent.parent.parent / "environments"
+    environments = [
+        v.stem.removeprefix("values-")
+        for v in sorted(root_path.glob("values-*.yaml"))
+    ]
+    for environment_name in environments:
+        assert (
+            environment_name in seen_dir
+        ), f"{environment_name} not documented in docs/environments"
+        assert (
+            environment_name in seen_index
+        ), f"{environment_name} not linked in docs/environments/index.rst"
diff --git a/tests/docs/jinja_test.py b/tests/docs/jinja_test.py
index 040b3ae244..2fbb94a194 100644
--- a/tests/docs/jinja_test.py
+++ b/tests/docs/jinja_test.py
@@ -44,10 +44,14 @@
     assert idfdev.fqdn == "data-dev.lsst.cloud"
     assert idfdev.argocd_url == "https://data-dev.lsst.cloud/argo-cd"
     assert idfdev.identity_provider == IdentityProvider.CILOGON
+    assert idfdev.gcp.project_id == "science-platform-dev-7696"
+    assert idfdev.gcp.region == "us-central1"
+    assert idfdev.gcp.cluster_name == "science-platform"
     assert minikube.name == "minikube"
     assert minikube.fqdn == "minikube.lsst.cloud"
     assert minikube.argocd_url is None
     assert minikube.identity_provider == IdentityProvider.GITHUB
+    assert minikube.gcp is None

     # Check some of the more complex data.
     expected = read_output_data("idfdev", "argocd-rbac-rst")
diff --git a/tests/support/helm.py b/tests/support/helm.py
new file mode 100644
index 0000000000..7fafa728fe
--- /dev/null
+++ b/tests/support/helm.py
@@ -0,0 +1,151 @@
+"""Mock Helm command for testing."""
+
+from __future__ import annotations
+
+import subprocess
+from collections.abc import Iterator
+from pathlib import Path
+from typing import Protocol
+from unittest.mock import patch
+
+from phalanx.exceptions import HelmFailedError
+from phalanx.storage.helm import HelmStorage
+
+__all__ = [
+    "MockHelm",
+    "MockHelmCallback",
+    "patch_helm",
+]
+
+
+class MockHelmCallback(Protocol):
+    """Protocol for Helm callbacks."""
+
+    def __call__(*command: str) -> subprocess.CompletedProcess:
+        ...
+
+
+class MockHelm:
+    """Mocked Helm commands captured during testing.
+
+    This class holds a record of every Helm command that the Phalanx tooling
+    under test attempted to run. It is patched into the standard Helm storage
+    class, replacing the invocation of Helm via subprocess.
+
+    Attributes
+    ----------
+    call_args_list
+        Each call to Helm, as a list of arguments to the Helm command. The
+        name is chosen to match the `unittest.mock.Mock` interface.
+    """
+
+    def __init__(self) -> None:
+        self.call_args_list: list[list[str]] = []
+        self._callback: MockHelmCallback | None = None
+
+    def capture(
+        self, command: str, *args: str, cwd: Path | None = None
+    ) -> subprocess.CompletedProcess:
+        """Mock capturing the output of a Helm command.
+
+        Parameters
+        ----------
+        command
+            Helm subcommand to run.
+        *args
+            Arguments for that subcommand.
+        cwd
+            If provided, change working directories to this path before
+            running the Helm command. (Currently ignored by the mock.)
+
+        Returns
+        -------
+        subprocess.CompletedProcess
+            Results of the process, containing the standard output and
+            standard error streams.
+
+        Raises
+        ------
+        HelmFailedError
+            Raised if the ``returncode`` returned by a callback is non-zero.
+        """
+        self.call_args_list.append([command, *args])
+        if self._callback:
+            # https://github.com/python/mypy/issues/708 (which despite being
+            # closed is not fixed for protocols as of mypy 1.7.0)
+            result = self._callback(command, *args)  # type: ignore[misc]
+            if result.returncode != 0:
+                exc = subprocess.CalledProcessError(
+                    returncode=result.returncode,
+                    cmd=[command, *args],
+                    output=result.stdout,
+                    stderr=result.stderr,
+                )
+                raise HelmFailedError(command, args, exc)
+            return result
+        else:
+            return subprocess.CompletedProcess(
+                args=[command, *args], returncode=0, stdout=None, stderr=None
+            )
+
+    def reset_mock(self) -> None:
+        """Clear the list of previous calls."""
+        self.call_args_list = []
+
+    def run(
+        self,
+        command: str,
+        *args: str,
+        cwd: Path | None = None,
+        quiet: bool = False,
+    ) -> None:
+        """Mock running a Helm command.
+
+        Parameters
+        ----------
+        command
+            Helm subcommand being run.
+        *args
+            Arguments for that subcommand.
+        cwd
+            If provided, the caller is requesting to change working
+            directories to this path before running the Helm command.
+            (Currently ignored.)
+        quiet
+            Whether to suppress Helm's standard output. (Currently ignored.)
+        """
+        self.call_args_list.append([command, *args])
+
+    def set_capture_callback(self, callback: MockHelmCallback) -> None:
+        """Set the callback called when capturing Helm command output.
+
+        If no callback is set, empty standard output and standard error will
+        be returned by the mock.
+
+        Parameters
+        ----------
+        callback
+            Callback run whenever the Phalanx code under test captures the
+            output of a Helm command. The callback will be passed the Helm
+            command as positional arguments and is expected to return a
+            `subprocess.CompletedProcess` object. If ``returncode`` is
+            non-zero, the mock will raise `HelmFailedError`.
+        """
+        self._callback = callback
+
+
+def patch_helm() -> Iterator[MockHelm]:
+    """Intercept Helm invocations with a mock.
+
+    Each attempt to run a Helm command will be captured in the mock and not
+    actually run.
+
+    Yields
+    ------
+    MockHelm
+        Mock that captures the attempted Helm commands.
+    """
+    mock = MockHelm()
+    with patch.object(HelmStorage, "_capture_helm", side_effect=mock.capture):
+        with patch.object(HelmStorage, "_run_helm", side_effect=mock.run):
+            yield mock
diff --git a/tests/support/onepassword.py b/tests/support/onepassword.py
index 2c04886a43..7cc283df15 100644
--- a/tests/support/onepassword.py
+++ b/tests/support/onepassword.py
@@ -37,6 +37,19 @@
         self._data: dict[str, dict[str, Item]] = {}
         self._uuids: dict[str, str] = {}

+    def create_empty_test_vault(self, vault: str) -> None:
+        """Create an empty 1Password vault for testing.
+
+        This method is not part of the 1Password Connect API. It is intended
+        for use by the test suite to set up a test.
+
+        Parameters
+        ----------
+        vault
+            Name of the 1Password vault.
+        """
+        self._data[vault] = {}
+
     def load_test_data(self, vault: str, environment: str) -> None:
         """Load 1Password test data for the given environment.

@@ -84,7 +97,9 @@
         Returns
         -------
         Item
-            Corresponding item.
+            Corresponding item. This is the exact item that is stored in the
+            mock, so tests can mutate it to affect future calls to `get_item`
+            if they wish.

         Raises
         ------
diff --git a/tox.ini b/tox.ini
index fee1dec8a1..838af0bb31 100644
--- a/tox.ini
+++ b/tox.ini
@@ -46,10 +46,19 @@
 deps =
     neophile
 commands = neophile update {posargs}

+[testenv:phalanx-lint-change]
+description = Lint application chart changes determined by Git
+commands =
+    phalanx application lint-all --git
+    phalanx environment lint
+
 [testenv:py]
 description = Run pytest
 commands =
-    pytest -vv --cov=phalanx --cov-branch --cov-report= {posargs}
+    pytest --cov=phalanx --cov-branch --cov-report= {posargs}
+# Ensure pytest never truncates diffs on assertions.
+setenv =
+    CI = true

 [testenv:typing]
 description = Run mypy.
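A minimal sketch of how the new `mock_helm` fixture and the `MockHelmCallback`
protocol fit together (a hypothetical test module under tests/; it drives the
mock directly rather than through `HelmStorage`, and the test name and expected
values are illustrative only):

    import subprocess

    from .support.helm import MockHelm


    def test_helm_mock_sketch(mock_helm: MockHelm) -> None:
        def callback(*command: str) -> subprocess.CompletedProcess:
            # Shape the captured output. Without a callback, the mock returns
            # empty stdout/stderr and a zero return code.
            return subprocess.CompletedProcess(
                args=command, returncode=0, stdout="ok\n", stderr=None
            )

        mock_helm.set_capture_callback(callback)
        result = mock_helm.capture("repo", "update")
        assert result.stdout == "ok\n"
        assert mock_helm.call_args_list == [["repo", "update"]]

A non-zero ``returncode`` from the callback makes ``capture`` raise
`HelmFailedError`, which is how the lint error paths in the tests above are
exercised.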
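The new lint environment added to tox.ini can also be exercised locally
(assuming a working tox installation and a Git checkout with changes):

    tox -e phalanx-lint-change

which runs `phalanx application lint-all --git` followed by
`phalanx environment lint`, exactly as the `phalanx-lint-change` testenv
defines.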