From 3e540fd496e92bc3205e4077d8e7656db0873bfd Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 27 Sep 2023 17:15:20 -0700 Subject: [PATCH 001/588] Ensure all application charts use 1.0.0 The application chart version is meaningless in Phalanx because of how we use Helm and those charts, as is the chart version for shared charts in the charts directory. Enforce that it is always 1.0.0, matching the documentation, to ensure that no one gets confused by supposed semantic information in the chart version that doesn't mean anything. --- applications/alert-stream-broker/Chart.yaml | 2 +- applications/giftless/Chart.yaml | 2 +- applications/moneypenny/Chart.yaml | 2 +- applications/monitoring/Chart.yaml | 2 +- applications/plot-navigator/Chart.yaml | 2 +- applications/strimzi/Chart.yaml | 2 +- applications/telegraf/Chart.yaml | 2 +- tests/config_test.py | 29 +++++++++++++++++++++ 8 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 tests/config_test.py diff --git a/applications/alert-stream-broker/Chart.yaml b/applications/alert-stream-broker/Chart.yaml index a6042337fe..8e29824c87 100644 --- a/applications/alert-stream-broker/Chart.yaml +++ b/applications/alert-stream-broker/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: alert-stream-broker -version: "3" +version: 1.0.0 description: Alert transmission to community brokers sources: - https://github.com/lsst-dm/alert_database_ingester diff --git a/applications/giftless/Chart.yaml b/applications/giftless/Chart.yaml index 08dbe29ad5..23c43ec83e 100644 --- a/applications/giftless/Chart.yaml +++ b/applications/giftless/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: giftless -version: 0.0.1 +version: 1.0.0 description: Git-LFS server with GCS S3 backend, with Rubin-specific auth sources: - https://github.com/datopian/giftless diff --git a/applications/moneypenny/Chart.yaml b/applications/moneypenny/Chart.yaml index 9c0ba6863a..8197816bf4 100644 --- a/applications/moneypenny/Chart.yaml +++ 
b/applications/moneypenny/Chart.yaml @@ -6,7 +6,7 @@ sources: - https://github.com/lsst-sqre/moneypenny - https://github.com/lsst-sqre/farthing - https://github.com/lsst-sqre/inituserhome -version: 1.0.2 +version: 1.0.0 annotations: phalanx.lsst.io/docs: | - id: "SQR-052" diff --git a/applications/monitoring/Chart.yaml b/applications/monitoring/Chart.yaml index b34119eed7..fb706e2ce6 100644 --- a/applications/monitoring/Chart.yaml +++ b/applications/monitoring/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: monitoring -version: 0.0.1 +version: 1.0.0 description: Chronograf-based UI for monitoring (data stored in InfluxDBv2) sources: - https://github.com/lsst-sqre/rubin-influx-tools diff --git a/applications/plot-navigator/Chart.yaml b/applications/plot-navigator/Chart.yaml index 8d6f724b52..33aa44d9e2 100644 --- a/applications/plot-navigator/Chart.yaml +++ b/applications/plot-navigator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: plot-navigator description: Panel-based plot viewer -version: 1.7.0 +version: 1.0.0 sources: - https://github.com/lsst-dm/pipetask-plot-navigator appVersion: "0.10.2" diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index 622d4a01d2..6308212ce0 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: strimzi type: application -version: 0.1.0 +version: 1.0.0 description: Strimzi Kafka Operator home: https://strimzi.io appVersion: "0.26.0" diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 08654acf39..8c76be1f93 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: telegraf -version: 1.0.1 +version: 1.0.0 description: Application telemetry collection service home: https://www.influxdata.com/time-series-platform/telegraf/ sources: diff --git a/tests/config_test.py b/tests/config_test.py new file mode 100644 index 0000000000..f783b3750d --- 
/dev/null +++ b/tests/config_test.py @@ -0,0 +1,29 @@ +"""Tests for the Phalanx configuration itself.""" + +from __future__ import annotations + +from pathlib import Path + +import yaml + + +def test_application_version() -> None: +    """Test that all application charts have version 1.0.0.""" +    applications_path = Path(__file__).parent.parent / "applications" +    for application in applications_path.iterdir(): +        if not application.is_dir(): +            continue +        chart = yaml.safe_load((application / "Chart.yaml").read_text()) +        assert ( +            chart["version"] == "1.0.0" +        ), f"Chart for application {application.name} has incorrect version" + +    # Check the same thing for shared charts. +    charts_path = Path(__file__).parent.parent / "charts" +    for shared_chart in charts_path.iterdir(): +        if not shared_chart.is_dir(): +            continue +        chart = yaml.safe_load((shared_chart / "Chart.yaml").read_text()) +        assert ( +            chart["version"] == "1.0.0" +        ), f"Shared chart {shared_chart.name} has incorrect version" From 4eb65776701e6609028746005e7a2e4171d400b3 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 12:58:49 -0400 Subject: [PATCH 002/588] Default onepassword.vaultTitle for Roundtables --- environments/values-roundtable-dev.yaml | 2 ++ environments/values-roundtable-prod.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 0b92eebd27..b43260a28f 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -2,6 +2,8 @@ name: roundtable-dev fqdn: roundtable-dev.lsst.cloud vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/roundtable-dev.lsst.cloud +onepassword: + vaultTitle: "RSP roundtable-dev.lsst.cloud" applications: giftless: true diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index c94d92d5a0..a349b4f6a6 100644 --- a/environments/values-roundtable-prod.yaml +++ 
b/environments/values-roundtable-prod.yaml @@ -2,6 +2,8 @@ name: roundtable-prod fqdn: roundtable.lsst.cloud vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/roundtable.lsst.cloud +onepassword: + vaultTitle: "RSP roundtable-dev.lsst.cloud" applications: kubernetes-replicator: true From fbbaab87cc235adf9eefdc52d0b14b681e175345 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 13:00:07 -0400 Subject: [PATCH 003/588] Add secrets.yaml for squarebot --- applications/squarebot/secrets.yaml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 applications/squarebot/secrets.yaml diff --git a/applications/squarebot/secrets.yaml b/applications/squarebot/secrets.yaml new file mode 100644 index 0000000000..7fc308b10f --- /dev/null +++ b/applications/squarebot/secrets.yaml @@ -0,0 +1,28 @@ +SQUAREBOT_GITHUB_APP_ID: + description: >- + The ID of the GitHub App shared by all Squarebot services. +SQUAREBOT_GITHUB_APP_KEY: + description: >- + The private key for the GitHub App shared by all Squarebot services. + onepassword: + encoded: true +SQUAREBOT_SLACK_APP_ID: + description: >- + The ID of the Slack App shared by all Squarebot services. +SQUAREBOT_SLACK_TOKEN: + description: >- + The Slack bot user oauth token for the Slack App shared by all Squarebot services. + onepassword: + encoded: true +SQUAREBOT_SLACK_SIGNING: + description: >- + The signing secret for all webhook payloads from Slack. + onepassword: + encoded: true +ca.crt: + description: >- + The cluster CA certificate for the Kubernetes cluster. This is available + on the Kafka resource in the sasquatch application under the + ``status.listeners[].certificate`` field. 
+ onepassword: + encoded: true From 8e4ebbb18b7672b917ab833df80d160f47bf8c53 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 13:00:31 -0400 Subject: [PATCH 004/588] Add secrets.yaml for ook --- applications/ook/secrets.yaml | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 applications/ook/secrets.yaml diff --git a/applications/ook/secrets.yaml b/applications/ook/secrets.yaml new file mode 100644 index 0000000000..af87774f4d --- /dev/null +++ b/applications/ook/secrets.yaml @@ -0,0 +1,26 @@ +ALGOLIA_APP_ID: + description: >- + The ID of the Algolia application. +ALGOLIA_API_KEY: + description: >- + The admin API key for the Algolia application. +OOK_GITHUB_APP_ID: + description: >- + The ID of the GitHub App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_ID +OOK_GITHUB_APP_KEY: + description: >- + The private key for the GitHub App shared by all Squarebot services. + copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_KEY +ca.crt: + description: >- + The cluster CA certificate for the Kubernetes cluster. This is available + on the Kafka resource in the sasquatch application under the + ``status.listeners[].certificate`` field. 
+ copy: + application: squarebot + key: ca.crt From 1dd7ed7b268a43a0c8cb97bcb9222a62afdac025 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 28 Sep 2023 13:41:04 -0700 Subject: [PATCH 005/588] update argocd sso secrets to fit new template --- applications/argocd/values-usdfdev.yaml | 4 ++-- applications/argocd/values-usdfprod.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index fa44ff14a9..1827d88d0d 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -30,8 +30,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: usdf-rsp-dev-argocd + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 86f3188b58..3a73d911bf 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -30,8 +30,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: usdf-rsp-prod-argocd + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. 
From 624e98318a25d77f4a8991447ffdff6990870aa6 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 17:26:11 -0400 Subject: [PATCH 006/588] Add onepassword.connectUrl for roundtable --- environments/values-roundtable-dev.yaml | 1 + environments/values-roundtable-prod.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index b43260a28f..13cbedaf4b 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -3,6 +3,7 @@ fqdn: roundtable-dev.lsst.cloud vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/roundtable-dev.lsst.cloud onepassword: + connectUrl: https://roundtable-dev.lsst.cloud/1password vaultTitle: "RSP roundtable-dev.lsst.cloud" applications: diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index a349b4f6a6..848f6415e0 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -3,6 +3,7 @@ fqdn: roundtable.lsst.cloud vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/roundtable.lsst.cloud onepassword: + connectUrl: https://roundtable.lsst.cloud/1password vaultTitle: "RSP roundtable-dev.lsst.cloud" applications: From 8c392dcb04c8a5bebf4837d1707189a7c968e581 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 25 Sep 2023 17:13:38 -0700 Subject: [PATCH 007/588] Rework and document 1Password Connect Originally, my plan was to run separate 1Password Connect servers for each environment, since I thought each 1Password Connect server needed to be bound to a single vault to limit access to its clients. 
However, one, this doesn't work because the 1Password Connect chart has duplicate resources that conflict if installed multiple times in the same namespace, and two, it's not necessary because the tokens used by clients can be scoped to a single vault even if 1Password Connect itself has access to multiple vaults. Undo the separation and instead plan to run two 1Password Connect servers, one for dev environments and one for production environments, to handle all SQuaRE-run environments. Enable 1Password Connect for minikube and roundtable-dev, although they're not yet using the new secrets management system. Document 1Password setup: adding a new environment, adding a new 1Password Connect server, and bootstrapping an environment that hosts a 1Password Connect server for itself (relevant to the roundtable-dev and roundtable-prod environments). --- .../onepassword-connect-dev/README.md | 15 ---- .../onepassword-connect-dev/secrets.yaml | 5 -- .../.helmignore | 0 .../Chart.yaml | 5 +- applications/onepassword-connect/README.md | 14 ++++ applications/onepassword-connect/secrets.yaml | 14 ++++ .../templates/_helpers.tpl | 12 +-- .../templates/ingress.yaml} | 8 +- .../templates/vault-secrets.yaml} | 8 +- .../values-roundtable-dev.yaml | 0 .../values.yaml | 9 +- docs/admin/installation.rst | 3 + docs/admin/migrating-secrets.rst | 5 +- docs/admin/secrets-setup.rst | 13 +-- docs/admin/sync-secrets.rst | 1 + docs/applications/index.rst | 2 +- .../onepassword-connect-dev/index.rst | 27 ------ .../onepassword-connect-dev/values.md | 12 --- .../add-new-connect-server.rst | 84 +++++++++++++++++++ .../add-new-environment.rst | 76 +++++++++++++++++ .../onepassword-connect/bootstrap.rst | 25 ++++++ .../onepassword-connect/index.rst | 26 ++++++ .../onepassword-connect/values.md | 12 +++ environments/README.md | 2 +- ...l => onepassword-connect-application.yaml} | 12 +-- environments/values-idfdev.yaml | 2 +- environments/values-minikube.yaml | 3 + 
environments/values-roundtable-dev.yaml | 8 +- environments/values.yaml | 4 +- 29 files changed, 301 insertions(+), 106 deletions(-) delete mode 100644 applications/onepassword-connect-dev/README.md delete mode 100644 applications/onepassword-connect-dev/secrets.yaml rename applications/{onepassword-connect-dev => onepassword-connect}/.helmignore (100%) rename applications/{onepassword-connect-dev => onepassword-connect}/Chart.yaml (77%) create mode 100644 applications/onepassword-connect/README.md create mode 100644 applications/onepassword-connect/secrets.yaml rename applications/{onepassword-connect-dev => onepassword-connect}/templates/_helpers.tpl (58%) rename applications/{onepassword-connect-dev/templates/idfdev-ingress.yaml => onepassword-connect/templates/ingress.yaml} (68%) rename applications/{onepassword-connect-dev/templates/idfdev-vault-secrets.yaml => onepassword-connect/templates/vault-secrets.yaml} (51%) rename applications/{onepassword-connect-dev => onepassword-connect}/values-roundtable-dev.yaml (100%) rename applications/{onepassword-connect-dev => onepassword-connect}/values.yaml (80%) delete mode 100644 docs/applications/onepassword-connect-dev/index.rst delete mode 100644 docs/applications/onepassword-connect-dev/values.md create mode 100644 docs/applications/onepassword-connect/add-new-connect-server.rst create mode 100644 docs/applications/onepassword-connect/add-new-environment.rst create mode 100644 docs/applications/onepassword-connect/bootstrap.rst create mode 100644 docs/applications/onepassword-connect/index.rst create mode 100644 docs/applications/onepassword-connect/values.md rename environments/templates/{onepassword-connect-dev-application.yaml => onepassword-connect-application.yaml} (75%) diff --git a/applications/onepassword-connect-dev/README.md b/applications/onepassword-connect-dev/README.md deleted file mode 100644 index e0bd21762a..0000000000 --- a/applications/onepassword-connect-dev/README.md +++ /dev/null @@ -1,15 +0,0 
@@ -# onepassword-connect-dev - -1Password API server (dev) - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| idfdev.connect.applicationName | string | `"connect-idfdev"` | Name of the Kubernetes Deployment | -| idfdev.connect.credentialsKey | string | `"op-session"` | Name of key inside secret containing 1Password credentials | -| idfdev.connect.credentialsName | string | `"idfdev-secret"` | Name of secret containing the 1Password credentials | -| idfdev.connect.serviceType | string | `"ClusterIP"` | Type of service to create | diff --git a/applications/onepassword-connect-dev/secrets.yaml b/applications/onepassword-connect-dev/secrets.yaml deleted file mode 100644 index eac7d46c60..0000000000 --- a/applications/onepassword-connect-dev/secrets.yaml +++ /dev/null @@ -1,5 +0,0 @@ -idfdev: - description: >- - Credentials used by the 1Password Connect API server to access the vault - for the IDF dev (data-dev.lsst.cloud) environment. This secret can be - changed at any time. 
diff --git a/applications/onepassword-connect-dev/.helmignore b/applications/onepassword-connect/.helmignore similarity index 100% rename from applications/onepassword-connect-dev/.helmignore rename to applications/onepassword-connect/.helmignore diff --git a/applications/onepassword-connect-dev/Chart.yaml b/applications/onepassword-connect/Chart.yaml similarity index 77% rename from applications/onepassword-connect-dev/Chart.yaml rename to applications/onepassword-connect/Chart.yaml index 65de2dccf6..d808bd3756 100644 --- a/applications/onepassword-connect-dev/Chart.yaml +++ b/applications/onepassword-connect/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -description: 1Password API server (dev) -name: onepassword-connect-dev +description: 1Password API server +name: onepassword-connect type: application version: 1.0.0 @@ -8,7 +8,6 @@ dependencies: - name: connect version: 1.14.0 repository: https://1password.github.io/connect-helm-charts/ - alias: idfdev annotations: phalanx.lsst.io/docs: | diff --git a/applications/onepassword-connect/README.md b/applications/onepassword-connect/README.md new file mode 100644 index 0000000000..24e2c7f2b9 --- /dev/null +++ b/applications/onepassword-connect/README.md @@ -0,0 +1,14 @@ +# onepassword-connect + +1Password API server + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| connect.connect.credentialsKey | string | `"op-session"` | Name of key inside secret containing 1Password credentials | +| connect.connect.credentialsName | string | `"onepassword-connect-secret"` | Name of secret containing the 1Password credentials | +| connect.connect.serviceType | string | `"ClusterIP"` | Type of service to create | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git 
a/applications/onepassword-connect/secrets.yaml b/applications/onepassword-connect/secrets.yaml new file mode 100644 index 0000000000..a3906d8255 --- /dev/null +++ b/applications/onepassword-connect/secrets.yaml @@ -0,0 +1,14 @@ +op-session: + description: >- + Credentials used by the 1Password Connect API server to access 1Password + vaults. This credential is created when the Secret Managements workflow + is created and has access to all of the vaults served by that 1Password + Connect server. It is separate from the 1Password Connect tokens, which + are issued for each environment and have access only to the vault for + that environment. + + This is a base64-encoded version of the credentials file for the Connect + server created as part of the secrets automation workflow. (In other + words, the static secret itself is the base64-encoded version of the + JSON, and when written into a Kubernetes ``Secret`` resource, it will be + base64-encoded twice.) This secret can be changed at any time. diff --git a/applications/onepassword-connect-dev/templates/_helpers.tpl b/applications/onepassword-connect/templates/_helpers.tpl similarity index 58% rename from applications/onepassword-connect-dev/templates/_helpers.tpl rename to applications/onepassword-connect/templates/_helpers.tpl index 368309ecc2..0ca015ad61 100644 --- a/applications/onepassword-connect-dev/templates/_helpers.tpl +++ b/applications/onepassword-connect/templates/_helpers.tpl @@ -1,16 +1,16 @@ {{/* Create chart name and version as used by the chart label. */}} -{{- define "onepassword-connect-dev.chart" -}} +{{- define "onepassword-connect.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} -{{- define "onepassword-connect-dev.labels" -}} -helm.sh/chart: {{ include "onepassword-connect-dev.chart" . }} -{{ include "onepassword-connect-dev.selectorLabels" . 
}} +{{- define "onepassword-connect.labels" -}} +helm.sh/chart: {{ include "onepassword-connect.chart" . }} +{{ include "onepassword-connect.selectorLabels" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -20,7 +20,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "onepassword-connect-dev.selectorLabels" -}} -app.kubernetes.io/name: "onepassword-connect-dev" +{{- define "onepassword-connect.selectorLabels" -}} +app.kubernetes.io/name: "onepassword-connect" app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} diff --git a/applications/onepassword-connect-dev/templates/idfdev-ingress.yaml b/applications/onepassword-connect/templates/ingress.yaml similarity index 68% rename from applications/onepassword-connect-dev/templates/idfdev-ingress.yaml rename to applications/onepassword-connect/templates/ingress.yaml index b80a06c5bb..2639dc2627 100644 --- a/applications/onepassword-connect-dev/templates/idfdev-ingress.yaml +++ b/applications/onepassword-connect/templates/ingress.yaml @@ -1,9 +1,9 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ .Values.idfdev.connect.applicationName | quote }} + name: {{ .Values.connect.connect.applicationName | quote }} labels: - {{- include "onepassword-connect-dev.labels" . | nindent 4 }} + {{- include "onepassword-connect.labels" . 
| nindent 4 }} annotations: nginx.ingress.kubernetes.io/rewrite-target: "/$1" nginx.ingress.kubernetes.io/ssl-redirect: "true" @@ -14,10 +14,10 @@ spec: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/1password/idfdev/(.*)" + - path: "/1password/(.*)" pathType: "ImplementationSpecific" backend: service: - name: {{ .Values.idfdev.connect.applicationName | quote }} + name: {{ .Values.connect.connect.applicationName | quote }} port: name: "connect-api" diff --git a/applications/onepassword-connect-dev/templates/idfdev-vault-secrets.yaml b/applications/onepassword-connect/templates/vault-secrets.yaml similarity index 51% rename from applications/onepassword-connect-dev/templates/idfdev-vault-secrets.yaml rename to applications/onepassword-connect/templates/vault-secrets.yaml index d8d5bbdf93..fb9172122b 100644 --- a/applications/onepassword-connect-dev/templates/idfdev-vault-secrets.yaml +++ b/applications/onepassword-connect/templates/vault-secrets.yaml @@ -1,11 +1,9 @@ apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: "idfdev-secret" + name: {{ .Values.connect.connect.credentialsName | quote }} labels: - {{- include "onepassword-connect-dev.labels" . | nindent 4 }} + {{- include "onepassword-connect.labels" . 
| nindent 4 }} spec: - path: "{{ .Values.global.vaultSecretsPath }}/onepassword-connect-dev" + path: "{{ .Values.global.vaultSecretsPath }}/onepassword-connect" type: "Opaque" - templates: - op-session: "{% .Secrets.idfdev %}" diff --git a/applications/onepassword-connect-dev/values-roundtable-dev.yaml b/applications/onepassword-connect/values-roundtable-dev.yaml similarity index 100% rename from applications/onepassword-connect-dev/values-roundtable-dev.yaml rename to applications/onepassword-connect/values-roundtable-dev.yaml diff --git a/applications/onepassword-connect-dev/values.yaml b/applications/onepassword-connect/values.yaml similarity index 80% rename from applications/onepassword-connect-dev/values.yaml rename to applications/onepassword-connect/values.yaml index 2af6cb4f5e..330aca9fb1 100644 --- a/applications/onepassword-connect-dev/values.yaml +++ b/applications/onepassword-connect/values.yaml @@ -1,14 +1,11 @@ -# Default values for onepassword-connect-dev. +# Default values for onepassword-connect. # This is a YAML-formatted file. # Declare variables to be passed into your templates. -idfdev: +connect: connect: - # -- Name of the Kubernetes Deployment - applicationName: "connect-idfdev" - # -- Name of secret containing the 1Password credentials - credentialsName: "idfdev-secret" + credentialsName: "onepassword-connect-secret" # -- Name of key inside secret containing 1Password credentials credentialsKey: "op-session" diff --git a/docs/admin/installation.rst b/docs/admin/installation.rst index 23f5cf01d2..9214af2e0a 100644 --- a/docs/admin/installation.rst +++ b/docs/admin/installation.rst @@ -8,6 +8,9 @@ An environment has a hostname, Vault server and path to its secrets, and a set o Before starting this process, you should set up the required secrets for your new environment. See :doc:`secrets-setup` for details. 
+If you are setting up an environment that will be running a 1Password Connect server for itself, you will need to take special bootstrapping steps. +See :px-app-bootstrap:`onepassword-connect` for more information. + Creating an environment ======================= diff --git a/docs/admin/migrating-secrets.rst b/docs/admin/migrating-secrets.rst index 027f76c657..3305d479b6 100644 --- a/docs/admin/migrating-secrets.rst +++ b/docs/admin/migrating-secrets.rst @@ -5,7 +5,8 @@ Migrating to the new secrets management system We introduced a new command-line-driven secrets management system for Phalanx environments in September of 2023. This page documents how to migrate to the new system from the older scripts in :file:`installer`. -These instructions assume that, if you are using 1Password for static secrets, you have already set up a 1Password vault and corresponding :px-app:`1Password Connect server ` for this environment, but that vault may be empty. +These instructions assume that, if you are using 1Password for static secrets, you have already set up a 1Password vault and enabled the :px-app:`1Password Connect server ` for this environment. +If you have not yet done this, see :doc:`/applications/onepassword-connect/add-new-environment`. In all :command:`phalanx` commands listed below, replace ```` with the short identifier of your environment. @@ -155,7 +156,7 @@ Update secrets Replace ```` with the value of ``vaultPathPrefix`` in :file:`environments/values-{environment}.yaml` for your environment. #. If you are using 1Password as the source for static secrets, set ``OP_CONNECT_TOKEN`` to the 1Password Connect token for this environment. - For SQuaRE-managed environments, this can be found in the :guilabel:`RSP 1Password tokens` item in the :guilabel:`SQuaRE` 1Password vault. + For SQuaRE-managed environments, this can be found in the ``RSP 1Password tokens`` item in the SQuaRE 1Password vault. #. Check what secrets are missing or incorrect and fix them. 
diff --git a/docs/admin/secrets-setup.rst b/docs/admin/secrets-setup.rst index 3124c345f6..2a408333d2 100644 --- a/docs/admin/secrets-setup.rst +++ b/docs/admin/secrets-setup.rst @@ -7,6 +7,9 @@ Phalanx does, however, come with tools to manage one specific approach to using This document explains the basic structure of how secrets must be stored in Vault, describes the tools for managing that structure, and describes the optional tools for managing Vault authentication credentials and paths for one specific Vault design. +If you are setting up an environment that will be running a 1Password Connect server for itself, you will need to take special bootstrapping steps. +See :px-app-bootstrap:`onepassword-connect` for more information. + .. note:: We are in the middle of a migration from an old secrets management system that sometimes used multiple secrets per application and sometimes pointed multiple applications at the same secret, to a new system that always uses one secret per application. @@ -173,13 +176,11 @@ This will be transformed into a Vault entry in the correct format for generating Configuring 1Password support ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In :file:`values-{environment}.yaml` for your environment, in the Phalanx :file:`environments` directory, add the setting ``onePasswordConnectServer``, setting it to the URL of the `1Password Connect`_ server for that 1Password vault. - -When running :command:`phalanx secrets` to sync or audit secrets, you will need to set ``OP_CONNECT_TOKEN`` to a read token for that 1Password Connect server. +For an environment to use 1Password as a static secrets source, there must be a 1Password Connect server that serves the secrets for that environment from a 1Password vault. +See :doc:`/applications/onepassword-connect/add-new-environment` for details on how to enable a new 1Password Connect server for your environment using Phalanx. 
-Phalanx can manage your 1Password Connect server as well, but it should run in a separate cluster than the environment that it provides secrets for. -SQuaRE-run environments use 1Password Connect servers running in the Roundtable clusters. -See :px-app:`onepassword-connect-dev` for details on how to set up a new 1Password Connect server using Phalanx. +When running :command:`phalanx secrets` to sync or audit secrets, you will need to set ``OP_CONNECT_TOKEN`` to the read token for that 1Password Connect server. +For SQuaRE-run environments, you can get that secret from the 1Password item ``RSP 1Password tokens`` in the SQuaRE 1Password vault. Static secrets from Vault ------------------------- diff --git a/docs/admin/sync-secrets.rst b/docs/admin/sync-secrets.rst index c8546b4a1c..bd21129f26 100644 --- a/docs/admin/sync-secrets.rst +++ b/docs/admin/sync-secrets.rst @@ -13,6 +13,7 @@ To populate Vault with all of the necessary secrets for an environment named ``< The ``VAULT_TOKEN`` environment variable must be set to the Vault write token for this environment. Add the ``--secrets`` command-line option or set ``OP_CONNECT_TOKEN`` if needed for your choice of a :ref:`static secrets source `. +For SQuaRE-managed deployments, the 1Password token for ``OP_CONNECT_TOKEN`` comes from the ``RSP 1Password tokens`` item in the SQuaRE 1Password vault. This must be done before installing a Phalanx environment for the first time. It can then be run again whenever the secrets for that environment change. 
diff --git a/docs/applications/index.rst b/docs/applications/index.rst index e307a23c4a..18602ea46e 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -68,7 +68,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde giftless/index kubernetes-replicator/index monitoring/index - onepassword-connect-dev/index + onepassword-connect/index ook/index squarebot/index diff --git a/docs/applications/onepassword-connect-dev/index.rst b/docs/applications/onepassword-connect-dev/index.rst deleted file mode 100644 index 4961a87c67..0000000000 --- a/docs/applications/onepassword-connect-dev/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. px-app:: onepassword-connect-dev - -#################################################### -onepassword-connect-dev — 1Password API server (dev) -#################################################### - -1Password Connect provides API access to a 1Password vault. -It is used to provide the API for Phalanx integration with 1Password as a source of static secrets. - -Each instance of the upstream 1Password Connect chart provides an API server for a single 1Password vault. -We want to use one vault per SQuaRE-managed Phalanx environment to ensure isolation of secrets between environments. -The Phalanx onepassword-connect applications therefore instantiate the upstream chart multiple times, one for each vault we are providing access to. - -Unfortunately, because dependencies and their aliases can't be conditional on :file:`values.yaml` settings, that means the set of 1Password Connect servers deployed on roundtable-dev have to be a separate application from the ones deployed on roundtable. -This application is the roundtable-dev set of 1Password Connect API servers. -These provide access to the vaults for development and test environments. - -.. jinja:: onepassword-connect-dev - :file: applications/_summary.rst.jinja - -Guides -====== - -.. 
toctree:: - :maxdepth: 1 - - values diff --git a/docs/applications/onepassword-connect-dev/values.md b/docs/applications/onepassword-connect-dev/values.md deleted file mode 100644 index fee0c6172a..0000000000 --- a/docs/applications/onepassword-connect-dev/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} onepassword-connect-dev -``` - -# onepassword-connect-dev Helm values reference - -Helm values reference table for the {px-app}`onepassword-connect-dev` application. - -```{include} ../../../applications/onepassword-connect-dev/README.md ---- -start-after: "## Values" ---- -``` \ No newline at end of file diff --git a/docs/applications/onepassword-connect/add-new-connect-server.rst b/docs/applications/onepassword-connect/add-new-connect-server.rst new file mode 100644 index 0000000000..faeea68105 --- /dev/null +++ b/docs/applications/onepassword-connect/add-new-connect-server.rst @@ -0,0 +1,84 @@ +################################## +Add a new 1Password Connect server +################################## + +This document describes how to set up a new 1Password Connect server. + +This is normally not required for SQuaRE-run environments, since all SQuaRE-run environments use the two already-existing 1Password Connect servers. +The one in the :px-env:`roundtable-dev ` environment serves the vaults for development environments, and one in the :px-env:`roundtable-prod ` environment serves the vaults for production environemnts. +However, these instructions may be helpful for other Phalanx users who want to st up an independent 1Password Connect server, or if we need to add a new server for some reason. + +When following these instructions, you will be creating a new `Secrets Automation workflow `__. +You will need to have permissions to create that workflow for the vault for your environment. 
+ +Create the workflow +=================== + +In the following steps, you will create a 1Password Secrets Automation workflow for the 1Password vault for your environment, and save the necessary secrets to another 1Password vault. + +#. Log on to the 1Password UI via a web browser. + +#. Click on :menuselection:`Integrations` in the right sidebar under **LSST IT**. + +#. Click on the :guilabel:`Directory` tab at the top of the screen. + +#. Under :guilabel:`Infrastructure Secrets Management` click on :guilabel:`Other`. + +#. Click on :guilabel:`Create a Connect server`. + +#. Under :guilabel:`Environment Name`, enter :samp:`RSP {environment}` where *environment* is the Phalanx environment in which this 1Password Connect server will be running (**not** the vaults that it will serve). + Then, click :guilabel:`Choose Vaults` and select the vaults that should be accessible through this 1Password Connect server. + Click :guilabel:`Add Enviroment` to continue. + +#. Next, 1Password wants you to create an access token for at least one environment. + This is the token that will be used by the Phalanx command-line tool to access secrets for that environment. + It will have access to one and only one vault. + + Under :guilabel:`Token Name`, enter the name of the environment the token should have access to. + Leave :guilabel:`Expires After` set to ``Never``. + Click :guilabel:`Choose Vaults` and choose the vault corresponding to that environment. + Click :guilabel:`Issue Token` to continue. + +#. Next to the credentials file, click :guilabel:`Save in 1Password`, change the title to :samp:`1Password Connect credentials ({environment})` (with *environment* set to the environment in which the 1Password Connect server will be running), select the ``SQuaRE`` vault, and click :guilabel:`Save`. + Then, next to the access token, click the clipboard icon to copy that token to the clipboard. + +#. Click :guilabel:`View Details` to continue. 
+ Go back to home by clicking on the icon on the upper left. + +#. Go to the SQuaRE vault, find the item ``RSP 1Password tokens``, and edit it. + Add the token to that item as another key/value pair, where the key is the short name of the enviroment. + Mark the value as a password. + +#. Confirm that the new ``1Password Connect credentials`` item created two steps previous exists. + You will need this when creating the 1Password Connect server. + You can download it to your local system now if you wish. + +Create the Phalanx configuration +================================ + +In the following steps, you'll deploy the new 1Password Connect server. + +#. Download the file in the :samp:`1Password Connect credentials ({environment})` item in the SQuaRE vault. + It will be named :file:`1password-credentials.json`. + +#. Encode the contents of that file in base64. + + .. prompt:: bash + + base64 -w0 < 1password-credentials.json; echo '' + + This is the static secret required by the 1Password Connect server. + +#. If you are following this process, you are presumably using 1Password to manage your static secrets. + Go to the 1Password vault for the environment where the 1Password Connect server will be running. + Create a new application secret item for the application ``onepassword-connect`` (see :ref:`dev-add-onepassword` for more details), and add a key named ``op-session`` whose value is the base64-encoded 1Password credentials. + +#. Synchronize secrets for that environment following the instructions in :doc:`/admin/sync-secrets`. + +.. note:: + + That final step assumes that the 1Password Connect server for the environment where you're deploying a new 1Password Connect server is running elsewhere. + In some cases, such as for the SQuaRE :px-env:`roundtable-prod ` and :px-env:`roundtable-dev ` environments, the 1Password Connect server for that environment runs in the environment itself. 
+ + In this case, you won't be able to use :command:`phalanx secrets sync` because the 1Password Connect server it wants to use is the one you're trying to install. + Instead, follow the :px-app-bootstrap:`bootstrapping instructions for onepassword-connect `. diff --git a/docs/applications/onepassword-connect/add-new-environment.rst b/docs/applications/onepassword-connect/add-new-environment.rst new file mode 100644 index 0000000000..393de81951 --- /dev/null +++ b/docs/applications/onepassword-connect/add-new-environment.rst @@ -0,0 +1,76 @@ +############################################## +Enable 1Password Connect for a new environment +############################################## + +SQuaRE-managed Phalanx deployments keep their static secrets in 1Password. +This means that each Phalanx environment run by SQuaRE needs to have a corresponding 1Password vault, and a 1Password Connect server that provides access to that vault. +One 1Password Connect server can provide access to multiple vaults using multiple separate tokens, each of which is scoped to only one vault. + +SQuaRE runs two 1Password Connect servers, one in the :px-env:`roundtable-dev ` environment for development environments and one in the :px-env:`roundtable-prod ` environment for production environemnts. + +This document describes how to enable the 1Password Connect server to serve the vault for a new environment. + +.. note:: + + These instructions only apply to SQuaRE-managed Phalanx environments. + You can use them as a model for how to use 1Password as a static secrets source with a different 1Password account, but some modifications will be required. + +.. _onepassword-add-prerequisites: + +Prerequistes +============ + +Every environment must have a separate 1Password vault in the **LSST IT** 1Password account. +The vault for the environment should be named ``RSP `` where ```` is the top-level FQDN for that environment. 
+(In hindsight the vaults should be named after the short environment names used in Phalanx, but sadly that's not what we did.) + +When following these instructions, you will be modifying a `Secrets Automation workflow `__. +You will need to have permissions to modify the workflow for the 1Password Connet server that will be serving your environment. + +Process +======= + +In the following steps, you'll change the permissions of the 1Password Connect server to add the new 1Password vault for your environment and create a new token with access to that vault. + +#. Log on to the 1Password UI via a web browser. + +#. Click on :menuselection:`Integrations` in the right sidebar under **LSST IT**. + +#. Click on the Secrets Management workflow for the 1Password Connect server that will be serving this environment. + +#. Next to :guilabel:`Vaults`, click on :guilabel:`Manage`. + Select the vault for the environment that you're adding, in addition to the existing vaults. + Click :guilabel:`Update Vaults`. + +#. Next to :guilabel:`Access Tokens`, click on :guilabel:`New Token`. + +#. Under :guilabel:`Environment Name`, enter the same name as the 1Password vault name for your environment. + Then, click :guilabel:`Choose Vaults` and select the corresponding vault (and only that one). + Click :guilabel:`Issue Token` to continue. + +#. Next to the access token, click on the clipboard icon to copy the token to the clipboard. + Then, click on :guilabel:`View Details` to continue. + +#. Go back to home by clicking on the icon on the upper left. + Go to the SQuaRE vault, find the ``RSP 1Password tokens``, and edit it. + Add the token to that item as another key/value pair, where the key is the short name of the enviroment. + Mark the value as a password. + +#. Modify :file:`environments/values-{environment}.yaml` to add the configuration for the 1Password Connect server: + + .. 
code-block:: yaml + + onepassword: + connectUrl: "https://roundtable-dev.lsst.cloud/1password" + vaultTitle: "RSP " + + The ``connectUrl`` will be either ``https://roundtable-dev.lsst.cloud/1password`` (development environments) or ``https://roundtable.lsst.cloud/1password`` (production environments) for SQuaRE-run environments. + ``vaultTitle`` should be set to the name of the 1Password vault for the environment (see :ref:`onepassword-add-prerequisites`). + +Next steps +========== + +You have now confirmed that 1Password is set up for your environment. + +- If you are migrating from the old secrets management system, perform the other steps now: :doc:`/admin/migrating-secrets` +- If you are setting up a new environment, start populating the 1Password vault with static secrets for the applications running in that environment: :doc:`/developers/update-a-onepassword-secret` diff --git a/docs/applications/onepassword-connect/bootstrap.rst b/docs/applications/onepassword-connect/bootstrap.rst new file mode 100644 index 0000000000..fa951dff6f --- /dev/null +++ b/docs/applications/onepassword-connect/bootstrap.rst @@ -0,0 +1,25 @@ +.. px-app-bootstrap:: onepassword-connect + +############################### +Bootstrapping 1Password Connect +############################### + +When :ref:`installing a new environment `, one of the steps is to :doc:`synchronize secrets for that environment `. +However, when 1Password is used as the source for static secrets, this requires a running 1Password Connect server and a token to connect to that server. +Bootstrapping an environment with this property therefore a different process to break this cycle. + +The recommended process of bootstrapping this type of environment is: + +#. In :file:`environment/values-{environment}.yaml`, enable only the minimum required applications plus ``onepassword-connect``. + Leave everything else disabled to start. + +#. 
Follow the normal secrets setup for the environment using :ref:`a YAML file for static secrets `. + Fill in the ``onepassword-connect`` secret with the base64-encoded credentials file obtained from :doc:`add-new-connect-server`. + +#. Install the environment using the :doc:`normal instructions `. + +#. Now that you have a running 1Password Connect server, take the secrets from your static secrets YAML file and :ref:`populate your 1Password vault with those secrets `. + +#. Set the ``OP_CONNECT_TOKEN`` environment variable to the token for this environment and :doc:`sync secrets again ` using 1Password. + +#. Now, enable the rest of the applications you want to run in this environment and finish :doc:`secrets setup ` and :doc:`installation `. diff --git a/docs/applications/onepassword-connect/index.rst b/docs/applications/onepassword-connect/index.rst new file mode 100644 index 0000000000..1caa0b1d6e --- /dev/null +++ b/docs/applications/onepassword-connect/index.rst @@ -0,0 +1,26 @@ +.. px-app:: onepassword-connect + +########################################## +onepassword-connect — 1Password API server +########################################## + +1Password Connect provides API access to a 1Password vault. +It is used to provide the API for Phalanx integration with 1Password as a source of static secrets. + +Each 1Password Connect server can serve multiple 1Password vaults. +For SQuaRE-managed environments, we run two 1Password Connect servers, one for development environments and one for production environments. +Each environment gets its own 1Password Connect token that can only see secrets in its own 1Password Connect vault. + +.. jinja:: onepassword-connect + :file: applications/_summary.rst.jinja + +Guides +====== + +.. 
toctree:: + :maxdepth: 1 + + bootstrap + add-new-environment + add-new-connect-server + values diff --git a/docs/applications/onepassword-connect/values.md b/docs/applications/onepassword-connect/values.md new file mode 100644 index 0000000000..459c0f97de --- /dev/null +++ b/docs/applications/onepassword-connect/values.md @@ -0,0 +1,12 @@ +```{px-app-values} onepassword-connect +``` + +# onepassword-connect Helm values reference + +Helm values reference table for the {px-app}`onepassword-connect` application. + +```{include} ../../../applications/onepassword-connect/README.md +--- +start-after: "## Values" +--- +``` diff --git a/environments/README.md b/environments/README.md index c92b04c4a1..793760602c 100644 --- a/environments/README.md +++ b/environments/README.md @@ -27,7 +27,7 @@ | applications.nublado | bool | `false` | Enable the nublado application (v3 of the Notebook Aspect) | | applications.nublado2 | bool | `false` | Enable the nublado2 application (v2 of the Notebook Aspect, now deprecated). This should not be used for new environments. 
| | applications.obsloctap | bool | `false` | Enable the obsloctap application | -| applications.onepassword-connect-dev | bool | `false` | Enable the onepassword-connect-dev application | +| applications.onepassword-connect | bool | `false` | Enable the onepassword-connect application | | applications.ook | bool | `false` | Enable the ook application | | applications.plot-navigator | bool | `false` | Enable the plot-navigator application | | applications.portal | bool | `false` | Enable the portal application | diff --git a/environments/templates/onepassword-connect-dev-application.yaml b/environments/templates/onepassword-connect-application.yaml similarity index 75% rename from environments/templates/onepassword-connect-dev-application.yaml rename to environments/templates/onepassword-connect-application.yaml index 590783e740..c87d077f6a 100644 --- a/environments/templates/onepassword-connect-dev-application.yaml +++ b/environments/templates/onepassword-connect-application.yaml @@ -1,23 +1,23 @@ -{{- if (index .Values "applications" "onepassword-connect-dev") -}} +{{- if (index .Values "applications" "onepassword-connect") -}} apiVersion: v1 kind: Namespace metadata: - name: "onepassword-connect-dev" + name: "onepassword-connect" --- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "onepassword-connect-dev" + name: "onepassword-connect" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "onepassword-connect-dev" + namespace: "onepassword-connect" server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/onepassword-connect-dev" + path: "applications/onepassword-connect" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} helm: @@ -31,4 +31,4 @@ spec: valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/environments/values-idfdev.yaml 
b/environments/values-idfdev.yaml index a403d70372..689a156b26 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -2,7 +2,7 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-dev-repos.ya fqdn: data-dev.lsst.cloud name: idfdev onepassword: - connectUrl: "https://roundtable-dev.lsst.cloud/1password/idfdev" + connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP data-dev.lsst.cloud" vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/phalanx/idfdev diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index 0ee92a116b..d015773284 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -1,5 +1,8 @@ name: minikube fqdn: minikube.lsst.codes +onepassword: + connectUrl: "https://roundtable-dev.lsst.cloud/1password" + vaultTitle: "RSP minikube.lsst.codes" vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/minikube.lsst.codes diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 13cbedaf4b..ca0db3d6f6 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -1,16 +1,16 @@ name: roundtable-dev fqdn: roundtable-dev.lsst.cloud -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/k8s_operator/roundtable-dev.lsst.cloud onepassword: - connectUrl: https://roundtable-dev.lsst.cloud/1password + connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP roundtable-dev.lsst.cloud" +vaultUrl: "https://vault.lsst.codes" +vaultPathPrefix: secret/k8s_operator/roundtable-dev.lsst.cloud applications: giftless: true kubernetes-replicator: true monitoring: true - onepassword-connect-dev: true + onepassword-connect: true ook: true sasquatch: true squarebot: true diff --git a/environments/values.yaml b/environments/values.yaml index 4b6fe495f8..d69bc78c5d 100644 --- a/environments/values.yaml +++ 
b/environments/values.yaml @@ -103,8 +103,8 @@ applications: # deprecated). This should not be used for new environments. nublado2: false - # -- Enable the onepassword-connect-dev application - onepassword-connect-dev: false + # -- Enable the onepassword-connect application + onepassword-connect: false # -- Enable the ook application ook: false From 6595d797b47c37e5b4c3b787e679ec0d30ab2970 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 13:25:09 -0700 Subject: [PATCH 008/588] Clarify introduction to adding new 1Password Connect servers Co-authored-by: Jonathan Sick --- .../onepassword-connect/add-new-connect-server.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/applications/onepassword-connect/add-new-connect-server.rst b/docs/applications/onepassword-connect/add-new-connect-server.rst index faeea68105..915057fc64 100644 --- a/docs/applications/onepassword-connect/add-new-connect-server.rst +++ b/docs/applications/onepassword-connect/add-new-connect-server.rst @@ -2,11 +2,10 @@ Add a new 1Password Connect server ################################## -This document describes how to set up a new 1Password Connect server. +This document describes how to set up a new 1Password Connect server to push secrets from 1Password to Vault for one or more Phalanx environments. -This is normally not required for SQuaRE-run environments, since all SQuaRE-run environments use the two already-existing 1Password Connect servers. -The one in the :px-env:`roundtable-dev ` environment serves the vaults for development environments, and one in the :px-env:`roundtable-prod ` environment serves the vaults for production environemnts. -However, these instructions may be helpful for other Phalanx users who want to st up an independent 1Password Connect server, or if we need to add a new server for some reason. +SQuaRE-run Phalanx environments already have 1Password Connect servers set up. 
+The one in the :px-env:`roundtable-dev ` environment serves the vaults for development environments, and one in the :px-env:`roundtable-prod ` environment serves the vaults for production environments. When following these instructions, you will be creating a new `Secrets Automation workflow `__. You will need to have permissions to create that workflow for the vault for your environment. From b0b824666c5271e2f5599495603fdff9368b1d6d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 13:25:47 -0700 Subject: [PATCH 009/588] Always add 1Password for 1Password vault Co-authored-by: Jonathan Sick --- .../applications/onepassword-connect/add-new-connect-server.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/applications/onepassword-connect/add-new-connect-server.rst b/docs/applications/onepassword-connect/add-new-connect-server.rst index 915057fc64..9b1a98923b 100644 --- a/docs/applications/onepassword-connect/add-new-connect-server.rst +++ b/docs/applications/onepassword-connect/add-new-connect-server.rst @@ -31,7 +31,7 @@ In the following steps, you will create a 1Password Secrets Automation workflow #. Next, 1Password wants you to create an access token for at least one environment. This is the token that will be used by the Phalanx command-line tool to access secrets for that environment. - It will have access to one and only one vault. + It will have access to one and only one 1Password vault. Under :guilabel:`Token Name`, enter the name of the environment the token should have access to. Leave :guilabel:`Expires After` set to ``Never``. 
From 04f256ad6e5da0d08ff2fbf41d935b35a1a4f2d2 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 13:26:08 -0700 Subject: [PATCH 010/588] Add macOS X base64 instructions Co-authored-by: Jonathan Sick --- .../onepassword-connect/add-new-connect-server.rst | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/applications/onepassword-connect/add-new-connect-server.rst b/docs/applications/onepassword-connect/add-new-connect-server.rst index 9b1a98923b..9036266946 100644 --- a/docs/applications/onepassword-connect/add-new-connect-server.rst +++ b/docs/applications/onepassword-connect/add-new-connect-server.rst @@ -62,9 +62,19 @@ In the following steps, you'll deploy the new 1Password Connect server. #. Encode the contents of that file in base64. - .. prompt:: bash + .. tab-set:: + + .. tab-item:: Linux - base64 -w0 < 1password-credentials.json; echo '' + .. prompt:: bash + + base64 -w0 < 1password-credentials.json; echo '' + + .. tab-item:: macOS + + .. prompt:: bash + + base64 -i 1password-credentials.json; echo '' This is the static secret required by the 1Password Connect server. From 2f53073fadfe6e26bb2ffbedda740e3480a2fd96 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 13:32:49 -0700 Subject: [PATCH 011/588] Further clarify 1Password Connect docs Tweak the wording for the introduction to 1Password Connect servers more, and cross-link from the admin secrets setup documentation. --- docs/admin/secrets-setup.rst | 3 +++ .../onepassword-connect/add-new-connect-server.rst | 7 ++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/admin/secrets-setup.rst b/docs/admin/secrets-setup.rst index 2a408333d2..09f65cccd4 100644 --- a/docs/admin/secrets-setup.rst +++ b/docs/admin/secrets-setup.rst @@ -142,6 +142,9 @@ Static secrets from 1Password Static secrets may be stored in a 1Password vault. In this case, each application with static secrets should have an entry in this 1Password vault. 
+The 1Password vault must be served by a 1Password Connect server so that the Phalanx tooling can access the secrets. +See :px-app:`onepassword-connect` for more details on how this is done. + Application secrets ^^^^^^^^^^^^^^^^^^^ diff --git a/docs/applications/onepassword-connect/add-new-connect-server.rst b/docs/applications/onepassword-connect/add-new-connect-server.rst index 9036266946..42ce34a191 100644 --- a/docs/applications/onepassword-connect/add-new-connect-server.rst +++ b/docs/applications/onepassword-connect/add-new-connect-server.rst @@ -2,10 +2,11 @@ Add a new 1Password Connect server ################################## -This document describes how to set up a new 1Password Connect server to push secrets from 1Password to Vault for one or more Phalanx environments. +This document describes how to set up a new 1Password Connect server to provide static secrets for one or more Phalanx environments. +See :ref:`admin-static-secrets` for more background. SQuaRE-run Phalanx environments already have 1Password Connect servers set up. -The one in the :px-env:`roundtable-dev ` environment serves the vaults for development environments, and one in the :px-env:`roundtable-prod ` environment serves the vaults for production environments. +The one in the :px-env:`roundtable-dev` environment serves the vaults for development environments, and one in the :px-env:`roundtable-prod` environment serves the vaults for production environments. When following these instructions, you will be creating a new `Secrets Automation workflow `__. You will need to have permissions to create that workflow for the vault for your environment. @@ -63,7 +64,7 @@ In the following steps, you'll deploy the new 1Password Connect server. #. Encode the contents of that file in base64. .. tab-set:: - + .. tab-item:: Linux .. 
prompt:: bash From db4ab3c04195e7c394cc721bc061c11580dc2186 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 13:33:15 -0700 Subject: [PATCH 012/588] Add macOS method to do single-line base64 --- docs/developers/define-secrets.rst | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/docs/developers/define-secrets.rst b/docs/developers/define-secrets.rst index f342d86ec1..45d1a530e7 100644 --- a/docs/developers/define-secrets.rst +++ b/docs/developers/define-secrets.rst @@ -215,9 +215,19 @@ Newlines will be converted to spaces when pasting the secret value. If newlines need to be preserved, be sure to mark the secret with ``onepassword.encoded`` set to ``true`` in :file:`secrets.yaml`, and then encode the secret in base64 before pasting it into 1Password. To encode the secret, save it to a file with the correct newlines, and then use a command such as: -.. prompt:: bash +.. tab-set:: - base64 -w0 < /path/to/secret; echo '' + .. tab-item:: Linux + + .. prompt:: bash + + base64 -w0 < /path/to/secret; echo '' + + .. tab-item:: macOS + + .. prompt:: bash + + base64 -i /path/to/secret; echo '' This will generate a base64-encoded version of the secret on one line, suitable for cutting and pasting into the 1Password field. From beae2c9623dd244156deac3d1f9124fb57978263 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 27 Sep 2023 12:24:18 -0700 Subject: [PATCH 013/588] Don't install datalinker on minikube datalinker requires Butler secrets, but there are no meaningful Butler secrets in minikube. Rather than create dummy ones, just don't install the application because it can't be meaningfully tested. We can revisit this later if we generate test data for minikube. 
--- applications/datalinker/values-minikube.yaml | 0 environments/values-minikube.yaml | 5 +++-- 2 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 applications/datalinker/values-minikube.yaml diff --git a/applications/datalinker/values-minikube.yaml b/applications/datalinker/values-minikube.yaml deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index d015773284..4c91e50c03 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -8,9 +8,10 @@ vaultPathPrefix: secret/k8s_operator/minikube.lsst.codes # The primary constraint on enabling applications is the low available memory # of a GitHub Actions runner, since minikube is used for smoke testing of new -# Helm configurations. +# Helm configurations. minikube also doesn't have access to data, at least +# currently, which substantially limits the applications that can be +# meaningfully deployed. applications: - datalinker: true hips: true mobu: true postgres: true From 93b3bebd1770389a3a95aa519375852e2069fa1b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 14:19:43 -0700 Subject: [PATCH 014/588] Reduce the limits for the TAP server on minikube minikube is thrashing. See if it's due to the resource requests and limits for TAP, which in theory were all overridden as 0 but which I'm not sure were working correctly because the pods keep dying and getting restarted. 
--- applications/tap/values-minikube.yaml | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml index 7cac2030b8..73fe09ec8f 100644 --- a/applications/tap/values-minikube.yaml +++ b/applications/tap/values-minikube.yaml @@ -7,21 +7,17 @@ cadc-tap: enabled: true config: - jvmMaxHeapSize: 4G + jvmMaxHeapSize: 2G resources: requests: - cpu: 0 - memory: 0 + cpu: 0.01 + memory: 500M limits: - cpu: 0 - memory: 0 + memory: 2G uws: resources: requests: - cpu: 0 - memory: 0 - limits: - cpu: 0 - memory: 0 + cpu: 0.01 + memory: 500M From 29b0b2ff8e082ad445062c3e94810361256b8852 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 14:34:27 -0700 Subject: [PATCH 015/588] Set more memory limits for TAP The TAP mock database and schema database are still thrashing. Try setting lower memory limits. --- applications/tap/values-minikube.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml index 73fe09ec8f..19d87e1b52 100644 --- a/applications/tap/values-minikube.yaml +++ b/applications/tap/values-minikube.yaml @@ -2,9 +2,15 @@ cadc-tap: tapSchema: image: repository: "lsstsqre/tap-schema-idfprod-tap" + resources: + limits: + memory: 500M mockdb: enabled: true + resources: + limits: + memory: 500M config: jvmMaxHeapSize: 2G From 27f0f7106e4d502677dfefcc91c221ba92979958 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 28 Sep 2023 15:23:17 -0700 Subject: [PATCH 016/588] fix prod clientID --- applications/argocd/values-usdfprod.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 3a73d911bf..e7b190ceb2 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -30,7 +30,7 @@ argo-cd: 
oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: usdf-rsp-prod-argocd + clientID: usdf-rsp-argocd clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] From bd2fc9fac1aae1af96bc6200cc704e640ad10fa0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 27 Sep 2023 15:05:59 -0700 Subject: [PATCH 017/588] Enable onepassword-connect on roundtable-prod Set up the 1Password Connect server for production environments on roundtable-prod. --- .../onepassword-connect/values-roundtable-prod.yaml | 0 environments/values-roundtable-prod.yaml | 7 ++++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 applications/onepassword-connect/values-roundtable-prod.yaml diff --git a/applications/onepassword-connect/values-roundtable-prod.yaml b/applications/onepassword-connect/values-roundtable-prod.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index 848f6415e0..e6d0ce5469 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -1,13 +1,14 @@ name: roundtable-prod fqdn: roundtable.lsst.cloud +onepassword: + connectUrl: "https://roundtable.lsst.cloud/1password" + vaultTitle: "RSP roundtable.lsst.cloud" vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/roundtable.lsst.cloud -onepassword: - connectUrl: https://roundtable.lsst.cloud/1password - vaultTitle: "RSP roundtable-dev.lsst.cloud" applications: kubernetes-replicator: true + onepassword-connect: true ook: true sasquatch: true squareone: true From 4a0f404f0f1b4ba73a1a384750ca1f437b9be02e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 15:51:21 -0700 Subject: [PATCH 018/588] Add test for secrets.yaml files Test that every application that installs a 
VaultSecret resource has a secrets.yaml or secrets-.yaml file that defines its secrets. Add a whitelist of applications that have not yet done this. --- tests/config_test.py | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/tests/config_test.py b/tests/config_test.py index f783b3750d..6b662afbef 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -6,9 +6,20 @@ import yaml +_ALLOW_NO_SECRETS = ( + "giftless", + "linters", + "monitoring", + "obsloctap", + "next-visit-fan-out", + "plot-navigator", + "production-tools", +) +"""Temporary whitelist of applications that haven't added secrets.yaml.""" + def test_application_version() -> None: - """Test that all application charts have version 1.0.0.""" + """All application charts should have version 1.0.0.""" applications_path = Path(__file__).parent.parent / "applications" for application in applications_path.iterdir(): if not application.is_dir(): @@ -27,3 +38,30 @@ def test_application_version() -> None: assert ( chart["version"] == "1.0.0" ), f"Shared chart {shared_chart.name} has incorrect version" + + +def test_secrets_defined() -> None: + """Any application with a VaultSecret should have secrets.yaml.""" + applications_path = Path(__file__).parent.parent / "applications" + for application in applications_path.iterdir(): + if not application.is_dir() or application.name in _ALLOW_NO_SECRETS: + continue + if list(application.glob("secrets*.yaml")): + continue + template_path = application / "templates" + if not template_path.is_dir(): + continue + for template in (application / "templates").iterdir(): + if not template.is_file(): + continue + resources = template.read_text().split("---\n") + for resource in resources: + if "kind: VaultSecret" not in resource: + continue + if "name: pull-secret" in resource: + continue + msg = ( + f"Application {application.name} installs a VaultSecret" + " resource but has no secrets.yaml configuration" + ) + raise 
AssertionError(msg) From e3fc89ad7835a86018bfe970b15653aad178616e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 16:33:06 -0700 Subject: [PATCH 019/588] Better handling of missing 1Password secrets Retrieve each 1Password item separately rather than using the bulk API so that we can do proper error reporting of all of the missing secrets we expected to be in 1Password but which weren't found. Throw that as an exception up through sync, but, in audit, catch that exception and properly report the missing secrets. --- src/phalanx/exceptions.py | 18 +++++ src/phalanx/services/secrets.py | 17 ++++- src/phalanx/storage/onepassword.py | 72 ++++++++++--------- tests/cli/secrets_test.py | 21 ++++++ .../data/output/minikube/audit-missing-output | 4 ++ tests/support/onepassword.py | 13 ++++ 6 files changed, 110 insertions(+), 35 deletions(-) create mode 100644 tests/data/output/minikube/audit-missing-output diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py index b9e0a78f0a..204e860eaa 100644 --- a/src/phalanx/exceptions.py +++ b/src/phalanx/exceptions.py @@ -116,6 +116,24 @@ def __init__(self, application: str, key: str, error: str) -> None: super().__init__(msg) +class MissingOnepasswordSecretsError(Exception): + """Secrets are missing from 1Password. + + Parameters + ---------- + secrets + List of strings identifying missing secrets. These will either be a + bare application name, indicating the entire application item is + missing from 1Password, or the application name followed by a space, + indicating the 1Password item doesn't have that field. 
+ """ + + def __init__(self, secrets: Iterable[str]) -> None: + self.secrets = list(secrets) + msg = f'Missing 1Password items or fields: {", ".join(self.secrets)}' + super().__init__(msg) + + class NoOnepasswordConfigError(Exception): """Environment does not use 1Password.""" diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 2f0e761184..cec83c0e8a 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -9,7 +9,11 @@ import yaml from pydantic import SecretStr -from ..exceptions import NoOnepasswordConfigError, UnresolvedSecretsError +from ..exceptions import ( + MissingOnepasswordSecretsError, + NoOnepasswordConfigError, + UnresolvedSecretsError, +) from ..models.environments import Environment from ..models.secrets import ( PullSecret, @@ -102,7 +106,11 @@ def audit( """ environment = self._config.load_environment(env_name) if not static_secrets: - static_secrets = self._get_onepassword_secrets(environment) + try: + static_secrets = self._get_onepassword_secrets(environment) + except MissingOnepasswordSecretsError as e: + heading = "Missing static secrets from 1Password:" + return f"{heading}\n• " + "\n• ".join(e.secrets) + "\n" vault_client = self._vault.get_vault_client(environment) pull_secret = static_secrets.pull_secret if static_secrets else None @@ -364,6 +372,9 @@ def _get_onepassword_secrets( Raises ------ + MissingOnepasswordSecretsError + Raised if any of the items or fields expected to be in 1Password + are not present. NoOnepasswordCredentialsError Raised if the environment uses 1Password but no 1Password credentials were available in the environment. 
@@ -375,6 +386,8 @@ def _get_onepassword_secrets( encoded = {} for application in environment.all_applications(): static_secrets = application.all_static_secrets() + if not static_secrets: + continue query[application.name] = [s.key for s in static_secrets] encoded[application.name] = { s.key for s in static_secrets if s.onepassword.encoded diff --git a/src/phalanx/storage/onepassword.py b/src/phalanx/storage/onepassword.py index 487ba8d753..bc1e1b662a 100644 --- a/src/phalanx/storage/onepassword.py +++ b/src/phalanx/storage/onepassword.py @@ -5,10 +5,13 @@ import os from collections import defaultdict -from onepasswordconnectsdk import load_dict, new_client +from onepasswordconnectsdk import new_client from onepasswordconnectsdk.client import FailedToRetrieveItemException -from ..exceptions import NoOnepasswordCredentialsError +from ..exceptions import ( + MissingOnepasswordSecretsError, + NoOnepasswordCredentialsError, +) from ..models.environments import EnvironmentBaseConfig from ..models.secrets import PullSecret, StaticSecret, StaticSecrets @@ -55,40 +58,43 @@ def get_secrets(self, query: dict[str, list[str]]) -> StaticSecrets: dict of dict Retrieved static secrets as a dictionary of applications to secret keys to `~phalanx.models.secrets.StaticSecret` objects. + + Raises + ------ + MissingOnepasswordSecretsError + Raised if any of the items or fields expected to be in 1Password + are not present. """ - request: dict[tuple[str, str], dict[str, str]] = {} - extra = [] - for application, secrets in query.items(): - for secret in secrets: - if "." 
in secret: - extra.append((application, secret)) - else: - request[(application, secret)] = { - "opitem": application, - "opfield": f".{secret}", - "opvault": self._vault_id, - } - response = load_dict(self._onepassword, request) applications: defaultdict[str, dict[str, StaticSecret]] applications = defaultdict(dict) - for key, value in response.items(): - application, secret = key - applications[application][secret] = StaticSecret(value=value) - - # Separately handle the secret field names that contain periods, since - # that conflicts with the syntax used by load_dict. - for application, secret in extra: - item = self._onepassword.get_item(application, self._vault_id) - found = False - for field in item.fields: - if field.label == secret: - static_secret = StaticSecret(value=field.value) - applications[application][secret] = static_secret - found = True - break - if not found: - msg = f"Item {application} has no field {secret}" - raise FailedToRetrieveItemException(msg) + + # This method originally used the load_dict bulk query interface, but + # the onepasswordconnectsdk Python library appears to turn that into + # separate queries per item anyway, it can't handle fields whose names + # contain periods, and it means we don't know what items are missing + # for error reporting. It seems better to do the work directly. + not_found = [] + for application, secrets in query.items(): + try: + item = self._onepassword.get_item(application, self._vault_id) + except FailedToRetrieveItemException: + not_found.append(application) + continue + for secret in secrets: + found = False + for field in item.fields: + if field.label == secret: + static_secret = StaticSecret(value=field.value) + applications[application][secret] = static_secret + found = True + break + if not found: + not_found.append(f"{application} {secret}") + + # If any secrets weren't found, raise an exception with the list of + # secrets that weren't found. 
+ if not_found: + raise MissingOnepasswordSecretsError(not_found) # Return the static secrets. return StaticSecrets( diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index cbf764250f..c32c917654 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -61,6 +61,27 @@ def test_audit(factory: Factory, mock_vault: MockVaultClient) -> None: assert result.output == read_output_data("idfdev", "secrets-audit") +def test_audit_onepassword_missing( + factory: Factory, + mock_onepassword: MockOnepasswordClient, + mock_vault: MockVaultClient, +) -> None: + """Check reporting of missing 1Password secrets.""" + phalanx_test_path() + config_storage = factory.create_config_storage() + environment = config_storage.load_environment("minikube") + assert environment.onepassword + vault_title = environment.onepassword.vault_title + mock_onepassword.create_empty_test_vault(vault_title) + mock_vault.load_test_data(environment.vault_path_prefix, "minikube") + + result = run_cli("secrets", "audit", "minikube") + assert result.exit_code == 0 + assert result.output == read_output_data( + "minikube", "audit-missing-output" + ) + + def test_list() -> None: result = run_cli("secrets", "list", "idfdev") assert result.exit_code == 0 diff --git a/tests/data/output/minikube/audit-missing-output b/tests/data/output/minikube/audit-missing-output new file mode 100644 index 0000000000..d0ad300c7e --- /dev/null +++ b/tests/data/output/minikube/audit-missing-output @@ -0,0 +1,4 @@ +Missing static secrets from 1Password: +• argocd +• gafaelfawr +• mobu diff --git a/tests/support/onepassword.py b/tests/support/onepassword.py index 2c04886a43..133ae3d957 100644 --- a/tests/support/onepassword.py +++ b/tests/support/onepassword.py @@ -37,6 +37,19 @@ def __init__(self) -> None: self._data: dict[str, dict[str, Item]] = {} self._uuids: dict[str, str] = {} + def create_empty_test_vault(self, vault: str) -> None: + """Create an empty 1Password vault for testing. 
+ + This method is not part of the 1Password Connect API. It is intended + for use by the test suite to set up a test. + + Parameters + ---------- + vault + Name of the 1Password vault. + """ + self._data[vault] = {} + def load_test_data(self, vault: str, environment: str) -> None: """Load 1Password test data for the given environment. From f21b6089f1c7ccc1b5d6982750a4afb53eb59a83 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 29 Sep 2023 08:35:26 -0700 Subject: [PATCH 020/588] Change secrets audit heading Change "Incorrect secrets" to "Secrets that do not have their expected value." --- src/phalanx/services/secrets.py | 3 ++- tests/data/output/idfdev/secrets-audit | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index cec83c0e8a..9718a7b67d 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -55,7 +55,8 @@ def to_text(self) -> str: report += "Missing secrets:\n• " + secrets + "\n" if self.mismatch: secrets = "\n• ".join(sorted(self.mismatch)) - report += "Incorrect secrets:\n• " + secrets + "\n" + heading = "Secrets that do not have their expected value:" + report += f"{heading}\n• " + secrets + "\n" if self.unknown: secrets = "\n• ".join(sorted(self.unknown)) report += "Unknown secrets in Vault:\n• " + secrets + "\n" diff --git a/tests/data/output/idfdev/secrets-audit b/tests/data/output/idfdev/secrets-audit index 18db12fb05..baa8c920ff 100644 --- a/tests/data/output/idfdev/secrets-audit +++ b/tests/data/output/idfdev/secrets-audit @@ -9,7 +9,7 @@ Missing secrets: • nublado postgres-credentials.txt • nublado proxy_token • portal ADMIN_PASSWORD -Incorrect secrets: +Secrets that do not have their expected value: • gafaelfawr database-password • postgres nublado3_password Unknown secrets in Vault: From 4e8fec6fb0b6db576a9d95b704cf71897358bc9c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 17:10:00 -0700 Subject: [PATCH 
021/588] Properly diagnose 1Password encoding errors Report a proper exception including the application and secret key names if the value of a secret in 1Password that is marked as encoded is either invalid base64 or decodes to something that cannot be represented as a Unicode string. --- src/phalanx/exceptions.py | 22 ++++++++++++++ src/phalanx/services/secrets.py | 40 ++++++++++++++++++++++-- tests/cli/secrets_test.py | 54 ++++++++++++++++++++++++++++++++- tests/support/onepassword.py | 4 ++- 4 files changed, 116 insertions(+), 4 deletions(-) diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py index 204e860eaa..f349c59259 100644 --- a/src/phalanx/exceptions.py +++ b/src/phalanx/exceptions.py @@ -116,6 +116,28 @@ def __init__(self, application: str, key: str, error: str) -> None: super().__init__(msg) +class MalformedOnepasswordSecretError(Exception): + """A secret stored in 1Password was malformed. + + The most common cause of this error is that the secret was marked as + encoded in base64 but couldn't be decoded. + + Parameters + ---------- + application + Name of the application. + key + Secret key. + error + Error message. + """ + + def __init__(self, application: str, key: str, error: str) -> None: + name = f"{application}/{key}" + msg = f"Value of secret {name} is malformed: {error}" + super().__init__(msg) + + class MissingOnepasswordSecretsError(Exception): """Secrets are missing from 1Password. 
diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 9718a7b67d..1c18c8f3fd 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -2,6 +2,7 @@ from __future__ import annotations +import binascii from base64 import b64decode from collections import defaultdict from dataclasses import dataclass, field @@ -10,6 +11,7 @@ from pydantic import SecretStr from ..exceptions import ( + MalformedOnepasswordSecretError, MissingOnepasswordSecretsError, NoOnepasswordConfigError, UnresolvedSecretsError, @@ -355,6 +357,37 @@ def _clean_vault_secrets( for key in sorted(to_delete): print("Deleted Vault secret for", application, key) + def _decode_base64_secret( + self, application: str, key: str, value: SecretStr + ) -> SecretStr: + """Decode a secret value that was encoded in base64. + + Parameters + ---------- + application + Name of the application owning the secret, for error reporting. + key + Key of the secret, for error reporting. + value + Value of the secret. + + Returns + ------- + pydantic.SecretStr or None + Decoded value of the secret. + + Raises + ------ + MalformedOnepasswordSecretError + Raised if the secret could not be decoded. + """ + try: + secret = value.get_secret_value() + return SecretStr(b64decode(secret.encode()).decode()) + except (binascii.Error, UnicodeDecodeError) as e: + msg = "value could not be base64-decoded to a valid secret string" + raise MalformedOnepasswordSecretError(application, key, msg) from e + def _get_onepassword_secrets( self, environment: Environment ) -> StaticSecrets | None: @@ -373,6 +406,8 @@ def _get_onepassword_secrets( Raises ------ + MalformedOnepasswordSecretError + Raised if the secret could not be decoded. MissingOnepasswordSecretsError Raised if any of the items or fields expected to be in 1Password are not present. 
@@ -400,8 +435,9 @@ def _get_onepassword_secrets( for key in secrets: secret = result.applications[app_name][key] if secret.value: - value = secret.value.get_secret_value().encode() - secret.value = SecretStr(b64decode(value).decode()) + secret.value = self._decode_base64_secret( + app_name, key, secret.value + ) return result def _resolve_secrets( diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index c32c917654..159de2673b 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -4,16 +4,18 @@ import os import re -from base64 import b64decode +from base64 import b64decode, b64encode from datetime import datetime, timedelta from pathlib import Path import bcrypt import click +import pytest import yaml from cryptography.fernet import Fernet from safir.datetime import current_datetime +from phalanx.exceptions import MalformedOnepasswordSecretError from phalanx.factory import Factory from phalanx.models.gafaelfawr import Token @@ -245,6 +247,56 @@ def test_sync_onepassword( assert vault == pull_secret +def test_sync_onepassword_errors( + factory: Factory, + mock_onepassword: MockOnepasswordClient, + mock_vault: MockVaultClient, +) -> None: + phalanx_test_path() + config_storage = factory.create_config_storage() + environment = config_storage.load_environment("minikube") + assert environment.onepassword + vault_title = environment.onepassword.vault_title + mock_onepassword.load_test_data(vault_title, "minikube") + mock_vault.load_test_data(environment.vault_path_prefix, "minikube") + + # Find a secret that's supposed to be encoded and change it to have an + # invalid base64 string. 
+ app_name = None + key = None + for application in environment.applications.values(): + for secret in application.secrets.values(): + if secret.onepassword.encoded: + app_name = application.name + key = secret.key + break + assert app_name + assert key + vault_id = mock_onepassword.get_vault_by_title(vault_title).id + item = mock_onepassword.get_item(app_name, vault_id) + for field in item.fields: + if field.label == key: + field.value = "invalid base64" + + # sync should throw an exception containing the application and key. + with pytest.raises(MalformedOnepasswordSecretError) as excinfo: + run_cli("secrets", "sync", "minikube") + assert app_name in str(excinfo.value) + assert key in str(excinfo.value) + + # Instead set the secret to a value that is valid base64, but of binary + # data that cannot be decoded to a string. + for field in item.fields: + if field.label == key: + field.value = b64encode("ää".encode("iso-8859-1")).decode() + + # sync should throw an exception containing the application and key. + with pytest.raises(MalformedOnepasswordSecretError) as excinfo: + run_cli("secrets", "sync", "minikube") + assert app_name in str(excinfo.value) + assert key in str(excinfo.value) + + def test_sync_regenerate( factory: Factory, mock_vault: MockVaultClient ) -> None: diff --git a/tests/support/onepassword.py b/tests/support/onepassword.py index 133ae3d957..7cc283df15 100644 --- a/tests/support/onepassword.py +++ b/tests/support/onepassword.py @@ -97,7 +97,9 @@ def get_item(self, title: str, vault_id: str) -> Item: Returns ------- Item - Corresponding item. + Corresponding item. This is the exact item that is stored in the + mock, so tests can mutate it to affect future calls to `get_item` + if they wish. 
Raises ------ From 285f5ed884dccf9a691cc6a9286fc18ed9d8a564 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 28 Sep 2023 15:56:41 -0700 Subject: [PATCH 022/588] Fix datalinker secrets Currently, datalinker unconditionally expects the full Butler secret on every environment, so put that directly in secrets.yaml. Eventually that will change, but we'll cross that bridge later. Change the names of the secrets to match what the application actually expects. --- .../datalinker/{secrets-idfdev.yaml => secrets.yaml} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename applications/datalinker/{secrets-idfdev.yaml => secrets.yaml} (87%) diff --git a/applications/datalinker/secrets-idfdev.yaml b/applications/datalinker/secrets.yaml similarity index 87% rename from applications/datalinker/secrets-idfdev.yaml rename to applications/datalinker/secrets.yaml index 57998942f8..3f830741d4 100644 --- a/applications/datalinker/secrets-idfdev.yaml +++ b/applications/datalinker/secrets.yaml @@ -1,18 +1,18 @@ -aws-credentials: +"aws-credentials.ini": description: >- Google Cloud Storage credentials to the Butler data store, formatted using AWS syntax for use with boto. copy: application: nublado key: "aws-credentials.ini" -google-credentials: +"butler-gcs-idf-creds.json": description: >- Google Cloud Storage credentials to the Butler data store in the native Google syntax, containing the private asymmetric key. copy: application: nublado key: "butler-gcs-idf-creds.json" -postgres-credentials: +"postgres-credentials.txt": description: >- PostgreSQL credentials in its pgpass format for the Butler database. copy: From 2dfc31555cbb1dbacd2afaa56f72d7ab95094ee0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 29 Sep 2023 15:24:12 -0700 Subject: [PATCH 023/588] Add secrets config for more applications Add configuration for the new secrets management system to obsloctap, plot-navigator, and production-tools. 
All of these only used the old Butler secret, so copy the secret from Nublado like we do for other uses of the Butler secret. As with datalinker, add a configuration option that determines whether to use the new per-application secret or the old shared Butler secret, and default it to false. Fix some helm-docs comments around the image tag. --- applications/obsloctap/README.md | 1 + applications/obsloctap/secrets.yaml | 20 +++++++++++++++++++ .../obsloctap/templates/deployment.yaml | 2 +- .../obsloctap/templates/vault-secrets.yaml | 7 +++++-- applications/obsloctap/values.yaml | 3 ++- applications/plot-navigator/README.md | 3 ++- applications/plot-navigator/secrets.yaml | 20 +++++++++++++++++++ .../plot-navigator/templates/deployment.yaml | 4 +--- .../templates/vault-secrets.yaml | 17 +++++----------- applications/plot-navigator/values.yaml | 6 +++++- applications/production-tools/README.md | 3 ++- applications/production-tools/secrets.yaml | 20 +++++++++++++++++++ .../templates/deployment.yaml | 2 +- .../templates/vault-secrets.yaml | 7 +++++-- applications/production-tools/values.yaml | 7 ++++++- tests/config_test.py | 3 --- 16 files changed, 96 insertions(+), 29 deletions(-) create mode 100644 applications/obsloctap/secrets.yaml create mode 100644 applications/plot-navigator/secrets.yaml create mode 100644 applications/production-tools/secrets.yaml diff --git a/applications/obsloctap/README.md b/applications/obsloctap/README.md index c6cc3d3a5a..7dabea9cf2 100644 --- a/applications/obsloctap/README.md +++ b/applications/obsloctap/README.md @@ -11,6 +11,7 @@ Publish observing schedule | Key | Type | Default | Description | |-----|------|---------|-------------| | config.persistentVolumeClaims | list | `[]` | PersistentVolumeClaims to create. 
| +| config.separateSecrets | bool | `false` | Whether to use the new secrets management scheme | | config.volume_mounts | list | `[]` | Mount points for additional volumes | | config.volumes | list | `[]` | Additional volumes to attach | | environment | object | `{}` | Environment variables (e.g. butler configuration/auth parms) for panel | diff --git a/applications/obsloctap/secrets.yaml b/applications/obsloctap/secrets.yaml new file mode 100644 index 0000000000..3f830741d4 --- /dev/null +++ b/applications/obsloctap/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/obsloctap/templates/deployment.yaml b/applications/obsloctap/templates/deployment.yaml index 8e0fe7ffc8..f878880fb3 100644 --- a/applications/obsloctap/templates/deployment.yaml +++ b/applications/obsloctap/templates/deployment.yaml @@ -18,7 +18,7 @@ spec: # butler-secrets-raw is the secrets we get from vault - name: "butler-secrets-raw" secret: - secretName: "butler-secret" + secretName: {{ include "obsloctap.fullname" . 
}} # butler-secrets are the copied and chmoded versions - name: "butler-secrets" emptyDir: {} diff --git a/applications/obsloctap/templates/vault-secrets.yaml b/applications/obsloctap/templates/vault-secrets.yaml index 2a0c967229..2229f2438a 100644 --- a/applications/obsloctap/templates/vault-secrets.yaml +++ b/applications/obsloctap/templates/vault-secrets.yaml @@ -1,10 +1,13 @@ ---- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: butler-secret + name: {{ template "obsloctap.fullname" . }} labels: {{- include "obsloctap.labels" . | nindent 4 }} spec: +{{- if .Values.config.separateSecrets }} + path: "{{ .Values.global.vaultSecretsPath }}/obsloctap" +{{- else }} path: "{{ .Values.global.vaultSecretsPath }}/butler-secret" +{{- end }} type: Opaque diff --git a/applications/obsloctap/values.yaml b/applications/obsloctap/values.yaml index bc23d3ab77..7b38506bcf 100644 --- a/applications/obsloctap/values.yaml +++ b/applications/obsloctap/values.yaml @@ -16,7 +16,6 @@ ingress: # -- Additional annotations to add to the ingress annotations: {} - config: # -- Additional volumes to attach volumes: [] @@ -27,6 +26,8 @@ config: # -- PersistentVolumeClaims to create. persistentVolumeClaims: [] + # -- Whether to use the new secrets management scheme + separateSecrets: false # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. diff --git a/applications/plot-navigator/README.md b/applications/plot-navigator/README.md index 6ca85a93d7..87a645d585 100644 --- a/applications/plot-navigator/README.md +++ b/applications/plot-navigator/README.md @@ -11,6 +11,7 @@ Panel-based plot viewer | Key | Type | Default | Description | |-----|------|---------|-------------| | config.persistentVolumeClaims | list | `[]` | PersistentVolumeClaims to create. 
| +| config.separateSecrets | bool | `false` | Whether to use the new secrets management scheme | | config.volume_mounts | list | `[]` | Mount points for additional volumes | | config.volumes | list | `[]` | Additional volumes to attach | | environment | object | `{}` | Environment variables (e.g. butler configuration/auth parms) for panel | @@ -18,5 +19,5 @@ Panel-based plot viewer | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.repository | string | `"ghcr.io/lsst-dm/pipetask-plot-navigator"` | plot-navigator image to use | -| image.tag | string | `""` | | +| image.tag | string | The appVersion of the chart | Tag of plot-navigator image to use | | ingress.annotations | object | `{}` | Additional annotations to add to the ingress | diff --git a/applications/plot-navigator/secrets.yaml b/applications/plot-navigator/secrets.yaml new file mode 100644 index 0000000000..3f830741d4 --- /dev/null +++ b/applications/plot-navigator/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. 
+ copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/plot-navigator/templates/deployment.yaml b/applications/plot-navigator/templates/deployment.yaml index e2c8cf52f2..fbd8bad61c 100644 --- a/applications/plot-navigator/templates/deployment.yaml +++ b/applications/plot-navigator/templates/deployment.yaml @@ -14,13 +14,11 @@ spec: labels: {{- include "plot-navigator.selectorLabels" . | nindent 8 }} spec: - imagePullSecrets: - - name: "pull-secret" volumes: # butler-secrets-raw is the secrets we get from vault - name: "butler-secrets-raw" secret: - secretName: "butler-secret" + secretName: {{ include "plot-navigator.fullname" . }} # butler-secrets are the copied and chmoded versions - name: "butler-secrets" emptyDir: {} diff --git a/applications/plot-navigator/templates/vault-secrets.yaml b/applications/plot-navigator/templates/vault-secrets.yaml index c189eb29c7..43310ae6b9 100644 --- a/applications/plot-navigator/templates/vault-secrets.yaml +++ b/applications/plot-navigator/templates/vault-secrets.yaml @@ -1,20 +1,13 @@ ---- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: butler-secret + name: {{ template "plot-navigator.fullname" . }} labels: {{- include "plot-navigator.labels" . | nindent 4 }} spec: +{{- if .Values.config.separateSecrets }} + path: "{{ .Values.global.vaultSecretsPath }}/plot-navigator" +{{- else }} path: "{{ .Values.global.vaultSecretsPath }}/butler-secret" +{{- end }} type: Opaque ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: pull-secret - labels: - {{- include "plot-navigator.labels" . 
| nindent 4 }} -spec: - path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" - type: kubernetes.io/dockerconfigjson diff --git a/applications/plot-navigator/values.yaml b/applications/plot-navigator/values.yaml index 29ed4cb802..3a808b27c6 100644 --- a/applications/plot-navigator/values.yaml +++ b/applications/plot-navigator/values.yaml @@ -1,6 +1,9 @@ image: # -- plot-navigator image to use repository: ghcr.io/lsst-dm/pipetask-plot-navigator + + # -- Tag of plot-navigator image to use + # @default -- The appVersion of the chart tag: "" # -- Environment variables (e.g. butler configuration/auth parms) for panel @@ -10,7 +13,6 @@ ingress: # -- Additional annotations to add to the ingress annotations: {} - config: # -- Additional volumes to attach volumes: [] @@ -21,6 +23,8 @@ config: # -- PersistentVolumeClaims to create. persistentVolumeClaims: [] + # -- Whether to use the new secrets management scheme + separateSecrets: false # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. diff --git a/applications/production-tools/README.md b/applications/production-tools/README.md index cb7fa475cb..f2d753296d 100644 --- a/applications/production-tools/README.md +++ b/applications/production-tools/README.md @@ -11,6 +11,7 @@ A collection of utility pages for monitoring data processing. | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the production-tools deployment pod | +| config.separateSecrets | bool | `false` | Whether to use the new secrets management scheme | | environment | object | `{}` | | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | @@ -18,7 +19,7 @@ A collection of utility pages for monitoring data processing. 
| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the production-tools image | | image.repository | string | `"lsstdm/production_tools"` | Image to use in the production-tools deployment | -| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| image.tag | string | The appVersion of the chart | Tag of production-tools image to use | | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selection rules for the production-tools deployment pod | diff --git a/applications/production-tools/secrets.yaml b/applications/production-tools/secrets.yaml new file mode 100644 index 0000000000..3f830741d4 --- /dev/null +++ b/applications/production-tools/secrets.yaml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. 
+ copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/production-tools/templates/deployment.yaml b/applications/production-tools/templates/deployment.yaml index af46c2995e..932771af6e 100644 --- a/applications/production-tools/templates/deployment.yaml +++ b/applications/production-tools/templates/deployment.yaml @@ -29,7 +29,7 @@ spec: # butler-secrets-raw is the secrets we get from vault - name: "butler-secrets-raw" secret: - secretName: "butler-secret" + secretName: {{ include "production-tools.fullname" . }} # butler-secrets are the copied and chmoded versions - name: "butler-secrets" emptyDir: {} diff --git a/applications/production-tools/templates/vault-secrets.yaml b/applications/production-tools/templates/vault-secrets.yaml index 0b90cc3b7a..e93329880b 100644 --- a/applications/production-tools/templates/vault-secrets.yaml +++ b/applications/production-tools/templates/vault-secrets.yaml @@ -1,12 +1,15 @@ ---- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: butler-secret + name: {{ template "production-tools.fullname" . }} labels: {{- include "production-tools.labels" . | nindent 4 }} spec: +{{- if .Values.config.separateSecrets }} + path: "{{ .Values.global.vaultSecretsPath }}/production-tools" +{{- else }} path: "{{ .Values.global.vaultSecretsPath }}/butler-secret" +{{- end }} type: Opaque --- apiVersion: ricoberger.de/v1alpha1 diff --git a/applications/production-tools/values.yaml b/applications/production-tools/values.yaml index d0196e8401..a405a3726f 100644 --- a/applications/production-tools/values.yaml +++ b/applications/production-tools/values.yaml @@ -12,7 +12,8 @@ image: # -- Pull policy for the production-tools image pullPolicy: IfNotPresent - # -- Overrides the image tag whose default is the chart appVersion. 
+ # -- Tag of production-tools image to use + # @default -- The appVersion of the chart tag: "" # -- Override the base name for resources @@ -31,6 +32,10 @@ ingress: # -- Additional annotations for the ingress rule annotations: {} +config: + # -- Whether to use the new secrets management scheme + separateSecrets: false + # -- Resource limits and requests for the production-tools deployment pod resources: {} diff --git a/tests/config_test.py b/tests/config_test.py index 6b662afbef..d17f3c6d74 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -10,10 +10,7 @@ "giftless", "linters", "monitoring", - "obsloctap", "next-visit-fan-out", - "plot-navigator", - "production-tools", ) """Temporary whitelist of applications that haven't added secrets.yaml.""" From 135cbf395bed89a3b5940d32d8b92db354698d33 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 29 Sep 2023 15:27:33 -0700 Subject: [PATCH 024/588] Document applications with secrets migration flags In the documentation of migrating to the new secrets management system, list the applications that require configuration settings to tell them to use the new secrets layout. --- docs/admin/migrating-secrets.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/admin/migrating-secrets.rst b/docs/admin/migrating-secrets.rst index 3305d479b6..fdf6f93d06 100644 --- a/docs/admin/migrating-secrets.rst +++ b/docs/admin/migrating-secrets.rst @@ -194,6 +194,16 @@ Switch to the new secrets tree If you are using a static secrets file, add the ``--secrets`` flag pointing to that file. This will fix any secrets that are missing or incorrect in Vault. +#. Some Phalanx applications need to know whether the old or new secrets layout is in use. + On your working branch, add the necessary settings for those applications to their :file:`values-{environment}.yaml` files for your environment. 
+ Applications to review: + + - :px-app:`datalinker` (``config.separateSecrets``) + - :px-app:`nublado` (``secrets.templateSecrets``) + - :px-app:`obsloctap` (``config.separateSecrets``) + - :px-app:`plot-navigator` (``config.separateSecrets``) + - :px-app:`production-tools` (``config.separateSecrets``) + #. You're now ready to test the new secrets tree. You can do this on a branch that contains the changes you made above. From c08ec700a78999db72a71a1886a0b0a53281044c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 29 Sep 2023 16:12:34 -0700 Subject: [PATCH 025/588] Update documentation for where to find tokens Rename the 1Password items used by SQuaRE to use Phalanx instead of RSP in the titles, since we include roundtable as well. Document where to find the admin token now that it's been moved back into the SQuaRE vault. --- docs/admin/audit-secrets.rst | 1 + docs/admin/migrating-secrets.rst | 4 ++-- docs/admin/secrets-setup.rst | 1 + docs/admin/sync-secrets.rst | 4 +++- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/docs/admin/audit-secrets.rst b/docs/admin/audit-secrets.rst index 7d61c4d2d7..1e73b5c8c1 100644 --- a/docs/admin/audit-secrets.rst +++ b/docs/admin/audit-secrets.rst @@ -9,6 +9,7 @@ To check that all of the necessary secrets for an environment named `` The ``VAULT_TOKEN`` environment variable must be set to the Vault write token for this environment (or a read token; this command will not make any changes). +For SQuaRE-managed environments, you can get the write token from the ``Phalanx Vault write tokens`` item in the SQuaRE 1Password vault. The output of the command will be a report of any inconsistencies or problems found in the Vault secrets for this environment. No output indicates no problems. 
diff --git a/docs/admin/migrating-secrets.rst b/docs/admin/migrating-secrets.rst index 3305d479b6..05929f3dbf 100644 --- a/docs/admin/migrating-secrets.rst +++ b/docs/admin/migrating-secrets.rst @@ -52,7 +52,7 @@ The new secret management system uses Vault AppRoles instead, which are the reco If you are using some other Vault server with its own path conventions, you can skip this step, although it is easier to do the migration if you can set up the new secrets in a new Vault path without having to change the old Vault path. #. Set the ``VAULT_TOKEN`` environment variable to a token with access to create new AppRoles and tokens and to list token accessors and secret IDs. - If you are using the SQuaRE Vault server, use the admin token. + If you are using the SQuaRE Vault server, use the admin token from the ``Phalanx Vault admin credentials`` 1Password item in the SQuaRE 1Password vault. This environment variable will be used for multiple following commands. You will be told when you can clear it again. @@ -85,7 +85,7 @@ The new secret management system uses Vault AppRoles instead, which are the reco The new token will be printed to standard output along with some metadata about it. - For SQuaRE-managed environments, save that token in the ``SQuaRE`` 1Password vault (**not** the vault for the RSP environment) in the item named ``RSP Vault write tokens``. + For SQuaRE-managed environments, save that token in the ``SQuaRE`` 1Password vault (**not** the vault for the RSP environment) in the item named ``Phalanx Vault write tokens``. Add a key for the short environment identifier and set the value to the newly-created write token. Don't forget to mark it as a password using the icon on the right. Then, add a key under the :guilabel:`Accessors` heading for the environment and set the value to the token accessor. 
diff --git a/docs/admin/secrets-setup.rst b/docs/admin/secrets-setup.rst index 09f65cccd4..f189c8e719 100644 --- a/docs/admin/secrets-setup.rst +++ b/docs/admin/secrets-setup.rst @@ -81,6 +81,7 @@ This normally requires a Vault admin or provisioner token or some equivalent. The output includes the new Vault token, which you should save somewhere secure where you store other secrets. (The running Phalanx environment does not need and should not have access to this token.) You will later set the environment variable ``VAULT_TOKEN`` to this token when running other :command:`phalanx` commands. + For SQuaRE-managed environments, always update the ``Phalanx Vault write tokens`` 1Password item in the SQuaRE 1Password vault after running this command. :samp:`phalanx vault audit {environment}` Check the authentication credentials created by the previous two commands in the given environment for any misconfiguration. diff --git a/docs/admin/sync-secrets.rst b/docs/admin/sync-secrets.rst index bd21129f26..e22badb784 100644 --- a/docs/admin/sync-secrets.rst +++ b/docs/admin/sync-secrets.rst @@ -12,8 +12,10 @@ To populate Vault with all of the necessary secrets for an environment named ``< phalanx secrets sync The ``VAULT_TOKEN`` environment variable must be set to the Vault write token for this environment. +For SQuaRE-managed environments, you can get the write token from the ``Phalanx Vault write tokens`` item in the SQuaRE 1Password vault. + Add the ``--secrets`` command-line option or set ``OP_CONNECT_TOKEN`` if needed for your choice of a :ref:`static secrets source `. -For SQuaRE-managed deployments, the 1Password token for ``OP_CONNECT_TOKEN`` comes from the ``RSP 1Password tokens`` item in the SQuaRE 1Password vault. +For SQuaRE-managed deployments, the 1Password token for ``OP_CONNECT_TOKEN`` comes from the ``Phalanx 1Password tokens`` item in the SQuaRE 1Password vault. This must be done before installing a Phalanx environment for the first time. 
It can then be run again whenever the secrets for that environment change. From 2b4fa0fc2ed97447d4fb6e77ee8d2660e0e5f34c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 29 Sep 2023 15:36:23 -0700 Subject: [PATCH 026/588] Specify Kubernetes memory limits in powers of two Kubernetes interprets 1G as 10^9, not 2^30, but Java interprets its resource limits as GiB. Use GiB uniformly for all Kubernetes resource limits and requests. --- applications/sherlock/values-base.yaml | 4 ++-- applications/sherlock/values-idfdev.yaml | 4 ++-- applications/sherlock/values-idfint.yaml | 4 ++-- applications/sherlock/values-idfprod.yaml | 4 ++-- applications/sherlock/values-roe.yaml | 4 ++-- applications/sherlock/values-summit.yaml | 4 ++-- applications/sherlock/values-tucson-teststand.yaml | 4 ++-- applications/tap/values-minikube.yaml | 12 ++++++------ charts/cadc-tap/README.md | 4 ++-- charts/cadc-tap/values.yaml | 8 ++++---- 10 files changed, 26 insertions(+), 26 deletions(-) diff --git a/applications/sherlock/values-base.yaml b/applications/sherlock/values-base.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-base.yaml +++ b/applications/sherlock/values-base.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/sherlock/values-idfdev.yaml b/applications/sherlock/values-idfdev.yaml index 09d06b446e..6e477f644e 100644 --- a/applications/sherlock/values-idfdev.yaml +++ b/applications/sherlock/values-idfdev.yaml @@ -1,9 +1,9 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" publishUrl: "https://status.lsst.codes/api/data-dev" diff --git a/applications/sherlock/values-idfint.yaml b/applications/sherlock/values-idfint.yaml index f26f30166c..eaf463d1ca 100644 --- a/applications/sherlock/values-idfint.yaml +++ b/applications/sherlock/values-idfint.yaml @@ -1,8 +1,8 @@ resources: requests: cpu: 
2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" publishUrl: "https://status.lsst.codes/api/data-int" diff --git a/applications/sherlock/values-idfprod.yaml b/applications/sherlock/values-idfprod.yaml index 6dc7b40cad..08f8030234 100644 --- a/applications/sherlock/values-idfprod.yaml +++ b/applications/sherlock/values-idfprod.yaml @@ -1,8 +1,8 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" publishUrl: "https://status.lsst.codes/api/data" diff --git a/applications/sherlock/values-roe.yaml b/applications/sherlock/values-roe.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-roe.yaml +++ b/applications/sherlock/values-roe.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/sherlock/values-summit.yaml b/applications/sherlock/values-summit.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-summit.yaml +++ b/applications/sherlock/values-summit.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/sherlock/values-tucson-teststand.yaml b/applications/sherlock/values-tucson-teststand.yaml index de35ed5941..29b173fed6 100644 --- a/applications/sherlock/values-tucson-teststand.yaml +++ b/applications/sherlock/values-tucson-teststand.yaml @@ -1,7 +1,7 @@ resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 4.0 - memory: "4G" + memory: "4Gi" diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml index 19d87e1b52..dbe4b3ae75 100644 --- a/applications/tap/values-minikube.yaml +++ b/applications/tap/values-minikube.yaml @@ -4,26 +4,26 @@ cadc-tap: repository: "lsstsqre/tap-schema-idfprod-tap" resources: limits: - memory: 500M + memory: 500Mi mockdb: enabled: true resources: 
limits: - memory: 500M + memory: 500Mi config: - jvmMaxHeapSize: 2G + jvmMaxHeapSize: 1.9G resources: requests: cpu: 0.01 - memory: 500M + memory: 500Mi limits: - memory: 2G + memory: 2Gi uws: resources: requests: cpu: 0.01 - memory: 500M + memory: 500Mi diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index e0cc953908..8d7e4cf370 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -52,7 +52,7 @@ IVOA TAP service | nodeSelector | object | `{}` | Node selector rules for the TAP pod | | podAnnotations | object | `{}` | Annotations for the TAP pod | | replicaCount | int | `1` | Number of pods to start | -| resources | object | `{"limits":{"cpu":8,"memory":"32G"},"requests":{"cpu":2,"memory":"2G"}}` | Resource limits and requests for the TAP pod | +| resources | object | `{"limits":{"cpu":8,"memory":"32Gi"},"requests":{"cpu":2,"memory":"2Gi"}}` | Resource limits and requests for the TAP pod | | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| @@ -68,6 +68,6 @@ IVOA TAP service | uws.image.tag | string | Version of QServ TAP image | Tag of UWS database image to use | | uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | -| uws.resources | object | `{"limits":{"cpu":2,"memory":"4G"},"requests":{"cpu":0.25,"memory":"1G"}}` | Resource limits and requests for the UWS database pod | +| uws.resources | object | `{"limits":{"cpu":2,"memory":"4Gi"},"requests":{"cpu":0.25,"memory":"1Gi"}}` | Resource limits and requests for the UWS database pod | | uws.tolerations | list | `[]` | Tolerations for the UWS database pod | | vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index f843ccb299..7a0027c5b4 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -29,10 +29,10 @@ ingress: resources: requests: cpu: 2.0 - memory: "2G" + memory: "2Gi" limits: cpu: 8.0 - memory: "32G" + memory: "32Gi" # -- Annotations for the TAP pod podAnnotations: {} @@ -198,10 +198,10 @@ uws: resources: requests: cpu: 0.25 - memory: "1G" + memory: "1Gi" limits: cpu: 2.0 - memory: "4G" + memory: "4Gi" # -- Annotations for the UWS databse pod podAnnotations: {} From 10bd2a6d24ac1710f4e04d3618074509647ff780 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 29 Sep 2023 16:20:21 -0700 Subject: [PATCH 027/588] Remove TAP from minikube The TAP server is constantly crashing in the minikube environment, presumably due to lack of resources. We can try again if we move to infrastructure or in-cluster databases for the UWS, mock, and schema databases and have a reduced-memory-profile version of the TAP server itself. 
--- applications/tap/values-minikube.yaml | 29 --------------------------- environments/values-minikube.yaml | 1 - 2 files changed, 30 deletions(-) delete mode 100644 applications/tap/values-minikube.yaml diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml deleted file mode 100644 index dbe4b3ae75..0000000000 --- a/applications/tap/values-minikube.yaml +++ /dev/null @@ -1,29 +0,0 @@ -cadc-tap: - tapSchema: - image: - repository: "lsstsqre/tap-schema-idfprod-tap" - resources: - limits: - memory: 500Mi - - mockdb: - enabled: true - resources: - limits: - memory: 500Mi - - config: - jvmMaxHeapSize: 1.9G - - resources: - requests: - cpu: 0.01 - memory: 500Mi - limits: - memory: 2Gi - - uws: - resources: - requests: - cpu: 0.01 - memory: 500Mi diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index 4c91e50c03..2e1791537d 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -16,4 +16,3 @@ applications: mobu: true postgres: true squareone: true - tap: true From cee4c024ef8db5a4d62ae4f54b97d2a69b6dbf2f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 27 Sep 2023 10:41:37 -0700 Subject: [PATCH 028/588] Convert minikube to new secrets management Convert the minikube environment to the new secrets management approach. Make the minimum changes to the installer to allow it to work with a Vault AppRole instead of a read token, which makes it usable with the new secrets management system. Delete all of the old secret management scripts. Although we still have environments using the old secrets management approach, we're unlikely to use those scripts again to manage those environments, and any environment we touch from this point forward should be converted to the new system. In case of emergencies, we can poke changes directly into Vault for the time being. 
--- .github/workflows/ci.yaml | 11 +- .../values-minikube.yaml | 14 + docs/admin/installation.rst | 18 +- environments/values-minikube.yaml | 2 +- installer/generate_secrets.py | 564 ------------------ installer/install.sh | 38 +- installer/read_secrets.sh | 13 - installer/requirements.txt | 5 - installer/update_all_secrets.sh | 11 - installer/update_secrets.sh | 24 - installer/vault_key.py | 42 -- installer/write_secrets.sh | 13 - 12 files changed, 54 insertions(+), 701 deletions(-) delete mode 100755 installer/generate_secrets.py delete mode 100755 installer/read_secrets.sh delete mode 100644 installer/requirements.txt delete mode 100755 installer/update_all_secrets.sh delete mode 100755 installer/update_secrets.sh delete mode 100755 installer/vault_key.py delete mode 100755 installer/write_secrets.sh diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index eb2acff8d2..d086d8485a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -86,6 +86,14 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.11" + + - name: Install test dependencies + run: make init + - name: Filter paths uses: dorny/paths-filter@v2 id: filter @@ -124,14 +132,13 @@ jobs: sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.10/argocd-linux-amd64 sudo chmod +x /usr/local/bin/argocd sudo apt-get install socat - sudo pip install -r installer/requirements.txt - name: Run installer timeout-minutes: 15 if: steps.filter.outputs.minikube == 'true' run: | cd installer - ./install.sh minikube ${{ secrets.MINIKUBE_VAULT_KEY }} + ./install.sh minikube "${{ secrets.MINIKUBE_VAULT_ROLE_ID }}" "${{ secrets.MINIKUBE_VAULT_SECRET_ID }}" - name: Get final list of resources if: steps.filter.outputs.minikube == 'true' diff --git a/applications/vault-secrets-operator/values-minikube.yaml b/applications/vault-secrets-operator/values-minikube.yaml 
index e69de29bb2..1e40e6f933 100644 --- a/applications/vault-secrets-operator/values-minikube.yaml +++ b/applications/vault-secrets-operator/values-minikube.yaml @@ -0,0 +1,14 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_SECRET_ID + vault: + authMethod: approle diff --git a/docs/admin/installation.rst b/docs/admin/installation.rst index 9214af2e0a..d9d14d7f76 100644 --- a/docs/admin/installation.rst +++ b/docs/admin/installation.rst @@ -58,19 +58,25 @@ Installing Phalanx Once you have defined a Phalanx environment, follow these steps to install it. These can be run repeatedly to reinstall Phalanx over an existing deployment. -.. warning:: +#. Create a Vault AppRole that will be used by Vault Secrets Operator. - The installer has not been updated to work with the new secrets management system yet, and the way it initializes Vault Secrets Operator is incorrect for the new system and will not work. - This is currently being worked on, but in the meantime you will have to make changes to the installation script to use :command:`phalanx vault create-read-approle --as-secret vault-credentials` and skip the attempt to create a Vault read token secret obtained from 1Password. - Hopefully this will be fixed shortly. + .. prompt:: bash -.. rst-class:: open + phalanx vault create-read-approle -#. Create a virtual environment with the tools you will need from the installer's `requirements.txt `__. + Be aware that this will invalidate any existing AppRole for that environment. #. Run the installer script at `installer/install.sh `__. + + .. prompt:: bash + + installer/install.sh + + ```` and ```` are the Role ID and Secret ID of the Vault AppRole created in the previous step. + Debug any problems. 
The most common source of problems are errors or missing configuration in the :file:`values-{environment}.yaml` files you created for each application. + You can safely run the installer repeatedly as you debug and fix issues. #. If the installation is using a dynamically-assigned IP address, while the installer is running, wait until the ingress-nginx-controller service comes up and has an external IP address. Then, set the A record for your endpoint to that address (or set an A record with that IP address for the ingress and a CNAME from the endpoint to the A record). diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index 4c91e50c03..763a2b055a 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -4,7 +4,7 @@ onepassword: connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP minikube.lsst.codes" vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/k8s_operator/minikube.lsst.codes +vaultPathPrefix: secret/phalanx/minikube # The primary constraint on enabling applications is the low available memory # of a GitHub Actions runner, since minikube is used for smoke testing of new diff --git a/installer/generate_secrets.py b/installer/generate_secrets.py deleted file mode 100755 index df5b407e17..0000000000 --- a/installer/generate_secrets.py +++ /dev/null @@ -1,564 +0,0 @@ -#!/usr/bin/env python3 -import argparse -import base64 -import json -import logging -import os -import secrets -from collections import defaultdict -from datetime import UTC, datetime -from pathlib import Path - -import bcrypt -from cryptography.fernet import Fernet -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization -from cryptography.hazmat.primitives.asymmetric import rsa -from onepasswordconnectsdk.client import new_client_from_environment - - -class SecretGenerator: - """A basic secret generator that manages a secrets directory 
containing - per-component secret export files from from Vault, as generated by - read_secrets.sh. - - Parameters - ---------- - environment : str - The name of the environment (the environment's domain name). - regenerate : bool - If `True`, any secrets that can be generated by the SecretGenerator - will be regenerated. - """ - - def __init__(self, environment, regenerate) -> None: - self.secrets = defaultdict(dict) - self.environment = environment - self.regenerate = regenerate - - def generate(self): - """Generate secrets for each component based on the `secrets` - attribute, and regenerating secrets if applicable when the - `regenerate` attribute is `True`. - """ - self._pull_secret() - self._rsp_alerts() - self._butler_secret() - self._argo_sso_secret() - self._postgres() - self._tap() - self._nublado() - self._nublado2() - self._mobu() - self._gafaelfawr() - self._argocd() - self._portal() - self._vo_cutouts() - self._telegraf() - self._sherlock() - - self.input_field("cert-manager", "enabled", "Use cert-manager? (y/n):") - use_cert_manager = self.secrets["cert-manager"]["enabled"] - if use_cert_manager == "y": - self._cert_manager() - elif use_cert_manager == "n": - self._ingress_nginx() - else: - raise Exception( - f"Invalid cert manager enabled value {use_cert_manager}" - ) - - def load(self): - """Load the secrets files for each RSP component from the - ``secrets`` directory. - - This method parses the JSON files and persists them in the ``secrets`` - attribute, keyed by the component name. - """ - if Path("secrets").is_dir(): - for f in Path("secrets").iterdir(): - print(f"Loading {f}") - component = os.path.basename(f) - self.secrets[component] = json.loads(f.read_text()) - - def save(self): - """For each component, save a secret JSON file into the secrets - directory. 
- """ - os.makedirs("secrets", exist_ok=True) - - for k, v in self.secrets.items(): - with open(f"secrets/{k}", "w") as f: - f.write(json.dumps(v)) - - def input_field(self, component, name, description): - default = self.secrets[component].get(name, "") - prompt_string = ( - f"[{component} {name}] ({description}): [current: {default}] " - ) - input_string = input(prompt_string) - - if input_string: - self.secrets[component][name] = input_string - - def input_file(self, component, name, description): - current = self.secrets.get(component, {}).get(name, "") - print(f"[{component} {name}] ({description})") - print(f"Current contents:\n{current}") - prompt_string = "New filename with contents (empty to not change): " - fname = input(prompt_string) - - if fname: - with open(fname) as f: - self.secrets[component][name] = f.read() - - @staticmethod - def _generate_gafaelfawr_token() -> str: - key = base64.urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") - secret = base64.urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") - return f"gt-{key}.{secret}" - - def _get_current(self, component, name): - if not self._exists(component, name): - return None - - return self.secrets[component][name] - - def _set(self, component, name, new_value): - self.secrets[component][name] = new_value - - def _exists(self, component, name): - return component in self.secrets and name in self.secrets[component] - - def _set_generated(self, component, name, new_value): - if not self._exists(component, name) or self.regenerate: - self._set(component, name, new_value) - - def _tap(self): - self.input_file( - "tap", - "google_creds.json", - "file containing google service account credentials", - ) - - def _postgres(self): - self._set_generated( - "postgres", "exposurelog_password", secrets.token_hex(32) - ) - self._set_generated( - "postgres", "gafaelfawr_password", secrets.token_hex(32) - ) - self._set_generated( - "postgres", "jupyterhub_password", secrets.token_hex(32) - ) - 
self._set_generated("postgres", "root_password", secrets.token_hex(64)) - self._set_generated( - "postgres", "vo_cutouts_password", secrets.token_hex(32) - ) - self._set_generated( - "postgres", "narrativelog_password", secrets.token_hex(32) - ) - - def _nublado(self): - self._set_generated("nublado", "crypto_key", secrets.token_hex(32)) - self._set_generated("nublado", "proxy_token", secrets.token_hex(32)) - self._set_generated( - "nublado", "cryptkeeper_key", secrets.token_hex(32) - ) - - # Pluck the password out of the postgres portion. - db_password = self.secrets["postgres"]["jupyterhub_password"] - self.secrets["nublado"]["hub_db_password"] = db_password - - slack_webhook = self._get_current("rsp-alerts", "slack-webhook") - if slack_webhook: - self._set("nublado", "slack_webhook", slack_webhook) - - # Grab lab secrets from the Butler secret. - butler = self.secrets["butler-secret"].copy() - self.secrets["nublado-lab-secret"] = butler - - def _nublado2(self): - crypto_key = secrets.token_hex(32) - self._set_generated("nublado2", "crypto_key", crypto_key) - self._set_generated("nublado2", "proxy_token", secrets.token_hex(32)) - self._set_generated( - "nublado2", "cryptkeeper_key", secrets.token_hex(32) - ) - - # Pluck the password out of the postgres portion. - self.secrets["nublado2"]["hub_db_password"] = self.secrets["postgres"][ - "jupyterhub_password" - ] - - def _mobu(self): - self.input_field( - "mobu", - "ALERT_HOOK", - "Slack webhook for reporting mobu alerts. 
" - "Or use None for no alerting.", - ) - - def _cert_manager(self): - self.input_field( - "cert-manager", - "aws-secret-access-key", - "AWS secret access key for zone for DNS cert solver.", - ) - - def _gafaelfawr(self): - key = rsa.generate_private_key( - backend=default_backend(), public_exponent=65537, key_size=2048 - ) - - key_bytes = key.private_bytes( - serialization.Encoding.PEM, - serialization.PrivateFormat.PKCS8, - serialization.NoEncryption(), - ) - - self._set_generated( - "gafaelfawr", "bootstrap-token", self._generate_gafaelfawr_token() - ) - self._set_generated( - "gafaelfawr", "redis-password", os.urandom(32).hex() - ) - self._set_generated( - "gafaelfawr", "session-secret", Fernet.generate_key().decode() - ) - self._set_generated("gafaelfawr", "signing-key", key_bytes.decode()) - - self.input_field("gafaelfawr", "cloudsql", "Use CloudSQL? (y/n):") - use_cloudsql = self.secrets["gafaelfawr"]["cloudsql"] - if use_cloudsql == "y": - self.input_field( - "gafaelfawr", "database-password", "Database password" - ) - elif use_cloudsql == "n": - # Pluck the password out of the postgres portion. - db_pass = self.secrets["postgres"]["gafaelfawr_password"] - self._set("gafaelfawr", "database-password", db_pass) - else: - raise Exception( - f"Invalid gafaelfawr cloudsql value {use_cloudsql}" - ) - - self.input_field("gafaelfawr", "ldap", "Use LDAP? 
(y/n):") - use_ldap = self.secrets["gafaelfawr"]["ldap"] - if use_ldap == "y": - self.input_field("gafaelfawr", "ldap-password", "LDAP password") - - self.input_field("gafaelfawr", "auth_type", "Use cilogon or github?") - auth_type = self.secrets["gafaelfawr"]["auth_type"] - if auth_type == "cilogon": - self.input_field( - "gafaelfawr", "cilogon-client-secret", "CILogon client secret" - ) - use_ldap = self.secrets["gafaelfawr"]["ldap"] - if use_ldap == "y": - self.input_field( - "gafaelfawr", "ldap-secret", "LDAP simple bind password" - ) - elif auth_type == "github": - self.input_field( - "gafaelfawr", "github-client-secret", "GitHub client secret" - ) - elif auth_type == "oidc": - self.input_field( - "gafaelfawr", - "oidc-client-secret", - "OpenID Connect client secret", - ) - if use_ldap == "y": - self.input_field( - "gafaelfawr", "ldap-secret", "LDAP simple bind password" - ) - else: - raise Exception(f"Invalid auth provider {auth_type}") - - slack_webhook = self._get_current("rsp-alerts", "slack-webhook") - if slack_webhook: - self._set("gafaelfawr", "slack-webhook", slack_webhook) - - def _pull_secret(self): - self.input_file( - "pull-secret", - ".dockerconfigjson", - ".docker/config.json to pull images", - ) - - def _butler_secret(self): - self.input_file( - "butler-secret", - "aws-credentials.ini", - "AWS credentials for butler", - ) - self.input_file( - "butler-secret", - "butler-gcs-idf-creds.json", - "Google credentials for butler", - ) - self.input_file( - "butler-secret", - "postgres-credentials.txt", - "Postgres credentials for butler", - ) - - def _argo_sso_secret(self): - # We aren't currently using this, but might as well generate it - # against the day we do. 
- self._set_generated( - "argo-sso-secret", "client-id", "argo-workflows-sso" - ) - self._set_generated( - "argo-sso-secret", "client-secret", secrets.token_hex(16) - ) - - def _ingress_nginx(self): - self.input_file("ingress-nginx", "tls.key", "Certificate private key") - self.input_file("ingress-nginx", "tls.crt", "Certificate chain") - - def _argocd(self): - current_pw = self._get_current( - "installer", "argocd.admin.plaintext_password" - ) - - self.input_field( - "installer", - "argocd.admin.plaintext_password", - "Admin password for ArgoCD?", - ) - new_pw = self.secrets["installer"]["argocd.admin.plaintext_password"] - - if current_pw != new_pw or self.regenerate: - h = bcrypt.hashpw( - new_pw.encode("ascii"), bcrypt.gensalt(rounds=15) - ).decode("ascii") - now_time = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - - self._set("argocd", "admin.password", h) - self._set("argocd", "admin.passwordMtime", now_time) - - self.input_field( - "argocd", - "dex.clientSecret", - "OAuth client secret for ArgoCD (either GitHub or Google)?", - ) - - self._set_generated( - "argocd", "server.secretkey", secrets.token_hex(16) - ) - - def _telegraf(self): - self.input_field( - "telegraf", - "influx-token", - "Token for communicating with monitoring InfluxDB2 instance", - ) - self._set("telegraf", "org-id", "square") - - def _portal(self): - pw = secrets.token_hex(32) - self._set_generated("portal", "ADMIN_PASSWORD", pw) - - def _vo_cutouts(self): - self._set_generated( - "vo-cutouts", "redis-password", os.urandom(32).hex() - ) - - self.input_field("vo-cutouts", "cloudsql", "Use CloudSQL? (y/n):") - use_cloudsql = self.secrets["vo-cutouts"]["cloudsql"] - if use_cloudsql == "y": - self.input_field( - "vo-cutouts", "database-password", "Database password" - ) - elif use_cloudsql == "n": - # Pluck the password out of the postgres portion. 
- db_pass = self.secrets["postgres"]["vo_cutouts_password"] - self._set("vo-cutouts", "database-password", db_pass) - else: - raise Exception( - f"Invalid vo-cutouts cloudsql value {use_cloudsql}" - ) - - aws = self.secrets["butler-secret"]["aws-credentials.ini"] - self._set("vo-cutouts", "aws-credentials", aws) - google = self.secrets["butler-secret"]["butler-gcs-idf-creds.json"] - self._set("vo-cutouts", "google-credentials", google) - postgres = self.secrets["butler-secret"]["postgres-credentials.txt"] - self._set("vo-cutouts", "postgres-credentials", postgres) - - def _sherlock(self): - """This secret is for sherlock to push status to status.lsst.codes.""" - publish_key = secrets.token_hex(32) - self._set_generated("sherlock", "publish_key", publish_key) - - def _rsp_alerts(self): - """Shared secrets for alerting.""" - self.input_field( - "rsp-alerts", "slack-webhook", "Slack webhook for alerts" - ) - - def _narrativelog(self): - """Give narrativelog its own secret for externalization.""" - db_pass = self.secrets["postgres"]["narrativelog_password"] - self._set("narrativelog", "database-password", db_pass) - - def _exposurelog(self): - """Give exposurelog its own secret for externalization.""" - db_pass = self.secrets["postgres"]["exposurelog_password"] - self._set("exposureloglog", "database-password", db_pass) - - -class OnePasswordSecretGenerator(SecretGenerator): - """A secret generator that syncs 1Password secrets into a secrets directory - containing per-component secret export files from Vault (as generated - by read_secrets.sh). - - Parameters - ---------- - environment : str - The name of the environment (the environment's domain name). - regenerate : bool - If `True`, any secrets that can be generated by the SecretGenerator - will be regenerated. 
- """ - - def __init__(self, environment, regenerate) -> None: - super().__init__(environment, regenerate) - self.op_secrets = {} - self.op = new_client_from_environment() - self.parse_vault() - - def parse_vault(self): - """Parse the 1Password vault and store secrets applicable to this - environment in the `op_secrets` attribute. - - This method is called automatically when initializing a - `OnePasswordSecretGenerator`. - """ - vault = self.op.get_vault_by_title("RSP-Vault") - items = self.op.get_items(vault.id) - - for item_summary in items: - key = None - secret_notes = None - secret_password = None - environments = [] - item = self.op.get_item(item_summary.id, vault.id) - - logging.debug(f"Looking at {item.id}") - - for field in item.fields: - if field.label == "generate_secrets_key": - if key is None: - key = field.value - else: - msg = "Found two generate_secrets_keys for {key}" - raise Exception(msg) - elif field.label == "environment": - environments.append(field.value) - elif field.label == "notesPlain": - secret_notes = field.value - elif field.purpose == "PASSWORD": - secret_password = field.value - - if not key: - continue - - secret_value = secret_notes or secret_password - - if not secret_value: - logging.error("No value found for %s", item.title) - continue - - logging.debug("Environments are %s for %s", environments, item.id) - - if self.environment in environments: - self.op_secrets[key] = secret_value - logging.debug("Storing %s (matching environment)", item.id) - elif not environments and key not in self.op_secrets: - self.op_secrets[key] = secret_value - logging.debug("Storing %s (applicable to all envs)", item.id) - else: - logging.debug("Ignoring %s", item.id) - - def input_field(self, component, name, description): - """Query for a secret's value from 1Password (`op_secrets` attribute). - - This method overrides `SecretGenerator.input_field`, which prompts - a user interactively. 
- """ - key = f"{component} {name}" - if key not in self.op_secrets: - raise Exception(f"Did not find entry in 1Password for {key}") - - self.secrets[component][name] = self.op_secrets[key] - - def input_file(self, component, name, description): - """Query for a secret file from 1Password (`op_secrets` attribute). - - This method overrides `SecretGenerator.input_file`, which prompts - a user interactively. - """ - return self.input_field(component, name, description) - - def generate(self): - """Generate secrets, updating the `secrets` attribute. - - This method first runs `SecretGenerator.generate`, and then - automatically generates secrets for any additional components - that were identified in 1Password. - - If a secret appears already, it is overridden with the value in - 1Password. - """ - super().generate() - - for composite_key, _secret_value in self.op_secrets.items(): - item_component, item_name = composite_key.split() - # Special case for components that may not be present in every - # environment, but nonetheless might be 1Password secrets (see - # conditional in SecretGenerator.generate) - if item_component in {"ingress-nginx", "cert-manager"}: - continue - - logging.debug( - "Updating component: %s/%s", item_component, item_name - ) - self.input_field(item_component, item_name, "") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="generate_secrets") - parser.add_argument( - "--op", - default=False, - action="store_true", - help="Load secrets from 1Password", - ) - parser.add_argument( - "--verbose", default=False, action="store_true", help="Verbose logging" - ) - parser.add_argument( - "--regenerate", - default=False, - action="store_true", - help="Regenerate random secrets", - ) - parser.add_argument("environment", help="Environment to generate") - args = parser.parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig() - - if args.op: - sg = 
OnePasswordSecretGenerator(args.environment, args.regenerate) - else: - sg = SecretGenerator(args.environment, args.regenerate) - - sg.load() - sg.generate() - sg.save() diff --git a/installer/install.sh b/installer/install.sh index 8919cef2c1..9c473c9514 100755 --- a/installer/install.sh +++ b/installer/install.sh @@ -1,25 +1,27 @@ #!/bin/bash -e -USAGE="Usage: ./install.sh ENVIRONMENT VAULT_TOKEN [VAULT_TOKEN_LEASE_DURATION]" +USAGE="Usage: ./install.sh ENVIRONMENT VAULT_ROLE_ID VAULT_SECRET_ID" ENVIRONMENT=${1:?$USAGE} -export VAULT_TOKEN=${2:?$USAGE} -export VAULT_TOKEN_LEASE_DURATION=${4:-31536000} -export VAULT_ADDR=`yq -r .vaultUrl ../environments/values-$ENVIRONMENT.yaml` -VAULT_PATH_PREFIX=`yq -r .vaultPathPrefix ../environments/values-$ENVIRONMENT.yaml` -ARGOCD_PASSWORD=`vault kv get --field=argocd.admin.plaintext_password $VAULT_PATH_PREFIX/installer` +config="../environments/values-${ENVIRONMENT}.yaml" +VAULT_ROLE_ID=${2:?$USAGE} +VAULT_SECRET_ID=${3:?$USAGE} +export VAULT_ADDR=$(yq -r .vaultUrl "$config") +export VAULT_TOKEN=$(vault write auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID" | grep 'token ' | awk '{ print $2 }') +VAULT_PATH_PREFIX=$(yq -r .vaultPathPrefix "$config") +ARGOCD_PASSWORD=$(vault kv get --field=admin.plaintext_password $VAULT_PATH_PREFIX/argocd) -GIT_URL=`git config --get remote.origin.url` +GIT_URL=$(git config --get remote.origin.url) # Github runs in a detached head state, but sets GITHUB_REF, # extract the branch from it. If we're there, use that branch. # git branch --show-current will return empty in deatached head. -GIT_BRANCH=${GITHUB_HEAD_REF:-`git branch --show-current`} +GIT_BRANCH=${GITHUB_HEAD_REF:-$(git branch --show-current)} echo "Set VAULT_TOKEN in a secret for vault-secrets-operator..." # The namespace may not exist already, but don't error if it does. 
kubectl create ns vault-secrets-operator || true -kubectl create secret generic vault-secrets-operator \ +kubectl create secret generic vault-credentials \ --namespace vault-secrets-operator \ - --from-literal=VAULT_TOKEN=$VAULT_TOKEN \ - --from-literal=VAULT_TOKEN_LEASE_DURATION=$VAULT_TOKEN_LEASE_DURATION \ + --from-literal=VAULT_ROLE_ID=$VAULT_ROLE_ID \ + --from-literal=VAULT_SECRET_ID=$VAULT_SECRET_ID \ --dry-run=client -o yaml | kubectl apply -f - echo "Set up docker pull secret for vault-secrets-operator..." @@ -81,8 +83,7 @@ argocd app sync science-platform \ --port-forward-namespace argocd echo "Syncing critical early applications" -if [ $(yq -r '.applications."ingress-nginx"' ../environments/values-$ENVIRONMENT.yaml) != "false" ]; -then +if [ $(yq -r '.applications."ingress-nginx"' "$config") != "false" ]; then echo "Syncing ingress-nginx..." argocd app sync ingress-nginx \ --port-forward \ @@ -91,8 +92,7 @@ fi # Wait for the cert-manager's webhook to finish deploying by running # kubectl, argocd's sync doesn't seem to wait for this to finish. -if [ $(yq -r '.applications."cert-manager"' ../environments/values-$ENVIRONMENT.yaml) != "false" ]; -then +if [ $(yq -r '.applications."cert-manager"' "$config") != "false" ]; then echo "Syncing cert-manager..." argocd app sync cert-manager \ --port-forward \ @@ -100,16 +100,14 @@ then kubectl -n cert-manager rollout status deploy/cert-manager-webhook fi -if [ $(yq -r .applications.postgres ../environments/values-$ENVIRONMENT.yaml) == "true" ]; -then +if [ $(yq -r .applications.postgres "$config") == "true" ]; then echo "Syncing postgres..." argocd app sync postgres \ --port-forward \ --port-forward-namespace argocd fi -if [ $(yq -r .applications.gafaelfawr ../environments/values-$ENVIRONMENT.yaml) != "false" ]; -then +if [ $(yq -r .applications.gafaelfawr "$config") != "false" ]; then echo "Syncing gafaelfawr..." 
argocd app sync gafaelfawr \ --port-forward \ @@ -124,4 +122,4 @@ argocd app sync -l "argocd.argoproj.io/instance=science-platform" \ echo "You can now check on your argo cd installation by running:" echo "kubectl port-forward service/argocd-server -n argocd 8080:443" echo "For the ArgoCD admin password:" -echo "vault kv get --field=argocd.admin.plaintext_password $VAULT_PATH_PREFIX/installer" +echo "vault kv get --field=admin.plaintext_password $VAULT_PATH_PREFIX/argocd" diff --git a/installer/read_secrets.sh b/installer/read_secrets.sh deleted file mode 100755 index 3a5e2f3a2e..0000000000 --- a/installer/read_secrets.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -e - -ENVIRONMENT=${1:?"Usage: read_secrets.sh ENVIRONMENT"} - -mkdir -p secrets - -COMPONENTS=`vault kv list --format=yaml secret/k8s_operator/$ENVIRONMENT | yq -r '.[]'` -for SECRET in $COMPONENTS -do - if [ $SECRET != "efd/" ] && [ $SECRET != "ts/" ]; then - vault kv get --field=data --format=json secret/k8s_operator/$ENVIRONMENT/$SECRET > secrets/$SECRET - fi -done diff --git a/installer/requirements.txt b/installer/requirements.txt deleted file mode 100644 index 73e8efa191..0000000000 --- a/installer/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -bcrypt -cryptography -onepasswordconnectsdk -pyyaml -yq diff --git a/installer/update_all_secrets.sh b/installer/update_all_secrets.sh deleted file mode 100755 index 65a8f5abd5..0000000000 --- a/installer/update_all_secrets.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -ex -./update_secrets.sh minikube.lsst.codes -./update_secrets.sh base-lsp.lsst.codes -./update_secrets.sh summit-lsp.lsst.codes -./update_secrets.sh tucson-teststand.lsst.codes -./update_secrets.sh data.lsst.cloud -./update_secrets.sh data-int.lsst.cloud -./update_secrets.sh data-dev.lsst.cloud -./update_secrets.sh roe -./update_secrets.sh roundtable-dev.lsst.cloud -./update_secrets.sh roundtable.lsst.cloud diff --git a/installer/update_secrets.sh b/installer/update_secrets.sh deleted file 
mode 100755 index 3f02056d7c..0000000000 --- a/installer/update_secrets.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -e -ENVIRONMENT=$1 - -export OP_CONNECT_HOST=https://roundtable.lsst.codes/1password -export VAULT_DOC_UUID=`yq -r .onepasswordUuid ../environments/values.yaml` -export VAULT_ADDR=https://vault.lsst.codes -export VAULT_TOKEN=`./vault_key.py $ENVIRONMENT write` - -if [ -z "$OP_CONNECT_TOKEN" ]; then - echo 'OP_CONNECT_TOKEN must be set to a 1Password Connect token' >&2 - exit 1 -fi - -echo "Clear out any existing secrets" -rm -rf secrets - -echo "Reading current secrets from vault" -./read_secrets.sh $ENVIRONMENT - -echo "Generating missing secrets with values from 1Password" -./generate_secrets.py $ENVIRONMENT --op - -echo "Writing secrets to vault" -./write_secrets.sh $ENVIRONMENT diff --git a/installer/vault_key.py b/installer/vault_key.py deleted file mode 100755 index 6e60759ae8..0000000000 --- a/installer/vault_key.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python3 -import argparse -import json -import os - -from onepasswordconnectsdk import new_client_from_environment - - -class VaultKeyRetriever: - def __init__(self) -> None: - self.op = new_client_from_environment() - vault_keys = self.op.get_item( - os.environ["VAULT_DOC_UUID"], "RSP-Vault" - ) - for field in vault_keys.fields: - if field.label == "notesPlain": - vault_keys_json = field.value - break - self.vault_keys = json.loads(vault_keys_json) - - def retrieve_key(self, environment, key_type): - env_key = f"k8s_operator/{environment}" - for e in self.vault_keys: - if env_key in e: - return e[env_key][key_type]["id"] - return None - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="fetch the vault key for an environment" - ) - parser.add_argument( - "environment", help="Environment name to retrieve key for" - ) - parser.add_argument( - "key_type", choices=["read", "write"], help="Which key to retrieve" - ) - args = parser.parse_args() - - vkr = 
VaultKeyRetriever() - print(vkr.retrieve_key(args.environment, args.key_type)) diff --git a/installer/write_secrets.sh b/installer/write_secrets.sh deleted file mode 100755 index 6497dd6c42..0000000000 --- a/installer/write_secrets.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -x - -ENVIRONMENT=${1:?"Usage: write_secrets.sh ENVIRONMENT"} - -# This is a bit tricky. This makes the path different for -# $SECRET, which ends up getting passed into vault and making -# the keys. -cd secrets - -for SECRET in * -do - vault kv put secret/k8s_operator/$ENVIRONMENT/$SECRET @$SECRET -done From 360ddd4a28261bbaa9efc3d87e22ecdd378accd4 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 27 Sep 2023 12:04:30 -0700 Subject: [PATCH 029/588] Add progress indication for vault copy-secrets phalanx vault copy-secrets can take a while to complete, so add some output so that it's obvious it's doing something. --- src/phalanx/services/vault.py | 3 ++- tests/cli/vault_test.py | 2 +- tests/data/output/idfdev/copy-output | 6 ++++++ 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 tests/data/output/idfdev/copy-output diff --git a/src/phalanx/services/vault.py b/src/phalanx/services/vault.py index 9628b8992c..3acc7664aa 100644 --- a/src/phalanx/services/vault.py +++ b/src/phalanx/services/vault.py @@ -88,9 +88,10 @@ def copy_secrets(self, environment: str, old_path: str) -> None: new_vault_client = self._vault.get_vault_client(config) old_vault_client = self._vault.get_vault_client(config, old_path) secrets = old_vault_client.list_application_secrets() - for name in secrets: + for name in sorted(secrets): secret = old_vault_client.get_application_secret(name) new_vault_client.store_application_secret(name, secret) + print("Copied Vault secret for", name) def create_read_approle(self, environment: str) -> VaultAppRole: """Create a new Vault read AppRole for the given environment. 
diff --git a/tests/cli/vault_test.py b/tests/cli/vault_test.py index 9d9fe8328a..4d47b22476 100644 --- a/tests/cli/vault_test.py +++ b/tests/cli/vault_test.py @@ -107,7 +107,7 @@ def test_copy_secrets( result = run_cli("vault", "copy-secrets", "idfdev", old_path) assert result.exit_code == 0 - assert result.output == "" + assert result.output == read_output_data("idfdev", "copy-output") result = run_cli("vault", "export-secrets", "idfdev", str(tmp_path)) assert result.exit_code == 0 assert result.output == "" diff --git a/tests/data/output/idfdev/copy-output b/tests/data/output/idfdev/copy-output new file mode 100644 index 0000000000..38649e2963 --- /dev/null +++ b/tests/data/output/idfdev/copy-output @@ -0,0 +1,6 @@ +Copied Vault secret for argocd +Copied Vault secret for gafaelfawr +Copied Vault secret for mobu +Copied Vault secret for nublado +Copied Vault secret for postgres +Copied Vault secret for unknown From 759024fe605ff41af6e34424dd7a845811b06251 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 27 Sep 2023 12:20:22 -0700 Subject: [PATCH 030/588] Produce nicer output from install.sh Be more consistent in the formatting and make the comments a bit more accurate. Stop doing an early sync of the pull secret, since vault-secrets-operator and Argo CD no longer need it. 
--- installer/install.sh | 43 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/installer/install.sh b/installer/install.sh index 9c473c9514..0dcfd2f75b 100755 --- a/installer/install.sh +++ b/installer/install.sh @@ -4,18 +4,21 @@ ENVIRONMENT=${1:?$USAGE} config="../environments/values-${ENVIRONMENT}.yaml" VAULT_ROLE_ID=${2:?$USAGE} VAULT_SECRET_ID=${3:?$USAGE} -export VAULT_ADDR=$(yq -r .vaultUrl "$config") -export VAULT_TOKEN=$(vault write auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID" | grep 'token ' | awk '{ print $2 }') -VAULT_PATH_PREFIX=$(yq -r .vaultPathPrefix "$config") -ARGOCD_PASSWORD=$(vault kv get --field=admin.plaintext_password $VAULT_PATH_PREFIX/argocd) +echo "Getting Git branch and remote information..." GIT_URL=$(git config --get remote.origin.url) # Github runs in a detached head state, but sets GITHUB_REF, # extract the branch from it. If we're there, use that branch. # git branch --show-current will return empty in deatached head. GIT_BRANCH=${GITHUB_HEAD_REF:-$(git branch --show-current)} -echo "Set VAULT_TOKEN in a secret for vault-secrets-operator..." +echo "Logging on to Vault..." +export VAULT_ADDR=$(yq -r .vaultUrl "$config") +export VAULT_TOKEN=$(vault write auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID" | grep 'token ' | awk '{ print $2 }') +VAULT_PATH_PREFIX=$(yq -r .vaultPathPrefix "$config") +ARGOCD_PASSWORD=$(vault kv get --field=admin.plaintext_password $VAULT_PATH_PREFIX/argocd) + +echo "Putting Vault credentials in a secret for vault-secrets-operator..." # The namespace may not exist already, but don't error if it does. 
kubectl create ns vault-secrets-operator || true kubectl create secret generic vault-credentials \ @@ -24,15 +27,9 @@ kubectl create secret generic vault-credentials \ --from-literal=VAULT_SECRET_ID=$VAULT_SECRET_ID \ --dry-run=client -o yaml | kubectl apply -f - -echo "Set up docker pull secret for vault-secrets-operator..." -vault kv get --field=.dockerconfigjson $VAULT_PATH_PREFIX/pull-secret > docker-creds -kubectl create secret generic pull-secret -n vault-secrets-operator \ - --from-file=.dockerconfigjson=docker-creds \ - --type=kubernetes.io/dockerconfigjson \ - --dry-run=client -o yaml | kubectl apply -f - - -echo "Update / install vault-secrets-operator..." -# ArgoCD depends on pull-secret, which depends on vault-secrets-operator. +# Argo CD depends a Vault-created secret for its credentials, so +# vault-secrets-operator has to be installed first. +echo "Updating or installing vault-secrets-operator..." helm dependency update ../applications/vault-secrets-operator helm upgrade vault-secrets-operator ../applications/vault-secrets-operator \ --install \ @@ -44,7 +41,7 @@ helm upgrade vault-secrets-operator ../applications/vault-secrets-operator \ --timeout 5m \ --wait -echo "Update / install argocd using helm..." +echo "Updating or installing Argo CD using Helm..." helm dependency update ../applications/argocd helm upgrade argocd ../applications/argocd \ --install \ @@ -56,7 +53,7 @@ helm upgrade argocd ../applications/argocd \ --timeout 5m \ --wait -echo "Login to argocd..." +echo "Logging in to Argo CD..." argocd login \ --plaintext \ --port-forward \ @@ -64,7 +61,7 @@ argocd login \ --username admin \ --password $ARGOCD_PASSWORD -echo "Creating top level application" +echo "Creating the top-level Argo CD application..." 
argocd app create science-platform \ --repo $GIT_URL \ --path environments --dest-namespace default \ @@ -78,11 +75,11 @@ argocd app create science-platform \ --values values.yaml \ --values values-$ENVIRONMENT.yaml +echo "Syncing the top-level Argo CD application..." argocd app sync science-platform \ --port-forward \ --port-forward-namespace argocd -echo "Syncing critical early applications" if [ $(yq -r '.applications."ingress-nginx"' "$config") != "false" ]; then echo "Syncing ingress-nginx..." argocd app sync ingress-nginx \ @@ -90,14 +87,15 @@ if [ $(yq -r '.applications."ingress-nginx"' "$config") != "false" ]; then --port-forward-namespace argocd fi -# Wait for the cert-manager's webhook to finish deploying by running -# kubectl, argocd's sync doesn't seem to wait for this to finish. if [ $(yq -r '.applications."cert-manager"' "$config") != "false" ]; then echo "Syncing cert-manager..." argocd app sync cert-manager \ --port-forward \ --port-forward-namespace argocd && \ - kubectl -n cert-manager rollout status deploy/cert-manager-webhook + + # Wait for the cert-manager's webhook to finish deploying by running + # kubectl, argocd's sync doesn't seem to wait for this to finish. + kubectl -n cert-manager rollout status deploy/cert-manager-webhook fi if [ $(yq -r .applications.postgres "$config") == "true" ]; then @@ -114,11 +112,12 @@ if [ $(yq -r .applications.gafaelfawr "$config") != "false" ]; then --port-forward-namespace argocd fi -echo "Sync remaining science platform apps" +echo "Syncing remaining applications..." 
argocd app sync -l "argocd.argoproj.io/instance=science-platform" \ --port-forward \ --port-forward-namespace argocd +echo '' echo "You can now check on your argo cd installation by running:" echo "kubectl port-forward service/argocd-server -n argocd 8080:443" echo "For the ArgoCD admin password:" From 0857f55d3017e593c51dcf81238e3540706a8f68 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 27 Sep 2023 15:13:25 -0700 Subject: [PATCH 031/588] Remove warning about AppRoles and installer The installer now supports AppRoles as of this set of changes, so this warning can be removed. --- docs/admin/secrets-setup.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/admin/secrets-setup.rst b/docs/admin/secrets-setup.rst index f189c8e719..3d2f99f07a 100644 --- a/docs/admin/secrets-setup.rst +++ b/docs/admin/secrets-setup.rst @@ -43,12 +43,6 @@ This approach is being replaced with a `Vault AppRole`_ that has read access to .. _Vault AppRole: https://developer.hashicorp.com/vault/docs/auth/approle -.. warning:: - - The current Phalanx installer only supports Vault read tokens, not Vault AppRoles. - Support for Vault AppRoles will be added in the future. - In the meantime, the Vault bootstrapping process in `install.sh `__ will need to be modified when installing environments that use Vault AppRoles. - Phalanx does not strictly require either of those approaches; any authentication approach that `Vault Secrets Operator`_ supports may be used as long as :px-app:`vault-secrets-operator` is configured accordingly for that environment. However, the standard installation process only supports AppRoles, and tooling is provided to manage those roles. 
From 6ebaaa0a96680b43f2dcf6ea132dbe4d4f755be6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 11:16:06 +0000 Subject: [PATCH 032/588] Update Helm release cert-manager to v1.13.1 --- applications/cert-manager/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml index aea788535f..fb4aac4cea 100644 --- a/applications/cert-manager/Chart.yaml +++ b/applications/cert-manager/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/cert-manager/cert-manager dependencies: - name: cert-manager - version: v1.13.0 + version: v1.13.1 repository: https://charts.jetstack.io From 15c1fccf45a8cac7fb2b007e299416b5f14f49c5 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 12:31:29 +0000 Subject: [PATCH 033/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 489 +++++++++++++++++++++------------------- requirements/main.txt | 511 +++++++++++++++++++++--------------------- 2 files changed, 513 insertions(+), 487 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 54f1f1b7cb..edfc221b1e 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -42,82 +42,97 @@ cfgv==3.4.0 \ --hash=sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9 \ --hash=sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560 # via pre-commit -charset-normalizer==3.2.0 \ - --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \ - --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \ - --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \ - --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \ - 
--hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \ - --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \ - --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \ - --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \ - --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \ - --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \ - --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \ - --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \ - --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \ - --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \ - --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \ - --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \ - --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \ - --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \ - --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \ - --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \ - --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \ - --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \ - --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \ - --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \ - --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \ - --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \ - --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \ - --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \ - 
--hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \ - --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \ - --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \ - --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \ - --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \ - --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \ - --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \ - --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \ - --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \ - --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \ - --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \ - --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \ - --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \ - --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \ - --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \ - --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \ - --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \ - --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \ - --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \ - --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \ - --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \ - --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \ - --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \ - --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \ - 
--hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \ - --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \ - --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \ - --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \ - --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \ - --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \ - --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \ - --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \ - --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \ - --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \ - --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \ - --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \ - --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \ - --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \ - --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \ - --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \ - --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \ - --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \ - --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \ - --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \ - --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \ - --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \ - --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa +charset-normalizer==3.3.0 \ + 
--hash=sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843 \ + --hash=sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786 \ + --hash=sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e \ + --hash=sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8 \ + --hash=sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4 \ + --hash=sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa \ + --hash=sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d \ + --hash=sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82 \ + --hash=sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7 \ + --hash=sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895 \ + --hash=sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d \ + --hash=sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a \ + --hash=sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382 \ + --hash=sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678 \ + --hash=sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b \ + --hash=sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e \ + --hash=sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741 \ + --hash=sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4 \ + --hash=sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596 \ + --hash=sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9 \ + --hash=sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69 \ + --hash=sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c \ + --hash=sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77 \ + --hash=sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13 \ + 
--hash=sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459 \ + --hash=sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e \ + --hash=sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7 \ + --hash=sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908 \ + --hash=sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a \ + --hash=sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f \ + --hash=sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8 \ + --hash=sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482 \ + --hash=sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d \ + --hash=sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d \ + --hash=sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545 \ + --hash=sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34 \ + --hash=sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86 \ + --hash=sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6 \ + --hash=sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe \ + --hash=sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e \ + --hash=sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc \ + --hash=sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7 \ + --hash=sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd \ + --hash=sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c \ + --hash=sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557 \ + --hash=sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a \ + --hash=sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89 \ + --hash=sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078 \ + 
--hash=sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e \ + --hash=sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4 \ + --hash=sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403 \ + --hash=sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0 \ + --hash=sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89 \ + --hash=sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115 \ + --hash=sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9 \ + --hash=sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05 \ + --hash=sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a \ + --hash=sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec \ + --hash=sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56 \ + --hash=sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38 \ + --hash=sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479 \ + --hash=sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c \ + --hash=sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e \ + --hash=sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd \ + --hash=sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186 \ + --hash=sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455 \ + --hash=sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c \ + --hash=sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65 \ + --hash=sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78 \ + --hash=sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287 \ + --hash=sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df \ + --hash=sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43 \ + 
--hash=sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1 \ + --hash=sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7 \ + --hash=sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989 \ + --hash=sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a \ + --hash=sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63 \ + --hash=sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884 \ + --hash=sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649 \ + --hash=sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810 \ + --hash=sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828 \ + --hash=sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4 \ + --hash=sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2 \ + --hash=sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd \ + --hash=sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5 \ + --hash=sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe \ + --hash=sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293 \ + --hash=sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e \ + --hash=sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e \ + --hash=sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8 # via # -c requirements/main.txt # requests @@ -238,9 +253,9 @@ coverage[toml]==7.3.1 \ # via # -r requirements/dev.in # pytest-cov -cycler==0.11.0 \ - --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \ - --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f +cycler==0.12.0 \ + --hash=sha256:7896994252d006771357777d0251f3e34d266f4fa5f2c572247a80ab01440947 \ + --hash=sha256:8cc3a7b4861f91b1095157f9916f748549a617046e67eb7619abed9b34d2c94a # via 
matplotlib diagrams==0.23.3 \ --hash=sha256:543c707c36a2c896dfdf8f23e993a9c7ae48bb1a667f6baf19151eb98e57a134 \ @@ -250,9 +265,9 @@ distlib==0.3.7 \ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -documenteer[guide]==1.0.0a8 \ - --hash=sha256:566126dd8c798b4b4a282580b372c1b814f189f0d6a9f25b9ff1aebdd3353a0f \ - --hash=sha256:a7889bfb8f246e01ce1af508aeb477312d4885f4ecc33fdd89ddbfaeacab5208 +documenteer[guide]==1.0.0a9 \ + --hash=sha256:3661510f3acec78fa07fb20d2eb82677c302b66a283ed3911ebc54b451cd51c8 \ + --hash=sha256:53c4e5e697abe366bcff3a33a437060413627239e31a827a7cfbe254758633c6 # via -r requirements/dev.in docutils==0.19 \ --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ @@ -269,41 +284,49 @@ filelock==3.12.4 \ --hash=sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4 \ --hash=sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd # via virtualenv -fonttools==4.42.1 \ - --hash=sha256:0eb79a2da5eb6457a6f8ab904838454accc7d4cccdaff1fd2bd3a0679ea33d64 \ - --hash=sha256:113337c2d29665839b7d90b39f99b3cac731f72a0eda9306165a305c7c31d341 \ - --hash=sha256:12a7c247d1b946829bfa2f331107a629ea77dc5391dfd34fdcd78efa61f354ca \ - --hash=sha256:179737095eb98332a2744e8f12037b2977f22948cf23ff96656928923ddf560a \ - --hash=sha256:19b7db825c8adee96fac0692e6e1ecd858cae9affb3b4812cdb9d934a898b29e \ - --hash=sha256:37983b6bdab42c501202500a2be3a572f50d4efe3237e0686ee9d5f794d76b35 \ - --hash=sha256:3a35981d90feebeaef05e46e33e6b9e5b5e618504672ca9cd0ff96b171e4bfff \ - --hash=sha256:46a0ec8adbc6ff13494eb0c9c2e643b6f009ce7320cf640de106fb614e4d4360 \ - --hash=sha256:4aa79366e442dbca6e2c8595645a3a605d9eeabdb7a094d745ed6106816bef5d \ - --hash=sha256:515607ec756d7865f23070682622c49d922901943697871fc292277cf1e71967 \ - --hash=sha256:53eb5091ddc8b1199330bb7b4a8a2e7995ad5d43376cadce84523d8223ef3136 \ - 
--hash=sha256:5d18fc642fd0ac29236ff88ecfccff229ec0386090a839dd3f1162e9a7944a40 \ - --hash=sha256:5fb289b7a815638a7613d46bcf324c9106804725b2bb8ad913c12b6958ffc4ec \ - --hash=sha256:62f481ac772fd68901573956231aea3e4b1ad87b9b1089a61613a91e2b50bb9b \ - --hash=sha256:689508b918332fb40ce117131633647731d098b1b10d092234aa959b4251add5 \ - --hash=sha256:68a02bbe020dc22ee0540e040117535f06df9358106d3775e8817d826047f3fd \ - --hash=sha256:6ed2662a3d9c832afa36405f8748c250be94ae5dfc5283d668308391f2102861 \ - --hash=sha256:7286aed4ea271df9eab8d7a9b29e507094b51397812f7ce051ecd77915a6e26b \ - --hash=sha256:7cc7d685b8eeca7ae69dc6416833fbfea61660684b7089bca666067cb2937dcf \ - --hash=sha256:8708b98c278012ad267ee8a7433baeb809948855e81922878118464b274c909d \ - --hash=sha256:9398f244e28e0596e2ee6024f808b06060109e33ed38dcc9bded452fd9bbb853 \ - --hash=sha256:9e36344e48af3e3bde867a1ca54f97c308735dd8697005c2d24a86054a114a71 \ - --hash=sha256:a398bdadb055f8de69f62b0fc70625f7cbdab436bbb31eef5816e28cab083ee8 \ - --hash=sha256:acb47f6f8680de24c1ab65ebde39dd035768e2a9b571a07c7b8da95f6c8815fd \ - --hash=sha256:be24fcb80493b2c94eae21df70017351851652a37de514de553435b256b2f249 \ - --hash=sha256:c391cd5af88aacaf41dd7cfb96eeedfad297b5899a39e12f4c2c3706d0a3329d \ - --hash=sha256:c95b0724a6deea2c8c5d3222191783ced0a2f09bd6d33f93e563f6f1a4b3b3a4 \ - --hash=sha256:c9b1ce7a45978b821a06d375b83763b27a3a5e8a2e4570b3065abad240a18760 \ - --hash=sha256:db372213d39fa33af667c2aa586a0c1235e88e9c850f5dd5c8e1f17515861868 \ - --hash=sha256:db55cbaea02a20b49fefbd8e9d62bd481aaabe1f2301dabc575acc6b358874fa \ - --hash=sha256:ed1a13a27f59d1fc1920394a7f596792e9d546c9ca5a044419dca70c37815d7c \ - --hash=sha256:f2b82f46917d8722e6b5eafeefb4fb585d23babd15d8246c664cd88a5bddd19c \ - --hash=sha256:f2f806990160d1ce42d287aa419df3ffc42dfefe60d473695fb048355fe0c6a0 \ - --hash=sha256:f720fa82a11c0f9042376fd509b5ed88dab7e3cd602eee63a1af08883b37342b +fonttools==4.43.0 \ + 
--hash=sha256:030355fbb0cea59cf75d076d04d3852900583d1258574ff2d7d719abf4513836 \ + --hash=sha256:05056a8c9af048381fdb17e89b17d45f6c8394176d01e8c6fef5ac96ea950d38 \ + --hash=sha256:206808f9717c9b19117f461246372a2c160fa12b9b8dbdfb904ab50ca235ba0a \ + --hash=sha256:20fc43783c432862071fa76da6fa714902ae587bc68441e12ff4099b94b1fcef \ + --hash=sha256:25620b738d4533cfc21fd2a4f4b667e481f7cb60e86b609799f7d98af657854e \ + --hash=sha256:33c40a657fb87ff83185828c0323032d63a4df1279d5c1c38e21f3ec56327803 \ + --hash=sha256:3d7adfa342e6b3a2b36960981f23f480969f833d565a4eba259c2e6f59d2674f \ + --hash=sha256:48078357984214ccd22d7fe0340cd6ff7286b2f74f173603a1a9a40b5dc25afe \ + --hash=sha256:5056f69a18f3f28ab5283202d1efcfe011585d31de09d8560f91c6c88f041e92 \ + --hash=sha256:52e77f23a9c059f8be01a07300ba4c4d23dc271d33eed502aea5a01ab5d2f4c1 \ + --hash=sha256:57c22e5f9f53630d458830f710424dce4f43c5f0d95cb3368c0f5178541e4db7 \ + --hash=sha256:5aa67d1e720fdd902fde4a59d0880854ae9f19fc958f3e1538bceb36f7f4dc92 \ + --hash=sha256:5f9660e70a2430780e23830476332bc3391c3c8694769e2c0032a5038702a662 \ + --hash=sha256:635658464dccff6fa5c3b43fe8f818ae2c386ee6a9e1abc27359d1e255528186 \ + --hash=sha256:6a530fa28c155538d32214eafa0964989098a662bd63e91e790e6a7a4e9c02da \ + --hash=sha256:70f021a6b9eb10dfe7a411b78e63a503a06955dd6d2a4e130906d8760474f77c \ + --hash=sha256:77e5113233a2df07af9dbf493468ce526784c3b179c0e8b9c7838ced37c98b69 \ + --hash=sha256:7c76f32051159f8284f1a5f5b605152b5a530736fb8b55b09957db38dcae5348 \ + --hash=sha256:812142a0e53cc853964d487e6b40963df62f522b1b571e19d1ff8467d7880ceb \ + --hash=sha256:82d8e687a42799df5325e7ee12977b74738f34bf7fde1c296f8140efd699a213 \ + --hash=sha256:8dfd8edfce34ad135bd69de20c77449c06e2c92b38f2a8358d0987737f82b49e \ + --hash=sha256:93c5b6d77baf28f306bc13fa987b0b13edca6a39dc2324eaca299a74ccc6316f \ + --hash=sha256:9d654d3e780e0ceabb1f4eff5a3c042c67d4428d0fe1ea3afd238a721cf171b3 \ + --hash=sha256:a682fb5cbf8837d1822b80acc0be5ff2ea0c49ca836e468a21ffd388ef280fd3 \ + 
--hash=sha256:a68b71adc3b3a90346e4ac92f0a69ab9caeba391f3b04ab6f1e98f2c8ebe88e3 \ + --hash=sha256:a6a2e99bb9ea51e0974bbe71768df42c6dd189308c22f3f00560c3341b345646 \ + --hash=sha256:ab80e7d6bb01316d5fc8161a2660ca2e9e597d0880db4927bc866c76474472ef \ + --hash=sha256:ace0fd5afb79849f599f76af5c6aa5e865bd042c811e4e047bbaa7752cc26126 \ + --hash=sha256:ace51902ab67ef5fe225e8b361039e996db153e467e24a28d35f74849b37b7ce \ + --hash=sha256:af38f5145258e9866da5881580507e6d17ff7756beef175d13213a43a84244e9 \ + --hash=sha256:b3813f57f85bbc0e4011a0e1e9211f9ee52f87f402e41dc05bc5135f03fa51c1 \ + --hash=sha256:b5e760198f0b87e42478bb35a6eae385c636208f6f0d413e100b9c9c5efafb6a \ + --hash=sha256:b62a53a4ca83c32c6b78cac64464f88d02929779373c716f738af6968c8c821e \ + --hash=sha256:d08a694b280d615460563a6b4e2afb0b1b9df708c799ec212bf966652b94fc84 \ + --hash=sha256:d27d960e10cf7617d70cf3104c32a69b008dde56f2d55a9bed4ba6e3df611544 \ + --hash=sha256:da78f39b601ed0b4262929403186d65cf7a016f91ff349ab18fdc5a7080af465 \ + --hash=sha256:dcc01cea0a121fb0c009993497bad93cae25e77db7dee5345fec9cce1aaa09cd \ + --hash=sha256:e3f8acc6ef4a627394021246e099faee4b343afd3ffe2e517d8195b4ebf20289 \ + --hash=sha256:e4bc589d8da09267c7c4ceaaaa4fc01a7908ac5b43b286ac9279afe76407c384 \ + --hash=sha256:e5d53eddaf436fa131042f44a76ea1ead0a17c354ab9de0d80e818f0cb1629f1 \ + --hash=sha256:ee728d5af70f117581712966a21e2e07031e92c687ef1fdc457ac8d281016f64 \ + --hash=sha256:f19c2b1c65d57cbea25cabb80941fea3fbf2625ff0cdcae8900b5fb1c145704f # via matplotlib gitdb==4.0.10 \ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ @@ -321,9 +344,9 @@ graphviz==0.20.1 \ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8 # via diagrams -identify==2.5.29 \ - --hash=sha256:24437fbf6f4d3fe6efd0eb9d67e24dd9106db99af5ceb27996a5f7895f24bf1b \ - 
--hash=sha256:d43d52b86b15918c137e3a74fff5224f60385cd0e9c38e99d07c257f02f151a5 +identify==2.5.30 \ + --hash=sha256:afe67f26ae29bab007ec21b03d4114f41316ab9dd15aa8736a167481e108da54 \ + --hash=sha256:f302a4256a15c849b91cfcdcec052a8ce914634b2f77ae87dad29cd749f2d88d # via pre-commit idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ @@ -657,9 +680,9 @@ numpy==1.26.0 \ # via # contourpy # matplotlib -packaging==23.1 \ - --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \ - --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f +packaging==23.2 \ + --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ + --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 # via # matplotlib # pydata-sphinx-theme @@ -743,121 +766,121 @@ pybtex-docutils==1.0.3 \ --hash=sha256:3a7ebdf92b593e00e8c1c538aa9a20bca5d92d84231124715acc964d51d93c6b \ --hash=sha256:8fd290d2ae48e32fcb54d86b0efb8d573198653c7e2447d5bec5847095f430b9 # via sphinxcontrib-bibtex -pydantic==2.3.0 \ - --hash=sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d \ - --hash=sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81 +pydantic==2.4.2 \ + --hash=sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7 \ + --hash=sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1 # via # -c requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.6.3 \ - --hash=sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3 \ - --hash=sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6 \ - --hash=sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418 \ - --hash=sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7 \ - --hash=sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc \ - 
--hash=sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5 \ - --hash=sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7 \ - --hash=sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f \ - --hash=sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48 \ - --hash=sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad \ - --hash=sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef \ - --hash=sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9 \ - --hash=sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58 \ - --hash=sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da \ - --hash=sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149 \ - --hash=sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b \ - --hash=sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881 \ - --hash=sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456 \ - --hash=sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98 \ - --hash=sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e \ - --hash=sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c \ - --hash=sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e \ - --hash=sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb \ - --hash=sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862 \ - --hash=sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728 \ - --hash=sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6 \ - --hash=sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf \ - --hash=sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e \ - --hash=sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd \ - 
--hash=sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8 \ - --hash=sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987 \ - --hash=sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a \ - --hash=sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2 \ - --hash=sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784 \ - --hash=sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b \ - --hash=sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309 \ - --hash=sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7 \ - --hash=sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413 \ - --hash=sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2 \ - --hash=sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f \ - --hash=sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6 \ - --hash=sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b \ - --hash=sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3 \ - --hash=sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7 \ - --hash=sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d \ - --hash=sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378 \ - --hash=sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8 \ - --hash=sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe \ - --hash=sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7 \ - --hash=sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973 \ - --hash=sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad \ - --hash=sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34 \ - --hash=sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb \ - 
--hash=sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c \ - --hash=sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465 \ - --hash=sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5 \ - --hash=sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588 \ - --hash=sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950 \ - --hash=sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70 \ - --hash=sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32 \ - --hash=sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7 \ - --hash=sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec \ - --hash=sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67 \ - --hash=sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645 \ - --hash=sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db \ - --hash=sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7 \ - --hash=sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170 \ - --hash=sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17 \ - --hash=sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb \ - --hash=sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c \ - --hash=sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819 \ - --hash=sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b \ - --hash=sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d \ - --hash=sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a \ - --hash=sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525 \ - --hash=sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1 \ - --hash=sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76 \ - 
--hash=sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60 \ - --hash=sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b \ - --hash=sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42 \ - --hash=sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd \ - --hash=sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014 \ - --hash=sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d \ - --hash=sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a \ - --hash=sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa \ - --hash=sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f \ - --hash=sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26 \ - --hash=sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a \ - --hash=sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64 \ - --hash=sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5 \ - --hash=sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057 \ - --hash=sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50 \ - --hash=sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b \ - --hash=sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483 \ - --hash=sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b \ - --hash=sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c \ - --hash=sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9 \ - --hash=sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698 \ - --hash=sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362 \ - --hash=sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49 \ - --hash=sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282 \ - 
--hash=sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0 \ - --hash=sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a \ - --hash=sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b \ - --hash=sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1 \ - --hash=sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa +pydantic-core==2.10.1 \ + --hash=sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e \ + --hash=sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33 \ + --hash=sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7 \ + --hash=sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7 \ + --hash=sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea \ + --hash=sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4 \ + --hash=sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0 \ + --hash=sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7 \ + --hash=sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94 \ + --hash=sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff \ + --hash=sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82 \ + --hash=sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd \ + --hash=sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893 \ + --hash=sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e \ + --hash=sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d \ + --hash=sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901 \ + --hash=sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9 \ + --hash=sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c \ + 
--hash=sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7 \ + --hash=sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891 \ + --hash=sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f \ + --hash=sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a \ + --hash=sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9 \ + --hash=sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5 \ + --hash=sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e \ + --hash=sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a \ + --hash=sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c \ + --hash=sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f \ + --hash=sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514 \ + --hash=sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b \ + --hash=sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302 \ + --hash=sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096 \ + --hash=sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0 \ + --hash=sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27 \ + --hash=sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884 \ + --hash=sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a \ + --hash=sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357 \ + --hash=sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430 \ + --hash=sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221 \ + --hash=sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325 \ + --hash=sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4 \ + --hash=sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05 \ + 
--hash=sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55 \ + --hash=sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875 \ + --hash=sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970 \ + --hash=sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc \ + --hash=sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6 \ + --hash=sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f \ + --hash=sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b \ + --hash=sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d \ + --hash=sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15 \ + --hash=sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118 \ + --hash=sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee \ + --hash=sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e \ + --hash=sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6 \ + --hash=sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208 \ + --hash=sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede \ + --hash=sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3 \ + --hash=sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e \ + --hash=sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada \ + --hash=sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175 \ + --hash=sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a \ + --hash=sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c \ + --hash=sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f \ + --hash=sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58 \ + --hash=sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f \ + 
--hash=sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a \ + --hash=sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a \ + --hash=sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921 \ + --hash=sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e \ + --hash=sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904 \ + --hash=sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776 \ + --hash=sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52 \ + --hash=sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf \ + --hash=sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8 \ + --hash=sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f \ + --hash=sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b \ + --hash=sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63 \ + --hash=sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c \ + --hash=sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f \ + --hash=sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468 \ + --hash=sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e \ + --hash=sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab \ + --hash=sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2 \ + --hash=sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb \ + --hash=sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb \ + --hash=sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132 \ + --hash=sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b \ + --hash=sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607 \ + --hash=sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934 \ + 
--hash=sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698 \ + --hash=sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e \ + --hash=sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561 \ + --hash=sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de \ + --hash=sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b \ + --hash=sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a \ + --hash=sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595 \ + --hash=sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402 \ + --hash=sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881 \ + --hash=sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429 \ + --hash=sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5 \ + --hash=sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7 \ + --hash=sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c \ + --hash=sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531 \ + --hash=sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6 \ + --hash=sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521 # via # -c requirements/main.txt # pydantic diff --git a/requirements/main.txt b/requirements/main.txt index df5e7aca87..492a2a0fd5 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -45,148 +45,151 @@ certifi==2023.7.22 \ # httpcore # httpx # requests -cffi==1.15.1 \ - --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ - --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ - --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ - --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ - 
--hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ - --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ - --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ - --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ - --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ - --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ - --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ - --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ - --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ - --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ - --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ - --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ - --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ - --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ - --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ - --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ - --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ - --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ - --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ - --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ - --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ - --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ - --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ - --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ - 
--hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ - --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ - --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ - --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ - --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ - --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ - --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ - --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ - --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ - --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ - --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ - --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ - --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ - --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ - --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ - --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ - --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ - --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ - --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ - --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ - --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ - --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ - --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ - --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ - 
--hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ - --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ - --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ - --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ - --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ - --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ - --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ - --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ - --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ - --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ - --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ - --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 +cffi==1.16.0 \ + --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ + --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ + --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ + --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ + --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ + --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ + --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ + --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ + --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ + --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ + --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ + 
--hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ + --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ + --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ + --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ + --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ + --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ + --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ + --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ + --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ + --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ + --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ + --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ + --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ + --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ + --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ + --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ + --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ + --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ + --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ + --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ + --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ + --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ + --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ + --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ + 
--hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ + --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ + --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ + --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ + --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ + --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ + --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ + --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ + --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ + --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ + --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ + --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ + --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ + --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ + --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ + --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ + --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography -charset-normalizer==3.2.0 \ - --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \ - --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \ - --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \ - --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \ - --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \ - --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \ - 
--hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \ - --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \ - --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \ - --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \ - --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \ - --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \ - --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \ - --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \ - --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \ - --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \ - --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \ - --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \ - --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \ - --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \ - --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \ - --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \ - --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \ - --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \ - --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \ - --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \ - --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \ - --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \ - --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \ - --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \ - 
--hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \ - --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \ - --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \ - --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \ - --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \ - --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \ - --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \ - --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \ - --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \ - --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \ - --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \ - --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \ - --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \ - --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \ - --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \ - --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \ - --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \ - --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \ - --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \ - --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \ - --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \ - --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \ - --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \ - --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \ - 
--hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \ - --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \ - --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \ - --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \ - --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \ - --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \ - --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \ - --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \ - --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \ - --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \ - --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \ - --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \ - --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \ - --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \ - --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \ - --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \ - --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \ - --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \ - --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \ - --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \ - --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa +charset-normalizer==3.3.0 \ + --hash=sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843 \ + --hash=sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786 \ + 
--hash=sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e \ + --hash=sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8 \ + --hash=sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4 \ + --hash=sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa \ + --hash=sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d \ + --hash=sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82 \ + --hash=sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7 \ + --hash=sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895 \ + --hash=sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d \ + --hash=sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a \ + --hash=sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382 \ + --hash=sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678 \ + --hash=sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b \ + --hash=sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e \ + --hash=sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741 \ + --hash=sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4 \ + --hash=sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596 \ + --hash=sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9 \ + --hash=sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69 \ + --hash=sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c \ + --hash=sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77 \ + --hash=sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13 \ + --hash=sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459 \ + --hash=sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e \ + 
--hash=sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7 \ + --hash=sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908 \ + --hash=sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a \ + --hash=sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f \ + --hash=sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8 \ + --hash=sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482 \ + --hash=sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d \ + --hash=sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d \ + --hash=sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545 \ + --hash=sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34 \ + --hash=sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86 \ + --hash=sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6 \ + --hash=sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe \ + --hash=sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e \ + --hash=sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc \ + --hash=sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7 \ + --hash=sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd \ + --hash=sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c \ + --hash=sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557 \ + --hash=sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a \ + --hash=sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89 \ + --hash=sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078 \ + --hash=sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e \ + --hash=sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4 \ + 
--hash=sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403 \ + --hash=sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0 \ + --hash=sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89 \ + --hash=sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115 \ + --hash=sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9 \ + --hash=sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05 \ + --hash=sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a \ + --hash=sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec \ + --hash=sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56 \ + --hash=sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38 \ + --hash=sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479 \ + --hash=sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c \ + --hash=sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e \ + --hash=sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd \ + --hash=sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186 \ + --hash=sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455 \ + --hash=sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c \ + --hash=sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65 \ + --hash=sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78 \ + --hash=sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287 \ + --hash=sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df \ + --hash=sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43 \ + --hash=sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1 \ + --hash=sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7 \ + 
--hash=sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989 \ + --hash=sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a \ + --hash=sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63 \ + --hash=sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884 \ + --hash=sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649 \ + --hash=sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810 \ + --hash=sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828 \ + --hash=sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4 \ + --hash=sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2 \ + --hash=sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd \ + --hash=sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5 \ + --hash=sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe \ + --hash=sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293 \ + --hash=sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e \ + --hash=sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e \ + --hash=sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8 # via requests click==8.1.7 \ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ @@ -222,9 +225,9 @@ cryptography==41.0.4 \ # -r requirements/main.in # pyjwt # safir -fastapi==0.103.1 \ - --hash=sha256:345844e6a82062f06a096684196aaf96c1198b25c06b72c1311b882aa2d8a35d \ - --hash=sha256:5e5f17e826dbd9e9b5a5145976c5cd90bcaa61f2bf9a69aca423f2bcebe44d83 +fastapi==0.103.2 \ + --hash=sha256:3270de872f0fe9ec809d4bd3d4d890c6d5cc7b9611d721d6438f9dacc8c4ef2e \ + --hash=sha256:75a11f6bfb8fc4d2bec0bd710c2d5f2829659c0e8c0afd5560fdda6ce25ec653 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ 
@@ -337,120 +340,120 @@ pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pydantic==2.3.0 \ - --hash=sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d \ - --hash=sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81 +pydantic==2.4.2 \ + --hash=sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7 \ + --hash=sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1 # via # -r requirements/main.in # fastapi # safir -pydantic-core==2.6.3 \ - --hash=sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3 \ - --hash=sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6 \ - --hash=sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418 \ - --hash=sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7 \ - --hash=sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc \ - --hash=sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5 \ - --hash=sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7 \ - --hash=sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f \ - --hash=sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48 \ - --hash=sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad \ - --hash=sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef \ - --hash=sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9 \ - --hash=sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58 \ - --hash=sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da \ - --hash=sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149 \ - --hash=sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b \ - 
--hash=sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881 \ - --hash=sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456 \ - --hash=sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98 \ - --hash=sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e \ - --hash=sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c \ - --hash=sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e \ - --hash=sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb \ - --hash=sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862 \ - --hash=sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728 \ - --hash=sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6 \ - --hash=sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf \ - --hash=sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e \ - --hash=sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd \ - --hash=sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8 \ - --hash=sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987 \ - --hash=sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a \ - --hash=sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2 \ - --hash=sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784 \ - --hash=sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b \ - --hash=sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309 \ - --hash=sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7 \ - --hash=sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413 \ - --hash=sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2 \ - --hash=sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f \ - 
--hash=sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6 \ - --hash=sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b \ - --hash=sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3 \ - --hash=sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7 \ - --hash=sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d \ - --hash=sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378 \ - --hash=sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8 \ - --hash=sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe \ - --hash=sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7 \ - --hash=sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973 \ - --hash=sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad \ - --hash=sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34 \ - --hash=sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb \ - --hash=sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c \ - --hash=sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465 \ - --hash=sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5 \ - --hash=sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588 \ - --hash=sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950 \ - --hash=sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70 \ - --hash=sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32 \ - --hash=sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7 \ - --hash=sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec \ - --hash=sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67 \ - --hash=sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645 \ - 
--hash=sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db \ - --hash=sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7 \ - --hash=sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170 \ - --hash=sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17 \ - --hash=sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb \ - --hash=sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c \ - --hash=sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819 \ - --hash=sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b \ - --hash=sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d \ - --hash=sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a \ - --hash=sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525 \ - --hash=sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1 \ - --hash=sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76 \ - --hash=sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60 \ - --hash=sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b \ - --hash=sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42 \ - --hash=sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd \ - --hash=sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014 \ - --hash=sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d \ - --hash=sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a \ - --hash=sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa \ - --hash=sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f \ - --hash=sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26 \ - --hash=sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a \ - 
--hash=sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64 \ - --hash=sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5 \ - --hash=sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057 \ - --hash=sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50 \ - --hash=sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b \ - --hash=sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483 \ - --hash=sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b \ - --hash=sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c \ - --hash=sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9 \ - --hash=sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698 \ - --hash=sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362 \ - --hash=sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49 \ - --hash=sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282 \ - --hash=sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0 \ - --hash=sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a \ - --hash=sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b \ - --hash=sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1 \ - --hash=sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa +pydantic-core==2.10.1 \ + --hash=sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e \ + --hash=sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33 \ + --hash=sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7 \ + --hash=sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7 \ + --hash=sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea \ + 
--hash=sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4 \ + --hash=sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0 \ + --hash=sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7 \ + --hash=sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94 \ + --hash=sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff \ + --hash=sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82 \ + --hash=sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd \ + --hash=sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893 \ + --hash=sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e \ + --hash=sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d \ + --hash=sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901 \ + --hash=sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9 \ + --hash=sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c \ + --hash=sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7 \ + --hash=sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891 \ + --hash=sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f \ + --hash=sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a \ + --hash=sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9 \ + --hash=sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5 \ + --hash=sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e \ + --hash=sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a \ + --hash=sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c \ + --hash=sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f \ + --hash=sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514 \ + 
--hash=sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b \ + --hash=sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302 \ + --hash=sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096 \ + --hash=sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0 \ + --hash=sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27 \ + --hash=sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884 \ + --hash=sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a \ + --hash=sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357 \ + --hash=sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430 \ + --hash=sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221 \ + --hash=sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325 \ + --hash=sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4 \ + --hash=sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05 \ + --hash=sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55 \ + --hash=sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875 \ + --hash=sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970 \ + --hash=sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc \ + --hash=sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6 \ + --hash=sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f \ + --hash=sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b \ + --hash=sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d \ + --hash=sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15 \ + --hash=sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118 \ + --hash=sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee \ + 
--hash=sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e \ + --hash=sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6 \ + --hash=sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208 \ + --hash=sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede \ + --hash=sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3 \ + --hash=sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e \ + --hash=sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada \ + --hash=sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175 \ + --hash=sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a \ + --hash=sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c \ + --hash=sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f \ + --hash=sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58 \ + --hash=sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f \ + --hash=sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a \ + --hash=sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a \ + --hash=sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921 \ + --hash=sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e \ + --hash=sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904 \ + --hash=sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776 \ + --hash=sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52 \ + --hash=sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf \ + --hash=sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8 \ + --hash=sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f \ + --hash=sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b \ + 
--hash=sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63 \ + --hash=sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c \ + --hash=sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f \ + --hash=sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468 \ + --hash=sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e \ + --hash=sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab \ + --hash=sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2 \ + --hash=sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb \ + --hash=sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb \ + --hash=sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132 \ + --hash=sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b \ + --hash=sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607 \ + --hash=sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934 \ + --hash=sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698 \ + --hash=sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e \ + --hash=sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561 \ + --hash=sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de \ + --hash=sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b \ + --hash=sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a \ + --hash=sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595 \ + --hash=sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402 \ + --hash=sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881 \ + --hash=sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429 \ + --hash=sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5 \ + 
--hash=sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7 \ + --hash=sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c \ + --hash=sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531 \ + --hash=sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6 \ + --hash=sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521 # via pydantic pyhcl==0.4.5 \ --hash=sha256:30ee337d330d1f90c9f5ed8f49c468f66c8e6e43192bdc7c6ece1420beb3070c \ From 0e9977ded27004c784aad73b6855a41a35a72489 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 2 Oct 2023 10:35:14 -0700 Subject: [PATCH 034/588] Add documentation for shared charts Document the charts directory in the section on the repository layout and add developer documentation for using shared charts. --- applications/tap/Chart.yaml | 2 +- docs/about/repository.rst | 17 ++++++- docs/developers/index.rst | 1 + docs/developers/shared-charts.rst | 65 ++++++++++++++++++++++++++ docs/developers/write-a-helm-chart.rst | 3 ++ 5 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 docs/developers/shared-charts.rst diff --git a/applications/tap/Chart.yaml b/applications/tap/Chart.yaml index a5f99cd97a..fa6f85e365 100644 --- a/applications/tap/Chart.yaml +++ b/applications/tap/Chart.yaml @@ -9,4 +9,4 @@ sources: dependencies: - name: cadc-tap version: 1.0.0 - repository: "file://../../charts/cadc-tap/" + repository: "file://../../charts/cadc-tap" diff --git a/docs/about/repository.rst b/docs/about/repository.rst index 251aafc696..31aee65496 100644 --- a/docs/about/repository.rst +++ b/docs/about/repository.rst @@ -59,11 +59,26 @@ installer directory :bdg-link-primary-line:`Browse installer/ on GitHub ` This directory contains a script named `install.sh `__. -The arguments to this are the name of the environment, the FQDN, and the read key for Vault (see :ref:`secrets` for more details on Vault). 
+The arguments to this are the name of the environment, the Vault RoleID, and the Vault SecretID (see :ref:`secrets` for more details on Vault). This installer script is the entry point for setting up a new environment. It can also be run on an existing environment to update it. See the :ref:`environment bootstrapping documentation ` for details. +charts directory +---------------- + +:bdg-link-primary-line:`Browse charts/ on GitHub ` + +This directory contains shared charts used by multiple Phalanx applications, but not generally useful enough to warrant separate publication in a proper Helm chart repository. + +In some cases, several Phalanx applications should use common Helm templates to avoid duplication. +The best way to do this within Helm is to use a subchart. +This can be done by publishing a separate Helm chart using the `charts repository `__, but publication as a Helm chart implies that the chart may be useful outside of Phalanx. +Sometimes these shared subcharts are merely artifacts of code organization and deduplication within Phalanx, and should not have an independent existence outside of Phalanx. +In those cases, they're maintained in the :file:`charts` directory. + +See :doc:`/developers/shared-charts` for details. 
+ docs directory -------------- diff --git a/docs/developers/index.rst b/docs/developers/index.rst index 991327cce5..bb8d0bbf7e 100644 --- a/docs/developers/index.rst +++ b/docs/developers/index.rst @@ -24,6 +24,7 @@ Individual applications are documented in the :doc:`/applications/index` section write-a-helm-chart add-external-chart + shared-charts define-secrets add-application diff --git a/docs/developers/shared-charts.rst b/docs/developers/shared-charts.rst new file mode 100644 index 0000000000..18cc658619 --- /dev/null +++ b/docs/developers/shared-charts.rst @@ -0,0 +1,65 @@ +###################################### +Sharing subcharts between applications +###################################### + +In some cases, you may want to instantiate multiple Phalanx applications from mostly the same Helm chart. +For example, Phalanx contains multiple TAP server applications (:px-app:`tap`, :px-app:`ssotap`, and :px-app:`livetap`) that are all deployments of the CADC TAP server. +The Helm template resources should be shared among those applications to avoid code duplication, unnecessary maintenance overhead, and unintentional inconsistencies. + +There are two options for how to handle cases like this: + +#. Publish a generic Helm chart for the underlying service using the `charts repository `__, and then use it like any other external chart. + See :doc:`add-external-chart` for more details on how to use an external chart within Phalanx. + +#. Use a shared chart within Phalanx. + This is more appropriate if the chart is only useful inside Phalanx and doesn't make sense to publish as a stand-alone Helm chart. + The shared chart is included as a subchart in each Phalanx application that needs roughly the same resources. + +This document describes the second choice. + +Writing the shared subchart +=========================== + +Shared subcharts go into the `charts directory `__. 
+Each subdirectory of that directory is a Helm chart, similar to the structure of the :file:`applications` directory. +Those Helm charts should follow our normal Phalanx chart conventions from :doc:`write-a-helm-chart`. +For example, the ``version`` field of every chart should be set to ``1.0.0``, since these charts will not be published and don't need version tracking. + +Usually, the easiest way to create a shared subchart is to start by writing a regular application chart for one instance of the application following the instructions in :doc:`write-a-helm-chart`. +Then, copy that application chart into a subdirectory in the :file:`charts` directory, remove all the parts that don't make sense to share between applications, and add any additional :file:`values.yaml` settings that will be required to customize the instantiation of this chart for different applications. + +Shared charts do not have :file:`values-{environment}.yaml` files and are not aware of Phalanx environments. +Any per-environment settings must be handled in the parent charts that use this subchart and passed down as regular :file:`values.yaml` overrides. + +Shared charts do not have :file:`secrets.yaml` files. +All application secrets must be defined by the application charts in the :file:`applications` directory. +This may mean there is some duplication of secrets between applications. +This is intentional; often, one application should be the owner of those secrets and other applications should use ``copy`` directives to use the same secret value. + +Any documentation URLs such as ``home``, ``sources``, and ``phalanx.lsst.io/docs`` annotations in the shared chart will be ignored. +They can be included in the shared chart for reference, but each application will need to copy that information into its own :file:`Chart.yaml` file for it to show up in the generated Phalanx documentation.
+ +Using a shared subchart +======================= + +To use a shared subchart, reference it as a dependency in :file:`Chart.yaml` the way that you would use any other Helm chart as a subchart, but use a ``file:`` URL to point to the shared chart directory. +For example: + +.. code-block:: yaml + :caption: applications/tap/Chart.yaml + + dependencies: + - name: cadc-tap + version: 1.0.0 + repository: "file://../../charts/cadc-tap" + +Note the relative ``file:`` URL, which ensures the chart comes from the same checkout of Phalanx as the application chart. +The ``version`` in the dependency must always be ``1.0.0``. + +Don't forget to copy any relevant ``home``, ``sources``, or ``annotations`` settings from the shared chart into the application :file:`Chart.yaml` so that it will be included in the generated Phalanx documentation. + +Next steps +========== + +- Define the secrets needed by each application: :doc:`define-secrets` +- Add the Argo CD applications to appropriate environments: :doc:`add-application` diff --git a/docs/developers/write-a-helm-chart.rst b/docs/developers/write-a-helm-chart.rst index ebef7d96cf..3e1ead63de 100644 --- a/docs/developers/write-a-helm-chart.rst +++ b/docs/developers/write-a-helm-chart.rst @@ -11,6 +11,9 @@ For first-party charts, the :file:`templates` directory is generally richly popu Here are instructions for writing a Helm chart for a newly-developed application. If you are using an external third-party chart to deploy part of the application, also see :doc:`add-external-chart`. +In some cases where there is a lot of internal duplication between multiple Phalanx applications, those applications should share a subchart that encapsulates that duplication. +See :doc:`shared-charts` if you think that may be the case for your application. + .. 
_dev-chart-starters: Start from a template From aa163d1f2f2e69a1c5697e08e43fba155647acef Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 2 Oct 2023 10:39:15 -0700 Subject: [PATCH 035/588] Add missing exceptions to __all__ Some Phalanx exception classes were missing from __all__ in the phalanx.exceptions module. Add them. --- src/phalanx/exceptions.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py index f349c59259..fc7102d2a0 100644 --- a/src/phalanx/exceptions.py +++ b/src/phalanx/exceptions.py @@ -13,6 +13,9 @@ "InvalidApplicationConfigError", "InvalidEnvironmentConfigError", "InvalidSecretConfigError", + "MalformedOnepasswordSecretError", + "MissingOnepasswordSecretsError", + "NoOnepasswordConfigError", "NoOnepasswordCredentialsError", "UnknownEnvironmentError", "UnresolvedSecretsError", From 63889f77c9d3faf481cbbd6dda828f60ee936e26 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 2 Oct 2023 11:00:34 -0700 Subject: [PATCH 036/588] Add a test for shared chart dependencies Enforce the proper URL convention and version number, and catch references to nonexistent shared charts. 
--- applications/livetap/Chart.yaml | 2 +- applications/production-tools/Chart.yaml | 1 - applications/ssotap/Chart.yaml | 2 +- charts/README.md | 2 +- tests/config_test.py | 51 +++++++++++++++++++----- 5 files changed, 43 insertions(+), 15 deletions(-) diff --git a/applications/livetap/Chart.yaml b/applications/livetap/Chart.yaml index 8d2668f59b..650616a8d9 100644 --- a/applications/livetap/Chart.yaml +++ b/applications/livetap/Chart.yaml @@ -9,4 +9,4 @@ sources: dependencies: - name: cadc-tap version: 1.0.0 - repository: "file://../../charts/cadc-tap/" + repository: "file://../../charts/cadc-tap" diff --git a/applications/production-tools/Chart.yaml b/applications/production-tools/Chart.yaml index 95add46d37..3df45ea6a5 100644 --- a/applications/production-tools/Chart.yaml +++ b/applications/production-tools/Chart.yaml @@ -1,7 +1,6 @@ apiVersion: v2 name: production-tools version: 1.0.0 -dependencies: description: A collection of utility pages for monitoring data processing. sources: - https://github.com/lsst-dm/production_tools diff --git a/applications/ssotap/Chart.yaml b/applications/ssotap/Chart.yaml index ce8558f9e4..232e7f780d 100644 --- a/applications/ssotap/Chart.yaml +++ b/applications/ssotap/Chart.yaml @@ -9,4 +9,4 @@ sources: dependencies: - name: cadc-tap version: 1.0.0 - repository: "file://../../charts/cadc-tap/" + repository: "file://../../charts/cadc-tap" diff --git a/charts/README.md b/charts/README.md index 3a868d1d92..af2a8f66c6 100644 --- a/charts/README.md +++ b/charts/README.md @@ -12,7 +12,7 @@ To use a chart in this directory, use a dependency stanza similar to the followi dependencies: - name: cadc-tap version: 1.0.0 - repository: "file://../../charts/cadc-tap/" + repository: "file://../../charts/cadc-tap" ``` If a Helm chart should be usable independently of Phalanx and warrants a separate existence with its own version number, that chart should instead go into the [charts](https://github.com/lsst-sqre/charts) repository. 
diff --git a/tests/config_test.py b/tests/config_test.py index d17f3c6d74..26a6936304 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -2,7 +2,10 @@ from __future__ import annotations +import re +from collections.abc import Iterator from pathlib import Path +from typing import Literal import yaml @@ -15,22 +18,27 @@ """Temporary whitelist of applications that haven't added secrets.yaml.""" +def all_charts( + parent: Literal["applications", "charts"], +) -> Iterator[Path]: + """Iterate through all chart paths.""" + root_path = Path(__file__).parent.parent / parent + for candidate in root_path.iterdir(): + if not candidate.is_dir(): + continue + yield candidate + + def test_application_version() -> None: """All application charts should have version 1.0.0.""" - applications_path = Path(__file__).parent.parent / "applications" - for application in applications_path.iterdir(): - if not application.is_dir(): - continue + for application in all_charts("applications"): chart = yaml.safe_load((application / "Chart.yaml").read_text()) assert ( chart["version"] == "1.0.0" ), f"Chart for application {application.name} has incorrect version" # Check the same thing for shared charts. 
- Path(__file__).parent.parent / "charts" - for shared_chart in applications_path.iterdir(): - if not shared_chart.is_dir(): - continue + for shared_chart in all_charts("charts"): chart = yaml.safe_load((shared_chart / "Chart.yaml").read_text()) assert ( chart["version"] == "1.0.0" @@ -39,9 +47,8 @@ def test_application_version() -> None: def test_secrets_defined() -> None: """Any application with a VaultSecret should have secrets.yaml.""" - applications_path = Path(__file__).parent.parent / "applications" - for application in applications_path.iterdir(): - if not application.is_dir() or application.name in _ALLOW_NO_SECRETS: + for application in all_charts("applications"): + if application.name in _ALLOW_NO_SECRETS: continue if list(application.glob("secrets*.yaml")): continue @@ -62,3 +69,25 @@ def test_secrets_defined() -> None: " resource but has no secrets.yaml configuration" ) raise AssertionError(msg) + + +def test_shared_subcharts() -> None: + """Check references to shared subcharts.""" + available = [c.name for c in all_charts("charts")] + for application in all_charts("applications"): + chart = yaml.safe_load((application / "Chart.yaml").read_text()) + chart.get("dependencies") + for dependency in chart.get("dependencies", []): + if not re.match("file:", dependency.get("repository", "")): + continue + name = application.name + version = dependency.get("version") + repository = dependency["repository"] + m = re.match(r"file://[.][.]/[.][.]/charts/([^/]+)$", repository) + assert m, f"Incorrect shared chart URL in {name}: {repository}" + assert ( + m.group(1) in available + ), f"Missing shared chart dependency {m.group(1)} in {name}" + assert ( + dependency["version"] == "1.0.0" + ), f"Incorrect shared chart version in {name}: {version} != 1.0.0" From afc811e91a97e74011169000c003bb84fc9fb178 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 2 Oct 2023 11:24:03 -0700 Subject: [PATCH 037/588] Refactor static secret loading Now that we have a proper 
Pydantic model for static secrets, that class can gain a class method for loading it from a path, which can be used to simplify the CLI code. --- src/phalanx/cli.py | 26 ++------------------------ src/phalanx/models/secrets.py | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 2fc2b971b2..8bef679e0f 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -71,24 +71,6 @@ def _is_config(path: Path) -> bool: return current -def _load_static_secrets(path: Path) -> StaticSecrets: - """Load static secrets from a file. - - Parameters - ---------- - path - Path to the file. - - Returns - ------- - dict of dict - Map from application to secret key to - `~phalanx.models.secrets.StaticSecret`. - """ - with path.open() as fh: - return StaticSecrets.model_validate(yaml.safe_load(fh)) - - @click.group(context_settings={"help_option_names": ["-h", "--help"]}) @click.version_option(message="%(version)s") def main() -> None: @@ -224,9 +206,7 @@ def secrets_audit( """ if not config: config = _find_config() - static_secrets = None - if secrets: - static_secrets = _load_static_secrets(secrets) + static_secrets = StaticSecrets.from_path(secrets) if secrets else None factory = Factory(config) secrets_service = factory.create_secrets_service() sys.stdout.write(secrets_service.audit(environment, static_secrets)) @@ -409,9 +389,7 @@ def secrets_sync( """ if not config: config = _find_config() - static_secrets = None - if secrets: - static_secrets = _load_static_secrets(secrets) + static_secrets = StaticSecrets.from_path(secrets) if secrets else None factory = Factory(config) secrets_service = factory.create_secrets_service() secrets_service.sync( diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index 1581a4b783..3861864e3c 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -7,9 +7,11 @@ from base64 import b64encode from datetime import UTC, 
datetime from enum import Enum +from pathlib import Path from typing import Literal, Self import bcrypt +import yaml from cryptography.fernet import Fernet from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization @@ -357,6 +359,23 @@ class StaticSecrets(BaseModel): model_config = ConfigDict(populate_by_name=True, extra="forbid") + @classmethod + def from_path(cls, path: Path) -> Self: + """Load static secrets from a file on disk. + + Parameters + ---------- + path + Path to the file. + + Returns + ------- + StaticSecrets + Parsed static secrets. + """ + with path.open() as fh: + return cls.model_validate(yaml.safe_load(fh)) + def for_application(self, application: str) -> dict[str, StaticSecret]: """Return any known secrets for an application. From 182020b724e5c4dacb0c841f5a553c1b5598a4c4 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 2 Oct 2023 13:02:45 -0700 Subject: [PATCH 038/588] Improve generated schemas Add more annotations to the Pydantic models so that the generated schemas contain more details. Mark the Vault URL as an HTTP URL for Pydantic validation purposes. Set a default URL pointing to the SQuaRE Vault server so that most environments don't need to set it, and adjust the schema so that this will validate. Drop the now-obsolete onepasswordUuid environment setting.
--- docs/admin/installation.rst | 1 + docs/admin/secrets-setup.rst | 2 +- docs/extras/schemas/environment.json | 57 ++++++---- docs/extras/schemas/secrets.json | 27 +++-- environments/README.md | 3 +- environments/values-base.yaml | 1 - environments/values-ccin2p3.yaml | 1 - environments/values-idfdev.yaml | 1 - environments/values-idfint.yaml | 1 - environments/values-idfprod.yaml | 1 - environments/values-minikube.yaml | 1 - environments/values-roundtable-dev.yaml | 1 - environments/values-roundtable-prod.yaml | 1 - environments/values-summit.yaml | 1 - environments/values-tucson-teststand.yaml | 1 - environments/values.yaml | 6 +- installer/install.sh | 6 +- src/phalanx/models/environments.py | 131 ++++++++++++++-------- src/phalanx/models/secrets.py | 87 ++++++++------ src/phalanx/storage/vault.py | 4 +- 20 files changed, 201 insertions(+), 133 deletions(-) diff --git a/docs/admin/installation.rst b/docs/admin/installation.rst index d9d14d7f76..4837535918 100644 --- a/docs/admin/installation.rst +++ b/docs/admin/installation.rst @@ -23,6 +23,7 @@ To create a new Phalanx environment, take the following steps: #. Create a new :file:`values-{environment}.yaml` file in `environments `__. Start with a template copied from an existing environment that's similar to the new environment. Edit it so that ``name``, ``fqdn``, ``vaultUrl``, and ``vaultPathPrefix`` at the top match your new environment. + You may omit ``vaultUrl`` for SQuaRE-managed environments. See :doc:`secrets-setup` for more information about the latter two settings and additional settings you may need. Enable the applications this environment should include. 
diff --git a/docs/admin/secrets-setup.rst b/docs/admin/secrets-setup.rst index 3d2f99f07a..34d147a4f0 100644 --- a/docs/admin/secrets-setup.rst +++ b/docs/admin/secrets-setup.rst @@ -32,7 +32,7 @@ The name of each secret other than ``pull-secret`` matches the name of the appli So, for example, all secrets for Gafaelfawr for a given environment may be stored as key/value pairs in the secret named :samp:`secrets/phalanx/{environment}/gafaelfawr`. This path is configured for each environment via the ``vaultPathPrefix`` setting in the environment :file:`values-{environment}.yaml` file. -The URL to the Vault server is set via the ``vaultUrl`` setting in the same file. +The URL to the Vault server is set via the ``vaultUrl`` setting in the same file and defaults to the SQuaRE-run Vault server. Vault credentials ================= diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index 9f50157e4f..df92ef86a8 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -4,13 +4,15 @@ "description": "Configuration for 1Password static secrets source.", "properties": { "connectUrl": { + "description": "URL to the 1Password Connect API server", "format": "uri", "minLength": 1, - "title": "Connecturl", + "title": "1Password Connect URL", "type": "string" }, "vaultTitle": { - "title": "Vaulttitle", + "description": "Title of the 1Password vault from which to retrieve secrets", + "title": "1Password vault title", "type": "string" } }, @@ -27,11 +29,13 @@ "description": "Configuration for a Phalanx environment.\n\nThis is a model for the :file:`values-{environment}.yaml` files for each\nenvironment and is also used to validate those files. 
For the complete\nconfiguration for an environment, initialize this model with the merger of\n:file:`values.yaml` and :file:`values-{environment}.yaml`.", "properties": { "name": { + "description": "Name of the environment", "title": "Name", "type": "string" }, "fqdn": { - "title": "Fqdn", + "description": "Fully-qualified domain name on which the environment listens", + "title": "Domain name", "type": "string" }, "onepassword": { @@ -43,21 +47,36 @@ "type": "null" } ], - "default": null + "default": null, + "description": "Configuration for using 1Password as a static secrets source", + "title": "1Password configuration" }, "vaultUrl": { - "title": "Vaulturl", - "type": "string" + "anyOf": [ + { + "format": "uri", + "minLength": 1, + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "URL of the Vault server. This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", + "title": "Vault server URL" }, "vaultPathPrefix": { - "title": "Vaultpathprefix", + "description": "Prefix of Vault paths, including the KV v2 mount point", + "title": "Vault path prefix", "type": "string" }, "applications": { "additionalProperties": { "type": "boolean" }, - "title": "Applications", + "description": "List of applications and whether they are enabled", + "title": "Enabled applications", "type": "object" }, "butlerRepositoryIndex": { @@ -70,19 +89,8 @@ } ], "default": null, - "title": "Butlerrepositoryindex" - }, - "onepasswordUuid": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Onepassworduuid" + "description": "URL to Butler repository index", + "title": "Butler repository index URL" }, "repoUrl": { "anyOf": [ @@ -94,7 +102,8 @@ } ], "default": null, - "title": "Repourl" + "description": "URL 
of the Git repository holding Argo CD configuration. This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", + "title": "URL of Git repository" }, "targetRevision": { "anyOf": [ @@ -106,13 +115,13 @@ } ], "default": null, - "title": "Targetrevision" + "description": "Branch of the Git repository holding Argo CD configuration. This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", + "title": "Git repository branch" } }, "required": [ "name", "fqdn", - "vaultUrl", "vaultPathPrefix", "applications" ], diff --git a/docs/extras/schemas/secrets.json b/docs/extras/schemas/secrets.json index d222b6c7b6..306b45a11c 100644 --- a/docs/extras/schemas/secrets.json +++ b/docs/extras/schemas/secrets.json @@ -14,9 +14,11 @@ } ], "default": null, - "description": "Rules for where the secret should be copied from" + "description": "Rules for where the secret should be copied from", + "title": "Copy rules" }, "description": { + "description": "Description of the secret", "title": "Description", "type": "string" }, @@ -33,7 +35,8 @@ } ], "default": null, - "title": "Generate" + "description": "Rules for how the secret should be generated", + "title": "Generation rules" }, "if": { "anyOf": [ @@ -54,9 +57,8 @@ "$ref": "#/$defs/SecretOnepasswordConfig" } ], - "default": { - "encoded": false - } + "description": "Configuration for how the secret is stored in 1Password", + "title": "1Password configuration" }, "value": { "anyOf": [ @@ -70,6 +72,7 @@ } ], "default": null, + "description": "Fixed value of secret", "title": "Value" } }, @@ -84,6 +87,7 @@ "description": "Possibly conditional 
rules for copying a secret value from another.", "properties": { "application": { + "description": "Application from which the secret should be copied", "title": "Application", "type": "string" }, @@ -101,6 +105,7 @@ "title": "Condition" }, "key": { + "description": "Secret key from which the secret should be copied", "title": "Key", "type": "string" } @@ -130,13 +135,14 @@ "title": "Condition" }, "type": { + "description": "Type of secret", "enum": [ "password", "gafaelfawr-token", "fernet-key", "rsa-private-key" ], - "title": "Type", + "title": "Secret type", "type": "string" } }, @@ -163,15 +169,17 @@ "title": "Condition" }, "source": { - "title": "Source", + "description": "Key of secret on which this secret is based. This may only be set by secrets of type `bcrypt-password-hash` or `mtime`.", + "title": "Source key", "type": "string" }, "type": { + "description": "Type of secret", "enum": [ "bcrypt-password-hash", "mtime" ], - "title": "Type", + "title": "Secret type", "type": "string" } }, @@ -187,7 +195,8 @@ "properties": { "encoded": { "default": false, - "title": "Encoded", + "description": "Whether the 1Password copy of the secret is encoded in base64. 1Password doesn't support newlines in secrets, so secrets that contain significant newlines have to be encoded when storing them in 1Password. 
This flag indicates that this has been done, and therefore when retrieving the secret from 1Password, its base64-encoding must be undone.", + "title": "Is base64-encoded", "type": "boolean" } }, diff --git a/environments/README.md b/environments/README.md index 793760602c..2673d51902 100644 --- a/environments/README.md +++ b/environments/README.md @@ -56,8 +56,7 @@ | butlerRepositoryIndex | string | None, must be set | Butler repository index to use for this environment | | fqdn | string | None, must be set | Fully-qualified domain name where the environment is running | | name | string | None, must be set | Name of the environment | -| onepasswordUuid | string | `"dg5afgiadsffeklfr6jykqymeu"` | UUID of the 1Password item in which to find Vault tokens | | repoUrl | string | `"https://github.com/lsst-sqre/phalanx.git"` | URL of the repository for all applications | | targetRevision | string | `"main"` | Revision of repository to use for all applications | | vaultPathPrefix | string | None, must be set | Prefix for Vault secrets for this environment | -| vaultUrl | string | None, must be set | URL of Vault server for this environment | +| vaultUrl | string | `"https://vault.lsst.codes/"` | URL of Vault server for this environment | diff --git a/environments/values-base.yaml b/environments/values-base.yaml index 8310f36de0..67f73d44a9 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -1,6 +1,5 @@ name: base fqdn: base-lsp.lsst.codes -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/base-lsp.lsst.codes applications: diff --git a/environments/values-ccin2p3.yaml b/environments/values-ccin2p3.yaml index 4dad63364e..d9a90d13e5 100644 --- a/environments/values-ccin2p3.yaml +++ b/environments/values-ccin2p3.yaml @@ -1,6 +1,5 @@ name: ccin2p3 fqdn: data-dev.lsst.eu -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/rsp-cc applications: diff --git a/environments/values-idfdev.yaml 
b/environments/values-idfdev.yaml index 689a156b26..c5938ef961 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -4,7 +4,6 @@ name: idfdev onepassword: connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP data-dev.lsst.cloud" -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/phalanx/idfdev applications: diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index ee29239791..f07106e224 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -1,7 +1,6 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" fqdn: data-int.lsst.cloud name: idfint -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/data-int.lsst.cloud applications: diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 2d9137f291..4ad3b30065 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -1,7 +1,6 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-repos.yaml" fqdn: data.lsst.cloud name: idfprod -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/data.lsst.cloud applications: diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index f082793512..aea3579661 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -3,7 +3,6 @@ fqdn: minikube.lsst.codes onepassword: connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP minikube.lsst.codes" -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/phalanx/minikube # The primary constraint on enabling applications is the low available memory diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index ca0db3d6f6..fb2c1569af 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -3,7 +3,6 @@ fqdn: 
roundtable-dev.lsst.cloud onepassword: connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP roundtable-dev.lsst.cloud" -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/roundtable-dev.lsst.cloud applications: diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index e6d0ce5469..655a48a4bd 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -3,7 +3,6 @@ fqdn: roundtable.lsst.cloud onepassword: connectUrl: "https://roundtable.lsst.cloud/1password" vaultTitle: "RSP roundtable.lsst.cloud" -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/roundtable.lsst.cloud applications: diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index 6ca59cf4ac..f82d65ace3 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -1,6 +1,5 @@ name: summit fqdn: summit-lsp.lsst.codes -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/summit-lsp.lsst.codes applications: diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index df9bef1bc6..d1fa87345a 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -1,6 +1,5 @@ name: tucson-teststand fqdn: tucson-teststand.lsst.codes -vaultUrl: "https://vault.lsst.codes" vaultPathPrefix: secret/k8s_operator/tucson-teststand.lsst.codes applications: diff --git a/environments/values.yaml b/environments/values.yaml index d69bc78c5d..0dbce119ba 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -12,9 +12,6 @@ name: "" # @default -- None, must be set fqdn: "" -# -- UUID of the 1Password item in which to find Vault tokens -onepasswordUuid: "dg5afgiadsffeklfr6jykqymeu" - # -- URL of the repository for all applications repoUrl: https://github.com/lsst-sqre/phalanx.git @@ -22,8 +19,7 @@ repoUrl: 
https://github.com/lsst-sqre/phalanx.git targetRevision: "main" # -- URL of Vault server for this environment -# @default -- None, must be set -vaultUrl: "" +vaultUrl: "https://vault.lsst.codes/" # -- Prefix for Vault secrets for this environment # @default -- None, must be set diff --git a/installer/install.sh b/installer/install.sh index 0dcfd2f75b..d3cbe8ee90 100755 --- a/installer/install.sh +++ b/installer/install.sh @@ -13,7 +13,11 @@ GIT_URL=$(git config --get remote.origin.url) GIT_BRANCH=${GITHUB_HEAD_REF:-$(git branch --show-current)} echo "Logging on to Vault..." -export VAULT_ADDR=$(yq -r .vaultUrl "$config") +if grep '^vaultUrl:' "$config" >/dev/null; then + export VAULT_ADDR=$(yq -r .vaultUrl "$config") +else + export VAULT_ADDR=$(yq -r .vaultUrl ../environments/values.yaml) +fi export VAULT_TOKEN=$(vault write auth/approle/login role_id="$VAULT_ROLE_ID" secret_id="$VAULT_SECRET_ID" | grep 'token ' | awk '{ print $2 }') VAULT_PATH_PREFIX=$(yq -r .vaultPathPrefix "$config") ARGOCD_PASSWORD=$(vault kv get --field=admin.plaintext_password $VAULT_PATH_PREFIX/argocd) diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index 57505175c8..c7d7a3b346 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -8,6 +8,7 @@ AnyHttpUrl, BaseModel, ConfigDict, + Field, GetJsonSchemaHandler, field_validator, ) @@ -35,30 +36,59 @@ class OnepasswordConfig(CamelCaseModel): """Configuration for 1Password static secrets source.""" - connect_url: AnyHttpUrl - """URL to the 1Password Connect API server.""" + connect_url: AnyHttpUrl = Field( + ..., + title="1Password Connect URL", + description="URL to the 1Password Connect API server", + ) - vault_title: str - """Title of the 1Password vault from which to retrieve secrets.""" + vault_title: str = Field( + ..., + title="1Password vault title", + description=( + "Title of the 1Password vault from which to retrieve secrets" + ), + ) class 
EnvironmentBaseConfig(CamelCaseModel): """Configuration common to `EnviromentConfig` and `Environment`.""" - name: str - """Name of the environment.""" - - fqdn: str - """Fully-qualified domain name.""" - - onepassword: OnepasswordConfig | None = None - """Configuration for using 1Password as a static secrets source.""" - - vault_url: str - """URL of Vault server.""" - - vault_path_prefix: str - """Prefix of Vault paths, including the Kv2 mount point.""" + name: str = Field(..., title="Name", description="Name of the environment") + + fqdn: str = Field( + ..., + title="Domain name", + description=( + "Fully-qualified domain name on which the environment listens" + ), + ) + + onepassword: OnepasswordConfig | None = Field( + None, + title="1Password configuration", + description=( + "Configuration for using 1Password as a static secrets source" + ), + ) + + vault_url: AnyHttpUrl | None = Field( + None, + title="Vault server URL", + description=( + "URL of the Vault server. This is required in the merged values" + " file that includes environment overrides, but the environment" + " override file doesn't need to set it, so it's marked as" + " optional for schema checking purposes to allow the override" + " file to be schema-checked independently." + ), + ) + + vault_path_prefix: str = Field( + ..., + title="Vault path prefix", + description="Prefix of Vault paths, including the KV v2 mount point", + ) @field_validator("onepassword", mode="before") @classmethod @@ -125,36 +155,41 @@ class EnvironmentConfig(EnvironmentBaseConfig): :file:`values.yaml` and :file:`values-{environment}.yaml`. """ - applications: dict[str, bool] - """List of applications and whether they are enabled.""" - - butler_repository_index: str | None = None - """URL to Butler repository index.""" - - onepassword_uuid: str | None = None - """UUID of 1Password item in which to find Vault tokens. 
- - This is used only by the old installer and will be removed once the new - secrets management and 1Password integration is deployed everywhere. - """ - - repo_url: str | None = None - """URL of the Git repository holding Argo CD configuration. - - This is required in the merged values file that includes environment - overrides, but the environment override file doesn't need to set it, so - it's marked as optional for schema checking purposes to allow the override - file to be schema-checked independently. - """ - - target_revision: str | None = None - """Branch of the Git repository holding Argo CD configuration. - - This is required in the merged values file that includes environment - overrides, but the environment override file doesn't need to set it, so - it's marked as optional for schema checking purposes to allow the override - file to be schema-checked independently. - """ + applications: dict[str, bool] = Field( + ..., + title="Enabled applications", + description="List of applications and whether they are enabled", + ) + + butler_repository_index: str | None = Field( + None, + title="Butler repository index URL", + description="URL to Butler repository index", + ) + + repo_url: str | None = Field( + None, + title="URL of Git repository", + description=( + "URL of the Git repository holding Argo CD configuration. This is" + " required in the merged values file that includes environment" + " overrides, but the environment override file doesn't need to" + " set it, so it's marked as optional for schema checking purposes" + " to allow the override file to be schema-checked independently." + ), + ) + + target_revision: str | None = Field( + None, + title="Git repository branch", + description=( + "Branch of the Git repository holding Argo CD configuration. 
This" + " is required in the merged values file that includes environment" + " overrides, but the environment override file doesn't need to set" + " it, so it's marked as optional for schema checking purposes to" + " allow the override file to be schema-checked independently." + ), + ) model_config = ConfigDict(extra="forbid") diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index 3861864e3c..854f32e7b7 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -62,11 +62,17 @@ class ConditionalMixin(BaseModel): class SecretCopyRules(BaseModel): """Rules for copying a secret value from another secret.""" - application: str - """Application from which the secret should be copied.""" + application: str = Field( + ..., + title="Application", + description="Application from which the secret should be copied", + ) - key: str - """Secret key from which the secret should be copied.""" + key: str = Field( + ..., + title="Key", + description="Secret key from which the secret should be copied", + ) model_config = ConfigDict(populate_by_name=True, extra="forbid") @@ -94,8 +100,7 @@ class SimpleSecretGenerateRules(BaseModel): SecretGenerateType.gafaelfawr_token, SecretGenerateType.fernet_key, SecretGenerateType.rsa_private_key, - ] - """Type of secret.""" + ] = Field(..., title="Secret type", description="Type of secret") model_config = ConfigDict(populate_by_name=True, extra="forbid") @@ -134,15 +139,16 @@ class SourceSecretGenerateRules(BaseModel): type: Literal[ SecretGenerateType.bcrypt_password_hash, SecretGenerateType.mtime, - ] - """Type of secret.""" - - source: str - """Key of secret on which this secret is based. + ] = Field(..., title="Secret type", description="Type of secret") - This may only be set by secrets of type ``bcrypt-password-hash`` or - ``mtime``. - """ + source: str = Field( + ..., + title="Source key", + description=( + "Key of secret on which this secret is based. 
This may only be" + " set by secrets of type `bcrypt-password-hash` or `mtime`." + ), + ) def generate(self, source: SecretStr) -> SecretStr: match self.type: @@ -172,36 +178,49 @@ class ConditionalSourceSecretGenerateRules( class SecretOnepasswordConfig(BaseModel): """Configuration for how a static secret is stored in 1Password.""" - encoded: bool = False - """Whether the 1Password copy of the secret is encoded in base64. - - 1Password doesn't support newlines in secrets, so secrets that contain - significant newlines have to be encoded when storing them in 1Password. - This flag indicates that this has been done, and therefore when retrieving - the secret from 1Password, its base64-encoding must be undone. - """ + encoded: bool = Field( + False, + title="Is base64-encoded", + description=( + "Whether the 1Password copy of the secret is encoded in base64." + " 1Password doesn't support newlines in secrets, so secrets that" + " contain significant newlines have to be encoded when storing" + " them in 1Password. This flag indicates that this has been done," + " and therefore when retrieving the secret from 1Password, its" + " base64-encoding must be undone." 
+ ), + ) class SecretConfig(BaseModel): """Specification for an application secret.""" - description: str - """Description of the secret.""" + description: str = Field( + ..., title="Description", description="Description of the secret" + ) copy_rules: SecretCopyRules | None = Field( None, + title="Copy rules", description="Rules for where the secret should be copied from", alias="copy", ) - generate: SecretGenerateRules | None = None - """Rules for how the secret should be generated.""" + generate: SecretGenerateRules | None = Field( + None, + title="Generation rules", + description="Rules for how the secret should be generated", + ) - onepassword: SecretOnepasswordConfig = SecretOnepasswordConfig() - """Configuration for how the secret is stored in 1Password.""" + onepassword: SecretOnepasswordConfig = Field( + default_factory=SecretOnepasswordConfig, + title="1Password configuration", + description="Configuration for how the secret is stored in 1Password", + ) - value: SecretStr | None = None - """Secret value.""" + value: SecretStr | None = Field( + None, title="Value", description="Fixed value of secret" + ) model_config = ConfigDict(populate_by_name=True, extra="forbid") @@ -211,12 +230,16 @@ class ConditionalSecretConfig(SecretConfig, ConditionalMixin): copy_rules: ConditionalSecretCopyRules | None = Field( None, + title="Copy rules", description="Rules for where the secret should be copied from", alias="copy", ) - generate: ConditionalSecretGenerateRules | None = None - """Rules for how the secret should be generated.""" + generate: ConditionalSecretGenerateRules | None = Field( + None, + title="Generation rules", + description="Rules for how the secret should be generated", + ) @model_validator(mode="after") def _validate_generate(self) -> Self: diff --git a/src/phalanx/storage/vault.py b/src/phalanx/storage/vault.py index 7c81bdcb1d..974b4460cc 100644 --- a/src/phalanx/storage/vault.py +++ b/src/phalanx/storage/vault.py @@ -359,4 +359,6 @@ def 
get_vault_client( """ if not path_prefix: path_prefix = env.vault_path_prefix - return VaultClient(env.vault_url, path_prefix) + if not env.vault_url: + raise ValueError("vaultUrl not set for this environment") + return VaultClient(str(env.vault_url), path_prefix) From c8e63c0437b80840d008c7ad60f514346c918966 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 2 Oct 2023 13:37:29 -0700 Subject: [PATCH 039/588] Improve whitespace handling in doc templates Suppress more whitespace in the documentation templates to make debug output a bit easier to read. --- docs/applications/_summary.rst.jinja | 41 ++++++++++++++-------------- docs/environments/_summary.rst.jinja | 9 +++--- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/docs/applications/_summary.rst.jinja b/docs/applications/_summary.rst.jinja index 96ba49b3d8..5fcced1f2f 100644 --- a/docs/applications/_summary.rst.jinja +++ b/docs/applications/_summary.rst.jinja @@ -3,45 +3,44 @@ * - View on GitHub - :bdg-link-primary-line:`applications/{{ app.name }} ` :bdg-link-primary-line:`Application template ` - {% if app.homepage %} + {%- if app.homepage %} * - Homepage - {{ app.homepage }} - {% endif %} - {% if app.source_urls %} + {%- endif %} + {%- if app.source_urls %} * - Source - {% if app.source_urls|length == 1 %} + {%- if app.source_urls|length == 1 %} - {{ app.source_urls[0] }} - {% else %} + {%- else %} - - {{ app.source_urls[0] }} - {% for source_url in app.source_urls[1:] %} + {%- for source_url in app.source_urls[1:] %} - {{ source_url }} - {% endfor %} - {% endif %} - {% endif %} - {% if app.doc_links %} + {%- endfor %} + {%- endif %} + {%- endif %} + {%- if app.doc_links %} * - Related docs - {% if app.doc_links|length == 1 %} + {%- if app.doc_links|length == 1 %} - {{ app.doc_links[0].to_rst() }} - {% else %} + {%- else %} - - {{ app.doc_links[0].to_rst() }} - {% for doc_link in app.doc_links[1:] %} + {%- for doc_link in app.doc_links[1:] %} - {{ doc_link.to_rst() }} - {% endfor %} - {% 
endif %} - {% endif %} + {%- endfor %} + {%- endif %} + {%- endif %} * - Type - Helm_ * - Namespace - {{ app.namespace }} * - Environments - .. list-table:: - {% for env_name in app.active_environments %} * - :px-env:`{{ env_name }}` - `values `__ - {% if envs[env_name].argocd_url %} + {%- if envs[env_name].argocd_url %} - `Argo CD <{{ envs[env_name].argocd_url }}/applications/{{ app.name }}>`__ - {% else %} + {%- else %} - - {% endif %} - {% endfor %} + {%- endif %} + {%- endfor %} diff --git a/docs/environments/_summary.rst.jinja b/docs/environments/_summary.rst.jinja index 97002cab01..7f192f5df6 100644 --- a/docs/environments/_summary.rst.jinja +++ b/docs/environments/_summary.rst.jinja @@ -36,18 +36,17 @@ {% for scope_groups in env.gafaelfawr_scopes %} * - ``{{ scope_groups.scope }}`` - - {{ scope_groups.groups_as_rst()[0] }} - {% if scope_groups.groups|length > 1 %} - {% for group in scope_groups.groups_as_rst()[1:] %} + {%- if scope_groups.groups|length > 1 %} + {%- for group in scope_groups.groups_as_rst()[1:] %} - {{ group }} {%- endfor %} {%- endif %} {%- endfor %} - {% endif %} + {%- endif %} {% if env.argocd_rbac %} * - Argo CD RBAC - .. csv-table:: - {% for line in env.argocd_rbac_csv %} {{ line }} {%- endfor %} - {% endif %} + {%- endif %} From 35bf0803a7c88d7c743eaec644bf9603a00a4b2b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 2 Oct 2023 14:22:46 -0700 Subject: [PATCH 040/588] Add encoded secret warnings to static templates If a secret is marked as base64-encoded in 1Password, add a warning to the static secret template saying so, in case people are using that as a reference to decide what to put into 1Password. This required some refactoring to export the correct fields from the static secrets model for a template. In the process, I fixed a few places where we may have incorrectly synced an empty pull secret when no pull secret was specified. 
--- src/phalanx/constants.py | 7 +++++ src/phalanx/models/secrets.py | 31 +++++++++++++++++++- src/phalanx/services/secrets.py | 17 +++++++---- tests/data/output/idfdev/static-secrets.yaml | 3 ++ 4 files changed, 51 insertions(+), 7 deletions(-) diff --git a/src/phalanx/constants.py b/src/phalanx/constants.py index 625bb64122..378be56c30 100644 --- a/src/phalanx/constants.py +++ b/src/phalanx/constants.py @@ -10,6 +10,7 @@ __all__ = [ "HELM_DOCLINK_ANNOTATION", + "ONEPASSWORD_ENCODED_WARNING", "PULL_SECRET_DESCRIPTION", "VAULT_SECRET_TEMPLATE", "VAULT_WRITE_TOKEN_LIFETIME", @@ -19,6 +20,12 @@ HELM_DOCLINK_ANNOTATION = "phalanx.lsst.io/docs" """Annotation in :file:`Chart.yaml` for application documentation links.""" +ONEPASSWORD_ENCODED_WARNING = ( + "If you store this secret in a 1Password item, encode it with base64" + " first." +) +"""Warning to add to secrets that must be encoded in 1Password.""" + PULL_SECRET_DESCRIPTION = ( "Pull secrets for Docker registries. Each key under registries is the name" " of a Docker registry that needs a pull secret. 
The value should have two" diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index 854f32e7b7..e72d0219cd 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -8,7 +8,7 @@ from datetime import UTC, datetime from enum import Enum from pathlib import Path -from typing import Literal, Self +from typing import Any, Literal, Self import bcrypt import yaml @@ -301,6 +301,7 @@ class PullSecret(BaseModel): title="Pull secret by registry", description="Pull secrets for each registry that needs one", ) + model_config = ConfigDict(extra="forbid") def to_dockerconfigjson(self) -> str: @@ -348,6 +349,15 @@ class StaticSecret(BaseModel): description="Intended for human writers and ignored by tools", ) + warning: YAMLFoldedString | None = Field( + None, + title="Warning for humans", + description=( + "Any warnings humans need to know about when filling out this" + " secret" + ), + ) + value: SecretStr | None = Field( None, title="Value of secret", @@ -414,3 +424,22 @@ def for_application(self, application: str) -> dict[str, StaticSecret]: application has no static secrets, returns an empty dictionary. """ return self.applications.get(application, {}) + + def to_template(self) -> dict[str, Any]: + """Export the model in a suitable form for the template. + + The static secrets template should always include the ``value`` field + even though it will be `None`, should not include ``warning`` if it is + unset, and should always include the `PullSecret` fields even though + they are defaults. The parameters to `~pydantic.BaseModel.model_dict` + aren't up to specifying this, hence this custom serializer. + + Returns + ------- + dict + Dictionary suitable for dumping as YAML to make a template. 
+ """ + result = self.model_dump(by_alias=True, exclude_unset=True) + if self.pull_secret: + result["pull-secret"] = self.pull_secret.model_dump() + return result diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 1c18c8f3fd..f11b99c5ed 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -10,6 +10,7 @@ import yaml from pydantic import SecretStr +from ..constants import ONEPASSWORD_ENCODED_WARNING from ..exceptions import ( MalformedOnepasswordSecretError, MissingOnepasswordSecretsError, @@ -137,8 +138,6 @@ def audit( # Generate the textual report. return report.to_text() - # Generate the textual report. - def generate_static_template(self, env_name: str) -> str: """Generate a template for providing static secrets. @@ -158,17 +157,21 @@ def generate_static_template(self, env_name: str) -> str: YAML template the user can fill out, as a string. """ environment = self._config.load_environment(env_name) + warning = ONEPASSWORD_ENCODED_WARNING template: defaultdict[str, dict[str, StaticSecret]] = defaultdict(dict) for application in environment.all_applications(): for secret in application.all_static_secrets(): - template[secret.application][secret.key] = StaticSecret( + static_secret = StaticSecret( description=YAMLFoldedString(secret.description), value=None, ) + if secret.onepassword.encoded: + static_secret.warning = YAMLFoldedString(warning) + template[secret.application][secret.key] = static_secret static_secrets = StaticSecrets( applications=template, pull_secret=PullSecret() ) - return yaml.dump(static_secrets.model_dump(by_alias=True), width=70) + return yaml.dump(static_secrets.to_template(), width=70) def get_onepassword_static_secrets(self, env_name: str) -> StaticSecrets: """Retrieve static secrets for an environment from 1Password. @@ -251,7 +254,9 @@ def sync( # Replace any Vault secrets that are incorrect. 
self._sync_application_secrets(vault_client, vault_secrets, resolved) + has_pull_secret = False if resolved.pull_secret and resolved.pull_secret.registries: + has_pull_secret = True pull_secret = resolved.pull_secret self._sync_pull_secret(vault_client, vault_secrets, pull_secret) @@ -261,7 +266,7 @@ def sync( vault_client, vault_secrets, resolved, - has_pull_secret=resolved.pull_secret is not None, + has_pull_secret=has_pull_secret, ) def _audit_secrets( @@ -304,7 +309,7 @@ def _audit_secrets( ] # The pull-secret has to be handled separately. - if pull_secret: + if pull_secret and pull_secret.registries: if "pull-secret" in vault_secrets: value = SecretStr(pull_secret.to_dockerconfigjson()) expected = {".dockerconfigjson": value} diff --git a/tests/data/output/idfdev/static-secrets.yaml b/tests/data/output/idfdev/static-secrets.yaml index 4c12fee9c8..705a55685a 100644 --- a/tests/data/output/idfdev/static-secrets.yaml +++ b/tests/data/output/idfdev/static-secrets.yaml @@ -29,6 +29,9 @@ applications: description: >- Slack web hook to which mobu should report failures and daily status. value: null + warning: >- + If you store this secret in a 1Password item, encode it with base64 + first. app-alert-webhook: description: >- Slack web hook to which to post internal application alerts. 
This From 98bdca80d3505f74a8078b32edd0253eba65c009 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 3 Oct 2023 14:47:22 -0700 Subject: [PATCH 041/588] Clarify description of charts directory Co-authored-by: Jonathan Sick --- docs/about/repository.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/about/repository.rst b/docs/about/repository.rst index 31aee65496..5546459183 100644 --- a/docs/about/repository.rst +++ b/docs/about/repository.rst @@ -69,7 +69,7 @@ charts directory :bdg-link-primary-line:`Browse charts/ on GitHub ` -This directory contains shared charts used by multiple Phalanx applications, but not generally useful enough to warrant separate publication in a proper Helm chart repository. +This directory contains Helm charts shared by multiple Phalanx applications that are not generally useful enough to warrant separate publication in a proper Helm chart repository. In some cases, several Phalanx applications should use common Helm templates to avoid duplication. The best way to do this within Helm is to use a subchart. From 1bf9791cf2e1b02547238a47943834ea5430e58e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 3 Oct 2023 14:47:42 -0700 Subject: [PATCH 042/588] Fixed spelling error in shared charts documentation Co-authored-by: Jonathan Sick --- docs/developers/shared-charts.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/developers/shared-charts.rst b/docs/developers/shared-charts.rst index 18cc658619..a0f4edd0f9 100644 --- a/docs/developers/shared-charts.rst +++ b/docs/developers/shared-charts.rst @@ -28,7 +28,7 @@ For example, the ``version`` field of every chart should be set to ``1.0.0``, si Usually, the easiest way to create a shared subchart is to start by writing a regular application chart for one instance of the application following the instructions in :doc:`write-a-helm-chart`. 
Then, copy that application chart into a subdirectory in the :file:`charts` directory, remove all the parts that don't make sense to share between applications, and add any additional :file:`values.yaml` settings that will be required to customize the instantiation of this chart for different applications. -Shared charts do not have :file:`values-{environment}.yaml` files and are not aware of Phalanx environemnts. +Shared charts do not have :file:`values-{environment}.yaml` files and are not aware of Phalanx environments. Any per-environment settings must be handled in the parent charts that use this subchart and passed down as regular :file:`values.yaml` overrides. Shared charts do not have :file:`secrets.yaml` files. From 0ed341a872c8fc032f7a8e0efa401c72ea0191b6 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Tue, 3 Oct 2023 16:24:32 -0700 Subject: [PATCH 043/588] fix: pin current recommended image tag --- applications/nublado/values-usdfdev.yaml | 1 + applications/nublado/values-usdfprod.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 8b24353338..61d980448d 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -12,6 +12,7 @@ controller: registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" recommendedTag: "recommended" + pin: ["w_2023_37"] numReleases: 1 numWeeklies: 2 numDailies: 3 diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index 5c8dfb9c4f..f8cc1660dc 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -12,6 +12,7 @@ controller: registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" recommendedTag: "recommended" + pin: ["w_2023_37"] numReleases: 1 numWeeklies: 2 numDailies: 3 From 
165e4aab18eb055e010ba0b174d5af4b2424431e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 3 Oct 2023 17:00:41 -0700 Subject: [PATCH 044/588] Bump version of Gafaelfawr Pick up the new Gafaelfawr release, which has support for finding group membership by user DN. This will be required for the new Keycloak and LDAP configuration on T&S sites. --- applications/gafaelfawr/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index 211cd6641f..f182217a06 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,7 +5,7 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.3.1 +appVersion: 9.4.0 dependencies: - name: redis From 56cd1ad0cbff7541f857a27ff24245121a5ddb8a Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 3 Oct 2023 17:03:53 -0700 Subject: [PATCH 045/588] Update Python dependencies --- .pre-commit-config.yaml | 2 +- requirements/dev.txt | 160 ++++++++++++++++++++-------------------- requirements/main.txt | 12 +-- 3 files changed, 87 insertions(+), 87 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fa06f3e758..2303624b95 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.289 + rev: v0.0.292 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/requirements/dev.txt b/requirements/dev.txt index edfc221b1e..8cedfa6b3e 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -24,9 +24,9 @@ autodoc-pydantic==2.0.1 \ --hash=sha256:7a125a4ff18e4903e27be71e4ddb3269380860eacab4a584d6cc2e212fa96991 \ --hash=sha256:d3c302fdb6d37edb5b721f0f540252fa79cea7018bc1a9a85bf70f33a68b0ce4 # via -r requirements/dev.in -babel==2.12.1 \ - 
--hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \ - --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455 +babel==2.13.0 \ + --hash=sha256:04c3e2d28d2b7681644508f836be388ae49e0cfe91465095340395b60d00f210 \ + --hash=sha256:fbfcae1575ff78e26c7449136f1abbefc3c13ce542eeb13d43d50d8b047216ec # via sphinx beautifulsoup4==4.12.2 \ --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ @@ -197,59 +197,59 @@ contourpy==1.1.1 \ --hash=sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0 \ --hash=sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7 # via matplotlib -coverage[toml]==7.3.1 \ - --hash=sha256:025ded371f1ca280c035d91b43252adbb04d2aea4c7105252d3cbc227f03b375 \ - --hash=sha256:04312b036580ec505f2b77cbbdfb15137d5efdfade09156961f5277149f5e344 \ - --hash=sha256:0575c37e207bb9b98b6cf72fdaaa18ac909fb3d153083400c2d48e2e6d28bd8e \ - --hash=sha256:07d156269718670d00a3b06db2288b48527fc5f36859425ff7cec07c6b367745 \ - --hash=sha256:1f111a7d85658ea52ffad7084088277135ec5f368457275fc57f11cebb15607f \ - --hash=sha256:220eb51f5fb38dfdb7e5d54284ca4d0cd70ddac047d750111a68ab1798945194 \ - --hash=sha256:229c0dd2ccf956bf5aeede7e3131ca48b65beacde2029f0361b54bf93d36f45a \ - --hash=sha256:245c5a99254e83875c7fed8b8b2536f040997a9b76ac4c1da5bff398c06e860f \ - --hash=sha256:2829c65c8faaf55b868ed7af3c7477b76b1c6ebeee99a28f59a2cb5907a45760 \ - --hash=sha256:4aba512a15a3e1e4fdbfed2f5392ec221434a614cc68100ca99dcad7af29f3f8 \ - --hash=sha256:4c96dd7798d83b960afc6c1feb9e5af537fc4908852ef025600374ff1a017392 \ - --hash=sha256:50dd1e2dd13dbbd856ffef69196781edff26c800a74f070d3b3e3389cab2600d \ - --hash=sha256:5289490dd1c3bb86de4730a92261ae66ea8d44b79ed3cc26464f4c2cde581fbc \ - --hash=sha256:53669b79f3d599da95a0afbef039ac0fadbb236532feb042c534fbb81b1a4e40 \ - --hash=sha256:553d7094cb27db58ea91332e8b5681bac107e7242c23f7629ab1316ee73c4981 \ - 
--hash=sha256:586649ada7cf139445da386ab6f8ef00e6172f11a939fc3b2b7e7c9082052fa0 \ - --hash=sha256:5ae4c6da8b3d123500f9525b50bf0168023313963e0e2e814badf9000dd6ef92 \ - --hash=sha256:5b4ee7080878077af0afa7238df1b967f00dc10763f6e1b66f5cced4abebb0a3 \ - --hash=sha256:5d991e13ad2ed3aced177f524e4d670f304c8233edad3210e02c465351f785a0 \ - --hash=sha256:614f1f98b84eb256e4f35e726bfe5ca82349f8dfa576faabf8a49ca09e630086 \ - --hash=sha256:636a8ac0b044cfeccae76a36f3b18264edcc810a76a49884b96dd744613ec0b7 \ - --hash=sha256:6407424621f40205bbe6325686417e5e552f6b2dba3535dd1f90afc88a61d465 \ - --hash=sha256:6bc6f3f4692d806831c136c5acad5ccedd0262aa44c087c46b7101c77e139140 \ - --hash=sha256:6cb7fe1581deb67b782c153136541e20901aa312ceedaf1467dcb35255787952 \ - --hash=sha256:74bb470399dc1989b535cb41f5ca7ab2af561e40def22d7e188e0a445e7639e3 \ - --hash=sha256:75c8f0df9dfd8ff745bccff75867d63ef336e57cc22b2908ee725cc552689ec8 \ - --hash=sha256:770f143980cc16eb601ccfd571846e89a5fe4c03b4193f2e485268f224ab602f \ - --hash=sha256:7eb0b188f30e41ddd659a529e385470aa6782f3b412f860ce22b2491c89b8593 \ - --hash=sha256:7eb3cd48d54b9bd0e73026dedce44773214064be93611deab0b6a43158c3d5a0 \ - --hash=sha256:87d38444efffd5b056fcc026c1e8d862191881143c3aa80bb11fcf9dca9ae204 \ - --hash=sha256:8a07b692129b8a14ad7a37941a3029c291254feb7a4237f245cfae2de78de037 \ - --hash=sha256:966f10df9b2b2115da87f50f6a248e313c72a668248be1b9060ce935c871f276 \ - --hash=sha256:a6191b3a6ad3e09b6cfd75b45c6aeeffe7e3b0ad46b268345d159b8df8d835f9 \ - --hash=sha256:aab8e9464c00da5cb9c536150b7fbcd8850d376d1151741dd0d16dfe1ba4fd26 \ - --hash=sha256:ac3c5b7e75acac31e490b7851595212ed951889918d398b7afa12736c85e13ce \ - --hash=sha256:ac9ad38204887349853d7c313f53a7b1c210ce138c73859e925bc4e5d8fc18e7 \ - --hash=sha256:b9c0c19f70d30219113b18fe07e372b244fb2a773d4afde29d5a2f7930765136 \ - --hash=sha256:c397c70cd20f6df7d2a52283857af622d5f23300c4ca8e5bd8c7a543825baa5a \ - --hash=sha256:c6601a60318f9c3945be6ea0f2a80571f4299b6801716f8a6e4846892737ebe4 \ - 
--hash=sha256:c6f55d38818ca9596dc9019eae19a47410d5322408140d9a0076001a3dcb938c \ - --hash=sha256:ca70466ca3a17460e8fc9cea7123c8cbef5ada4be3140a1ef8f7b63f2f37108f \ - --hash=sha256:ca833941ec701fda15414be400c3259479bfde7ae6d806b69e63b3dc423b1832 \ - --hash=sha256:cd0f7429ecfd1ff597389907045ff209c8fdb5b013d38cfa7c60728cb484b6e3 \ - --hash=sha256:cd694e19c031733e446c8024dedd12a00cda87e1c10bd7b8539a87963685e969 \ - --hash=sha256:cdd088c00c39a27cfa5329349cc763a48761fdc785879220d54eb785c8a38520 \ - --hash=sha256:de30c1aa80f30af0f6b2058a91505ea6e36d6535d437520067f525f7df123887 \ - --hash=sha256:defbbb51121189722420a208957e26e49809feafca6afeef325df66c39c4fdb3 \ - --hash=sha256:f09195dda68d94a53123883de75bb97b0e35f5f6f9f3aa5bf6e496da718f0cb6 \ - --hash=sha256:f12d8b11a54f32688b165fd1a788c408f927b0960984b899be7e4c190ae758f1 \ - --hash=sha256:f1a317fdf5c122ad642db8a97964733ab7c3cf6009e1a8ae8821089993f175ff \ - --hash=sha256:f2781fd3cabc28278dc982a352f50c81c09a1a500cc2086dc4249853ea96b981 \ - --hash=sha256:f4f456590eefb6e1b3c9ea6328c1e9fa0f1006e7481179d749b3376fc793478e +coverage[toml]==7.3.2 \ + --hash=sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1 \ + --hash=sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63 \ + --hash=sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9 \ + --hash=sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312 \ + --hash=sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3 \ + --hash=sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb \ + --hash=sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25 \ + --hash=sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92 \ + --hash=sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda \ + --hash=sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148 \ + 
--hash=sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6 \ + --hash=sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216 \ + --hash=sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a \ + --hash=sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640 \ + --hash=sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836 \ + --hash=sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c \ + --hash=sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f \ + --hash=sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2 \ + --hash=sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901 \ + --hash=sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed \ + --hash=sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a \ + --hash=sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074 \ + --hash=sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc \ + --hash=sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84 \ + --hash=sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083 \ + --hash=sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f \ + --hash=sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c \ + --hash=sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c \ + --hash=sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637 \ + --hash=sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2 \ + --hash=sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82 \ + --hash=sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f \ + --hash=sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce \ + --hash=sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef \ + 
--hash=sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f \ + --hash=sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611 \ + --hash=sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c \ + --hash=sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76 \ + --hash=sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9 \ + --hash=sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce \ + --hash=sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9 \ + --hash=sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf \ + --hash=sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf \ + --hash=sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9 \ + --hash=sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6 \ + --hash=sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2 \ + --hash=sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a \ + --hash=sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a \ + --hash=sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf \ + --hash=sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738 \ + --hash=sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a \ + --hash=sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4 # via # -r requirements/dev.in # pytest-cov @@ -744,9 +744,9 @@ pillow==10.0.1 \ --hash=sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68 \ --hash=sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1 # via matplotlib -platformdirs==3.10.0 \ - --hash=sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d \ - --hash=sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d +platformdirs==3.11.0 \ + 
--hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ + --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e # via virtualenv pluggy==1.3.0 \ --hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \ @@ -1095,24 +1095,24 @@ rpds-py==0.10.3 \ # via # jsonschema # referencing -ruff==0.0.291 \ - --hash=sha256:13f0d88e5f367b2dc8c7d90a8afdcfff9dd7d174e324fd3ed8e0b5cb5dc9b7f6 \ - --hash=sha256:1d5f0616ae4cdc7a938b493b6a1a71c8a47d0300c0d65f6e41c281c2f7490ad3 \ - --hash=sha256:5383ba67ad360caf6060d09012f1fb2ab8bd605ab766d10ca4427a28ab106e0b \ - --hash=sha256:6ab44ea607967171e18aa5c80335237be12f3a1523375fa0cede83c5cf77feb4 \ - --hash=sha256:6c06006350c3bb689765d71f810128c9cdf4a1121fd01afc655c87bab4fb4f83 \ - --hash=sha256:87671e33175ae949702774071b35ed4937da06f11851af75cd087e1b5a488ac4 \ - --hash=sha256:8a69bfbde72db8ca1c43ee3570f59daad155196c3fbe357047cd9b77de65f15b \ - --hash=sha256:8d5b56bc3a2f83a7a1d7f4447c54d8d3db52021f726fdd55d549ca87bca5d747 \ - --hash=sha256:a04b384f2d36f00d5fb55313d52a7d66236531195ef08157a09c4728090f2ef0 \ - --hash=sha256:b09b94efdcd162fe32b472b2dd5bf1c969fcc15b8ff52f478b048f41d4590e09 \ - --hash=sha256:b3eeee1b1a45a247758ecdc3ab26c307336d157aafc61edb98b825cadb153df3 \ - --hash=sha256:b727c219b43f903875b7503a76c86237a00d1a39579bb3e21ce027eec9534051 \ - --hash=sha256:b75f5801547f79b7541d72a211949754c21dc0705c70eddf7f21c88a64de8b97 \ - --hash=sha256:b97d0d7c136a85badbc7fd8397fdbb336e9409b01c07027622f28dcd7db366f2 \ - --hash=sha256:c61109661dde9db73469d14a82b42a88c7164f731e6a3b0042e71394c1c7ceed \ - --hash=sha256:d867384a4615b7f30b223a849b52104214442b5ba79b473d7edd18da3cde22d6 \ - --hash=sha256:fd17220611047de247b635596e3174f3d7f2becf63bd56301fc758778df9b629 +ruff==0.0.292 \ + --hash=sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96 \ + --hash=sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac \ + 
--hash=sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade \ + --hash=sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205 \ + --hash=sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4 \ + --hash=sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68 \ + --hash=sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0 \ + --hash=sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9 \ + --hash=sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81 \ + --hash=sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8 \ + --hash=sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0 \ + --hash=sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016 \ + --hash=sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7 \ + --hash=sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003 \ + --hash=sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a \ + --hash=sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c \ + --hash=sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4 # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -1294,9 +1294,9 @@ uc-micro-py==1.0.2 \ --hash=sha256:30ae2ac9c49f39ac6dce743bd187fcd2b574b16ca095fa74cd9396795c954c54 \ --hash=sha256:8c9110c309db9d9e87302e2f4ad2c3152770930d88ab385cd544e7a7e75f3de0 # via linkify-it-py -urllib3==2.0.5 \ - --hash=sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594 \ - --hash=sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e +urllib3==2.0.6 \ + --hash=sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2 \ + --hash=sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564 # via # -c requirements/main.txt # requests 
diff --git a/requirements/main.txt b/requirements/main.txt index 492a2a0fd5..a4284be259 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -527,9 +527,9 @@ rfc3986[idna2008]==1.5.0 \ --hash=sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835 \ --hash=sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97 # via httpx -safir==5.0.0a1 \ - --hash=sha256:149073f008a227c29e047195ecf5515c05181d6bc1cf816efd38781b7aca3e02 \ - --hash=sha256:66a72284f2d907023936bfa8e319d1da9210019d64c99c83516c4bc974cd50e8 +safir==5.0.0a2 \ + --hash=sha256:a13ac781a345d67ae43fd8a0a2434904e5dfca9f9321c15547e4d18b50144fe4 \ + --hash=sha256:c8ab7f043e0e65ccda4fef2a15697802224b2c42876991c1a12d0b41115d0bc5 # via -r requirements/main.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -567,7 +567,7 @@ uritemplate==4.1.1 \ --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e # via gidgethub -urllib3==2.0.5 \ - --hash=sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594 \ - --hash=sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e +urllib3==2.0.6 \ + --hash=sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2 \ + --hash=sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564 # via requests From bc97ec031c05ef526b219f6e0b2c6cde80c4018f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 4 Oct 2023 09:22:34 -0700 Subject: [PATCH 046/588] Add missing secret definition for ssotap ssotap requires a PostgreSQL password, which was missing from the secrets.yaml. Add it in and fix the Chart.yaml short description. 
--- applications/ssotap/Chart.yaml | 2 +- applications/ssotap/README.md | 2 +- applications/ssotap/secrets.yaml | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/applications/ssotap/Chart.yaml b/applications/ssotap/Chart.yaml index 232e7f780d..c525c47d3e 100644 --- a/applications/ssotap/Chart.yaml +++ b/applications/ssotap/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: ssotap version: 1.0.0 -description: IVOA TAP service +description: IVOA TAP service for Solar System Objects sources: - https://github.com/lsst-sqre/tap-postgres - https://github.com/opencadc/tap diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index f51056c0b6..52a4c9e4a8 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -1,6 +1,6 @@ # ssotap -IVOA TAP service +IVOA TAP service for Solar System Objects ## Source Code diff --git a/applications/ssotap/secrets.yaml b/applications/ssotap/secrets.yaml index 4280c602a3..8f50e59791 100644 --- a/applications/ssotap/secrets.yaml +++ b/applications/ssotap/secrets.yaml @@ -2,3 +2,7 @@ description: >- Google service account credentials used to write async job output to Google Cloud Storage. +pgpassword: + description: >- + Password to external PostgreSQL server that contains the Solar System + Objects data. From ae635c2b5dcc59b05cb0ba1f97175f8a1faf61af Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 4 Oct 2023 09:48:25 -0700 Subject: [PATCH 047/588] Use tox v4 syntax consistently The tox instructions under contributing documentation were using the legacy syntax (without the run keyword). Switch to the current syntax. 
--- docs/about/contributing-docs.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/about/contributing-docs.rst b/docs/about/contributing-docs.rst index 7edac8fb5f..f326f261e8 100644 --- a/docs/about/contributing-docs.rst +++ b/docs/about/contributing-docs.rst @@ -19,7 +19,7 @@ Use the tox_ ``docs`` environment for compiling the documentation: .. code-block:: bash - tox -e docs + tox run -e docs The built documentation is located in the ``docs/_build/html`` directory. @@ -36,7 +36,7 @@ Links in the documentation are validated in the GitHub Actions workflow, but you .. code-block:: bash - tox -e docs-linkcheck + tox run -e docs-linkcheck Submitting a pull request and sharing documentation drafts ========================================================== From 70b82c040f7e2bb9f1e11c440de4cbf58bdb62ae Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 17:12:23 +0000 Subject: [PATCH 048/588] chore(deps): update helm release ingress-nginx to v4.8.1 --- applications/ingress-nginx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index a99c6145ca..f8df8e3666 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.8.0 + version: 4.8.1 repository: https://kubernetes.github.io/ingress-nginx From 8e03d64c3406959cdd0cd38a171c3163d77948f2 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Oct 2023 10:11:02 -0700 Subject: [PATCH 049/588] Make nublado3 default on summit --- applications/nublado/values-summit.yaml | 1 - applications/nublado2/values-summit.yaml | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 
63d3cf7483..d1b4e147eb 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -92,7 +92,6 @@ jupyterhub: every: 300 maxAge: 2160000 hub: - baseUrl: "/n3" db: upgrade: true url: "postgresql://nublado3@postgresdb01.cp.lsst.org/nublado3" diff --git a/applications/nublado2/values-summit.yaml b/applications/nublado2/values-summit.yaml index 3f8f14c394..acbac59fe8 100644 --- a/applications/nublado2/values-summit.yaml +++ b/applications/nublado2/values-summit.yaml @@ -4,6 +4,7 @@ jupyterhub: annotations: nginx.ingress.kubernetes.io/auth-signin: "https://summit-lsp.lsst.codes/login" hub: + baseUrl: /n2 db: upgrade: true url: "postgresql://jovyan@postgresdb01.cp.lsst.org/jupyterhub" From 44be9f56583a6fcb9ff480b1006035f64cb0dcf7 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Oct 2023 10:30:49 -0700 Subject: [PATCH 050/588] Add data tests back in --- starters/empty/README.md | 16 +++ .../data/input/applications/argocd/README.md | 44 +++++++ .../input/applications/gafaelfawr/README.md | 124 ++++++++++++++++++ tests/data/input/applications/mobu/README.md | 35 +++++ .../data/input/applications/nublado/README.md | 98 ++++++++++++++ .../data/input/applications/portal/README.md | 57 ++++++++ .../input/applications/postgres/README.md | 25 ++++ tests/data/input/applications/tap/README.md | 68 ++++++++++ tests/data/input/starters/empty/README.md | 18 +++ .../data/input/starters/web-service/README.md | 32 +++++ 10 files changed, 517 insertions(+) create mode 100644 starters/empty/README.md create mode 100644 tests/data/input/applications/argocd/README.md create mode 100644 tests/data/input/applications/gafaelfawr/README.md create mode 100644 tests/data/input/applications/mobu/README.md create mode 100644 tests/data/input/applications/nublado/README.md create mode 100644 tests/data/input/applications/portal/README.md create mode 100644 tests/data/input/applications/postgres/README.md create mode 100644 
tests/data/input/applications/tap/README.md create mode 100644 tests/data/input/starters/empty/README.md create mode 100644 tests/data/input/starters/web-service/README.md diff --git a/starters/empty/README.md b/starters/empty/README.md new file mode 100644 index 0000000000..6b28c8a625 --- /dev/null +++ b/starters/empty/README.md @@ -0,0 +1,16 @@ +# + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) + +Helm starter chart for a new RSP service + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/argocd/README.md b/tests/data/input/applications/argocd/README.md new file mode 100644 index 0000000000..6f01cc888c --- /dev/null +++ b/tests/data/input/applications/argocd/README.md @@ -0,0 +1,44 @@ +# argo-cd + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) + +Kubernetes application manager + +**Homepage:** + +## Source Code + +* +* + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://argoproj.github.io/argo-helm | argo-cd | 5.43.3 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| argo-cd.configs.cm."resource.compareoptions" | string | `"ignoreAggregatedRoles: true\n"` | Configure resource comparison | +| argo-cd.configs.params."server.basehref" | string | `"/argo-cd"` | Base href for `index.html` when running under a 
reverse proxy | +| argo-cd.configs.params."server.insecure" | bool | `true` | Do not use TLS (this is terminated at the ingress) | +| argo-cd.configs.secret.createSecret | bool | `false` | Create the Argo CD secret (we manage this with Vault) | +| argo-cd.controller.metrics.applicationLabels.enabled | bool | `true` | Enable adding additional labels to `argocd_app_labels` metric | +| argo-cd.controller.metrics.applicationLabels.labels | list | `["name","instance"]` | Labels to add to `argocd_app_labels` metric | +| argo-cd.controller.metrics.enabled | bool | `true` | Enable controller metrics service | +| argo-cd.global.logging.format | string | `"json"` | Set the global logging format. Either: `text` or `json` | +| argo-cd.notifications.metrics.enabled | bool | `true` | Enable notifications metrics service | +| argo-cd.redis.metrics.enabled | bool | `true` | Enable Redis metrics service | +| argo-cd.repoServer.metrics.enabled | bool | `true` | Enable repo server metrics service | +| argo-cd.server.ingress.annotations | object | Rewrite requests to remove `/argo-cd/` prefix | Additional annotations to add to the Argo CD ingress | +| argo-cd.server.ingress.enabled | bool | `true` | Create an ingress for the Argo CD server | +| argo-cd.server.ingress.ingressClassName | string | `"nginx"` | Ingress class to use for Argo CD ingress | +| argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | Type of path expression for Argo CD ingress | +| argo-cd.server.ingress.paths | list | `["/argo-cd(/|$)(.*)"]` | Paths to route to Argo CD | +| argo-cd.server.metrics.enabled | bool | `true` | Enable server metrics service | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/gafaelfawr/README.md 
b/tests/data/input/applications/gafaelfawr/README.md new file mode 100644 index 0000000000..9c280647da --- /dev/null +++ b/tests/data/input/applications/gafaelfawr/README.md @@ -0,0 +1,124 @@ +# gafaelfawr + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 9.3.0](https://img.shields.io/badge/AppVersion-9.3.0-informational?style=flat-square) + +Authentication and identity system + +**Homepage:** + +## Source Code + +* + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://lsst-sqre.github.io/charts/ | redis | 1.0.6 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the Gafaelfawr frontend pod | +| cloudsql.affinity | object | `{}` | Affinity rules for the Cloud SQL Proxy pod | +| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. 
| +| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | +| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | +| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | +| cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | +| cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | +| cloudsql.resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | +| cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | +| cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | +| config.cilogon.clientId | string | `""` | CILogon client ID. One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. 
| +| config.cilogon.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | +| config.cilogon.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | +| config.cilogon.groupsClaim | string | `"isMemberOf"` | Claim from which to get the group membership (only used if not retrieved from LDAP) | +| config.cilogon.loginParams | object | `{"skin":"LSST"}` | Additional parameters to add | +| config.cilogon.test | bool | `false` | Whether to use the test instance of CILogon | +| config.cilogon.uidClaim | string | `"uidNumber"` | Claim from which to get the numeric UID (only used if not retrieved from LDAP or Firestore) | +| config.cilogon.usernameClaim | string | `"uid"` | Claim from which to get the username | +| config.databaseUrl | string | None, must be set if neither `cloudsql.enabled` nor | URL for the PostgreSQL database `config.internalDatabase` are true | +| config.errorFooter | string | `""` | HTML footer to add to any login error page (will be enclosed in a

tag). | +| config.firestore.project | string | Firestore support is disabled | If set, assign UIDs and GIDs using Google Firestore in the given project. Cloud SQL must be enabled and the Cloud SQL service account must have read/write access to that Firestore instance. | +| config.forgerock.url | string | ForgeRock Identity Management support is disabled | If set, obtain the GIDs for groups from this ForgeRock Identity Management server. | +| config.forgerock.username | string | None, must be set if `config.forgerock.url` is set | Username to use for HTTP Basic authentication to ForgeRock Identity Managemnt. The corresponding password must be in the `forgerock-passsword` key of the Gafaelfawr Vault secret. | +| config.github.clientId | string | `""` | GitHub client ID. One and only one of this, `config.cilogon.clientId`, or `config.oidc.clientId` must be set. | +| config.groupMapping | object | `{}` | Defines a mapping of scopes to groups that provide that scope. See [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes. | +| config.initialAdmins | list | `[]` | Usernames to add as administrators when initializing a new database. Used only if there are no administrators. | +| config.internalDatabase | bool | `false` | Whether to use the PostgreSQL server internal to the Kubernetes cluster | +| config.knownScopes | object | See the `values.yaml` file | Names and descriptions of all scopes in use. This is used to populate the new token creation page. Only scopes listed here will be options when creating a new token. See [DMTN-235](https://dmtn-235.lsst.io/). 
| +| config.ldap.addUserGroup | bool | `false` | Whether to synthesize a user private group for each user with a GID equal to their UID | +| config.ldap.emailAttr | string | `"mail"` | Attribute containing the user's email address | +| config.ldap.gidAttr | string | Use GID of user private group | Attribute containing the user's primary GID (set to `gidNumber` for most LDAP servers) | +| config.ldap.groupBaseDn | string | None, must be set | Base DN for the LDAP search to find a user's groups | +| config.ldap.groupMemberAttr | string | `"member"` | Member attribute of the object class. Values must match the username returned in the token from the OpenID Connect authentication server. | +| config.ldap.groupObjectClass | string | `"posixGroup"` | Object class containing group information | +| config.ldap.kerberosConfig | string | Use anonymous binds | Enable GSSAPI (Kerberos) binds to LDAP using this `krb5.conf` file. If set, `ldap-keytab` must be set in the Gafaelfawr Vault secret. Set either this or `userDn`, not both. | +| config.ldap.nameAttr | string | `"displayName"` | Attribute containing the user's full name | +| config.ldap.uidAttr | string | Get UID from upstream authentication provider | Attribute containing the user's UID number (set to `uidNumber` for most LDAP servers) | +| config.ldap.url | string | Do not use LDAP | LDAP server URL from which to retrieve user group information | +| config.ldap.userBaseDn | string | Get user metadata from the upstream authentication provider | Base DN for the LDAP search to find a user's entry | +| config.ldap.userDn | string | Use anonymous binds | Bind DN for simple bind authentication. If set, `ldap-secret` must be set in the Gafaelfawr Vault secret. Set this or `kerberosConfig`, not both. 
| +| config.ldap.userSearchAttr | string | `"uid"` | Search attribute containing the user's username | +| config.logLevel | string | `"INFO"` | Choose from the text form of Python logging levels | +| config.oidc.audience | string | Value of `config.oidc.clientId` | Audience for the JWT token | +| config.oidc.clientId | string | `""` | Client ID for generic OpenID Connect support. One and only one of this, `config.cilogon.clientId`, or `config.github.clientId` must be set. | +| config.oidc.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | +| config.oidc.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | +| config.oidc.groupsClaim | string | `"isMemberOf"` | Claim from which to get the group membership (only used if not retrieved from LDAP) | +| config.oidc.issuer | string | None, must be set | Issuer for the JWT token | +| config.oidc.loginParams | object | `{}` | Additional parameters to add to the login request | +| config.oidc.loginUrl | string | None, must be set | URL to which to redirect the user for authorization | +| config.oidc.scopes | list | `["openid"]` | Scopes to request from the OpenID Connect provider | +| config.oidc.tokenUrl | string | None, must be set | URL from which to retrieve the token for the user | +| config.oidc.uidClaim | string | `"uidNumber"` | Claim from which to get the numeric UID (only used if not retrieved from LDAP or Firestore) | +| config.oidc.usernameClaim | string | `"sub"` | Claim from which to get the username | +| config.oidcServer.enabled | bool | `false` | Whether to support OpenID Connect clients. If set to true, `oidc-server-secrets` must be set in the Gafaelfawr secret. 
| +| config.proxies | list | [`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`] | List of netblocks used for internal Kubernetes IP addresses, used to determine the true client IP for logging | +| config.quota | object | `{}` | Quota settings (see [Quotas](https://gafaelfawr.lsst.io/user-guide/helm.html#quotas)). | +| config.slackAlerts | bool | `false` | Whether to send certain serious alerts to Slack. If `true`, the `slack-webhook` secret must also be set. | +| config.tokenLifetimeMinutes | int | `43200` (30 days) | Session length and token expiration (in minutes) | +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Gafaelfawr image | +| image.repository | string | `"ghcr.io/lsst-sqre/gafaelfawr"` | Gafaelfawr image to use | +| image.tag | string | The appVersion of the chart | Tag of Gafaelfawr image to use | +| ingress.additionalHosts | list | `[]` | Defines additional FQDNs for Gafaelfawr. This doesn't work for cookie or browser authentication, but for token-based services like git-lfs or the webdav server it does. 
| +| maintenance.affinity | object | `{}` | Affinity rules for Gafaelfawr maintenance and audit pods | +| maintenance.auditSchedule | string | `"30 3 * * *"` | Cron schedule string for Gafaelfawr data consistency audit (in UTC) | +| maintenance.maintenanceSchedule | string | `"5 * * * *"` | Cron schedule string for Gafaelfawr periodic maintenance (in UTC) | +| maintenance.nodeSelector | object | `{}` | Node selection rules for Gafaelfawr maintenance and audit pods | +| maintenance.podAnnotations | object | `{}` | Annotations for Gafaelfawr maintenance and audit pods | +| maintenance.resources | object | `{}` | Resource limits and requests for Gafaelfawr maintenance and audit pods | +| maintenance.tolerations | list | `[]` | Tolerations for Gafaelfawr maintenance and audit pods | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selector rules for the Gafaelfawr frontend pod | +| operator.affinity | object | `{}` | Affinity rules for the token management pod | +| operator.nodeSelector | object | `{}` | Node selection rules for the token management pod | +| operator.podAnnotations | object | `{}` | Annotations for the token management pod | +| operator.resources | object | `{}` | Resource limits and requests for the Gafaelfawr Kubernetes operator | +| operator.tolerations | list | `[]` | Tolerations for the token management pod | +| podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | +| redis.affinity | object | `{}` | Affinity rules for the Redis pod | +| redis.config.secretKey | string | `"redis-password"` | Key inside secret from which to get the Redis password (do not change) | +| redis.config.secretName | string | `"gafaelfawr-secret"` | Name of secret containing Redis password (may require changing if fullnameOverride is set) | +| redis.nodeSelector | object | `{}` | Node selection rules for the Redis pod | +| redis.persistence.accessMode | string | `"ReadWriteOnce"` | 
Access mode of storage to request | +| redis.persistence.enabled | bool | `true` | Whether to persist Redis storage and thus tokens. Setting this to false will use `emptyDir` and reset all tokens on every restart. Only use this for a test deployment. | +| redis.persistence.size | string | `"1Gi"` | Amount of persistent storage to request | +| redis.persistence.storageClass | string | `""` | Class of storage to request | +| redis.persistence.volumeClaimName | string | `""` | Use an existing PVC, not dynamic provisioning. If this is set, the size, storageClass, and accessMode settings are ignored. | +| redis.podAnnotations | object | `{}` | Pod annotations for the Redis pod | +| redis.resources | object | See `values.yaml` | Resource limits and requests for the Redis pod | +| redis.tolerations | list | `[]` | Tolerations for the Redis pod | +| replicaCount | int | `1` | Number of web frontend pods to start | +| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | +| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/mobu/README.md b/tests/data/input/applications/mobu/README.md new file mode 100644 index 0000000000..9a33cafce2 --- /dev/null +++ b/tests/data/input/applications/mobu/README.md @@ -0,0 +1,35 @@ +# mobu + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 6.1.1](https://img.shields.io/badge/AppVersion-6.1.1-informational?style=flat-square) + +Continuous integration testing + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the mobu frontend pod | +| config.autostart | list | `[]` | Autostart specification. 
Must be a list of mobu flock specifications. Each flock listed will be automatically started when mobu is started. | +| config.debug | bool | `false` | If set to true, include the output from all flocks in the main mobu log and disable structured JSON logging. | +| config.disableSlackAlerts | bool | `false` | If set to true, do not configure mobu to send alerts to Slack. | +| config.pathPrefix | string | `"/mobu"` | Prefix for mobu's API routes. | +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the mobu image | +| image.repository | string | `"ghcr.io/lsst-sqre/mobu"` | mobu image to use | +| image.tag | string | The appVersion of the chart | Tag of mobu image to use | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selector rules for the mobu frontend pod | +| podAnnotations | object | `{}` | Annotations for the mobu frontend pod | +| resources | object | `{}` | Resource limits and requests for the mobu frontend pod | +| tolerations | list | `[]` | Tolerations for the mobu frontend pod | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/nublado/README.md b/tests/data/input/applications/nublado/README.md new file mode 100644 index 0000000000..0e38ef82c8 --- /dev/null +++ b/tests/data/input/applications/nublado/README.md @@ -0,0 +1,98 @@ +# nublado + +![Version: 
1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 0.7.1](https://img.shields.io/badge/AppVersion-0.7.1-informational?style=flat-square) + +JupyterHub and custom spawner for the Rubin Science Platform + +**Homepage:** + +## Source Code + +* +* + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://jupyterhub.github.io/helm-chart/ | jupyterhub | 2.0.0 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| controller.affinity | object | `{}` | Affinity rules for the lab controller pod | +| controller.config.fileserver.enabled | bool | `false` | Enable fileserver management | +| controller.config.fileserver.image | string | `"ghcr.io/lsst-sqre/worblehat"` | Image for fileserver container | +| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user fileservers | +| controller.config.fileserver.pullPolicy | string | `"IfNotPresent"` | Pull policy for fileserver container | +| controller.config.fileserver.tag | string | `"0.1.0"` | Tag for fileserver container | +| controller.config.fileserver.timeout | int | `3600` | Timeout for user fileservers, in seconds | +| controller.config.images.aliasTags | list | `[]` | Additional tags besides `recommendedTag` that should be recognized as aliases. | +| controller.config.images.cycle | string | `nil` | Restrict images to this SAL cycle, if given. | +| controller.config.images.numDailies | int | `3` | Number of most-recent dailies to prepull. | +| controller.config.images.numReleases | int | `1` | Number of most-recent releases to prepull. | +| controller.config.images.numWeeklies | int | `2` | Number of most-recent weeklies to prepull. | +| controller.config.images.pin | list | `[]` | List of additional image tags to prepull. 
Listing the image tagged as recommended here is recommended when using a Docker image source to ensure its name can be expanded properly in the menu. | +| controller.config.images.recommendedTag | string | `"recommended"` | Tag marking the recommended image (shown first in the menu) | +| controller.config.images.source | object | None, must be specified | Source for prepulled images. For Docker, set `type` to `docker`, `registry` to the hostname and `repository` to the name of the repository. For Google Artifact Repository, set `type` to `google`, `location` to the region, `projectId` to the Google project, `repository` to the name of the repository, and `image` to the name of the image. | +| controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab. | +| controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | +| controller.config.lab.initcontainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image reference), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with `allowPrivilegeEscalation` true. Otherwise it will, run as UID 1000. | +| controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | +| controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | +| controller.config.lab.sizes | object | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. 
Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI prefixes for memory are supported. `large`) | +| controller.config.lab.volumes | list | `[]` | Volumes that should be mounted in lab pods. This supports NFS, HostPath, and PVC volume types (differentiated in source.type) | +| controller.config.safir.logLevel | string | `"INFO"` | Level of Python logging | +| controller.config.safir.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | +| controller.googleServiceAccount | string | None, must be set when using Google Artifact Registry | If Google Artifact Registry is used as the image source, the Google service account that has an IAM binding to the `nublado-controller` Kubernetes service account and has the Artifact Registry reader role | +| controller.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the nublado image | +| controller.image.repository | string | `"ghcr.io/lsst-sqre/jupyterlab-controller"` | nublado image to use | +| controller.image.tag | string | The appVersion of the chart | Tag of nublado image to use | +| controller.ingress.annotations | object | `{}` | Additional annotations to add for the lab controller pod ingress | +| controller.nodeSelector | object | `{}` | Node selector rules for the lab controller pod | +| controller.podAnnotations | object | `{}` | Annotations for the lab controller pod | +| controller.resources | object | `{}` | Resource limits and requests for the lab controller pod | +| controller.slackAlerts | bool | `false` | Whether to enable Slack alerts. If set to true, `slack_webhook` must be set in the corresponding Nublado Vault secret. 
| +| controller.tolerations | list | `[]` | Tolerations for the lab controller pod | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| hub.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | +| hub.timeout.spawn | int | `600` | Timeout for the Kubernetes spawn process in seconds. (Allow long enough to pull uncached images if needed.) | +| hub.timeout.startup | int | `90` | Timeout for JupyterLab to start. Currently this sometimes takes over 60 seconds for reasons we don't understand. | +| jupyterhub.cull.enabled | bool | `true` | Enable the lab culler. | +| jupyterhub.cull.every | int | 600 (10 minutes) | How frequently to check for idle labs in seconds | +| jupyterhub.cull.maxAge | int | 5184000 (60 days) | Maximum age of a lab regardless of activity | +| jupyterhub.cull.removeNamedServers | bool | `true` | Whether to remove named servers when culling their lab | +| jupyterhub.cull.timeout | int | 2592000 (30 days) | Default idle timeout before the lab is automatically deleted in seconds | +| jupyterhub.cull.users | bool | `true` | Whether to log out the server when culling their lab | +| jupyterhub.hub.authenticatePrometheus | bool | `false` | Whether to require metrics requests to be authenticated | +| jupyterhub.hub.baseUrl | string | `"/nb"` | Base URL on which JupyterHub listens | +| jupyterhub.hub.containerSecurityContext | object | `{"allowPrivilegeEscalation":false,"runAsGroup":768,"runAsUser":768}` | Security context for JupyterHub container | +| jupyterhub.hub.db.password | string | Comes from nublado-secret | Database password (not used) | +| jupyterhub.hub.db.type | string | `"postgres"` | Type of database 
to use | +| jupyterhub.hub.db.url | string | Use the in-cluster PostgreSQL installed by Phalanx | URL of PostgreSQL server | +| jupyterhub.hub.existingSecret | string | `"nublado-secret"` | Existing secret to use for private keys | +| jupyterhub.hub.extraEnv | object | Gets `JUPYTERHUB_CRYPT_KEY` from `nublado-secret` | Additional environment variables to set | +| jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | +| jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | +| jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub | +| jupyterhub.hub.image.tag | string | `"0.3.2"` | Tag of image to use for JupyterHub | +| jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | +| jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | +| jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | +| jupyterhub.ingress.enabled | bool | `false` | Whether to enable the default ingress | +| jupyterhub.prePuller.continuous.enabled | bool | `false` | Whether to run the JupyterHub continuous prepuller (the Nublado controller does its own prepulling) | +| jupyterhub.prePuller.hook.enabled | bool | `false` | Whether to run the JupyterHub hook prepuller (the Nublado controller does its own prepulling) | +| jupyterhub.proxy.chp.networkPolicy.interNamespaceAccessLabels | string | `"accept"` | Enable access to the proxy from other namespaces, since we put each user's lab environment in its own namespace | +| jupyterhub.proxy.service.type | string | `"ClusterIP"` | Only expose the proxy to the 
cluster, overriding the default of exposing the proxy directly to the Internet | +| jupyterhub.scheduling.userPlaceholder.enabled | bool | `false` | Whether to spawn placeholder pods representing fake users to force autoscaling in advance of running out of resources | +| jupyterhub.scheduling.userScheduler.enabled | bool | `false` | Whether the user scheduler should be enabled | +| jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | Whether to configure iptables to block cloud metadata endpoints. This is unnecessary in our environments (they are blocked by cluster configuration) and thus is disabled to reduce complexity. | +| jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | Start command for labs | +| jupyterhub.singleuser.defaultUrl | string | `"/lab"` | Default URL prefix for lab endpoints | +| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/portal/README.md b/tests/data/input/applications/portal/README.md new file mode 100644 index 0000000000..ca915dc200 --- /dev/null +++ b/tests/data/input/applications/portal/README.md @@ -0,0 +1,57 @@ +# portal + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: suit-2023.1.5](https://img.shields.io/badge/AppVersion-suit--2023.1.5-informational?style=flat-square) + +Rubin Science Platform Portal Aspect + +## Source Code + +* +* + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://lsst-sqre.github.io/charts/ | redis | 1.0.6 | + +## Values + +| Key | Type | Default | Description | 
+|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the Portal pod | +| config.cleanupInterval | string | `"36h"` | How long results should be retained before being deleted | +| config.debug | string | `"FALSE"` | Set to `TRUE` to enable service debugging | +| config.hipsUrl | string | `/api/hips/images/color_gri` in the local Science Platform | URL for default HiPS service | +| config.ssotap | string | `""` | Endpoint under `/api/` for the DP0.3 SSO TAP service on the instance, if present | +| config.visualizeFitsSearchPath | string | `"/datasets"` | Search path for FITS files | +| config.volumes.configHostPath | string | Use an `emptyDir` | hostPath to mount as configuration. Set either this of `configNfs`, not both. | +| config.volumes.configNfs | object | Use an `emptyDir` | NFS information for a configuration. If set, must have keys for path and server, Set either this of `configHostPath`, not both. | +| config.volumes.workareaHostPath | string | Use an `emptyDir` | hostPath to mount as a shared work area. Set either this or `workareaNfs`, not both. | +| config.volumes.workareaNfs | object | Use an `emptyDir` | NFS information for a shared work area. If set, must have keys for path and server. Set either this or `workareaHostPath`, not both. 
| +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Portal image | +| image.repository | string | `"ipac/suit"` | Portal image to use | +| image.tag | string | The appVersion of the chart | Tag of Portal image to use | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selector rules for the Portal pod | +| podAnnotations | object | `{}` | Annotations for the Portal pod | +| redis.affinity | object | `{}` | Affinity rules for the Redis pod | +| redis.config.secretKey | string | `"ADMIN_PASSWORD"` | Key inside secret from which to get the Redis password (do not change) | +| redis.config.secretName | string | `"portal-secret"` | Name of secret containing Redis password (may require changing if fullnameOverride is set) | +| redis.nodeSelector | object | `{}` | Node selection rules for the Redis pod | +| redis.persistence.enabled | bool | `false` | Whether to persist Redis storage. Setting this to false will use `emptyDir` and reset all data on every restart. | +| redis.podAnnotations | object | `{}` | Pod annotations for the Redis pod | +| redis.resources | object | See `values.yaml` | Resource limits and requests for the Redis pod | +| redis.tolerations | list | `[]` | Tolerations for the Redis pod | +| replicaCount | int | `1` | Number of pods to start | +| resources | object | `{"limits":{"cpu":2,"memory":"6Gi"}}` | Resource limits and requests. The Portal will use (by default) 93% of container RAM. 
This is a smallish Portal; tweak it as you need to in instance definitions in Phalanx. | +| securityContext | object | `{}` | Security context for the Portal pod | +| tolerations | list | `[]` | Tolerations for the Portal pod | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/postgres/README.md b/tests/data/input/applications/postgres/README.md new file mode 100644 index 0000000000..612cc4d6b1 --- /dev/null +++ b/tests/data/input/applications/postgres/README.md @@ -0,0 +1,25 @@ +# postgres + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 0.0.5](https://img.shields.io/badge/AppVersion-0.0.5-informational?style=flat-square) + +Postgres RDBMS for LSP + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| debug | string | `""` | Set to non-empty to enable debugging output | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the postgres image | +| image.repository | string | `"lsstsqre/lsp-postgres"` | postgres image to use | +| image.tag | string | The appVersion of the chart | Tag of postgres image to use | +| postgresStorageClass | string | `"standard"` | Storage class for postgres volume. Set to appropriate value for your deployment: at GKE, "standard" (if you want SSD, "premium-rwo", but if you want a good database maybe it's better to use a cloud database?), on Rubin Observatory Rancher, "rook-ceph-block", elsewhere probably "standard" | +| postgresVolumeSize | string | `"1Gi"` | Volume size for postgres. 
It can generally be very small | +| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/tap/README.md b/tests/data/input/applications/tap/README.md new file mode 100644 index 0000000000..19b005f8d0 --- /dev/null +++ b/tests/data/input/applications/tap/README.md @@ -0,0 +1,68 @@ +# cadc-tap + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.4.5](https://img.shields.io/badge/AppVersion-1.4.5-informational?style=flat-square) + +IVOA TAP service + +## Source Code + +* +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the Gafaelfawr frontend pod | +| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"` | Datalink payload URL | +| config.gafaelfawrHost | string | Value of `ingress.host` | Gafaelfawr hostname to get user information from a token | +| config.gcsBucket | string | None, must be set | Name of GCS bucket in which to store results | +| config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) | +| config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket | +| config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. 
| +| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data | +| fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the tap image | +| image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-service"` | tap image to use | +| image.tag | string | The appVersion of the chart | Tag of tap image to use | +| ingress.anonymousAnnotations | object | `{}` | Additional annotations to use for endpoints that allow anonymous access, such as `/capabilities` and `/availability` | +| ingress.authenticatedAnnotations | object | `{}` | Additional annotations to use for endpoints that are authenticated, such as `/sync`, `/async`, and `/tables` | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selector rules for the Gafaelfawr frontend pod | +| podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | +| qserv.host | string | `"mock-qserv:3306"` (the mock QServ) | QServ hostname:port to connect to | +| qserv.mock.affinity | object | `{}` | Affinity rules for the mock QServ pod | +| qserv.mock.enabled | bool | `true` | Spin up a container to pretend to be QServ. 
| +| qserv.mock.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the mock QServ image | +| qserv.mock.image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-mock-qserv"` | Mock QServ image to use | +| qserv.mock.image.tag | string | The appVersion of the chart | Tag of mock QServ image to use | +| qserv.mock.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | +| qserv.mock.podAnnotations | object | `{}` | Annotations for the mock QServ pod | +| qserv.mock.resources | object | `{}` | Resource limits and requests for the mock QServ pod | +| qserv.mock.tolerations | list | `[]` | Tolerations for the mock QServ pod | +| replicaCount | int | `1` | Number of pods to start | +| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | +| tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | +| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | +| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| +| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image | +| tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | +| tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | +| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | +| tapSchema.tolerations | list | `[]` | Tolerations for the mock QServ pod | +| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | +| uws.affinity | object | `{}` | Affinity rules for the UWS database pod | +| uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | +| uws.image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-uws-db"` | UWS database image to use | +| uws.image.tag | string | The appVersion of the chart | Tag of UWS database image to use | +| uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | +| uws.podAnnotations | object | `{}` | Annotations for the UWS database pod | +| uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | +| uws.tolerations | list | `[]` | Tolerations for the UWS database pod | +| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/starters/empty/README.md b/tests/data/input/starters/empty/README.md new file mode 100644 index 0000000000..d4e2901eed --- /dev/null +++ b/tests/data/input/starters/empty/README.md @@ -0,0 +1,18 @@ +# + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) + +Helm starter chart for a new RSP service + +**Homepage:** > + +## Values + +| 
Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/starters/web-service/README.md b/tests/data/input/starters/web-service/README.md new file mode 100644 index 0000000000..55f6d0c3f9 --- /dev/null +++ b/tests/data/input/starters/web-service/README.md @@ -0,0 +1,32 @@ +# + +![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.1.0](https://img.shields.io/badge/AppVersion-0.1.0-informational?style=flat-square) + +Helm starter chart for a new RSP service + +**Homepage:** > + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of deployment pods | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the image | +| image.repository | string | `"ghcr.io/lsst-sqre/"` | Image to use in the deployment | +| image.tag | 
string | `""` | Overrides the image tag whose default is the chart appVersion. | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the deployment pod | +| podAnnotations | object | `{}` | Annotations for the deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the deployment pod | +| tolerations | list | `[]` | Tolerations for the deployment pod | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) From 32edbea4f12118afb9e9e7d6f62d61a3b48ff8dc Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Oct 2023 10:35:22 -0700 Subject: [PATCH 051/588] Remove cachemachine/moneypenny/nublado2 from base and tts --- applications/cachemachine/values-base.yaml | 25 ----- .../cachemachine/values-tucson-teststand.yaml | 23 ----- applications/moneypenny/values-base.yaml | 15 --- .../moneypenny/values-tucson-teststand.yaml | 15 --- applications/nublado2/values-base.yaml | 81 ---------------- .../nublado2/values-tucson-teststand.yaml | 95 ------------------- environments/values-base.yaml | 3 - environments/values-tucson-teststand.yaml | 3 - 8 files changed, 260 deletions(-) delete mode 100644 applications/cachemachine/values-base.yaml delete mode 100644 applications/cachemachine/values-tucson-teststand.yaml delete mode 100644 applications/moneypenny/values-base.yaml delete mode 100644 applications/moneypenny/values-tucson-teststand.yaml delete mode 100644 applications/nublado2/values-base.yaml delete mode 100644 applications/nublado2/values-tucson-teststand.yaml diff --git a/applications/cachemachine/values-base.yaml b/applications/cachemachine/values-base.yaml deleted file mode 100644 index 2a5640986d..0000000000 --- a/applications/cachemachine/values-base.yaml +++ /dev/null @@ 
-1,25 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": { - "jupyterlab": "ok" - }, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "ts-dockerhub.lsst.org", - "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0032", - "num_releases": 0, - "num_weeklies": 3, - "num_dailies": 2, - "cycle": 32, - "alias_tags": [ - "latest", - "latest_daily", - "latest_weekly" - ] - } - ] - } diff --git a/applications/cachemachine/values-tucson-teststand.yaml b/applications/cachemachine/values-tucson-teststand.yaml deleted file mode 100644 index f88f37ba79..0000000000 --- a/applications/cachemachine/values-tucson-teststand.yaml +++ /dev/null @@ -1,23 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "ts-dockerhub.lsst.org", - "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0032", - "num_releases": 1, - "num_weeklies": 3, - "num_dailies": 2, - "cycle": 32, - "alias_tags": [ - "latest", - "latest_daily", - "latest_weekly" - ] - } - ] - } diff --git a/applications/moneypenny/values-base.yaml b/applications/moneypenny/values-base.yaml deleted file mode 100644 index e8bf412e25..0000000000 --- a/applications/moneypenny/values-base.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: nfs-jhome.ls.lsst.org - path: /jhome diff --git a/applications/moneypenny/values-tucson-teststand.yaml b/applications/moneypenny/values-tucson-teststand.yaml deleted file mode 100644 index 845233c931..0000000000 --- a/applications/moneypenny/values-tucson-teststand.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - 
volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: nfs-jhome.tu.lsst.org - path: /jhome diff --git a/applications/nublado2/values-base.yaml b/applications/nublado2/values-base.yaml deleted file mode 100644 index f546535e2f..0000000000 --- a/applications/nublado2/values-base.yaml +++ /dev/null @@ -1,81 +0,0 @@ -jupyterhub: - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - ingress: - hosts: ["base-lsp.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://base-lsp.lsst.codes/login" - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/dds" - hub: - baseUrl: "/n2" - db: - upgrade: true - url: "postgresql://jovyan@postgresdb01.ls.lsst.org/jupyterhub" - -config: - base_url: "https://base-lsp.lsst.codes" - butler_secret_path: "secret/k8s_operator/base-lsp.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/base-lsp.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - LSST_DDS_INTERFACE: net1 - LSST_DDS_PARTITION_PREFIX: base - LSST_SITE: base - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "oods" - volumes: - - name: home - nfs: - path: /jhome - server: nfs-jhome.ls.lsst.org - - name: project - nfs: - path: /project - server: nfs-project.ls.lsst.org - - name: scratch - nfs: - path: /scratch - server: nfs-scratch.ls.lsst.org - - name: datasets - nfs: - path: /lsstdata - server: nfs-lsstdata.ls.lsst.org - - name: auxtel-butler - nfs: - path: /auxtel/repo/LATISS - server: nfs-auxtel.ls.lsst.org - - name: auxtel-oods - nfs: - path: /auxtel/lsstdata/BTS/auxtel - server: nfs-auxtel.ls.lsst.org - readOnly: true - - name: obs-env - nfs: - path: 
/obs-env - server: nfs-obsenv.ls.lsst.org - volume_mounts: - - name: home - mountPath: /home - - name: datasets - mountPath: /datasets - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - - name: auxtel-butler - mountPath: /repo/LATISS - - name: auxtel-oods - mountPath: /data/lsstdata/BTS/auxtel - readOnly: true - - name: obs-env - mountPath: /net/obs-env diff --git a/applications/nublado2/values-tucson-teststand.yaml b/applications/nublado2/values-tucson-teststand.yaml deleted file mode 100644 index 1594760b74..0000000000 --- a/applications/nublado2/values-tucson-teststand.yaml +++ /dev/null @@ -1,95 +0,0 @@ -jupyterhub: - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - ingress: - hosts: ["tucson-teststand.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://tucson-teststand.lsst.codes/login" - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/dds" - hub: - baseUrl: "/n2" - db: - upgrade: true - url: "postgresql://jovyan@squoint.tu.lsst.org/jupyterhub" - -config: - base_url: "https://tucson-teststand.lsst.codes" - butler_secret_path: "secret/k8s_operator/tucson-teststand.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/tucson-teststand.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - LSST_DDS_INTERFACE: net1 - LSST_DDS_PARTITION_PREFIX: tucson - LSST_SITE: tucson - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "oods" - volumes: - - name: home - nfs: - path: /jhome - server: nfs-jhome.tu.lsst.org - - name: project - nfs: - path: /project - server: nfs-project.tu.lsst.org - - name: scratch - nfs: - path: /scratch - server: nfs-scratch.tu.lsst.org - - 
name: datasets - nfs: - path: /lsstdata - server: nfs-lsstdata.tu.lsst.org - - name: auxtel-butler - nfs: - path: /auxtel/repo/LATISS - server: nfs-auxtel.tu.lsst.org - - name: auxtel-oods - nfs: - path: /auxtel/lsstdata/TTS/auxtel - server: nfs-auxtel.tu.lsst.org - readOnly: true - - name: comcam-butler - nfs: - path: /repo/LSSTComCam - server: comcam-archiver.tu.lsst.org - - name: comcam-oods - nfs: - path: /lsstdata/TTS/comcam - server: comcam-archiver.tu.lsst.org - readOnly: true - - name: obs-env - nfs: - path: /obs-env - server: nfs-obsenv.tu.lsst.org - volume_mounts: - - name: home - mountPath: /home - - name: datasets - mountPath: /datasets - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - - name: auxtel-butler - mountPath: /repo/LATISS - - name: auxtel-oods - mountPath: /data/lsstdata/TTS/auxtel - readOnly: true - - name: comcam-butler - mountPath: /repo/LSSTComCam - - name: comcam-oods - mountPath: /data/lsstdata/TTS/comcam - readOnly: true - - name: obs-env - mountPath: /net/obs-env diff --git a/environments/values-base.yaml b/environments/values-base.yaml index 67f73d44a9..1b0108032f 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -4,12 +4,9 @@ vaultPathPrefix: secret/k8s_operator/base-lsp.lsst.codes applications: argo-workflows: true - cachemachine: true exposurelog: true - moneypenny: true narrativelog: true nublado: true - nublado2: true portal: true sasquatch: true squareone: true diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index d1fa87345a..03e0136f74 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -4,12 +4,9 @@ vaultPathPrefix: secret/k8s_operator/tucson-teststand.lsst.codes applications: argo-workflows: true - cachemachine: true exposurelog: true - moneypenny: true narrativelog: true nublado: true - nublado2: true portal: true sasquatch: true squareone: true From 
686dea91f18e687b256355d16215951191f33b34 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 4 Oct 2023 10:48:24 -0700 Subject: [PATCH 052/588] disable telegraf connector at T&S sites --- applications/sasquatch/values-base.yaml | 2 +- applications/sasquatch/values-summit.yaml | 2 +- applications/sasquatch/values-tucson-teststand.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index ba96c6ff6b..907f25f040 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -133,7 +133,7 @@ kafka-connect-manager: topicsRegex: "lsst.sal.MTCamera|lsst.sal.MTHeaderService|lsst.sal.MTOODS" telegraf-kafka-consumer: - enabled: true + enabled: false kafkaConsumers: auxtel: enabled: true diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 3ea7944186..a82397b46d 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -119,7 +119,7 @@ kafka-connect-manager: topicsRegex: ".*LaserTracker" telegraf-kafka-consumer: - enabled: true + enabled: false kafkaConsumers: auxtel: enabled: true diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 7cc7292865..cd2e0e01bd 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -43,7 +43,7 @@ influxdb2: hostname: tucson-teststand.lsst.codes telegraf-kafka-consumer: - enabled: true + enabled: false kafkaConsumers: auxtel: enabled: true From 0c5f3cae358c38239cf60b377ec763655e920d96 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 5 Oct 2023 10:26:54 -0700 Subject: [PATCH 053/588] Add init containers for T&S sites --- applications/nublado/values-base.yaml | 11 +++++++++++ applications/nublado/values-summit.yaml | 11 +++++++++++ 
applications/nublado/values-tucson-teststand.yaml | 11 +++++++++++ 3 files changed, 33 insertions(+) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 5e7eb18580..e1d3af2ad2 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -21,6 +21,17 @@ controller: LSST_SITE: "base" PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" + initContainers: + - name: "initdir" + image: "ghcr.io/lsst-sqre/initdir:0.0.4" + privileged: true + volumes: + - containerPath: "/home" + mode: "rw" + source: + serverPath: "/jhome" + server: "nfs-jhome.ls.lsst.org" + type: "nfs" volumes: - containerPath: "/home" mode: "rw" diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index d1b4e147eb..ac9585fefb 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -21,6 +21,17 @@ controller: LSST_SITE: "summit" PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" + initContainers: + - name: "initdir" + image: "ghcr.io/lsst-sqre/initdir:0.0.4" + privileged: true + volumes: + - containerPath: "/home" + mode: "rw" + source: + serverPath: "/jhome" + server: "nfs1.cp.lsst.org" + type: "nfs" volumes: - containerPath: "/home" mode: "rw" diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 82a329ec79..df1bafd98a 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -21,6 +21,17 @@ controller: LSST_SITE: tucson PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" + initContainers: + - name: "initdir" + image: "ghcr.io/lsst-sqre/initdir:0.0.4" + privileged: true + volumes: + - containerPath: "/home" + mode: "rw" + source: + serverPath: "/jhome" + server: 
"nfs-jhome.tu.lsst.org" + type: "nfs" volumes: - containerPath: "/home" mode: "rw" From 572a5fa6fdc0dcea0e301d9969ea16284528ceef Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 17:53:02 -0400 Subject: [PATCH 054/588] App approle for roundtable-dev vault --- .../values-roundtable-dev.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/applications/vault-secrets-operator/values-roundtable-dev.yaml b/applications/vault-secrets-operator/values-roundtable-dev.yaml index e69de29bb2..1e40e6f933 100644 --- a/applications/vault-secrets-operator/values-roundtable-dev.yaml +++ b/applications/vault-secrets-operator/values-roundtable-dev.yaml @@ -0,0 +1,14 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_SECRET_ID + vault: + authMethod: approle From 98450a000a290a73789d0c9db84723a3e833cb70 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 17:54:35 -0400 Subject: [PATCH 055/588] Migrate vaultPathPrefix for roundtable-dev --- environments/values-roundtable-dev.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index fb2c1569af..514fe3f0a5 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -3,7 +3,8 @@ fqdn: roundtable-dev.lsst.cloud onepassword: connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP roundtable-dev.lsst.cloud" -vaultPathPrefix: secret/k8s_operator/roundtable-dev.lsst.cloud +vaultUrl: "https://vault.lsst.codes" +vaultPathPrefix: secret/phalanx/roundtable-dev applications: giftless: true From 7fca67bc7209e73b2f532cdfee0b698146280115 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 18:29:00 -0400 Subject: [PATCH 056/588] Add 
secrets.yaml for giftless --- applications/giftless/secrets.yaml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 applications/giftless/secrets.yaml diff --git a/applications/giftless/secrets.yaml b/applications/giftless/secrets.yaml new file mode 100644 index 0000000000..52e5b2dbb8 --- /dev/null +++ b/applications/giftless/secrets.yaml @@ -0,0 +1,5 @@ +"giftless-gcp-key.json": + description: >- + The GCP service account JSON file for the giftless + onepassword: + encoded: true From 54363296d12186f1c1382819a25f00d9f96ad98e Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 28 Sep 2023 18:45:35 -0400 Subject: [PATCH 057/588] Add secrets.yaml for monitoring Descriptions still need to be added and its possible there are extraneous entries. These match what's currently in Vault for roundtable-dev. --- applications/monitoring/secrets.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 applications/monitoring/secrets.yaml diff --git a/applications/monitoring/secrets.yaml b/applications/monitoring/secrets.yaml new file mode 100644 index 0000000000..7dfd1a819c --- /dev/null +++ b/applications/monitoring/secrets.yaml @@ -0,0 +1,15 @@ +GH_CLIENT_SECRET: + description: >- + ? +INFLUXDB_TOKEN: + description: >- + ? +TOKEN_SECRET: + description: >- + ? +admin-token: + description: >- + ? +influx-alert-token: + description: >- + ? From 30c7265d96b31a2caa7518fb60a279ac10d5fdc0 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Fri, 29 Sep 2023 10:12:27 -0400 Subject: [PATCH 058/588] Add conditionals for sasquatch secrets --- applications/sasquatch/secrets.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 92684a3c2b..52bd82d4cc 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -34,33 +34,44 @@ influxdb-user: kafdrop-kafka-properties: description: >- ? 
+ if: kafdrop.enabled kafdrop-password: description: >- ? + if: kafdrop.enabled kafka-connect-manager-password: description: >- ? + if: strimzi-kafka.connect.enabled prompt-processing-password: description: >- ? + if: strimzi-kafka.users.promptProcessing.enabled replicator-password: description: >- ? + if: strimzi-kafka.users.replicator.enabled rest-proxy-password: description: >- ? + if: rest-proxy.enabled rest-proxy-sasl-jass-config: description: >- ? + if: rest-proxy.enabled sasquatch-test-kafka-properties: description: >- ? + if: kafka.listeners.plain.enabled sasquatch-test-password: description: >- ? + if: kafka.listeners.plain.enabled telegraf-password: description: >- ? + if: strimzi-kafka.users.telegraf.enabled ts-salkafka-password: description: >- ? + if: strimzi-kafka.users.telegraf.enabled From 3d1bb97d0b92134904b8f63c0a64a070b4c5555a Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Fri, 29 Sep 2023 10:17:11 -0400 Subject: [PATCH 059/588] Fix slack token, signing secret are not base64 --- applications/squarebot/secrets.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/applications/squarebot/secrets.yaml b/applications/squarebot/secrets.yaml index 7fc308b10f..f197587f56 100644 --- a/applications/squarebot/secrets.yaml +++ b/applications/squarebot/secrets.yaml @@ -12,13 +12,9 @@ SQUAREBOT_SLACK_APP_ID: SQUAREBOT_SLACK_TOKEN: description: >- The Slack bot user oauth token for the Slack App shared by all Squarebot services. - onepassword: - encoded: true SQUAREBOT_SLACK_SIGNING: description: >- The signing secret for all webhook payloads from Slack. - onepassword: - encoded: true ca.crt: description: >- The cluster CA certificate for the Kubernetes cluster. 
This is available From 631791a88b8907a6daabea13bf3c36636d5fff95 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 3 Oct 2023 13:22:00 -0400 Subject: [PATCH 060/588] Fix GitHub secrets for ook, squarebot --- applications/ook/secrets.yaml | 4 ++-- applications/squarebot/secrets.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/ook/secrets.yaml b/applications/ook/secrets.yaml index af87774f4d..29d39798cc 100644 --- a/applications/ook/secrets.yaml +++ b/applications/ook/secrets.yaml @@ -10,12 +10,12 @@ OOK_GITHUB_APP_ID: copy: application: squarebot key: SQUAREBOT_GITHUB_APP_ID -OOK_GITHUB_APP_KEY: +OOK_GITHUB_APP_PRIVATE_KEY: description: >- The private key for the GitHub App shared by all Squarebot services. copy: application: squarebot - key: SQUAREBOT_GITHUB_APP_KEY + key: SQUAREBOT_GITHUB_APP_PRIVATE_KEY ca.crt: description: >- The cluster CA certificate for the Kubernetes cluster. This is available diff --git a/applications/squarebot/secrets.yaml b/applications/squarebot/secrets.yaml index f197587f56..e3a30ec367 100644 --- a/applications/squarebot/secrets.yaml +++ b/applications/squarebot/secrets.yaml @@ -1,7 +1,7 @@ SQUAREBOT_GITHUB_APP_ID: description: >- The ID of the GitHub App shared by all Squarebot services. -SQUAREBOT_GITHUB_APP_KEY: +SQUAREBOT_GITHUB_APP_PRIVATE_KEY: description: >- The private key for the GitHub App shared by all Squarebot services. onepassword: From 772307bd114569d355c17b7c9e6f14e35d70895d Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 15:39:19 -0700 Subject: [PATCH 061/588] [DM-40944] Get rid of ingressClass: nginx in the starter This is because this part of the yaml will be stripped after the CRD is made, making it look always out of sync. 
--- starters/web-service/templates/ingress.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/starters/web-service/templates/ingress.yaml b/starters/web-service/templates/ingress.yaml index eacb451a7e..ae40656ae8 100644 --- a/starters/web-service/templates/ingress.yaml +++ b/starters/web-service/templates/ingress.yaml @@ -18,7 +18,6 @@ template: {{- toYaml . | nindent 6 }} {{- end }} spec: - ingressClassName: "nginx" rules: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: From 79e8226cf9262dd894b99a9144c30caaf33cedd4 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 5 Oct 2023 16:52:45 -0700 Subject: [PATCH 062/588] Revert "Add data tests back in" This reverts commit 44be9f56583a6fcb9ff480b1006035f64cb0dcf7. These shouldn't be needed and we don't have a hook to keep them up to date. --- starters/empty/README.md | 16 --- .../data/input/applications/argocd/README.md | 44 ------- .../input/applications/gafaelfawr/README.md | 124 ------------------ tests/data/input/applications/mobu/README.md | 35 ----- .../data/input/applications/nublado/README.md | 98 -------------- .../data/input/applications/portal/README.md | 57 -------- .../input/applications/postgres/README.md | 25 ---- tests/data/input/applications/tap/README.md | 68 ---------- tests/data/input/starters/empty/README.md | 18 --- .../data/input/starters/web-service/README.md | 32 ----- 10 files changed, 517 deletions(-) delete mode 100644 starters/empty/README.md delete mode 100644 tests/data/input/applications/argocd/README.md delete mode 100644 tests/data/input/applications/gafaelfawr/README.md delete mode 100644 tests/data/input/applications/mobu/README.md delete mode 100644 tests/data/input/applications/nublado/README.md delete mode 100644 tests/data/input/applications/portal/README.md delete mode 100644 tests/data/input/applications/postgres/README.md delete mode 100644 tests/data/input/applications/tap/README.md delete mode 100644 
tests/data/input/starters/empty/README.md delete mode 100644 tests/data/input/starters/web-service/README.md diff --git a/starters/empty/README.md b/starters/empty/README.md deleted file mode 100644 index 6b28c8a625..0000000000 --- a/starters/empty/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) - -Helm starter chart for a new RSP service - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/argocd/README.md b/tests/data/input/applications/argocd/README.md deleted file mode 100644 index 6f01cc888c..0000000000 --- a/tests/data/input/applications/argocd/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# argo-cd - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) - -Kubernetes application manager - -**Homepage:** - -## Source Code - -* -* - -## Requirements - -| Repository | Name | Version | -|------------|------|---------| -| https://argoproj.github.io/argo-helm | argo-cd | 5.43.3 | - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| argo-cd.configs.cm."resource.compareoptions" | string | `"ignoreAggregatedRoles: true\n"` | Configure resource comparison | -| argo-cd.configs.params."server.basehref" | string | `"/argo-cd"` | Base href for `index.html` when running under a reverse proxy | -| 
argo-cd.configs.params."server.insecure" | bool | `true` | Do not use TLS (this is terminated at the ingress) | -| argo-cd.configs.secret.createSecret | bool | `false` | Create the Argo CD secret (we manage this with Vault) | -| argo-cd.controller.metrics.applicationLabels.enabled | bool | `true` | Enable adding additional labels to `argocd_app_labels` metric | -| argo-cd.controller.metrics.applicationLabels.labels | list | `["name","instance"]` | Labels to add to `argocd_app_labels` metric | -| argo-cd.controller.metrics.enabled | bool | `true` | Enable controller metrics service | -| argo-cd.global.logging.format | string | `"json"` | Set the global logging format. Either: `text` or `json` | -| argo-cd.notifications.metrics.enabled | bool | `true` | Enable notifications metrics service | -| argo-cd.redis.metrics.enabled | bool | `true` | Enable Redis metrics service | -| argo-cd.repoServer.metrics.enabled | bool | `true` | Enable repo server metrics service | -| argo-cd.server.ingress.annotations | object | Rewrite requests to remove `/argo-cd/` prefix | Additional annotations to add to the Argo CD ingress | -| argo-cd.server.ingress.enabled | bool | `true` | Create an ingress for the Argo CD server | -| argo-cd.server.ingress.ingressClassName | string | `"nginx"` | Ingress class to use for Argo CD ingress | -| argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | Type of path expression for Argo CD ingress | -| argo-cd.server.ingress.paths | list | `["/argo-cd(/|$)(.*)"]` | Paths to route to Argo CD | -| argo-cd.server.metrics.enabled | bool | `true` | Enable server metrics service | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/gafaelfawr/README.md 
b/tests/data/input/applications/gafaelfawr/README.md deleted file mode 100644 index 9c280647da..0000000000 --- a/tests/data/input/applications/gafaelfawr/README.md +++ /dev/null @@ -1,124 +0,0 @@ -# gafaelfawr - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 9.3.0](https://img.shields.io/badge/AppVersion-9.3.0-informational?style=flat-square) - -Authentication and identity system - -**Homepage:** - -## Source Code - -* - -## Requirements - -| Repository | Name | Version | -|------------|------|---------| -| https://lsst-sqre.github.io/charts/ | redis | 1.0.6 | - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the Gafaelfawr frontend pod | -| cloudsql.affinity | object | `{}` | Affinity rules for the Cloud SQL Proxy pod | -| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. 
| -| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | -| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | -| cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | -| cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | -| cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | -| cloudsql.resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | -| cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | -| cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | -| config.cilogon.clientId | string | `""` | CILogon client ID. One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. 
| -| config.cilogon.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | -| config.cilogon.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | -| config.cilogon.groupsClaim | string | `"isMemberOf"` | Claim from which to get the group membership (only used if not retrieved from LDAP) | -| config.cilogon.loginParams | object | `{"skin":"LSST"}` | Additional parameters to add | -| config.cilogon.test | bool | `false` | Whether to use the test instance of CILogon | -| config.cilogon.uidClaim | string | `"uidNumber"` | Claim from which to get the numeric UID (only used if not retrieved from LDAP or Firestore) | -| config.cilogon.usernameClaim | string | `"uid"` | Claim from which to get the username | -| config.databaseUrl | string | None, must be set if neither `cloudsql.enabled` nor | URL for the PostgreSQL database `config.internalDatabase` are true | -| config.errorFooter | string | `""` | HTML footer to add to any login error page (will be enclosed in a

tag). | -| config.firestore.project | string | Firestore support is disabled | If set, assign UIDs and GIDs using Google Firestore in the given project. Cloud SQL must be enabled and the Cloud SQL service account must have read/write access to that Firestore instance. | -| config.forgerock.url | string | ForgeRock Identity Management support is disabled | If set, obtain the GIDs for groups from this ForgeRock Identity Management server. | -| config.forgerock.username | string | None, must be set if `config.forgerock.url` is set | Username to use for HTTP Basic authentication to ForgeRock Identity Managemnt. The corresponding password must be in the `forgerock-passsword` key of the Gafaelfawr Vault secret. | -| config.github.clientId | string | `""` | GitHub client ID. One and only one of this, `config.cilogon.clientId`, or `config.oidc.clientId` must be set. | -| config.groupMapping | object | `{}` | Defines a mapping of scopes to groups that provide that scope. See [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes. | -| config.initialAdmins | list | `[]` | Usernames to add as administrators when initializing a new database. Used only if there are no administrators. | -| config.internalDatabase | bool | `false` | Whether to use the PostgreSQL server internal to the Kubernetes cluster | -| config.knownScopes | object | See the `values.yaml` file | Names and descriptions of all scopes in use. This is used to populate the new token creation page. Only scopes listed here will be options when creating a new token. See [DMTN-235](https://dmtn-235.lsst.io/). 
| -| config.ldap.addUserGroup | bool | `false` | Whether to synthesize a user private group for each user with a GID equal to their UID | -| config.ldap.emailAttr | string | `"mail"` | Attribute containing the user's email address | -| config.ldap.gidAttr | string | Use GID of user private group | Attribute containing the user's primary GID (set to `gidNumber` for most LDAP servers) | -| config.ldap.groupBaseDn | string | None, must be set | Base DN for the LDAP search to find a user's groups | -| config.ldap.groupMemberAttr | string | `"member"` | Member attribute of the object class. Values must match the username returned in the token from the OpenID Connect authentication server. | -| config.ldap.groupObjectClass | string | `"posixGroup"` | Object class containing group information | -| config.ldap.kerberosConfig | string | Use anonymous binds | Enable GSSAPI (Kerberos) binds to LDAP using this `krb5.conf` file. If set, `ldap-keytab` must be set in the Gafaelfawr Vault secret. Set either this or `userDn`, not both. | -| config.ldap.nameAttr | string | `"displayName"` | Attribute containing the user's full name | -| config.ldap.uidAttr | string | Get UID from upstream authentication provider | Attribute containing the user's UID number (set to `uidNumber` for most LDAP servers) | -| config.ldap.url | string | Do not use LDAP | LDAP server URL from which to retrieve user group information | -| config.ldap.userBaseDn | string | Get user metadata from the upstream authentication provider | Base DN for the LDAP search to find a user's entry | -| config.ldap.userDn | string | Use anonymous binds | Bind DN for simple bind authentication. If set, `ldap-secret` must be set in the Gafaelfawr Vault secret. Set this or `kerberosConfig`, not both. 
| -| config.ldap.userSearchAttr | string | `"uid"` | Search attribute containing the user's username | -| config.logLevel | string | `"INFO"` | Choose from the text form of Python logging levels | -| config.oidc.audience | string | Value of `config.oidc.clientId` | Audience for the JWT token | -| config.oidc.clientId | string | `""` | Client ID for generic OpenID Connect support. One and only one of this, `config.cilogon.clientId`, or `config.github.clientId` must be set. | -| config.oidc.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | -| config.oidc.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | -| config.oidc.groupsClaim | string | `"isMemberOf"` | Claim from which to get the group membership (only used if not retrieved from LDAP) | -| config.oidc.issuer | string | None, must be set | Issuer for the JWT token | -| config.oidc.loginParams | object | `{}` | Additional parameters to add to the login request | -| config.oidc.loginUrl | string | None, must be set | URL to which to redirect the user for authorization | -| config.oidc.scopes | list | `["openid"]` | Scopes to request from the OpenID Connect provider | -| config.oidc.tokenUrl | string | None, must be set | URL from which to retrieve the token for the user | -| config.oidc.uidClaim | string | `"uidNumber"` | Claim from which to get the numeric UID (only used if not retrieved from LDAP or Firestore) | -| config.oidc.usernameClaim | string | `"sub"` | Claim from which to get the username | -| config.oidcServer.enabled | bool | `false` | Whether to support OpenID Connect clients. If set to true, `oidc-server-secrets` must be set in the Gafaelfawr secret. 
| -| config.proxies | list | [`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`] | List of netblocks used for internal Kubernetes IP addresses, used to determine the true client IP for logging | -| config.quota | object | `{}` | Quota settings (see [Quotas](https://gafaelfawr.lsst.io/user-guide/helm.html#quotas)). | -| config.slackAlerts | bool | `false` | Whether to send certain serious alerts to Slack. If `true`, the `slack-webhook` secret must also be set. | -| config.tokenLifetimeMinutes | int | `43200` (30 days) | Session length and token expiration (in minutes) | -| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Gafaelfawr image | -| image.repository | string | `"ghcr.io/lsst-sqre/gafaelfawr"` | Gafaelfawr image to use | -| image.tag | string | The appVersion of the chart | Tag of Gafaelfawr image to use | -| ingress.additionalHosts | list | `[]` | Defines additional FQDNs for Gafaelfawr. This doesn't work for cookie or browser authentication, but for token-based services like git-lfs or the webdav server it does. 
| -| maintenance.affinity | object | `{}` | Affinity rules for Gafaelfawr maintenance and audit pods | -| maintenance.auditSchedule | string | `"30 3 * * *"` | Cron schedule string for Gafaelfawr data consistency audit (in UTC) | -| maintenance.maintenanceSchedule | string | `"5 * * * *"` | Cron schedule string for Gafaelfawr periodic maintenance (in UTC) | -| maintenance.nodeSelector | object | `{}` | Node selection rules for Gafaelfawr maintenance and audit pods | -| maintenance.podAnnotations | object | `{}` | Annotations for Gafaelfawr maintenance and audit pods | -| maintenance.resources | object | `{}` | Resource limits and requests for Gafaelfawr maintenance and audit pods | -| maintenance.tolerations | list | `[]` | Tolerations for Gafaelfawr maintenance and audit pods | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the Gafaelfawr frontend pod | -| operator.affinity | object | `{}` | Affinity rules for the token management pod | -| operator.nodeSelector | object | `{}` | Node selection rules for the token management pod | -| operator.podAnnotations | object | `{}` | Annotations for the token management pod | -| operator.resources | object | `{}` | Resource limits and requests for the Gafaelfawr Kubernetes operator | -| operator.tolerations | list | `[]` | Tolerations for the token management pod | -| podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | -| redis.affinity | object | `{}` | Affinity rules for the Redis pod | -| redis.config.secretKey | string | `"redis-password"` | Key inside secret from which to get the Redis password (do not change) | -| redis.config.secretName | string | `"gafaelfawr-secret"` | Name of secret containing Redis password (may require changing if fullnameOverride is set) | -| redis.nodeSelector | object | `{}` | Node selection rules for the Redis pod | -| redis.persistence.accessMode | string | `"ReadWriteOnce"` | 
Access mode of storage to request | -| redis.persistence.enabled | bool | `true` | Whether to persist Redis storage and thus tokens. Setting this to false will use `emptyDir` and reset all tokens on every restart. Only use this for a test deployment. | -| redis.persistence.size | string | `"1Gi"` | Amount of persistent storage to request | -| redis.persistence.storageClass | string | `""` | Class of storage to request | -| redis.persistence.volumeClaimName | string | `""` | Use an existing PVC, not dynamic provisioning. If this is set, the size, storageClass, and accessMode settings are ignored. | -| redis.podAnnotations | object | `{}` | Pod annotations for the Redis pod | -| redis.resources | object | See `values.yaml` | Resource limits and requests for the Redis pod | -| redis.tolerations | list | `[]` | Tolerations for the Redis pod | -| replicaCount | int | `1` | Number of web frontend pods to start | -| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | -| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/mobu/README.md b/tests/data/input/applications/mobu/README.md deleted file mode 100644 index 9a33cafce2..0000000000 --- a/tests/data/input/applications/mobu/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# mobu - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 6.1.1](https://img.shields.io/badge/AppVersion-6.1.1-informational?style=flat-square) - -Continuous integration testing - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the mobu frontend pod | -| config.autostart | list | `[]` | Autostart specification. 
Must be a list of mobu flock specifications. Each flock listed will be automatically started when mobu is started. | -| config.debug | bool | `false` | If set to true, include the output from all flocks in the main mobu log and disable structured JSON logging. | -| config.disableSlackAlerts | bool | `false` | If set to true, do not configure mobu to send alerts to Slack. | -| config.pathPrefix | string | `"/mobu"` | Prefix for mobu's API routes. | -| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the mobu image | -| image.repository | string | `"ghcr.io/lsst-sqre/mobu"` | mobu image to use | -| image.tag | string | The appVersion of the chart | Tag of mobu image to use | -| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the mobu frontend pod | -| podAnnotations | object | `{}` | Annotations for the mobu frontend pod | -| resources | object | `{}` | Resource limits and requests for the mobu frontend pod | -| tolerations | list | `[]` | Tolerations for the mobu frontend pod | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/nublado/README.md b/tests/data/input/applications/nublado/README.md deleted file mode 100644 index 0e38ef82c8..0000000000 --- a/tests/data/input/applications/nublado/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# nublado - -![Version: 
1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 0.7.1](https://img.shields.io/badge/AppVersion-0.7.1-informational?style=flat-square) - -JupyterHub and custom spawner for the Rubin Science Platform - -**Homepage:** - -## Source Code - -* -* - -## Requirements - -| Repository | Name | Version | -|------------|------|---------| -| https://jupyterhub.github.io/helm-chart/ | jupyterhub | 2.0.0 | - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| controller.affinity | object | `{}` | Affinity rules for the lab controller pod | -| controller.config.fileserver.enabled | bool | `false` | Enable fileserver management | -| controller.config.fileserver.image | string | `"ghcr.io/lsst-sqre/worblehat"` | Image for fileserver container | -| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user fileservers | -| controller.config.fileserver.pullPolicy | string | `"IfNotPresent"` | Pull policy for fileserver container | -| controller.config.fileserver.tag | string | `"0.1.0"` | Tag for fileserver container | -| controller.config.fileserver.timeout | int | `3600` | Timeout for user fileservers, in seconds | -| controller.config.images.aliasTags | list | `[]` | Additional tags besides `recommendedTag` that should be recognized as aliases. | -| controller.config.images.cycle | string | `nil` | Restrict images to this SAL cycle, if given. | -| controller.config.images.numDailies | int | `3` | Number of most-recent dailies to prepull. | -| controller.config.images.numReleases | int | `1` | Number of most-recent releases to prepull. | -| controller.config.images.numWeeklies | int | `2` | Number of most-recent weeklies to prepull. | -| controller.config.images.pin | list | `[]` | List of additional image tags to prepull. 
Listing the image tagged as recommended here is recommended when using a Docker image source to ensure its name can be expanded properly in the menu. | -| controller.config.images.recommendedTag | string | `"recommended"` | Tag marking the recommended image (shown first in the menu) | -| controller.config.images.source | object | None, must be specified | Source for prepulled images. For Docker, set `type` to `docker`, `registry` to the hostname and `repository` to the name of the repository. For Google Artifact Repository, set `type` to `google`, `location` to the region, `projectId` to the Google project, `repository` to the name of the repository, and `image` to the name of the image. | -| controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab. | -| controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | -| controller.config.lab.initcontainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image reference), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with `allowPrivilegeEscalation` true. Otherwise it will, run as UID 1000. | -| controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | -| controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | -| controller.config.lab.sizes | object | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. 
Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI prefixes for memory are supported. `large`) | -| controller.config.lab.volumes | list | `[]` | Volumes that should be mounted in lab pods. This supports NFS, HostPath, and PVC volume types (differentiated in source.type) | -| controller.config.safir.logLevel | string | `"INFO"` | Level of Python logging | -| controller.config.safir.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | -| controller.googleServiceAccount | string | None, must be set when using Google Artifact Registry | If Google Artifact Registry is used as the image source, the Google service account that has an IAM binding to the `nublado-controller` Kubernetes service account and has the Artifact Registry reader role | -| controller.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the nublado image | -| controller.image.repository | string | `"ghcr.io/lsst-sqre/jupyterlab-controller"` | nublado image to use | -| controller.image.tag | string | The appVersion of the chart | Tag of nublado image to use | -| controller.ingress.annotations | object | `{}` | Additional annotations to add for the lab controller pod ingress | -| controller.nodeSelector | object | `{}` | Node selector rules for the lab controller pod | -| controller.podAnnotations | object | `{}` | Annotations for the lab controller pod | -| controller.resources | object | `{}` | Resource limits and requests for the lab controller pod | -| controller.slackAlerts | bool | `false` | Whether to enable Slack alerts. If set to true, `slack_webhook` must be set in the corresponding Nublado Vault secret. 
| -| controller.tolerations | list | `[]` | Tolerations for the lab controller pod | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| hub.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | -| hub.timeout.spawn | int | `600` | Timeout for the Kubernetes spawn process in seconds. (Allow long enough to pull uncached images if needed.) | -| hub.timeout.startup | int | `90` | Timeout for JupyterLab to start. Currently this sometimes takes over 60 seconds for reasons we don't understand. | -| jupyterhub.cull.enabled | bool | `true` | Enable the lab culler. | -| jupyterhub.cull.every | int | 600 (10 minutes) | How frequently to check for idle labs in seconds | -| jupyterhub.cull.maxAge | int | 5184000 (60 days) | Maximum age of a lab regardless of activity | -| jupyterhub.cull.removeNamedServers | bool | `true` | Whether to remove named servers when culling their lab | -| jupyterhub.cull.timeout | int | 2592000 (30 days) | Default idle timeout before the lab is automatically deleted in seconds | -| jupyterhub.cull.users | bool | `true` | Whether to log out the server when culling their lab | -| jupyterhub.hub.authenticatePrometheus | bool | `false` | Whether to require metrics requests to be authenticated | -| jupyterhub.hub.baseUrl | string | `"/nb"` | Base URL on which JupyterHub listens | -| jupyterhub.hub.containerSecurityContext | object | `{"allowPrivilegeEscalation":false,"runAsGroup":768,"runAsUser":768}` | Security context for JupyterHub container | -| jupyterhub.hub.db.password | string | Comes from nublado-secret | Database password (not used) | -| jupyterhub.hub.db.type | string | `"postgres"` | Type of database 
to use | -| jupyterhub.hub.db.url | string | Use the in-cluster PostgreSQL installed by Phalanx | URL of PostgreSQL server | -| jupyterhub.hub.existingSecret | string | `"nublado-secret"` | Existing secret to use for private keys | -| jupyterhub.hub.extraEnv | object | Gets `JUPYTERHUB_CRYPT_KEY` from `nublado-secret` | Additional environment variables to set | -| jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | -| jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | -| jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"0.3.2"` | Tag of image to use for JupyterHub | -| jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | -| jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | -| jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | -| jupyterhub.ingress.enabled | bool | `false` | Whether to enable the default ingress | -| jupyterhub.prePuller.continuous.enabled | bool | `false` | Whether to run the JupyterHub continuous prepuller (the Nublado controller does its own prepulling) | -| jupyterhub.prePuller.hook.enabled | bool | `false` | Whether to run the JupyterHub hook prepuller (the Nublado controller does its own prepulling) | -| jupyterhub.proxy.chp.networkPolicy.interNamespaceAccessLabels | string | `"accept"` | Enable access to the proxy from other namespaces, since we put each user's lab environment in its own namespace | -| jupyterhub.proxy.service.type | string | `"ClusterIP"` | Only expose the proxy to the 
cluster, overriding the default of exposing the proxy directly to the Internet | -| jupyterhub.scheduling.userPlaceholder.enabled | bool | `false` | Whether to spawn placeholder pods representing fake users to force autoscaling in advance of running out of resources | -| jupyterhub.scheduling.userScheduler.enabled | bool | `false` | Whether the user scheduler should be enabled | -| jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | Whether to configure iptables to block cloud metadata endpoints. This is unnecessary in our environments (they are blocked by cluster configuration) and thus is disabled to reduce complexity. | -| jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | Start command for labs | -| jupyterhub.singleuser.defaultUrl | string | `"/lab"` | Default URL prefix for lab endpoints | -| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/portal/README.md b/tests/data/input/applications/portal/README.md deleted file mode 100644 index ca915dc200..0000000000 --- a/tests/data/input/applications/portal/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# portal - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: suit-2023.1.5](https://img.shields.io/badge/AppVersion-suit--2023.1.5-informational?style=flat-square) - -Rubin Science Platform Portal Aspect - -## Source Code - -* -* - -## Requirements - -| Repository | Name | Version | -|------------|------|---------| -| https://lsst-sqre.github.io/charts/ | redis | 1.0.6 | - -## Values - -| Key | Type | Default | Description | 
-|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the Portal pod | -| config.cleanupInterval | string | `"36h"` | How long results should be retained before being deleted | -| config.debug | string | `"FALSE"` | Set to `TRUE` to enable service debugging | -| config.hipsUrl | string | `/api/hips/images/color_gri` in the local Science Platform | URL for default HiPS service | -| config.ssotap | string | `""` | Endpoint under `/api/` for the DP0.3 SSO TAP service on the instance, if present | -| config.visualizeFitsSearchPath | string | `"/datasets"` | Search path for FITS files | -| config.volumes.configHostPath | string | Use an `emptyDir` | hostPath to mount as configuration. Set either this of `configNfs`, not both. | -| config.volumes.configNfs | object | Use an `emptyDir` | NFS information for a configuration. If set, must have keys for path and server, Set either this of `configHostPath`, not both. | -| config.volumes.workareaHostPath | string | Use an `emptyDir` | hostPath to mount as a shared work area. Set either this or `workareaNfs`, not both. | -| config.volumes.workareaNfs | object | Use an `emptyDir` | NFS information for a shared work area. If set, must have keys for path and server. Set either this or `workareaHostPath`, not both. 
| -| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Portal image | -| image.repository | string | `"ipac/suit"` | Portal image to use | -| image.tag | string | The appVersion of the chart | Tag of Portal image to use | -| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the Portal pod | -| podAnnotations | object | `{}` | Annotations for the Portal pod | -| redis.affinity | object | `{}` | Affinity rules for the Redis pod | -| redis.config.secretKey | string | `"ADMIN_PASSWORD"` | Key inside secret from which to get the Redis password (do not change) | -| redis.config.secretName | string | `"portal-secret"` | Name of secret containing Redis password (may require changing if fullnameOverride is set) | -| redis.nodeSelector | object | `{}` | Node selection rules for the Redis pod | -| redis.persistence.enabled | bool | `false` | Whether to persist Redis storage. Setting this to false will use `emptyDir` and reset all data on every restart. | -| redis.podAnnotations | object | `{}` | Pod annotations for the Redis pod | -| redis.resources | object | See `values.yaml` | Resource limits and requests for the Redis pod | -| redis.tolerations | list | `[]` | Tolerations for the Redis pod | -| replicaCount | int | `1` | Number of pods to start | -| resources | object | `{"limits":{"cpu":2,"memory":"6Gi"}}` | Resource limits and requests. The Portal will use (by default) 93% of container RAM. 
This is a smallish Portal; tweak it as you need to in instance definitions in Phalanx. | -| securityContext | object | `{}` | Security context for the Portal pod | -| tolerations | list | `[]` | Tolerations for the Portal pod | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/postgres/README.md b/tests/data/input/applications/postgres/README.md deleted file mode 100644 index 612cc4d6b1..0000000000 --- a/tests/data/input/applications/postgres/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# postgres - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 0.0.5](https://img.shields.io/badge/AppVersion-0.0.5-informational?style=flat-square) - -Postgres RDBMS for LSP - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| debug | string | `""` | Set to non-empty to enable debugging output | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the postgres image | -| image.repository | string | `"lsstsqre/lsp-postgres"` | postgres image to use | -| image.tag | string | The appVersion of the chart | Tag of postgres image to use | -| postgresStorageClass | string | `"standard"` | Storage class for postgres volume. Set to appropriate value for your deployment: at GKE, "standard" (if you want SSD, "premium-rwo", but if you want a good database maybe it's better to use a cloud database?), on Rubin Observatory Rancher, "rook-ceph-block", elsewhere probably "standard" | -| postgresVolumeSize | string | `"1Gi"` | Volume size for postgres. 
It can generally be very small | -| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/applications/tap/README.md b/tests/data/input/applications/tap/README.md deleted file mode 100644 index 19b005f8d0..0000000000 --- a/tests/data/input/applications/tap/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# cadc-tap - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![AppVersion: 1.4.5](https://img.shields.io/badge/AppVersion-1.4.5-informational?style=flat-square) - -IVOA TAP service - -## Source Code - -* -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the Gafaelfawr frontend pod | -| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"` | Datalink payload URL | -| config.gafaelfawrHost | string | Value of `ingress.host` | Gafaelfawr hostname to get user information from a token | -| config.gcsBucket | string | None, must be set | Name of GCS bucket in which to store results | -| config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) | -| config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket | -| config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. 
| -| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data | -| fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the tap image | -| image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-service"` | tap image to use | -| image.tag | string | The appVersion of the chart | Tag of tap image to use | -| ingress.anonymousAnnotations | object | `{}` | Additional annotations to use for endpoints that allow anonymous access, such as `/capabilities` and `/availability` | -| ingress.authenticatedAnnotations | object | `{}` | Additional annotations to use for endpoints that are authenticated, such as `/sync`, `/async`, and `/tables` | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the Gafaelfawr frontend pod | -| podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | -| qserv.host | string | `"mock-qserv:3306"` (the mock QServ) | QServ hostname:port to connect to | -| qserv.mock.affinity | object | `{}` | Affinity rules for the mock QServ pod | -| qserv.mock.enabled | bool | `true` | Spin up a container to pretend to be QServ. 
| -| qserv.mock.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the mock QServ image | -| qserv.mock.image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-mock-qserv"` | Mock QServ image to use | -| qserv.mock.image.tag | string | The appVersion of the chart | Tag of mock QServ image to use | -| qserv.mock.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | -| qserv.mock.podAnnotations | object | `{}` | Annotations for the mock QServ pod | -| qserv.mock.resources | object | `{}` | Resource limits and requests for the mock QServ pod | -| qserv.mock.tolerations | list | `[]` | Tolerations for the mock QServ pod | -| replicaCount | int | `1` | Number of pods to start | -| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | -| tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | -| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | -| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| -| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image | -| tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | -| tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | -| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | -| tapSchema.tolerations | list | `[]` | Tolerations for the mock QServ pod | -| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | -| uws.affinity | object | `{}` | Affinity rules for the UWS database pod | -| uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | -| uws.image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-uws-db"` | UWS database image to use | -| uws.image.tag | string | The appVersion of the chart | Tag of UWS database image to use | -| uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | -| uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | -| uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | -| uws.tolerations | list | `[]` | Tolerations for the UWS database pod | -| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/starters/empty/README.md b/tests/data/input/starters/empty/README.md deleted file mode 100644 index d4e2901eed..0000000000 --- a/tests/data/input/starters/empty/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) - -Helm starter chart for a new RSP service - -**Homepage:** > - -## Values - 
-| Key | Type | Default | Description | -|-----|------|---------|-------------| -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/tests/data/input/starters/web-service/README.md b/tests/data/input/starters/web-service/README.md deleted file mode 100644 index 55f6d0c3f9..0000000000 --- a/tests/data/input/starters/web-service/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# - -![Version: 1.0.0](https://img.shields.io/badge/Version-1.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.1.0](https://img.shields.io/badge/AppVersion-0.1.0-informational?style=flat-square) - -Helm starter chart for a new RSP service - -**Homepage:** > - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the deployment pod | -| autoscaling.enabled | bool | `false` | Enable autoscaling of deployment | -| autoscaling.maxReplicas | int | `100` | Maximum number of deployment pods | -| autoscaling.minReplicas | int | `1` | Minimum number of deployment pods | -| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of deployment pods | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the image | -| image.repository | string | `"ghcr.io/lsst-sqre/"` | Image to use in the deployment | -| 
image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | -| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | -| nodeSelector | object | `{}` | Node selection rules for the deployment pod | -| podAnnotations | object | `{}` | Annotations for the deployment pod | -| replicaCount | int | `1` | Number of web deployment pods to start | -| resources | object | `{}` | Resource limits and requests for the deployment pod | -| tolerations | list | `[]` | Tolerations for the deployment pod | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) From e360eaea346aa2ce540e9f921118d69d9ea6417b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 5 Oct 2023 16:57:42 -0700 Subject: [PATCH 063/588] Be clear about token for phalanx vault audit Say explicitly that phalanx vault audit requires a privileged token. --- docs/admin/migrating-secrets.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/admin/migrating-secrets.rst b/docs/admin/migrating-secrets.rst index feac256089..8ad7029bf7 100644 --- a/docs/admin/migrating-secrets.rst +++ b/docs/admin/migrating-secrets.rst @@ -101,6 +101,7 @@ The new secret management system uses Vault AppRoles instead, which are the reco phalanx vault audit This command will print diagnostics if it finds any problems. + You will still need ``VAULT_TOKEN`` set to a privileged token to run this command. Update secrets ============== From b36f4583805a7c122f054b9d04aadf46245a4e44 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 5 Oct 2023 17:04:16 -0700 Subject: [PATCH 064/588] Switch idfdev and idfint to prod CILogon Test CILogon is returning invalid JSON from the token endpoint, so switch to production CILogon for now. 
--- applications/gafaelfawr/values-idfdev.yaml | 2 +- applications/gafaelfawr/values-idfint.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml index 91f77427e0..62c4d7568d 100644 --- a/applications/gafaelfawr/values-idfdev.yaml +++ b/applications/gafaelfawr/values-idfdev.yaml @@ -10,7 +10,7 @@ config: cilogon: clientId: "cilogon:/client_id/46f9ae932fd30e9fb1b246972a3c0720" enrollmentUrl: "https://id-dev.lsst.cloud/registry/co_petitions/start/coef:6" - test: true + test: false usernameClaim: "username" ldap: diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml index 9d85b88fe9..78c03638d5 100644 --- a/applications/gafaelfawr/values-idfint.yaml +++ b/applications/gafaelfawr/values-idfint.yaml @@ -11,7 +11,7 @@ config: cilogon: clientId: "cilogon:/client_id/6b3f86ecfe74f14afa81b73a76be0868" enrollmentUrl: "https://id-int.lsst.cloud/registry/co_petitions/start/coef:10" - test: true + test: false usernameClaim: "username" ldap: From a782ce0b9b4b43e30a693a49efb88c3db0510d68 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 6 Oct 2023 08:59:57 -0700 Subject: [PATCH 065/588] Switch idfdev and idfint back to test CILogon The problem was due to a bad WAF rule in their AWS infrastructure that was blocking connections from Google Cloud. This has now been fixed, so switch those environments back to the test CILogon. 
--- applications/gafaelfawr/values-idfdev.yaml | 2 +- applications/gafaelfawr/values-idfint.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml index 62c4d7568d..91f77427e0 100644 --- a/applications/gafaelfawr/values-idfdev.yaml +++ b/applications/gafaelfawr/values-idfdev.yaml @@ -10,7 +10,7 @@ config: cilogon: clientId: "cilogon:/client_id/46f9ae932fd30e9fb1b246972a3c0720" enrollmentUrl: "https://id-dev.lsst.cloud/registry/co_petitions/start/coef:6" - test: false + test: true usernameClaim: "username" ldap: diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml index 78c03638d5..9d85b88fe9 100644 --- a/applications/gafaelfawr/values-idfint.yaml +++ b/applications/gafaelfawr/values-idfint.yaml @@ -11,7 +11,7 @@ config: cilogon: clientId: "cilogon:/client_id/6b3f86ecfe74f14afa81b73a76be0868" enrollmentUrl: "https://id-int.lsst.cloud/registry/co_petitions/start/coef:10" - test: false + test: true usernameClaim: "username" ldap: From 0d532eef3fcbd404da57b83ba3077162ff2b6747 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 5 Oct 2023 19:05:37 -0400 Subject: [PATCH 066/588] Update vaultPathPrefix for Roundtable --- environments/values-roundtable-prod.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index 655a48a4bd..f260043018 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -3,7 +3,7 @@ fqdn: roundtable.lsst.cloud onepassword: connectUrl: "https://roundtable.lsst.cloud/1password" vaultTitle: "RSP roundtable.lsst.cloud" -vaultPathPrefix: secret/k8s_operator/roundtable.lsst.cloud +vaultPathPrefix: secret/phalanx/roundtable-prod applications: kubernetes-replicator: true From 651c842d84863fae4150c6c94ba1f3fd979852f4 Mon Sep 17 00:00:00 2001 From: Jonathan 
Sick Date: Thu, 5 Oct 2023 19:07:20 -0400 Subject: [PATCH 067/588] Add approle configuration for roundtable-prod --- .../values-roundtable-prod.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/applications/vault-secrets-operator/values-roundtable-prod.yaml b/applications/vault-secrets-operator/values-roundtable-prod.yaml index e69de29bb2..1e40e6f933 100644 --- a/applications/vault-secrets-operator/values-roundtable-prod.yaml +++ b/applications/vault-secrets-operator/values-roundtable-prod.yaml @@ -0,0 +1,14 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_SECRET_ID + vault: + authMethod: approle From f9910bd0fed71ad742080857b290a329a57fc6b9 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Fri, 6 Oct 2023 11:58:16 -0400 Subject: [PATCH 068/588] Enable Squarebot on roundtable-prod --- environments/values-roundtable-prod.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index f260043018..926669adad 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -13,3 +13,4 @@ applications: squareone: true strimzi: true strimzi-access-operator: true + squarebot: true From ce860c920853b60cf3a1c235702b04b43c3652bc Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Fri, 6 Oct 2023 15:04:30 -0400 Subject: [PATCH 069/588] Add values for squarebot on roundtable-prod --- applications/squarebot/values-roundtable-prod.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 applications/squarebot/values-roundtable-prod.yaml diff --git a/applications/squarebot/values-roundtable-prod.yaml b/applications/squarebot/values-roundtable-prod.yaml new file mode 100644 index 0000000000..e69de29bb2 From 
3be0e623f2d718609145667abfa38f7186defa93 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 6 Oct 2023 12:46:30 -0700 Subject: [PATCH 070/588] Add resource requests and limits for Gafaelfawr Add resource requests and limits based on the Google console statistics and some educated guesses. --- applications/gafaelfawr/README.md | 8 +++--- applications/gafaelfawr/values.yaml | 40 +++++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 0bf73c6bd9..01eaaa1a6f 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -21,7 +21,7 @@ Authentication and identity system | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | -| cloudsql.resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | +| cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy pod | | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | | config.cilogon.clientId | string | `""` | CILogon client ID. One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. 
| @@ -86,14 +86,14 @@ Authentication and identity system | maintenance.maintenanceSchedule | string | `"5 * * * *"` | Cron schedule string for Gafaelfawr periodic maintenance (in UTC) | | maintenance.nodeSelector | object | `{}` | Node selection rules for Gafaelfawr maintenance and audit pods | | maintenance.podAnnotations | object | `{}` | Annotations for Gafaelfawr maintenance and audit pods | -| maintenance.resources | object | `{}` | Resource limits and requests for Gafaelfawr maintenance and audit pods | +| maintenance.resources | object | See `values.yaml` | Resource limits and requests for Gafaelfawr maintenance and audit pods | | maintenance.tolerations | list | `[]` | Tolerations for Gafaelfawr maintenance and audit pods | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Gafaelfawr frontend pod | | operator.affinity | object | `{}` | Affinity rules for the token management pod | | operator.nodeSelector | object | `{}` | Node selection rules for the token management pod | | operator.podAnnotations | object | `{}` | Annotations for the token management pod | -| operator.resources | object | `{}` | Resource limits and requests for the Gafaelfawr Kubernetes operator | +| operator.resources | object | See `values.yaml` | Resource limits and requests for the Gafaelfawr Kubernetes operator | | operator.tolerations | list | `[]` | Tolerations for the token management pod | | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | redis.affinity | object | `{}` | Affinity rules for the Redis pod | @@ -109,5 +109,5 @@ Authentication and identity system | redis.resources | object | See `values.yaml` | Resource limits and requests for the Redis pod | | redis.tolerations | list | `[]` | Tolerations for the Redis pod | | replicaCount | int | `1` | Number of web frontend pods to start | -| resources | object | `{}` | Resource limits and requests for the Gafaelfawr 
frontend pod | +| resources | object | See `values.yaml` | Resource limits and requests for the Gafaelfawr frontend pod | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index ed8e2eedd7..1082427b7c 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -21,7 +21,14 @@ image: tag: "" # -- Resource limits and requests for the Gafaelfawr frontend pod -resources: {} +# @default -- See `values.yaml` +resources: + limits: + cpu: "1" + memory: "300Mi" + requests: + cpu: "100m" + memory: "150Mi" # -- Annotations for the Gafaelfawr frontend pod podAnnotations: {} @@ -312,7 +319,14 @@ cloudsql: serviceAccount: "" # -- Resource limits and requests for the Cloud SQL Proxy pod - resources: {} + # @default -- See `values.yaml` + resources: + limits: + cpu: "100m" + memory: "20Mi" + requests: + cpu: "5m" + memory: "7Mi" # -- Annotations for the Cloud SQL Proxy pod podAnnotations: {} @@ -334,7 +348,14 @@ maintenance: maintenanceSchedule: "5 * * * *" # -- Resource limits and requests for Gafaelfawr maintenance and audit pods - resources: {} + # @default -- See `values.yaml` + resources: + limits: + cpu: "100m" + memory: "300Mi" + requests: + cpu: "100m" + memory: "150Mi" # -- Annotations for Gafaelfawr maintenance and audit pods podAnnotations: {} @@ -350,7 +371,14 @@ maintenance: operator: # -- Resource limits and requests for the Gafaelfawr Kubernetes operator - resources: {} + # @default -- See `values.yaml` + resources: + limits: + cpu: "100m" + memory: "300Mi" + requests: + cpu: "10m" + memory: "150Mi" # -- Annotations for the token management pod podAnnotations: {} @@ -398,8 +426,10 @@ redis: resources: limits: cpu: "1" + memory: "20Mi" requests: - cpu: "100m" + cpu: "50m" + memory: "6Mi" # -- Pod annotations for the Redis pod podAnnotations: {} From e81be2b4cf85a145e0456f831e9a70bfc9205020 Mon Sep 17 00:00:00 2001 From: 
Russ Allbery Date: Fri, 6 Oct 2023 13:40:53 -0700 Subject: [PATCH 071/588] Bump maintenance CPU limits It looks like the maintenance and audit jobs use most of a CPU, but they don't really need that much. Request 0.1 CPU but let them use up to a full CPU. --- applications/gafaelfawr/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 1082427b7c..bbc37431a2 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -351,7 +351,7 @@ maintenance: # @default -- See `values.yaml` resources: limits: - cpu: "100m" + cpu: "1" memory: "300Mi" requests: cpu: "100m" From 6dd27baaddc1dff4d69eb7a89a26b7c080b55010 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 6 Oct 2023 13:49:14 -0700 Subject: [PATCH 072/588] Set deadlines for Gafaelfawr CronJobs Set a deadline of five minutes for the audit and maintenance jobs to run, and set a cleanup time of one day to remove any jobs that are older than that. (For audit, the limit of three successful jobs will normally override this.) --- applications/gafaelfawr/README.md | 2 ++ applications/gafaelfawr/templates/cronjob-audit.yaml | 2 ++ .../gafaelfawr/templates/cronjob-maintenance.yaml | 2 ++ applications/gafaelfawr/values.yaml | 8 ++++++++ 4 files changed, 14 insertions(+) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 01eaaa1a6f..38710700e0 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -83,6 +83,8 @@ Authentication and identity system | ingress.additionalHosts | list | `[]` | Defines additional FQDNs for Gafaelfawr. This doesn't work for cookie or browser authentication, but for token-based services like git-lfs or the webdav server it does. 
| | maintenance.affinity | object | `{}` | Affinity rules for Gafaelfawr maintenance and audit pods | | maintenance.auditSchedule | string | `"30 3 * * *"` | Cron schedule string for Gafaelfawr data consistency audit (in UTC) | +| maintenance.cleanupSeconds | int | 86400 (1 day) | How long to keep old jobs around before deleting them | +| maintenance.deadlineSeconds | int | 300 (5 minutes) | How long the job is allowed to run before it will be terminated | | maintenance.maintenanceSchedule | string | `"5 * * * *"` | Cron schedule string for Gafaelfawr periodic maintenance (in UTC) | | maintenance.nodeSelector | object | `{}` | Node selection rules for Gafaelfawr maintenance and audit pods | | maintenance.podAnnotations | object | `{}` | Annotations for Gafaelfawr maintenance and audit pods | diff --git a/applications/gafaelfawr/templates/cronjob-audit.yaml b/applications/gafaelfawr/templates/cronjob-audit.yaml index 741313c352..a69bdfb895 100644 --- a/applications/gafaelfawr/templates/cronjob-audit.yaml +++ b/applications/gafaelfawr/templates/cronjob-audit.yaml @@ -10,6 +10,8 @@ spec: concurrencyPolicy: "Forbid" jobTemplate: spec: + activeDeadlineSeconds: {{ .Values.maintenance.deadlineSeconds }} + ttlSecondsAfterFinished: {{ .Values.maintenance.cleanupSeconds }} template: metadata: {{- with .Values.maintenance.podAnnotations }} diff --git a/applications/gafaelfawr/templates/cronjob-maintenance.yaml b/applications/gafaelfawr/templates/cronjob-maintenance.yaml index 22364d99e2..1635bcf17f 100644 --- a/applications/gafaelfawr/templates/cronjob-maintenance.yaml +++ b/applications/gafaelfawr/templates/cronjob-maintenance.yaml @@ -9,6 +9,8 @@ spec: concurrencyPolicy: "Forbid" jobTemplate: spec: + activeDeadlineSeconds: {{ .Values.maintenance.deadlineSeconds }} + ttlSecondsAfterFinished: {{ .Values.maintenance.cleanupSeconds }} template: metadata: {{- with .Values.maintenance.podAnnotations }} diff --git a/applications/gafaelfawr/values.yaml 
b/applications/gafaelfawr/values.yaml index bbc37431a2..8001052992 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -347,6 +347,14 @@ maintenance: # -- Cron schedule string for Gafaelfawr periodic maintenance (in UTC) maintenanceSchedule: "5 * * * *" + # -- How long the job is allowed to run before it will be terminated + # @default -- 300 (5 minutes) + deadlineSeconds: 300 + + # -- How long to keep old jobs around before deleting them + # @default -- 86400 (1 day) + cleanupSeconds: 86400 + # -- Resource limits and requests for Gafaelfawr maintenance and audit pods # @default -- See `values.yaml` resources: From ff5c672815cc4b0ba1a12f314ea2cb01f83e08ba Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 07:29:01 +0000 Subject: [PATCH 073/588] Update Helm release telegraf to v1.8.35 --- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 8c76be1f93..c4310e44f2 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.34 + version: 1.8.35 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 705a5591b312a0938df63dd75eebec02a7e541a2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 07:29:06 +0000 Subject: [PATCH 074/588] Update Helm release telegraf-ds to v1.1.17 --- applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 6ec2b496c5..11c5998df4 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - 
https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.16 + version: 1.1.17 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 71cc40ab0cc825ddfbb4ddd362899cfc348f45ca Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 12:33:30 +0000 Subject: [PATCH 075/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 296 +++++++++++++++++++++--------------------- requirements/main.txt | 6 +- 2 files changed, 152 insertions(+), 150 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 8cedfa6b3e..69a0e27a82 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -8,9 +8,9 @@ alabaster==0.7.13 \ --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \ --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2 # via sphinx -annotated-types==0.5.0 \ - --hash=sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802 \ - --hash=sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd +annotated-types==0.6.0 \ + --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ + --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d # via # -c requirements/main.txt # pydantic @@ -253,9 +253,9 @@ coverage[toml]==7.3.2 \ # via # -r requirements/dev.in # pytest-cov -cycler==0.12.0 \ - --hash=sha256:7896994252d006771357777d0251f3e34d266f4fa5f2c572247a80ab01440947 \ - --hash=sha256:8cc3a7b4861f91b1095157f9916f748549a617046e67eb7619abed9b34d2c94a +cycler==0.12.1 \ + --hash=sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 \ + --hash=sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c # via matplotlib diagrams==0.23.3 \ --hash=sha256:543c707c36a2c896dfdf8f23e993a9c7ae48bb1a667f6baf19151eb98e57a134 \ @@ -284,49 
+284,49 @@ filelock==3.12.4 \ --hash=sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4 \ --hash=sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd # via virtualenv -fonttools==4.43.0 \ - --hash=sha256:030355fbb0cea59cf75d076d04d3852900583d1258574ff2d7d719abf4513836 \ - --hash=sha256:05056a8c9af048381fdb17e89b17d45f6c8394176d01e8c6fef5ac96ea950d38 \ - --hash=sha256:206808f9717c9b19117f461246372a2c160fa12b9b8dbdfb904ab50ca235ba0a \ - --hash=sha256:20fc43783c432862071fa76da6fa714902ae587bc68441e12ff4099b94b1fcef \ - --hash=sha256:25620b738d4533cfc21fd2a4f4b667e481f7cb60e86b609799f7d98af657854e \ - --hash=sha256:33c40a657fb87ff83185828c0323032d63a4df1279d5c1c38e21f3ec56327803 \ - --hash=sha256:3d7adfa342e6b3a2b36960981f23f480969f833d565a4eba259c2e6f59d2674f \ - --hash=sha256:48078357984214ccd22d7fe0340cd6ff7286b2f74f173603a1a9a40b5dc25afe \ - --hash=sha256:5056f69a18f3f28ab5283202d1efcfe011585d31de09d8560f91c6c88f041e92 \ - --hash=sha256:52e77f23a9c059f8be01a07300ba4c4d23dc271d33eed502aea5a01ab5d2f4c1 \ - --hash=sha256:57c22e5f9f53630d458830f710424dce4f43c5f0d95cb3368c0f5178541e4db7 \ - --hash=sha256:5aa67d1e720fdd902fde4a59d0880854ae9f19fc958f3e1538bceb36f7f4dc92 \ - --hash=sha256:5f9660e70a2430780e23830476332bc3391c3c8694769e2c0032a5038702a662 \ - --hash=sha256:635658464dccff6fa5c3b43fe8f818ae2c386ee6a9e1abc27359d1e255528186 \ - --hash=sha256:6a530fa28c155538d32214eafa0964989098a662bd63e91e790e6a7a4e9c02da \ - --hash=sha256:70f021a6b9eb10dfe7a411b78e63a503a06955dd6d2a4e130906d8760474f77c \ - --hash=sha256:77e5113233a2df07af9dbf493468ce526784c3b179c0e8b9c7838ced37c98b69 \ - --hash=sha256:7c76f32051159f8284f1a5f5b605152b5a530736fb8b55b09957db38dcae5348 \ - --hash=sha256:812142a0e53cc853964d487e6b40963df62f522b1b571e19d1ff8467d7880ceb \ - --hash=sha256:82d8e687a42799df5325e7ee12977b74738f34bf7fde1c296f8140efd699a213 \ - --hash=sha256:8dfd8edfce34ad135bd69de20c77449c06e2c92b38f2a8358d0987737f82b49e \ - 
--hash=sha256:93c5b6d77baf28f306bc13fa987b0b13edca6a39dc2324eaca299a74ccc6316f \ - --hash=sha256:9d654d3e780e0ceabb1f4eff5a3c042c67d4428d0fe1ea3afd238a721cf171b3 \ - --hash=sha256:a682fb5cbf8837d1822b80acc0be5ff2ea0c49ca836e468a21ffd388ef280fd3 \ - --hash=sha256:a68b71adc3b3a90346e4ac92f0a69ab9caeba391f3b04ab6f1e98f2c8ebe88e3 \ - --hash=sha256:a6a2e99bb9ea51e0974bbe71768df42c6dd189308c22f3f00560c3341b345646 \ - --hash=sha256:ab80e7d6bb01316d5fc8161a2660ca2e9e597d0880db4927bc866c76474472ef \ - --hash=sha256:ace0fd5afb79849f599f76af5c6aa5e865bd042c811e4e047bbaa7752cc26126 \ - --hash=sha256:ace51902ab67ef5fe225e8b361039e996db153e467e24a28d35f74849b37b7ce \ - --hash=sha256:af38f5145258e9866da5881580507e6d17ff7756beef175d13213a43a84244e9 \ - --hash=sha256:b3813f57f85bbc0e4011a0e1e9211f9ee52f87f402e41dc05bc5135f03fa51c1 \ - --hash=sha256:b5e760198f0b87e42478bb35a6eae385c636208f6f0d413e100b9c9c5efafb6a \ - --hash=sha256:b62a53a4ca83c32c6b78cac64464f88d02929779373c716f738af6968c8c821e \ - --hash=sha256:d08a694b280d615460563a6b4e2afb0b1b9df708c799ec212bf966652b94fc84 \ - --hash=sha256:d27d960e10cf7617d70cf3104c32a69b008dde56f2d55a9bed4ba6e3df611544 \ - --hash=sha256:da78f39b601ed0b4262929403186d65cf7a016f91ff349ab18fdc5a7080af465 \ - --hash=sha256:dcc01cea0a121fb0c009993497bad93cae25e77db7dee5345fec9cce1aaa09cd \ - --hash=sha256:e3f8acc6ef4a627394021246e099faee4b343afd3ffe2e517d8195b4ebf20289 \ - --hash=sha256:e4bc589d8da09267c7c4ceaaaa4fc01a7908ac5b43b286ac9279afe76407c384 \ - --hash=sha256:e5d53eddaf436fa131042f44a76ea1ead0a17c354ab9de0d80e818f0cb1629f1 \ - --hash=sha256:ee728d5af70f117581712966a21e2e07031e92c687ef1fdc457ac8d281016f64 \ - --hash=sha256:f19c2b1c65d57cbea25cabb80941fea3fbf2625ff0cdcae8900b5fb1c145704f +fonttools==4.43.1 \ + --hash=sha256:10003ebd81fec0192c889e63a9c8c63f88c7d72ae0460b7ba0cd2a1db246e5ad \ + --hash=sha256:10b3922875ffcba636674f406f9ab9a559564fdbaa253d66222019d569db869c \ + 
--hash=sha256:13a9a185259ed144def3682f74fdcf6596f2294e56fe62dfd2be736674500dba \ + --hash=sha256:17dbc2eeafb38d5d0e865dcce16e313c58265a6d2d20081c435f84dc5a9d8212 \ + --hash=sha256:18a2477c62a728f4d6e88c45ee9ee0229405e7267d7d79ce1f5ce0f3e9f8ab86 \ + --hash=sha256:18eefac1b247049a3a44bcd6e8c8fd8b97f3cad6f728173b5d81dced12d6c477 \ + --hash=sha256:1952c89a45caceedf2ab2506d9a95756e12b235c7182a7a0fff4f5e52227204f \ + --hash=sha256:1cf9e974f63b1080b1d2686180fc1fbfd3bfcfa3e1128695b5de337eb9075cef \ + --hash=sha256:1e09da7e8519e336239fbd375156488a4c4945f11c4c5792ee086dd84f784d02 \ + --hash=sha256:2062542a7565091cea4cc14dd99feff473268b5b8afdee564f7067dd9fff5860 \ + --hash=sha256:25d3da8a01442cbc1106490eddb6d31d7dffb38c1edbfabbcc8db371b3386d72 \ + --hash=sha256:34f713dad41aa21c637b4e04fe507c36b986a40f7179dcc86402237e2d39dcd3 \ + --hash=sha256:360201d46165fc0753229afe785900bc9596ee6974833124f4e5e9f98d0f592b \ + --hash=sha256:3b7ad05b2beeebafb86aa01982e9768d61c2232f16470f9d0d8e385798e37184 \ + --hash=sha256:4c54466f642d2116686268c3e5f35ebb10e49b0d48d41a847f0e171c785f7ac7 \ + --hash=sha256:4d9740e3783c748521e77d3c397dc0662062c88fd93600a3c2087d3d627cd5e5 \ + --hash=sha256:4f88cae635bfe4bbbdc29d479a297bb525a94889184bb69fa9560c2d4834ddb9 \ + --hash=sha256:51669b60ee2a4ad6c7fc17539a43ffffc8ef69fd5dbed186a38a79c0ac1f5db7 \ + --hash=sha256:5db46659cfe4e321158de74c6f71617e65dc92e54980086823a207f1c1c0e24b \ + --hash=sha256:5f37e31291bf99a63328668bb83b0669f2688f329c4c0d80643acee6e63cd933 \ + --hash=sha256:6bb5ea9076e0e39defa2c325fc086593ae582088e91c0746bee7a5a197be3da0 \ + --hash=sha256:748015d6f28f704e7d95cd3c808b483c5fb87fd3eefe172a9da54746ad56bfb6 \ + --hash=sha256:7bbbf8174501285049e64d174e29f9578495e1b3b16c07c31910d55ad57683d8 \ + --hash=sha256:884ef38a5a2fd47b0c1291647b15f4e88b9de5338ffa24ee52c77d52b4dfd09c \ + --hash=sha256:8da417431bfc9885a505e86ba706f03f598c85f5a9c54f67d63e84b9948ce590 \ + --hash=sha256:95e974d70238fc2be5f444fa91f6347191d0e914d5d8ae002c9aa189572cc215 \ + 
--hash=sha256:9648518ef687ba818db3fcc5d9aae27a369253ac09a81ed25c3867e8657a0680 \ + --hash=sha256:9a2f0aa6ca7c9bc1058a9d0b35483d4216e0c1bbe3962bc62ce112749954c7b8 \ + --hash=sha256:9c36da88422e0270fbc7fd959dc9749d31a958506c1d000e16703c2fce43e3d0 \ + --hash=sha256:9c60ecfa62839f7184f741d0509b5c039d391c3aff71dc5bc57b87cc305cff3b \ + --hash=sha256:9f727c3e3d08fd25352ed76cc3cb61486f8ed3f46109edf39e5a60fc9fecf6ca \ + --hash=sha256:a7a06f8d95b7496e53af80d974d63516ffb263a468e614978f3899a6df52d4b3 \ + --hash=sha256:ad0b3f6342cfa14be996971ea2b28b125ad681c6277c4cd0fbdb50340220dfb6 \ + --hash=sha256:b2adca1b46d69dce4a37eecc096fe01a65d81a2f5c13b25ad54d5430ae430b13 \ + --hash=sha256:b84a1c00f832feb9d0585ca8432fba104c819e42ff685fcce83537e2e7e91204 \ + --hash=sha256:bb6d2f8ef81ea076877d76acfb6f9534a9c5f31dc94ba70ad001267ac3a8e56f \ + --hash=sha256:bf11e2cca121df35e295bd34b309046c29476ee739753bc6bc9d5050de319273 \ + --hash=sha256:d21099b411e2006d3c3e1f9aaf339e12037dbf7bf9337faf0e93ec915991f43b \ + --hash=sha256:d4071bd1c183b8d0b368cc9ed3c07a0f6eb1bdfc4941c4c024c49a35429ac7cd \ + --hash=sha256:e117a92b07407a061cde48158c03587ab97e74e7d73cb65e6aadb17af191162a \ + --hash=sha256:f7a58eb5e736d7cf198eee94844b81c9573102ae5989ebcaa1d1a37acd04b33d \ + --hash=sha256:fe9b1ec799b6086460a7480e0f55c447b1aca0a4eecc53e444f639e967348896 # via matplotlib gitdb==4.0.10 \ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ @@ -994,104 +994,106 @@ requests==2.31.0 \ # -c requirements/main.txt # documenteer # sphinx -rpds-py==0.10.3 \ - --hash=sha256:015de2ce2af1586ff5dc873e804434185199a15f7d96920ce67e50604592cae9 \ - --hash=sha256:061c3ff1f51ecec256e916cf71cc01f9975af8fb3af9b94d3c0cc8702cfea637 \ - --hash=sha256:08a80cf4884920863623a9ee9a285ee04cef57ebedc1cc87b3e3e0f24c8acfe5 \ - --hash=sha256:09362f86ec201288d5687d1dc476b07bf39c08478cde837cb710b302864e7ec9 \ - --hash=sha256:0bb4f48bd0dd18eebe826395e6a48b7331291078a879295bae4e5d053be50d4c \ - 
--hash=sha256:106af1653007cc569d5fbb5f08c6648a49fe4de74c2df814e234e282ebc06957 \ - --hash=sha256:11fdd1192240dda8d6c5d18a06146e9045cb7e3ba7c06de6973000ff035df7c6 \ - --hash=sha256:16a472300bc6c83fe4c2072cc22b3972f90d718d56f241adabc7ae509f53f154 \ - --hash=sha256:176287bb998fd1e9846a9b666e240e58f8d3373e3bf87e7642f15af5405187b8 \ - --hash=sha256:177914f81f66c86c012311f8c7f46887ec375cfcfd2a2f28233a3053ac93a569 \ - --hash=sha256:177c9dd834cdf4dc39c27436ade6fdf9fe81484758885f2d616d5d03c0a83bd2 \ - --hash=sha256:187700668c018a7e76e89424b7c1042f317c8df9161f00c0c903c82b0a8cac5c \ - --hash=sha256:1d9b5ee46dcb498fa3e46d4dfabcb531e1f2e76b477e0d99ef114f17bbd38453 \ - --hash=sha256:22da15b902f9f8e267020d1c8bcfc4831ca646fecb60254f7bc71763569f56b1 \ - --hash=sha256:24cd91a03543a0f8d09cb18d1cb27df80a84b5553d2bd94cba5979ef6af5c6e7 \ - --hash=sha256:255f1a10ae39b52122cce26ce0781f7a616f502feecce9e616976f6a87992d6b \ - --hash=sha256:271c360fdc464fe6a75f13ea0c08ddf71a321f4c55fc20a3fe62ea3ef09df7d9 \ - --hash=sha256:2ed83d53a8c5902ec48b90b2ac045e28e1698c0bea9441af9409fc844dc79496 \ - --hash=sha256:2f3e1867dd574014253b4b8f01ba443b9c914e61d45f3674e452a915d6e929a3 \ - --hash=sha256:35fbd23c1c8732cde7a94abe7fb071ec173c2f58c0bd0d7e5b669fdfc80a2c7b \ - --hash=sha256:37d0c59548ae56fae01c14998918d04ee0d5d3277363c10208eef8c4e2b68ed6 \ - --hash=sha256:39d05e65f23a0fe897b6ac395f2a8d48c56ac0f583f5d663e0afec1da89b95da \ - --hash=sha256:3ad59efe24a4d54c2742929001f2d02803aafc15d6d781c21379e3f7f66ec842 \ - --hash=sha256:3aed39db2f0ace76faa94f465d4234aac72e2f32b009f15da6492a561b3bbebd \ - --hash=sha256:3bbac1953c17252f9cc675bb19372444aadf0179b5df575ac4b56faaec9f6294 \ - --hash=sha256:40bc802a696887b14c002edd43c18082cb7b6f9ee8b838239b03b56574d97f71 \ - --hash=sha256:42f712b4668831c0cd85e0a5b5a308700fe068e37dcd24c0062904c4e372b093 \ - --hash=sha256:448a66b8266de0b581246ca7cd6a73b8d98d15100fb7165974535fa3b577340e \ - --hash=sha256:485301ee56ce87a51ccb182a4b180d852c5cb2b3cb3a82f7d4714b4141119d8c \ - 
--hash=sha256:485747ee62da83366a44fbba963c5fe017860ad408ccd6cd99aa66ea80d32b2e \ - --hash=sha256:4cf0855a842c5b5c391dd32ca273b09e86abf8367572073bd1edfc52bc44446b \ - --hash=sha256:4eca20917a06d2fca7628ef3c8b94a8c358f6b43f1a621c9815243462dcccf97 \ - --hash=sha256:4ed172d0c79f156c1b954e99c03bc2e3033c17efce8dd1a7c781bc4d5793dfac \ - --hash=sha256:5267cfda873ad62591b9332fd9472d2409f7cf02a34a9c9cb367e2c0255994bf \ - --hash=sha256:52b5cbc0469328e58180021138207e6ec91d7ca2e037d3549cc9e34e2187330a \ - --hash=sha256:53d7a3cd46cdc1689296348cb05ffd4f4280035770aee0c8ead3bbd4d6529acc \ - --hash=sha256:563646d74a4b4456d0cf3b714ca522e725243c603e8254ad85c3b59b7c0c4bf0 \ - --hash=sha256:570cc326e78ff23dec7f41487aa9c3dffd02e5ee9ab43a8f6ccc3df8f9327623 \ - --hash=sha256:5aca759ada6b1967fcfd4336dcf460d02a8a23e6abe06e90ea7881e5c22c4de6 \ - --hash=sha256:5de11c041486681ce854c814844f4ce3282b6ea1656faae19208ebe09d31c5b8 \ - --hash=sha256:5e271dd97c7bb8eefda5cca38cd0b0373a1fea50f71e8071376b46968582af9b \ - --hash=sha256:642ed0a209ced4be3a46f8cb094f2d76f1f479e2a1ceca6de6346a096cd3409d \ - --hash=sha256:6446002739ca29249f0beaaf067fcbc2b5aab4bc7ee8fb941bd194947ce19aff \ - --hash=sha256:691d50c99a937709ac4c4cd570d959a006bd6a6d970a484c84cc99543d4a5bbb \ - --hash=sha256:69b857a7d8bd4f5d6e0db4086da8c46309a26e8cefdfc778c0c5cc17d4b11e08 \ - --hash=sha256:6ac3fefb0d168c7c6cab24fdfc80ec62cd2b4dfd9e65b84bdceb1cb01d385c33 \ - --hash=sha256:6c9141af27a4e5819d74d67d227d5047a20fa3c7d4d9df43037a955b4c748ec5 \ - --hash=sha256:7170cbde4070dc3c77dec82abf86f3b210633d4f89550fa0ad2d4b549a05572a \ - --hash=sha256:763ad59e105fca09705d9f9b29ecffb95ecdc3b0363be3bb56081b2c6de7977a \ - --hash=sha256:77076bdc8776a2b029e1e6ffbe6d7056e35f56f5e80d9dc0bad26ad4a024a762 \ - --hash=sha256:7cd020b1fb41e3ab7716d4d2c3972d4588fdfbab9bfbbb64acc7078eccef8860 \ - --hash=sha256:821392559d37759caa67d622d0d2994c7a3f2fb29274948ac799d496d92bca73 \ - --hash=sha256:829e91f3a8574888b73e7a3feb3b1af698e717513597e23136ff4eba0bc8387a \ - 
--hash=sha256:850c272e0e0d1a5c5d73b1b7871b0a7c2446b304cec55ccdb3eaac0d792bb065 \ - --hash=sha256:87d9b206b1bd7a0523375dc2020a6ce88bca5330682ae2fe25e86fd5d45cea9c \ - --hash=sha256:8bd01ff4032abaed03f2db702fa9a61078bee37add0bd884a6190b05e63b028c \ - --hash=sha256:8d54bbdf5d56e2c8cf81a1857250f3ea132de77af543d0ba5dce667183b61fec \ - --hash=sha256:8efaeb08ede95066da3a3e3c420fcc0a21693fcd0c4396d0585b019613d28515 \ - --hash=sha256:8f94fdd756ba1f79f988855d948ae0bad9ddf44df296770d9a58c774cfbcca72 \ - --hash=sha256:95cde244e7195b2c07ec9b73fa4c5026d4a27233451485caa1cd0c1b55f26dbd \ - --hash=sha256:975382d9aa90dc59253d6a83a5ca72e07f4ada3ae3d6c0575ced513db322b8ec \ - --hash=sha256:9dd9d9d9e898b9d30683bdd2b6c1849449158647d1049a125879cb397ee9cd12 \ - --hash=sha256:a019a344312d0b1f429c00d49c3be62fa273d4a1094e1b224f403716b6d03be1 \ - --hash=sha256:a4d9bfda3f84fc563868fe25ca160c8ff0e69bc4443c5647f960d59400ce6557 \ - --hash=sha256:a657250807b6efd19b28f5922520ae002a54cb43c2401e6f3d0230c352564d25 \ - --hash=sha256:a771417c9c06c56c9d53d11a5b084d1de75de82978e23c544270ab25e7c066ff \ - --hash=sha256:aad6ed9e70ddfb34d849b761fb243be58c735be6a9265b9060d6ddb77751e3e8 \ - --hash=sha256:ae87137951bb3dc08c7d8bfb8988d8c119f3230731b08a71146e84aaa919a7a9 \ - --hash=sha256:af247fd4f12cca4129c1b82090244ea5a9d5bb089e9a82feb5a2f7c6a9fe181d \ - --hash=sha256:b5d4bdd697195f3876d134101c40c7d06d46c6ab25159ed5cbd44105c715278a \ - --hash=sha256:b9255e7165083de7c1d605e818025e8860636348f34a79d84ec533546064f07e \ - --hash=sha256:c22211c165166de6683de8136229721f3d5c8606cc2c3d1562da9a3a5058049c \ - --hash=sha256:c55f9821f88e8bee4b7a72c82cfb5ecd22b6aad04033334f33c329b29bfa4da0 \ - --hash=sha256:c7aed97f2e676561416c927b063802c8a6285e9b55e1b83213dfd99a8f4f9e48 \ - --hash=sha256:cd2163f42868865597d89399a01aa33b7594ce8e2c4a28503127c81a2f17784e \ - --hash=sha256:ce5e7504db95b76fc89055c7f41e367eaadef5b1d059e27e1d6eabf2b55ca314 \ - --hash=sha256:cff7351c251c7546407827b6a37bcef6416304fc54d12d44dbfecbb717064717 \ - 
--hash=sha256:d27aa6bbc1f33be920bb7adbb95581452cdf23005d5611b29a12bb6a3468cc95 \ - --hash=sha256:d3b52a67ac66a3a64a7e710ba629f62d1e26ca0504c29ee8cbd99b97df7079a8 \ - --hash=sha256:de61e424062173b4f70eec07e12469edde7e17fa180019a2a0d75c13a5c5dc57 \ - --hash=sha256:e10e6a1ed2b8661201e79dff5531f8ad4cdd83548a0f81c95cf79b3184b20c33 \ - --hash=sha256:e1a0ffc39f51aa5f5c22114a8f1906b3c17eba68c5babb86c5f77d8b1bba14d1 \ - --hash=sha256:e22491d25f97199fc3581ad8dd8ce198d8c8fdb8dae80dea3512e1ce6d5fa99f \ - --hash=sha256:e626b864725680cd3904414d72e7b0bd81c0e5b2b53a5b30b4273034253bb41f \ - --hash=sha256:e8c71ea77536149e36c4c784f6d420ffd20bea041e3ba21ed021cb40ce58e2c9 \ - --hash=sha256:e8d0f0eca087630d58b8c662085529781fd5dc80f0a54eda42d5c9029f812599 \ - --hash=sha256:ea65b59882d5fa8c74a23f8960db579e5e341534934f43f3b18ec1839b893e41 \ - --hash=sha256:ea93163472db26ac6043e8f7f93a05d9b59e0505c760da2a3cd22c7dd7111391 \ - --hash=sha256:eab75a8569a095f2ad470b342f2751d9902f7944704f0571c8af46bede438475 \ - --hash=sha256:ed8313809571a5463fd7db43aaca68ecb43ca7a58f5b23b6e6c6c5d02bdc7882 \ - --hash=sha256:ef5fddfb264e89c435be4adb3953cef5d2936fdeb4463b4161a6ba2f22e7b740 \ - --hash=sha256:ef750a20de1b65657a1425f77c525b0183eac63fe7b8f5ac0dd16f3668d3e64f \ - --hash=sha256:efb9ece97e696bb56e31166a9dd7919f8f0c6b31967b454718c6509f29ef6fee \ - --hash=sha256:f4c179a7aeae10ddf44c6bac87938134c1379c49c884529f090f9bf05566c836 \ - --hash=sha256:f602881d80ee4228a2355c68da6b296a296cd22bbb91e5418d54577bbf17fa7c \ - --hash=sha256:fc2200e79d75b5238c8d69f6a30f8284290c777039d331e7340b6c17cad24a5a \ - --hash=sha256:fcc1ebb7561a3e24a6588f7c6ded15d80aec22c66a070c757559b57b17ffd1cb +rpds-py==0.10.4 \ + --hash=sha256:00a88003db3cc953f8656b59fc9af9d0637a1fb93c235814007988f8c153b2f2 \ + --hash=sha256:049098dabfe705e9638c55a3321137a821399c50940041a6fcce267a22c70db2 \ + --hash=sha256:08f07150c8ebbdbce1d2d51b8e9f4d588749a2af6a98035485ebe45c7ad9394e \ + 
--hash=sha256:125776d5db15162fdd9135372bef7fe4fb7c5f5810cf25898eb74a06a0816aec \ + --hash=sha256:13cbd79ccedc6b39c279af31ebfb0aec0467ad5d14641ddb15738bf6e4146157 \ + --hash=sha256:18d5ff7fbd305a1d564273e9eb22de83ae3cd9cd6329fddc8f12f6428a711a6a \ + --hash=sha256:1c27942722cd5039bbf5098c7e21935a96243fed00ea11a9589f3c6c6424bd84 \ + --hash=sha256:255a23bded80605e9f3997753e3a4b89c9aec9efb07ec036b1ca81440efcc1a9 \ + --hash=sha256:2573ec23ad3a59dd2bc622befac845695972f3f2d08dc1a4405d017d20a6c225 \ + --hash=sha256:2603e084054351cc65097da326570102c4c5bd07426ba8471ceaefdb0b642cc9 \ + --hash=sha256:28b4942ec7d9d6114c1e08cace0157db92ef674636a38093cab779ace5742d3a \ + --hash=sha256:28e29dac59df890972f73c511948072897f512974714a803fe793635b80ff8c7 \ + --hash=sha256:2a97406d5e08b7095428f01dac0d3c091dc072351151945a167e7968d2755559 \ + --hash=sha256:2a9e864ec051a58fdb6bb2e6da03942adb20273897bc70067aee283e62bbac4d \ + --hash=sha256:2e0e2e01c5f61ddf47e3ed2d1fe1c9136e780ca6222d57a2517b9b02afd4710c \ + --hash=sha256:2e79eeeff8394284b09577f36316d410525e0cf0133abb3de10660e704d3d38e \ + --hash=sha256:2f2ac8bb01f705c5caaa7fe77ffd9b03f92f1b5061b94228f6ea5eaa0fca68ad \ + --hash=sha256:32819b662e3b4c26355a4403ea2f60c0a00db45b640fe722dd12db3d2ef807fb \ + --hash=sha256:3507c459767cf24c11e9520e2a37c89674266abe8e65453e5cb66398aa47ee7b \ + --hash=sha256:362faeae52dc6ccc50c0b6a01fa2ec0830bb61c292033f3749a46040b876f4ba \ + --hash=sha256:3650eae998dc718960e90120eb45d42bd57b18b21b10cb9ee05f91bff2345d48 \ + --hash=sha256:36ff30385fb9fb3ac23a28bffdd4a230a5229ed5b15704b708b7c84bfb7fce51 \ + --hash=sha256:3bc561c183684636c0099f9c3fbab8c1671841942edbce784bb01b4707d17924 \ + --hash=sha256:3bd38b80491ef9686f719c1ad3d24d14fbd0e069988fdd4e7d1a6ffcdd7f4a13 \ + --hash=sha256:3e37f1f134037601eb4b1f46854194f0cc082435dac2ee3de11e51529f7831f2 \ + --hash=sha256:40f6e53461b19ddbb3354fe5bcf3d50d4333604ae4bf25b478333d83ca68002c \ + --hash=sha256:49db6c0a0e6626c2b97f5e7f8f7074da21cbd8ec73340c25e839a2457c007efa \ + 
--hash=sha256:4bcb1abecd998a72ad4e36a0fca93577fd0c059a6aacc44f16247031b98f6ff4 \ + --hash=sha256:4cb55454a20d1b935f9eaab52e6ceab624a2efd8b52927c7ae7a43e02828dbe0 \ + --hash=sha256:4f92d2372ec992c82fd7c74aa21e2a1910b3dcdc6a7e6392919a138f21d528a3 \ + --hash=sha256:576d48e1e45c211e99fc02655ade65c32a75d3e383ccfd98ce59cece133ed02c \ + --hash=sha256:58bae860d1d116e6b4e1aad0cdc48a187d5893994f56d26db0c5534df7a47afd \ + --hash=sha256:5bb3f3cb6072c73e6ec1f865d8b80419b599f1597acf33f63fbf02252aab5a03 \ + --hash=sha256:5db93f9017b384a4f194e1d89e1ce82d0a41b1fafdbbd3e0c8912baf13f2950f \ + --hash=sha256:5e41d5b334e8de4bc3f38843f31b2afa9a0c472ebf73119d3fd55cde08974bdf \ + --hash=sha256:60018626e637528a1fa64bb3a2b3e46ab7bf672052316d61c3629814d5e65052 \ + --hash=sha256:6090ba604ea06b525a231450ae5d343917a393cbf50423900dea968daf61d16f \ + --hash=sha256:628fbb8be71a103499d10b189af7764996ab2634ed7b44b423f1e19901606e0e \ + --hash=sha256:6baea8a4f6f01e69e75cfdef3edd4a4d1c4b56238febbdf123ce96d09fbff010 \ + --hash=sha256:6c5ca3eb817fb54bfd066740b64a2b31536eb8fe0b183dc35b09a7bd628ed680 \ + --hash=sha256:70563a1596d2e0660ca2cebb738443437fc0e38597e7cbb276de0a7363924a52 \ + --hash=sha256:7089d8bfa8064b28b2e39f5af7bf12d42f61caed884e35b9b4ea9e6fb1175077 \ + --hash=sha256:72e9b1e92830c876cd49565d8404e4dcc9928302d348ea2517bc3f9e3a873a2a \ + --hash=sha256:7c7ca791bedda059e5195cf7c6b77384657a51429357cdd23e64ac1d4973d6dc \ + --hash=sha256:7f050ceffd8c730c1619a16bbf0b9cd037dcdb94b54710928ba38c7bde67e4a4 \ + --hash=sha256:83da147124499fe41ed86edf34b4e81e951b3fe28edcc46288aac24e8a5c8484 \ + --hash=sha256:86e8d6ff15fa7a9590c0addaf3ce52fb58bda4299cab2c2d0afa404db6848dab \ + --hash=sha256:8709eb4ab477c533b7d0a76cd3065d7d95c9e25e6b9f6e27caeeb8c63e8799c9 \ + --hash=sha256:8e69bbe0ede8f7fe2616e779421bbdb37f025c802335a90f6416e4d98b368a37 \ + --hash=sha256:8f90fc6dd505867514c8b8ef68a712dc0be90031a773c1ae2ad469f04062daef \ + --hash=sha256:9123ba0f3f98ff79780eebca9984a2b525f88563844b740f94cffb9099701230 \ + 
--hash=sha256:927e3461dae0c09b1f2e0066e50c1a9204f8a64a3060f596e9a6742d3b307785 \ + --hash=sha256:94876c21512535955a960f42a155213315e6ab06a4ce8ce372341a2a1b143eeb \ + --hash=sha256:98c0aecf661c175ce9cb17347fc51a5c98c3e9189ca57e8fcd9348dae18541db \ + --hash=sha256:9c7e7bd1fa1f535af71dfcd3700fc83a6dc261a1204f8f5327d8ffe82e52905d \ + --hash=sha256:9e7b3ad9f53ea9e085b3d27286dd13f8290969c0a153f8a52c8b5c46002c374b \ + --hash=sha256:9f9184744fb800c9f28e155a5896ecb54816296ee79d5d1978be6a2ae60f53c4 \ + --hash=sha256:a3628815fd170a64624001bfb4e28946fd515bd672e68a1902d9e0290186eaf3 \ + --hash=sha256:a5c330cb125983c5d380fef4a4155248a276297c86d64625fdaf500157e1981c \ + --hash=sha256:aa45cc71bf23a3181b8aa62466b5a2b7b7fb90fdc01df67ca433cd4fce7ec94d \ + --hash=sha256:aab24b9bbaa3d49e666e9309556591aa00748bd24ea74257a405f7fed9e8b10d \ + --hash=sha256:ac83f5228459b84fa6279e4126a53abfdd73cd9cc183947ee5084153880f65d7 \ + --hash=sha256:ad21c60fc880204798f320387164dcacc25818a7b4ec2a0bf6b6c1d57b007d23 \ + --hash=sha256:ae8a32ab77a84cc870bbfb60645851ca0f7d58fd251085ad67464b1445d632ca \ + --hash=sha256:b0f1d336786cb62613c72c00578c98e5bb8cd57b49c5bae5d4ab906ca7872f98 \ + --hash=sha256:b28b9668a22ca2cfca4433441ba9acb2899624a323787a509a3dc5fbfa79c49d \ + --hash=sha256:b953d11b544ca5f2705bb77b177d8e17ab1bfd69e0fd99790a11549d2302258c \ + --hash=sha256:b9d8884d58ea8801e5906a491ab34af975091af76d1a389173db491ee7e316bb \ + --hash=sha256:ba3246c60303eab3d0e562addf25a983d60bddc36f4d1edc2510f056d19df255 \ + --hash=sha256:bd0ad98c7d72b0e4cbfe89cdfa12cd07d2fd6ed22864341cdce12b318a383442 \ + --hash=sha256:bf032367f921201deaecf221d4cc895ea84b3decf50a9c73ee106f961885a0ad \ + --hash=sha256:c31ecfc53ac03dad4928a1712f3a2893008bfba1b3cde49e1c14ff67faae2290 \ + --hash=sha256:cbec8e43cace64e63398155dc585dc479a89fef1e57ead06c22d3441e1bd09c3 \ + --hash=sha256:cc688a59c100f038fa9fec9e4ab457c2e2d1fca350fe7ea395016666f0d0a2dc \ + --hash=sha256:cd7da2adc721ccf19ac7ec86cae3a4fcaba03d9c477d5bd64ded6e9bb817bf3f \ + 
--hash=sha256:cd7e62e7d5bcfa38a62d8397fba6d0428b970ab7954c2197501cd1624f7f0bbb \ + --hash=sha256:d0f7f77a77c37159c9f417b8dd847f67a29e98c6acb52ee98fc6b91efbd1b2b6 \ + --hash=sha256:d230fddc60caced271cc038e43e6fb8f4dd6b2dbaa44ac9763f2d76d05b0365a \ + --hash=sha256:d37f27ad80f742ef82796af3fe091888864958ad0bc8bab03da1830fa00c6004 \ + --hash=sha256:d5ad7b1a1f6964d19b1a8acfc14bf7864f39587b3e25c16ca04f6cd1815026b3 \ + --hash=sha256:d81359911c3bb31c899c6a5c23b403bdc0279215e5b3bc0d2a692489fed38632 \ + --hash=sha256:d98802b78093c7083cc51f83da41a5be5a57d406798c9f69424bd75f8ae0812a \ + --hash=sha256:db0589e0bf41ff6ce284ab045ca89f27be1adf19e7bce26c2e7de6739a70c18b \ + --hash=sha256:ddbd113a37307638f94be5ae232a325155fd24dbfae2c56455da8724b471e7be \ + --hash=sha256:e3ece9aa6d07e18c966f14b4352a4c6f40249f6174d3d2c694c1062e19c6adbb \ + --hash=sha256:e3f9c9e5dd8eba4768e15f19044e1b5e216929a43a54b4ab329e103aed9f3eda \ + --hash=sha256:e41824343c2c129599645373992b1ce17720bb8a514f04ff9567031e1c26951e \ + --hash=sha256:e5dba1c11e089b526379e74f6c636202e4c5bad9a48c7416502b8a5b0d026c91 \ + --hash=sha256:e791e3d13b14d0a7921804d0efe4d7bd15508bbcf8cb7a0c1ee1a27319a5f033 \ + --hash=sha256:ec001689402b9104700b50a005c2d3d0218eae90eaa8bdbbd776fe78fe8a74b7 \ + --hash=sha256:efffa359cc69840c8793f0c05a7b663de6afa7b9078fa6c80309ee38b9db677d \ + --hash=sha256:f1f191befea279cb9669b57be97ab1785781c8bab805900e95742ebfaa9cbf1d \ + --hash=sha256:f3331a3684192659fa1090bf2b448db928152fcba08222e58106f44758ef25f7 \ + --hash=sha256:f40413d2859737ce6d95c29ce2dde0ef7cdc3063b5830ae4342fef5922c3bba7 \ + --hash=sha256:f7ea49ddf51d5ec0c3cbd95190dd15e077a3153c8d4b22a33da43b5dd2b3c640 \ + --hash=sha256:f82abb5c5b83dc30e96be99ce76239a030b62a73a13c64410e429660a5602bfd \ + --hash=sha256:fc20dadb102140dff63529e08ce6f9745dbd36e673ebb2b1c4a63e134bca81c2 \ + --hash=sha256:fd37ab9a24021821b715478357af1cf369d5a42ac7405e83e5822be00732f463 \ + --hash=sha256:ffd539d213c1ea2989ab92a5b9371ae7159c8c03cf2bcb9f2f594752f755ecd3 # via # 
jsonschema # referencing diff --git a/requirements/main.txt b/requirements/main.txt index a4284be259..7e340be9af 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/main.txt requirements/main.in # -annotated-types==0.5.0 \ - --hash=sha256:47cdc3490d9ac1506ce92c7aaa76c579dc3509ff11e098fc867e5130ab7be802 \ - --hash=sha256:58da39888f92c276ad970249761ebea80ba544b77acddaa1a4d6cf78287d45fd +annotated-types==0.6.0 \ + --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ + --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d # via pydantic anyio==3.7.1 \ --hash=sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780 \ From 67b00a531c7c13f938a815bf375ef6386d43427e Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 9 Oct 2023 14:08:09 -0700 Subject: [PATCH 076/588] Add persistence to Chronograf/roundtable-dev --- applications/monitoring/values-roundtable-dev.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/monitoring/values-roundtable-dev.yaml b/applications/monitoring/values-roundtable-dev.yaml index ec948c5cce..232dd87c7b 100644 --- a/applications/monitoring/values-roundtable-dev.yaml +++ b/applications/monitoring/values-roundtable-dev.yaml @@ -1,4 +1,8 @@ chronograf: + persistence: + enabled: true + size: 1Gi + storageClass: standard-rwo env: GH_CLIENT_ID: "e85fe410b0021a251180" cronjob: From 3092b00be9b7040ca0e73a928a3ee1d420575745 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 9 Oct 2023 12:42:52 -0700 Subject: [PATCH 077/588] Improve pytest output with pytest-pretty Install pytest-pretty as a dev dependency for better test summary output. Set CI when running pytest to force verbose diffs of data structures, and drop -vv so that pytest will show the shorter summary of tests in progress instead of printing the name of every test function on its own line. 
Increase the column width for pytest-pretty output on GitHub Actions so that the table is more useful. --- .github/workflows/ci.yaml | 2 ++ requirements/dev.in | 1 + requirements/dev.txt | 11 +++++++++++ requirements/main.txt | 6 +++--- tox.ini | 5 ++++- 5 files changed, 21 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index d086d8485a..c2a70961bd 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -44,6 +44,8 @@ jobs: python-version: ${{ matrix.python }} tox-envs: "typing,py,coverage-report" cache-key-prefix: test + env: + COLUMNS: 120 helm: runs-on: ubuntu-latest diff --git a/requirements/dev.in b/requirements/dev.in index 3571b429c5..6a1b0b9aa5 100644 --- a/requirements/dev.in +++ b/requirements/dev.in @@ -13,6 +13,7 @@ mypy pre-commit pytest pytest-cov +pytest-pretty ruff types-PyYAML diff --git a/requirements/dev.txt b/requirements/dev.txt index 69a0e27a82..949852017a 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -501,6 +501,7 @@ markdown-it-py[linkify]==3.0.0 \ # documenteer # mdit-py-plugins # myst-parser + # rich markupsafe==2.1.3 \ --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ @@ -897,6 +898,7 @@ pygments==2.16.1 \ --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29 # via # pydata-sphinx-theme + # rich # sphinx # sphinx-prompt pyparsing==3.1.1 \ @@ -909,10 +911,15 @@ pytest==7.4.2 \ # via # -r requirements/dev.in # pytest-cov + # pytest-pretty pytest-cov==4.1.0 \ --hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \ --hash=sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a # via -r requirements/dev.in +pytest-pretty==1.2.0 \ + --hash=sha256:105a355f128e392860ad2c478ae173ff96d2f03044692f9818ff3d49205d3a60 \ + 
--hash=sha256:6f79122bf53864ae2951b6c9e94d7a06a87ef753476acd4588aeac018f062036 + # via -r requirements/dev.in python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 @@ -994,6 +1001,10 @@ requests==2.31.0 \ # -c requirements/main.txt # documenteer # sphinx +rich==13.6.0 \ + --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ + --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef + # via pytest-pretty rpds-py==0.10.4 \ --hash=sha256:00a88003db3cc953f8656b59fc9af9d0637a1fb93c235814007988f8c153b2f2 \ --hash=sha256:049098dabfe705e9638c55a3321137a821399c50940041a6fcce267a22c70db2 \ diff --git a/requirements/main.txt b/requirements/main.txt index 7e340be9af..f2b53e33ab 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -552,9 +552,9 @@ starlette==0.27.0 \ # via # fastapi # safir -structlog==23.1.0 \ - --hash=sha256:270d681dd7d163c11ba500bc914b2472d2b50a8ef00faa999ded5ff83a2f906b \ - --hash=sha256:79b9e68e48b54e373441e130fa447944e6f87a05b35de23138e475c05d0f7e0e +structlog==23.2.0 \ + --hash=sha256:16a167e87b9fa7fae9a972d5d12805ef90e04857a93eba479d4be3801a6a1482 \ + --hash=sha256:334666b94707f89dbc4c81a22a8ccd34449f0201d5b1ee097a030b577fa8c858 # via safir typing-extensions==4.8.0 \ --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ diff --git a/tox.ini b/tox.ini index fee1dec8a1..a695e6949b 100644 --- a/tox.ini +++ b/tox.ini @@ -49,7 +49,10 @@ commands = neophile update {posargs} [testenv:py] description = Run pytest commands = - pytest -vv --cov=phalanx --cov-branch --cov-report= {posargs} + pytest --cov=phalanx --cov-branch --cov-report= {posargs} +# Ensure pytest never trucates diffs on assertions. +setenv = + CI = true [testenv:typing] description = Run mypy. 
From 255c7bf95e3a24fdd054fa33674e49a18481a02c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 14:26:21 -0700 Subject: [PATCH 078/588] Add empty values file for strimzi Although this chart does not need a non-environment values file, accounting for the case where it could be missing would require special code in the Phalanx tooling. It's easier to create an empty file and make the chart consistent with other charts. Fix another warning during documentation generation. --- applications/strimzi/README.md | 6 ++++++ applications/strimzi/values.yaml | 0 src/phalanx/storage/config.py | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 applications/strimzi/README.md create mode 100644 applications/strimzi/values.yaml diff --git a/applications/strimzi/README.md b/applications/strimzi/README.md new file mode 100644 index 0000000000..b7b3698bc4 --- /dev/null +++ b/applications/strimzi/README.md @@ -0,0 +1,6 @@ +# strimzi + +Strimzi Kafka Operator + +**Homepage:** + diff --git a/applications/strimzi/values.yaml b/applications/strimzi/values.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 60b471b9f4..bc08c31beb 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -533,7 +533,7 @@ def _load_application_config(self, name: str) -> ApplicationConfig: values_path = base_path / "values.yaml" if values_path.exists(): with values_path.open("r") as fh: - values = yaml.safe_load(fh) + values = yaml.safe_load(fh) or {} else: values = {} From e5ab4003a8f0cf9d78490a39e9b28e1ad6fde68e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 14:56:48 -0700 Subject: [PATCH 079/588] Remove unused usdfdev cert-manager configuration cert-manager is disabled on usdfdev and the values-usdfdev.yaml file was out of date and didn't have the correct spelling of settings. 
Delete it; we can always make a new one should usdfdev start running cert-manager again. --- applications/cert-manager/values-usdfdev.yaml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 applications/cert-manager/values-usdfdev.yaml diff --git a/applications/cert-manager/values-usdfdev.yaml b/applications/cert-manager/values-usdfdev.yaml deleted file mode 100644 index 9a069163fb..0000000000 --- a/applications/cert-manager/values-usdfdev.yaml +++ /dev/null @@ -1,5 +0,0 @@ -solver: - route53: - aws_access_key_id: AKIAQSJOS2SFL5I4TYND - hosted_zone: Z0567328105IEHEMIXLCO - vault_secret_path: "secret/rubin/data-dev.lsst.cloud/cert-manager" From d17d79854d1a4ca2fa71de8b1de0396a46017313 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 17:30:57 -0700 Subject: [PATCH 080/588] Stop installing Python for minikube testing The current installer script doesn't use any Python at all, so we don't need to install Python or any of its dependencies for minikube testing. --- .github/workflows/ci.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c2a70961bd..aa416af16a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -88,14 +88,6 @@ jobs: - name: Checkout uses: actions/checkout@v4 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: "3.11" - - - name: Install test dependencies - run: make init - - name: Filter paths uses: dorny/paths-filter@v2 id: filter From 9723e40b0548e017b46fca7d98ede760ccf688e0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 10:38:51 -0700 Subject: [PATCH 081/588] Exit non-zero on secrets audit failure phalanx vault audit returned non-zero if there was audit output, but phalanx secrets audit did not. Make it match, and match the normal expectations of an audit tool. 
--- src/phalanx/cli.py | 5 ++++- tests/cli/secrets_test.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 8bef679e0f..ac01cf1c52 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -209,7 +209,10 @@ def secrets_audit( static_secrets = StaticSecrets.from_path(secrets) if secrets else None factory = Factory(config) secrets_service = factory.create_secrets_service() - sys.stdout.write(secrets_service.audit(environment, static_secrets)) + report = secrets_service.audit(environment, static_secrets) + if report: + sys.stdout.write(report) + sys.exit(1) @secrets.command("list") diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index 159de2673b..4865abaddc 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -59,7 +59,7 @@ def test_audit(factory: Factory, mock_vault: MockVaultClient) -> None: result = run_cli( "secrets", "audit", "--secrets", str(secrets_path), "idfdev" ) - assert result.exit_code == 0 + assert result.exit_code == 1 assert result.output == read_output_data("idfdev", "secrets-audit") @@ -78,7 +78,7 @@ def test_audit_onepassword_missing( mock_vault.load_test_data(environment.vault_path_prefix, "minikube") result = run_cli("secrets", "audit", "minikube") - assert result.exit_code == 0 + assert result.exit_code == 1 assert result.output == read_output_data( "minikube", "audit-missing-output" ) From c4939b8e1216adccefb5fef960c9eb0a5f49da2f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 13:21:10 -0700 Subject: [PATCH 082/588] Upgrade Nublado controller Install the 0.8.0 release, which has a lot of internal refactoring and better home directory configuration support. 
--- applications/nublado/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 9133539d24..b649507bd9 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -6,7 +6,7 @@ sources: - https://github.com/lsst-sqre/jupyterlab-controller - https://github.com/lsst-sqre/rsp-restspawner home: https://github.com/lsst-sqre/jupyterlab-controller -appVersion: 0.7.3 +appVersion: 0.8.0 dependencies: - name: jupyterhub From 2c6a1bbf5cd72a484baab49fff601ceff190e2fb Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 6 Oct 2023 16:35:17 -0700 Subject: [PATCH 083/588] Add phalanx application add-helm-repos command Add a command to register all of the Helm repos used by an application, or by every application in the configuration tree. This is the first step towards a new implementation of Helm chart linting that is actually useful, unlike chart-tester. --- src/phalanx/cli.py | 27 ++++++++++++ src/phalanx/services/application.py | 23 ++++++++++ src/phalanx/storage/config.py | 42 ++++++++++++++++++ src/phalanx/storage/helm.py | 34 +++++++++++++++ tests/cli/application_test.py | 25 +++++++++++ tests/conftest.py | 7 +++ tests/support/helm.py | 68 +++++++++++++++++++++++++++++ 7 files changed, 226 insertions(+) create mode 100644 tests/support/helm.py diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index ac01cf1c52..94fb4d2411 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -22,6 +22,7 @@ "help", "main", "application", + "application_add_helm_repos", "application_create", "environment", "environment_schema", @@ -91,6 +92,32 @@ def application() -> None: """Commands for Phalanx application configuration.""" +@application.command("add-helm-repos") +@click.argument("name", required=False) +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def 
application_add_helm_repos( + name: str | None = None, *, config: Path | None +) -> None: + """Add all third-party Helm repositories to Helm. + + In order to perform other Helm operations, such as linting, all + third-party Helm chart repositories used by Phalanx applications have to + be added to the local Helm cache. This command does that for every Phalanx + application. + """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + application_service.add_helm_repositories(name) + + @application.command("create") @click.argument("name") @click.option( diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index b2cd629145..ed207df935 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -43,6 +43,29 @@ def __init__( autoescape=jinja2.select_autoescape(disabled_extensions=["jinja"]), ) + def add_helm_repositories(self, application: str | None = None) -> None: + """Add all Helm repositories used by any application to Helm's cache. + + To perform other Helm operations, such as downloading third-party + charts in order to run :command:`helm lint`, all third-party Helm + chart repositories have to be added to Helm's cache. This does that + for every application in the Phalanx configuration. + + Consistent names for the Helm repositories are used so that this + command can be run repeatedly. + + Parameters + ---------- + application + If given, only add Helm repositories required by this application. 
+ """ + if application: + repo_urls = self._config.get_dependency_repositories(application) + else: + repo_urls = self._config.get_all_dependency_repositories() + for url in sorted(repo_urls): + self._helm.repo_add(url) + def create_application( self, name: str, starter: HelmStarter, description: str ) -> None: diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index bc08c31beb..295423fe7a 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -155,6 +155,24 @@ def add_application_setting(self, application: str, setting: str) -> None: new.write(setting + "\n") path_new.rename(path) + def get_all_dependency_repositories(self) -> set[str]: + """List the URLs of all referenced third-party Helm repositories. + + Returns + ------- + set of str + URLs of third-party Helm repositories referenced by some + application chart. + """ + repo_urls = set() + for app_path in (self._path / "applications").iterdir(): + chart_path = app_path / "Chart.yaml" + if not chart_path.exists(): + continue + urls = self.get_dependency_repositories(app_path.name) + repo_urls.update(urls) + return repo_urls + def get_application_chart_path(self, application: str) -> Path: """Determine the path to an application Helm chart. @@ -173,6 +191,30 @@ def get_application_chart_path(self, application: str) -> Path: """ return self._path / "applications" / application + def get_dependency_repositories(self, application: str) -> set[str]: + """Return URLs for dependency Helm repositories for this application. + + Parameters + ---------- + application + Name of the application. + + Returns + ------- + set of str + URLs of Helm repositories used by dependencies of this + application's chart. 
+ """ + path = self.get_application_chart_path(application) / "Chart.yaml" + chart = yaml.safe_load(path.read_text()) + repo_urls = set() + for dependency in chart.get("dependencies", []): + if "repository" in dependency: + repository = dependency["repository"] + if not repository.startswith("file:"): + repo_urls.add(repository) + return repo_urls + def get_starter_path(self, starter: HelmStarter) -> Path: """Determine the path to a Helm starter template. diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index a8b63d2a18..9ed77879d2 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -4,6 +4,7 @@ import subprocess from pathlib import Path +from urllib.parse import urlparse from ..exceptions import HelmFailedError from ..models.helm import HelmStarter @@ -47,6 +48,39 @@ def create(self, application: str, starter: HelmStarter) -> None: cwd=application_path.parent, ) + def repo_add(self, url: str) -> None: + """Add a Helm chart repository to Helm's cache. + + Used primarily to enable Helm linting and templating, since both + require any third-party chart repositories be added first. + + Annoyingly, Helm requires you to name repositories, but chart + configurations don't include repository names. Automating adding Helm + repositories therefore requires making up a name. This uses some + arbitrary heuristics that produce consistent names and hopefully won't + produce conflicts. + + Parameters + ---------- + url + Chart repository to add. + + Raises + ------ + ValueError + Raised if the Helm repository URL is invalid. + """ + hostname = urlparse(url).hostname + if not hostname: + raise ValueError(f"Invalid Helm repository URL {url}") + if hostname.endswith("github.io"): + name = hostname.split(".", 1)[0] + elif "." 
in hostname: + name = hostname.split(".")[-2] + else: + name = hostname + self._run_helm("repo", "add", name, url) + def _run_helm( self, command: str, *args: str, cwd: Path | None = None ) -> None: diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index 0048775d99..a8e982fafb 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -11,6 +11,31 @@ from ..support.cli import run_cli from ..support.data import phalanx_test_path, read_output_data +from ..support.helm import MockHelm + + +def test_add_helm_repos(mock_helm: MockHelm) -> None: + result = run_cli("application", "add-helm-repos", "argocd") + assert result.output == "" + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + ["repo", "add", "argoproj", "https://argoproj.github.io/argo-helm"] + ] + + mock_helm.reset_mock() + result = run_cli("application", "add-helm-repos") + assert result.output == "" + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + ["repo", "add", "argoproj", "https://argoproj.github.io/argo-helm"], + [ + "repo", + "add", + "jupyterhub", + "https://jupyterhub.github.io/helm-chart/", + ], + ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"], + ] def test_create(tmp_path: Path) -> None: diff --git a/tests/conftest.py b/tests/conftest.py index 9b769a7a55..e6353a401e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,6 +10,7 @@ from phalanx.factory import Factory from .support.data import phalanx_test_path +from .support.helm import MockHelm, patch_helm from .support.onepassword import MockOnepasswordClient, patch_onepassword from .support.vault import MockVaultClient, patch_vault @@ -20,6 +21,12 @@ def factory() -> Factory: return Factory(phalanx_test_path()) +@pytest.fixture +def mock_helm() -> Iterator[MockHelm]: + """Mock out Helm commands.""" + yield from patch_helm() + + @pytest.fixture def mock_onepassword() -> Iterator[MockOnepasswordClient]: """Mock out the 1Password 
Connect client API.""" diff --git a/tests/support/helm.py b/tests/support/helm.py new file mode 100644 index 0000000000..f55925dfb8 --- /dev/null +++ b/tests/support/helm.py @@ -0,0 +1,68 @@ +"""Mock Helm command for testing.""" + +from __future__ import annotations + +from collections.abc import Iterator +from pathlib import Path +from unittest.mock import patch + +from phalanx.storage.helm import HelmStorage + +__all__ = [ + "MockHelm", + "patch_helm", +] + + +class MockHelm: + """Mocked Helm commands captured during testing. + + This class holds a record of every Helm command that the Phalanx tooling + under test attempted to run. It is patched into the standard Helm storage + class, replacing the invocation of Helm via subprocess. + + Attributes + ---------- + call_args_list + Each call to Helm, as a list of arguments to the Helm command. The + name is chosen to match the `unittest.mock.Mock` interface. + """ + + def __init__(self) -> None: + self.call_args_list: list[list[str]] = [] + + def reset_mock(self) -> None: + """Clear the list of previous calls.""" + self.call_args_list = [] + + def run(self, command: str, *args: str, cwd: Path | None = None) -> None: + """Capture a Helm command. + + Parameters + ---------- + command + Helm subcommand being run run. + *args + Arguments for that subcommand. + cwd + If provided, the caller is requesting to change working + directories to this path before running the Helm command. + (Currently ignored.) + """ + self.call_args_list.append([command, *args]) + + +def patch_helm() -> Iterator[MockHelm]: + """Intercept Helm invocations with a mock. + + Each attempt to run a Helm command will be captured in the mock and not + actually run. + + Yields + ------ + MockHelm + Class that captures the attempted Helm commands. 
+ """ + mock = MockHelm() + with patch.object(HelmStorage, "_run_helm", side_effect=mock.run): + yield mock From aa9d7a03dbfab4ffa84ddcadc0bb0234cddfea54 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 24 Aug 2023 09:24:57 -0700 Subject: [PATCH 084/588] Add chart linting command Add a command that lints a single application chart for a given environment, using helm lint. This is just the start of what will be needed for proper linting, since helm lint doesn't check nearly as much stuff as one would like, but at least it's a start. --- src/phalanx/cli.py | 30 +++- src/phalanx/exceptions.py | 2 + src/phalanx/services/application.py | 75 ++++++++++ src/phalanx/storage/config.py | 24 +++- src/phalanx/storage/helm.py | 136 ++++++++++++++++++ tests/cli/application_test.py | 64 ++++++++- tests/data/output/idfdev/lint-set-values.json | 6 + tests/support/helm.py | 81 ++++++++++- 8 files changed, 407 insertions(+), 11 deletions(-) create mode 100644 tests/data/output/idfdev/lint-set-values.json diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 94fb4d2411..511422d941 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -24,6 +24,7 @@ "application", "application_add_helm_repos", "application_create", + "application_lint", "environment", "environment_schema", "secrets", @@ -104,7 +105,7 @@ def application() -> None: def application_add_helm_repos( name: str | None = None, *, config: Path | None ) -> None: - """Add all third-party Helm repositories to Helm. + """Configure dependency Helm repositories in Helm. 
In order to perform other Helm operations, such as linting, all third-party Helm chart repositories used by Phalanx applications have to @@ -166,6 +167,33 @@ def application_create( ) +@application.command("lint") +@click.argument("name") +@click.argument("environment") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def application_lint( + name: str, environment: str, *, config: Path | None +) -> None: + """Lint the Helm chart for an application. + + Update and download any third-party dependency charts and then lint the + Helm chart for an application as configured for the given environment. + """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + success = application_service.lint_application(name, environment) + if not success: + sys.exit(1) + + @main.group() def environment() -> None: """Commands for Phalanx environment configuration.""" diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py index fc7102d2a0..978c76488c 100644 --- a/src/phalanx/exceptions.py +++ b/src/phalanx/exceptions.py @@ -59,6 +59,8 @@ def __init__( args_str = " ".join(args) msg = f"helm {command} {args_str} failed with status {exc.returncode}" super().__init__(msg) + self.stdout = exc.stdout + self.stderr = exc.stderr class InvalidApplicationConfigError(Exception): diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index ed207df935..8bdcb40426 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -8,6 +8,7 @@ import yaml from ..exceptions import ApplicationExistsError +from ..models.environments import Environment from ..models.helm import HelmStarter from ..storage.config import ConfigStorage from ..storage.helm import HelmStorage @@ -113,6 +114,80 @@ def create_application( # Add the documentation. 
self._create_application_docs(name, description) + def lint_application(self, application: str, env_name: str) -> bool: + """Lint an application with Helm. + + Registers any required Helm repositories, refreshes them, downloads + dependencies, and runs :command:`helm lint` on the application chart, + configured for the given environment. + + Parameters + ---------- + application + Name of the application. + env_name + Name of the environment. + + Returns + ------- + bool + Whether linting passed. + """ + environment = self._config.load_environment(env_name) + self.add_helm_repositories(application) + self._helm.repo_update() + self._helm.dependency_update(application) + extra_values = self._build_injected_values(application, environment) + return self._helm.lint_application(application, env_name, extra_values) + + def _build_injected_values( + self, application: str, environment: Environment + ) -> dict[str, str]: + """Construct extra injected Helm values. + + To simulate the chart as it will be configured by Argo CD, we have to + add the values that are injected via the Argo CD application. + + Parameters + ---------- + application + Name of the application. + environment + Environment whose globals should be injected. + + Returns + ------- + dict of str + Dictionary of Helm settings to their (string) values. + + Notes + ----- + This is a bit of a hack, since it hard-codes the injected values + rather than reading them out of the ``Application`` object definition. + It therefore must be updated every time we inject a new type of value + into charts. + + All globals that would be injected into any chart are injected here, + even if this chart doesn't use them. That should be harmless, although + it doesn't exactly simulate what Argo CD does. 
+ """ + enabled_apps = [a.name for a in environment.all_applications()] + values = { + "global.enabledServices": "@" + "@".join(enabled_apps), + "global.host": environment.fqdn, + "global.baseUrl": f"https://{environment.fqdn}", + "global.vaultSecretsPath": environment.vault_path_prefix, + } + + # vault-secrets-operator gets the Vault host injected into it. Use the + # existence of its subchart configuration tree as a cue to inject the + # same thing here. + if application == "vault-secrets-operator": + key = "vault-secrets-operator.vault.address" + values[key] = str(environment.vault_url) + + return values + def _create_application_template(self, name: str) -> None: """Add the ``Application`` template and environment values setting. diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 295423fe7a..a4504be801 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -186,7 +186,7 @@ def get_application_chart_path(self, application: str) -> Path: Returns ------- - pathlib.Path + Path Path to that application's chart. """ return self._path / "applications" / application @@ -230,6 +230,20 @@ def get_starter_path(self, starter: HelmStarter) -> Path: """ return self._path / "starters" / starter.value + def list_environments(self) -> list[str]: + """List all of the available environments. + + Returns + ------- + list of str + Names of all available environments. + """ + environments_path = self._path / "environments" + return [ + v.stem.removeprefix("values-") + for v in sorted(environments_path.glob("values-*.yaml")) + ] + def load_environment(self, environment_name: str) -> Environment: """Load the configuration of a Phalanx environment from disk. @@ -316,11 +330,9 @@ def load_phalanx_config(self) -> PhalanxConfig: InvalidEnvironmentConfigError Raised if the configuration for an environment is invalid. 
""" - environments_path = self._path / "environments" - environments = [] - for values_path in sorted(environments_path.glob("values-*.yaml")): - environment_name = values_path.stem.removeprefix("values-") - environments.append(self.load_environment_config(environment_name)) + environments = [ + self.load_environment_config(e) for e in self.list_environments() + ] # Load the configurations of all applications. all_applications: set[str] = set() diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index 9ed77879d2..4e2bdbc5f9 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -3,6 +3,7 @@ from __future__ import annotations import subprocess +import sys from pathlib import Path from urllib.parse import urlparse @@ -48,6 +49,82 @@ def create(self, application: str, starter: HelmStarter) -> None: cwd=application_path.parent, ) + def dependency_update(self, application: str) -> None: + """Download chart dependencies for an application. + + Tell Helm to update any third-party chart dependencies for an + application and store them in the :file:`charts` subdirectory. This is + a prerequisite for :command:`helm lint` or :command:`helm template`. + + Assumes that remote repositories have already been refreshed with + `repo_update` and tells Helm to skip that. + + Parameters + ---------- + application + Application whose dependencies should be updated. + """ + application_path = self._config.get_application_chart_path(application) + self._run_helm( + "dependency", "update", "--skip-refresh", cwd=application_path + ) + + def lint_application( + self, application: str, environment: str, values: dict[str, str] + ) -> bool: + """Lint an application chart with Helm. + + Assumes that :command:`helm dependency update` has already been run to + download any third-party charts. Any output is sent to standard output + and standard error, and if Helm fails, a failure message will be + printed to standard error. 
+ + Parameters + ---------- + application + Name of the application. + environment + Name of the environment in which to lint that application chart, + used to select the :file:`values-{environment}.yaml` file to add. + values + Extra key/value pairs to set, reflecting the settings injected by + Argo CD. + + Returns + ------- + bool + Whether linting passed. + """ + application_path = self._config.get_application_chart_path(application) + set_arg = ",".join(f"{k}={v}" for k, v in values.items()) + try: + result = self._capture_helm( + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + f"values-{environment}.yaml", + "--set", + set_arg, + cwd=application_path, + ) + except HelmFailedError as e: + self._print_lint_output(e.stdout) + if e.stderr: + sys.stderr.write(e.stderr) + msg = ( + f"Error: Application {application} in environment" + f" {environment} has errors\n" + ) + sys.stderr.write(msg) + return False + else: + self._print_lint_output(result.stdout) + if result.stderr: + sys.stderr.write(result.stderr) + return True + def repo_add(self, url: str) -> None: """Add a Helm chart repository to Helm's cache. @@ -81,6 +158,65 @@ def repo_add(self, url: str) -> None: name = hostname self._run_helm("repo", "add", name, url) + def repo_update(self) -> None: + """Update Helm's cache of upstream repository indices.""" + self._run_helm("repo", "update") + + def _capture_helm( + self, command: str, *args: str, cwd: Path | None = None + ) -> subprocess.CompletedProcess: + """Run Helm, checking for errors and capturing the output. + + Parameters + ---------- + command + Helm subcommand to run. + *args + Arguments for that subcommand. + cwd + If provided, change working directories to this path before + running the Helm command. + + Returns + ------- + subprocess.CompletedProcess + Results of the process, containing the standard output and + standard error streams. + + Raises + ------ + HelmFailedError + Raised if Helm fails. 
+ """ + try: + result = subprocess.run( + ["helm", command, *args], + check=True, + cwd=cwd, + capture_output=True, + text=True, + ) + except subprocess.CalledProcessError as e: + raise HelmFailedError(command, args, e) from e + return result + + def _print_lint_output(self, output: str | None) -> None: + """Print filtered output from Helm's lint. + + :command:`helm lint` has no apparent way to disable certain checks, + and there are some warnings that we will never care about. + + Parameters + ---------- + output + Raw output from :command:`helm lint`. + """ + if not output: + return + for line in output.removesuffix("\n").split("\n"): + if "icon is recommended" not in line: + print(line) + def _run_helm( self, command: str, *args: str, cwd: Path | None = None ) -> None: diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index a8e982fafb..5ff8879ef7 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -3,6 +3,7 @@ from __future__ import annotations import shutil +import subprocess from pathlib import Path import yaml @@ -10,7 +11,11 @@ from phalanx.factory import Factory from ..support.cli import run_cli -from ..support.data import phalanx_test_path, read_output_data +from ..support.data import ( + phalanx_test_path, + read_output_data, + read_output_json, +) from ..support.helm import MockHelm @@ -195,3 +200,60 @@ def test_create_prompt(tmp_path: Path) -> None: with (app_path / "Chart.yaml").open() as fh: chart = yaml.safe_load(fh) assert chart["description"] == "Some application" + + +def test_lint(mock_helm: MockHelm) -> None: + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "lint": + output = ( + "==> Linting .\n" + "[INFO] Chart.yaml: icon is recommended\n" + "1 chart(s) linted, 0 chart(s) failed\n" + ) + return subprocess.CompletedProcess( + returncode=0, + args=command, + stdout=output, + stderr=None, + ) + + mock_helm.set_capture_callback(callback) + result 
= run_cli("application", "lint", "gafaelfawr", "idfdev") + assert result.output == ( + "==> Linting .\n1 chart(s) linted, 0 chart(s) failed\n" + ) + assert result.exit_code == 0 + + set_args = read_output_json("idfdev", "lint-set-values") + assert mock_helm.call_args_list == [ + ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"], + ["repo", "update"], + ["dependency", "update", "--skip-refresh"], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + ] + + def callback_error(*command: str) -> subprocess.CompletedProcess: + return subprocess.CompletedProcess( + returncode=1, + args=command, + stdout="", + stderr="Some error\n", + ) + + mock_helm.set_capture_callback(callback_error) + result = run_cli("application", "lint", "gafaelfawr", "idfdev") + assert result.output == ( + "Some error\n" + "Error: Application gafaelfawr in environment idfdev has errors\n" + ) + assert result.exit_code == 1 diff --git a/tests/data/output/idfdev/lint-set-values.json b/tests/data/output/idfdev/lint-set-values.json new file mode 100644 index 0000000000..47d57c3698 --- /dev/null +++ b/tests/data/output/idfdev/lint-set-values.json @@ -0,0 +1,6 @@ +[ + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres", + "global.host=data-dev.lsst.cloud", + "global.baseUrl=https://data-dev.lsst.cloud", + "global.vaultSecretsPath=secret/phalanx/idfdev" +] diff --git a/tests/support/helm.py b/tests/support/helm.py index f55925dfb8..6427deceeb 100644 --- a/tests/support/helm.py +++ b/tests/support/helm.py @@ -2,18 +2,30 @@ from __future__ import annotations +import subprocess from collections.abc import Iterator from pathlib import Path from unittest.mock import patch +from typing_extensions import Protocol + +from phalanx.exceptions import HelmFailedError from phalanx.storage.helm import HelmStorage __all__ = [ "MockHelm", + "MockHelmCallback", "patch_helm", ] +class 
MockHelmCallback(Protocol): + """Protocol for Helm callbacks.""" + + def __call__(*command: str) -> subprocess.CompletedProcess: + ... + + class MockHelm: """Mocked Helm commands captured during testing. @@ -30,13 +42,58 @@ class MockHelm: def __init__(self) -> None: self.call_args_list: list[list[str]] = [] + self._callback: MockHelmCallback | None = None + + def capture( + self, command: str, *args: str, cwd: Path | None = None + ) -> subprocess.CompletedProcess: + """Mock capturing the output of a Helm command. + + Parameters + ---------- + command + Helm subcommand to run. + *args + Arguments for that subcommand. + cwd + If provided, change working directories to this path before + running the Helm command. + + Returns + ------- + subprocess.CompletedProcess + Results of the process, containing the standard output and + standard error streams. + + Raises + ------ + HelmFailedError + Raised if the ``returncode`` returned by a callback is non-zero. + """ + self.call_args_list.append([command, *args]) + if self._callback: + callback = self._callback + result = callback(command, *args) + if result.returncode != 0: + exc = subprocess.CalledProcessError( + returncode=result.returncode, + cmd=[command, *args], + output=result.stdout, + stderr=result.stderr, + ) + raise HelmFailedError(command, args, exc) + return result + else: + return subprocess.CompletedProcess( + args=[command, *args], returncode=0, stdout=None, stderr=None + ) def reset_mock(self) -> None: """Clear the list of previous calls.""" self.call_args_list = [] def run(self, command: str, *args: str, cwd: Path | None = None) -> None: - """Capture a Helm command. + """Mock running a Helm command. Parameters ---------- @@ -51,6 +108,23 @@ def run(self, command: str, *args: str, cwd: Path | None = None) -> None: """ self.call_args_list.append([command, *args]) + def set_capture_callback(self, callback: MockHelmCallback) -> None: + """Set the callback called when capturing Helm command output. 
+ + If no callback is set, empty standard output and standard error will + be returned by the mock. + + Parameters + ---------- + callback + Callback run whenever the Phalanx code under test captures the + output of a Helm command. The callback will be passed the Helm + command as a list, and is expected to return a + `subprocess.CompletedProcess` object. If ``returncode`` is + non-zero, the mock will raise `subprocess.CalledProcessError`. + """ + self._callback = callback + def patch_helm() -> Iterator[MockHelm]: """Intercept Helm invocations with a mock. @@ -64,5 +138,6 @@ def patch_helm() -> Iterator[MockHelm]: Class that captures the attempted Helm commands. """ mock = MockHelm() - with patch.object(HelmStorage, "_run_helm", side_effect=mock.run): - yield mock + with patch.object(HelmStorage, "_capture_helm", side_effect=mock.capture): + with patch.object(HelmStorage, "_run_helm", side_effect=mock.run): + yield mock From da4eb36e079ac893f35c739c1e0c0f36a0b3999e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 9 Oct 2023 17:28:46 -0700 Subject: [PATCH 085/588] Support linting application for all environments Allow the environment to be omitted, in which case the application is linted for all environments for which it has a configuration. Improve the helm lint output a bit by getting rid of more useless output and rewriting the header line to include the application and environment names. 
--- src/phalanx/cli.py | 4 +-- src/phalanx/services/application.py | 51 ++++++++++++++++++++++++----- src/phalanx/storage/config.py | 28 ++++++++++++++++ src/phalanx/storage/helm.py | 25 +++++++++++--- tests/cli/application_test.py | 45 ++++++++++++++++++++++--- 5 files changed, 134 insertions(+), 19 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 511422d941..8df897965a 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -169,7 +169,7 @@ def application_create( @application.command("lint") @click.argument("name") -@click.argument("environment") +@click.argument("environment", required=False) @click.option( "-c", "--config", @@ -178,7 +178,7 @@ def application_create( help="Path to root of Phalanx configuration.", ) def application_lint( - name: str, environment: str, *, config: Path | None + name: str, environment: str | None = None, *, config: Path | None ) -> None: """Lint the Helm chart for an application. diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index 8bdcb40426..2a1f760e55 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -114,7 +114,7 @@ def create_application( # Add the documentation. self._create_application_docs(name, description) - def lint_application(self, application: str, env_name: str) -> bool: + def lint_application(self, app_name: str, env_name: str | None) -> bool: """Lint an application with Helm. Registers any required Helm repositories, refreshes them, downloads @@ -123,22 +123,33 @@ def lint_application(self, application: str, env_name: str) -> bool: Parameters ---------- - application + app_name Name of the application. env_name - Name of the environment. + Name of the environment. If not given, lint all environments for + which this application has a configuration. Returns ------- bool Whether linting passed. 
""" - environment = self._config.load_environment(env_name) - self.add_helm_repositories(application) + self.add_helm_repositories(app_name) self._helm.repo_update() - self._helm.dependency_update(application) - extra_values = self._build_injected_values(application, environment) - return self._helm.lint_application(application, env_name, extra_values) + self._helm.dependency_update(app_name) + if env_name: + environments = [self._config.load_environment(env_name)] + else: + env_names = self._config.get_application_environments(app_name) + environments = [ + self._config.load_environment(e) for e in env_names + ] + success = True + for environment in environments: + name = environment.name + values = self._build_injected_values(app_name, environment) + success &= self._helm.lint_application(app_name, name, values) + return success def _build_injected_values( self, application: str, environment: Environment @@ -238,3 +249,27 @@ def _create_application_docs(self, name: str, description: str) -> None: template = self._templates.get_template("application-values.md.jinja") values = template.render({"name": name}) (docs_path / "values.md").write_text(values) + + def _lint_application_environment( + self, application: str, environment: Environment + ) -> bool: + """Lint an application Helm chart for a specific environment. + + Output is printed to standard output and standard error. + + Parameters + ---------- + application + Name of the application. + environment + Environment to use for linting. + + Returns + ------- + bool + Whether the lint passes. 
+ """ + extra_values = self._build_injected_values(application, environment) + return self._helm.lint_application( + application, environment.name, extra_values + ) diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index a4504be801..a4bb76d0a2 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -191,6 +191,34 @@ def get_application_chart_path(self, application: str) -> Path: """ return self._path / "applications" / application + def get_application_environments(self, application: str) -> list[str]: + """List all environments for which an application is configured. + + This is based entirely on the presence of + :file:`values-{environment}.yaml` configuration files in the + application directory, not on which environments enable the + application. This is intentional since this is used to constrain which + environments are linted, and we want to lint applications in + environments that aren't currently enabled to ensure they've not + bitrotted. + + Parameters + ---------- + application + Name of the application. + + Returns + ------- + list of str + List of environment names for which that application is + configured. + """ + path = self.get_application_chart_path(application) + return [ + v.stem.removeprefix("values-") + for v in sorted(path.glob("values-*.yaml")) + ] + def get_dependency_repositories(self, application: str) -> set[str]: """Return URLs for dependency Helm repositories for this application. 
diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index 4e2bdbc5f9..805f3a9717 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -110,7 +110,7 @@ def lint_application( cwd=application_path, ) except HelmFailedError as e: - self._print_lint_output(e.stdout) + self._print_lint_output(application, environment, e.stdout) if e.stderr: sys.stderr.write(e.stderr) msg = ( @@ -120,7 +120,7 @@ def lint_application( sys.stderr.write(msg) return False else: - self._print_lint_output(result.stdout) + self._print_lint_output(application, environment, result.stdout) if result.stderr: sys.stderr.write(result.stderr) return True @@ -200,21 +200,36 @@ def _capture_helm( raise HelmFailedError(command, args, e) from e return result - def _print_lint_output(self, output: str | None) -> None: + def _print_lint_output( + self, application: str, environment: str, output: str | None + ) -> None: """Print filtered output from Helm's lint. :command:`helm lint` has no apparent way to disable certain checks, - and there are some warnings that we will never care about. + and there are some warnings that we will never care about. It also + doesn't have very useful output formatting. Parameters ---------- + application + Name of the application. + environment + Name of the environment in which to lint that application chart. output Raw output from :command:`helm lint`. """ if not output: return for line in output.removesuffix("\n").split("\n"): - if "icon is recommended" not in line: + if "icon is recommended" in line: + continue + if line == "": + continue + if "1 chart(s) linted" in line: + continue + if "==> Linting ." 
in line: + print(f"==> Linting {application} (environment {environment})") + else: print(line) def _run_helm( diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index 5ff8879ef7..c96208b72d 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -5,6 +5,7 @@ import shutil import subprocess from pathlib import Path +from unittest.mock import ANY import yaml @@ -209,6 +210,7 @@ def callback(*command: str) -> subprocess.CompletedProcess: output = ( "==> Linting .\n" "[INFO] Chart.yaml: icon is recommended\n" + "\n" "1 chart(s) linted, 0 chart(s) failed\n" ) return subprocess.CompletedProcess( @@ -218,13 +220,13 @@ def callback(*command: str) -> subprocess.CompletedProcess: stderr=None, ) + # Lint a single application that will succeed, and check that the icon + # line is filtered out of the output. mock_helm.set_capture_callback(callback) result = run_cli("application", "lint", "gafaelfawr", "idfdev") - assert result.output == ( - "==> Linting .\n1 chart(s) linted, 0 chart(s) failed\n" - ) + expected = "==> Linting gafaelfawr (environment idfdev)\n" + assert result.output == expected assert result.exit_code == 0 - set_args = read_output_json("idfdev", "lint-set-values") assert mock_helm.call_args_list == [ ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"], ["repo", "update"], ["dependency", "update", "--skip-refresh"], [ "lint", "--strict", "--values", "values.yaml", "--values", "values-idfdev.yaml", "--set", ",".join(set_args), ], ], ] + # Lint the same application for both environments. We won't bother to + # check the --set flag for the second environment. The important part is + # that we call helm lint twice, but all of the setup is only called once.
+ mock_helm.reset_mock() + result = run_cli("application", "lint", "gafaelfawr") + expected += "==> Linting gafaelfawr (environment minikube)\n" + assert result.output == expected + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"], + ["repo", "update"], + ["dependency", "update", "--skip-refresh"], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-minikube.yaml", + "--set", + ANY, + ], + ] + def callback_error(*command: str) -> subprocess.CompletedProcess: return subprocess.CompletedProcess( returncode=1, @@ -250,6 +286,7 @@ def callback_error(*command: str) -> subprocess.CompletedProcess: stderr="Some error\n", ) + mock_helm.reset_mock() mock_helm.set_capture_callback(callback_error) result = run_cli("application", "lint", "gafaelfawr", "idfdev") assert result.output == ( From 3e9de97c785abba62903d92eea7c2faf440b84fa Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 11:17:28 -0700 Subject: [PATCH 086/588] Add phalanx application template command Add a command to expand the templates for an application for a given environment, allowing someone to inspect the resources. For that command, still download third-party charts it depends on, but silence the output from those commands so that the output can be piped into kubectl if desired. 
--- src/phalanx/cli.py | 33 ++++++++-- src/phalanx/services/application.py | 43 +++++++++++-- src/phalanx/storage/helm.py | 95 ++++++++++++++++++++++++++--- tests/cli/application_test.py | 36 +++++++++++ tests/support/helm.py | 10 ++- 5 files changed, 199 insertions(+), 18 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 8df897965a..64749b06bc 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -25,6 +25,7 @@ "application_add_helm_repos", "application_create", "application_lint", + "application_template", "environment", "environment_schema", "secrets", @@ -162,9 +163,7 @@ def application_create( raise click.UsageError("Description must start with capital letter") factory = Factory(config) application_service = factory.create_application_service() - application_service.create_application( - name, HelmStarter(starter), description - ) + application_service.create(name, HelmStarter(starter), description) @application.command("lint") @@ -189,11 +188,37 @@ def application_lint( config = _find_config() factory = Factory(config) application_service = factory.create_application_service() - success = application_service.lint_application(name, environment) + success = application_service.lint(name, environment) if not success: sys.exit(1) +@application.command("template") +@click.argument("name") +@click.argument("environment") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def application_template( + name: str, environment: str, *, config: Path | None +) -> None: + """Expand the chart of an application for an environment. + + Print the expanded Kubernetes resources for an application as configured + for the given environment to standard output. This is intended for testing + and debugging purposes; normally, charts should be installed with Argo CD. 
+ """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + sys.stdout.write(application_service.template(name, environment)) + + @main.group() def environment() -> None: """Commands for Phalanx environment configuration.""" diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index 2a1f760e55..fb03ec0d57 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -44,7 +44,9 @@ def __init__( autoescape=jinja2.select_autoescape(disabled_extensions=["jinja"]), ) - def add_helm_repositories(self, application: str | None = None) -> None: + def add_helm_repositories( + self, application: str | None = None, *, quiet: bool = False + ) -> None: """Add all Helm repositories used by any application to Helm's cache. To perform other Helm operations, such as downloading third-party @@ -59,15 +61,17 @@ def add_helm_repositories(self, application: str | None = None) -> None: ---------- application If given, only add Helm repositories required by this application. + quiet + Whether to suppress Helm's standard output. """ if application: repo_urls = self._config.get_dependency_repositories(application) else: repo_urls = self._config.get_all_dependency_repositories() for url in sorted(repo_urls): - self._helm.repo_add(url) + self._helm.repo_add(url, quiet=quiet) - def create_application( + def create( self, name: str, starter: HelmStarter, description: str ) -> None: """Create configuration for a new application. @@ -114,7 +118,7 @@ def create_application( # Add the documentation. self._create_application_docs(name, description) - def lint_application(self, app_name: str, env_name: str | None) -> bool: + def lint(self, app_name: str, env_name: str | None) -> bool: """Lint an application with Helm. 
Registers any required Helm repositories, refreshes them, downloads @@ -151,6 +155,37 @@ def lint_application(self, app_name: str, env_name: str | None) -> bool: success &= self._helm.lint_application(app_name, name, values) return success + def template(self, app_name: str, env_name: str) -> str: + """Expand the templates of an application chart. + + Run :command:`helm template` for an application chart, passing in the + appropriate parameters for that environment. + + Parameters + ---------- + app_name + Name of the application. + env_name + Name of the environment. If not given, lint all environments for + which this application has a configuration. + + Returns + ------- + str + Output from :command:`helm template`. + + Raises + ------ + HelmFailedError + Raised if Helm fails. + """ + self.add_helm_repositories(app_name, quiet=True) + self._helm.repo_update(quiet=True) + self._helm.dependency_update(app_name, quiet=True) + environment = self._config.load_environment(env_name) + values = self._build_injected_values(app_name, environment) + return self._helm.template_application(app_name, env_name, values) + def _build_injected_values( self, application: str, environment: Environment ) -> dict[str, str]: diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index 805f3a9717..7eb321139b 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -49,7 +49,9 @@ def create(self, application: str, starter: HelmStarter) -> None: cwd=application_path.parent, ) - def dependency_update(self, application: str) -> None: + def dependency_update( + self, application: str, *, quiet: bool = False + ) -> None: """Download chart dependencies for an application. Tell Helm to update any third-party chart dependencies for an @@ -63,10 +65,16 @@ def dependency_update(self, application: str) -> None: ---------- application Application whose dependencies should be updated. + quiet + Whether to suppress Helm's standard output. 
""" application_path = self._config.get_application_chart_path(application) self._run_helm( - "dependency", "update", "--skip-refresh", cwd=application_path + "dependency", + "update", + "--skip-refresh", + cwd=application_path, + quiet=quiet, ) def lint_application( @@ -125,7 +133,7 @@ def lint_application( sys.stderr.write(result.stderr) return True - def repo_add(self, url: str) -> None: + def repo_add(self, url: str, *, quiet: bool = False) -> None: """Add a Helm chart repository to Helm's cache. Used primarily to enable Helm linting and templating, since both @@ -141,6 +149,8 @@ def repo_add(self, url: str) -> None: ---------- url Chart repository to add. + quiet + Whether to suppress Helm's standard output. Raises ------ @@ -156,11 +166,72 @@ def repo_add(self, url: str) -> None: name = hostname.split(".")[-2] else: name = hostname - self._run_helm("repo", "add", name, url) + self._run_helm("repo", "add", name, url, quiet=quiet) + + def repo_update(self, *, quiet: bool = False) -> None: + """Update Helm's cache of upstream repository indices. - def repo_update(self) -> None: - """Update Helm's cache of upstream repository indices.""" - self._run_helm("repo", "update") + Parameters + ---------- + quiet + Whether to suppress Helm's standard output. + """ + self._run_helm("repo", "update", quiet=quiet) + + def template_application( + self, application: str, environment: str, values: dict[str, str] + ) -> str: + """Expand an application chart into its Kubernetes resources. + + Runs :command:`helm template` to expand a chart into its Kubernetes + resources for a given environment. Assumes that :command:`helm + dependency update` has already been run to download any third-party + charts. Any output to standard error is passed along. + + Parameters + ---------- + application + Name of the application. + environment + Name of the environment in which to lint that application chart, + used to select the :file:`values-{environment}.yaml` file to add. 
+ values + Extra key/value pairs to set, reflecting the settings injected by + Argo CD. + + Returns + ------- + str + Kubernetes resources created by the chart. + + Raises + ------ + HelmFailedError + Raised if Helm fails. + """ + application_path = self._config.get_application_chart_path(application) + set_arg = ",".join(f"{k}={v}" for k, v in values.items()) + try: + result = self._capture_helm( + "template", + application, + str(application_path), + "--include-crds", + "--values", + f"{application}/values.yaml", + "--values", + f"{application}/values-{environment}.yaml", + "--set", + set_arg, + cwd=application_path.parent, + ) + except HelmFailedError as e: + if e.stderr: + sys.stderr.write(e.stderr) + raise + if result.stderr: + sys.stderr.write(result.stderr) + return result.stdout def _capture_helm( self, command: str, *args: str, cwd: Path | None = None @@ -233,7 +304,11 @@ def _print_lint_output( print(line) def _run_helm( - self, command: str, *args: str, cwd: Path | None = None + self, + command: str, + *args: str, + cwd: Path | None = None, + quiet: bool = False, ) -> None: """Run Helm, checking for errors. @@ -254,7 +329,9 @@ def _run_helm( HelmFailedError Raised if Helm fails. 
""" + cmdline = ["helm", command, *args] + stdout = subprocess.DEVNULL if quiet else None try: - subprocess.run(["helm", command, *args], check=True, cwd=cwd) + subprocess.run(cmdline, check=True, stdout=stdout, cwd=cwd) except subprocess.CalledProcessError as e: raise HelmFailedError(command, args, e) from e diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index c96208b72d..5945b055cf 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -294,3 +294,39 @@ def callback_error(*command: str) -> subprocess.CompletedProcess: "Error: Application gafaelfawr in environment idfdev has errors\n" ) assert result.exit_code == 1 + + +def test_template(mock_helm: MockHelm) -> None: + test_path = phalanx_test_path() + + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "template": + output = "this is some template\n" + return subprocess.CompletedProcess( + returncode=0, args=command, stdout=output, stderr=None + ) + + mock_helm.set_capture_callback(callback) + result = run_cli("application", "template", "gafaelfawr", "idfdev") + expected = "this is some template\n" + assert result.output == expected + assert result.exit_code == 0 + set_args = read_output_json("idfdev", "lint-set-values") + assert mock_helm.call_args_list == [ + ["repo", "add", "lsst-sqre", "https://lsst-sqre.github.io/charts/"], + ["repo", "update"], + ["dependency", "update", "--skip-refresh"], + [ + "template", + "gafaelfawr", + str(test_path / "applications" / "gafaelfawr"), + "--include-crds", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + ] diff --git a/tests/support/helm.py b/tests/support/helm.py index 6427deceeb..a47c7e3e3b 100644 --- a/tests/support/helm.py +++ b/tests/support/helm.py @@ -92,7 +92,13 @@ def reset_mock(self) -> None: """Clear the list of previous calls.""" self.call_args_list = [] - def run(self, command: 
str, *args: str, cwd: Path | None = None) -> None: + def run( + self, + command: str, + *args: str, + cwd: Path | None = None, + quiet: bool = False, + ) -> None: """Mock running a Helm command. Parameters @@ -105,6 +111,8 @@ def run(self, command: str, *args: str, cwd: Path | None = None) -> None: If provided, the caller is requesting to change working directories to this path before running the Helm command. (Currently ignored.) + quiet + Whether to suppress Helm's standard output. (Currently ignored.) """ self.call_args_list.append([command, *args]) From 4194177c736a4dd8fe87c6927cabd212433ff40e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 12:53:36 -0700 Subject: [PATCH 087/588] Test all application environments are known Add a test for whether all environments for which an application is configured are known. If we remove an environment, we want to clean up; history is in Git. --- tests/config_test.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/config_test.py b/tests/config_test.py index 26a6936304..41ecd190b5 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -9,6 +9,8 @@ import yaml +from phalanx.factory import Factory + _ALLOW_NO_SECRETS = ( "giftless", "linters", @@ -45,6 +47,19 @@ def test_application_version() -> None: ), f"Shared chart {shared_chart.name} has incorrect version" +def test_enviroments() -> None: + """Ensure applications don't have configs for unknown environments.""" + factory = Factory(Path.cwd()) + config_storage = factory.create_config_storage() + environments = set(config_storage.list_environments()) + for path in all_charts("applications"): + app_envs = set(config_storage.get_application_environments(path.name)) + if not app_envs <= environments: + unknown = ", ".join(sorted(app_envs - environments)) + msg = f"{path.name} configured for unknown environments: {unknown}" + raise AssertionError(msg) + + def test_secrets_defined() -> None: """Any application with a VaultSecret 
should have secrets.yaml.""" for application in all_charts("applications"): From 7a37db3b9d49b096ac1c986ba2b31aac717d6508 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 14:26:21 -0700 Subject: [PATCH 088/588] Add empty values file for strimzi Although this chart does not need a non-environment values file, accounting for the case where it could be missing would require special code in the Phalanx tooling. It's easier to create an empty file and make the chart consistent with other charts. Fix another warning during documentation generation. --- src/phalanx/storage/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index a4bb76d0a2..70d611b47d 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -186,7 +186,7 @@ def get_application_chart_path(self, application: str) -> Path: Returns ------- - Path + pathlib.Path Path to that application's chart. """ return self._path / "applications" / application From 6d8239ace147109ecdbca401f8a10f00b8be5cb1 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 15:11:41 -0700 Subject: [PATCH 089/588] Add linting of all application charts Add a new phalanx application lint-all command that lints all application charts. Use a separate command rather than reusing phalanx application lint, since it will have different flags (the --git flag to only lint modified things). 
--- src/phalanx/cli.py | 26 +++- src/phalanx/services/application.py | 54 ++++--- src/phalanx/storage/config.py | 15 +- src/phalanx/storage/helm.py | 11 ++ tests/cli/application_test.py | 8 + tests/config_test.py | 6 +- tests/data/output/idfdev/lint-all-calls.json | 149 +++++++++++++++++++ 7 files changed, 238 insertions(+), 31 deletions(-) create mode 100644 tests/data/output/idfdev/lint-all-calls.json diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 64749b06bc..9503457bf9 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -25,6 +25,7 @@ "application_add_helm_repos", "application_create", "application_lint", + "application_lint_all", "application_template", "environment", "environment_schema", @@ -188,8 +189,29 @@ def application_lint( config = _find_config() factory = Factory(config) application_service = factory.create_application_service() - success = application_service.lint(name, environment) - if not success: + if not application_service.lint(name, environment): + sys.exit(1) + + +@application.command("lint-all") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def application_lint_all(*, config: Path | None) -> None: + """Lint the Helm charts for every application and environment. + + Update and download any third-party dependency charts and then lint the + Helm charts for each application and environment combination. 
+ """ + if not config: + config = _find_config() + factory = Factory(config) + application_service = factory.create_application_service() + if not application_service.lint_all(): sys.exit(1) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index fb03ec0d57..815789ea28 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -155,6 +155,36 @@ def lint(self, app_name: str, env_name: str | None) -> bool: success &= self._helm.lint_application(app_name, name, values) return success + def lint_all(self) -> bool: + """Lint all applications with Helm. + + Registers any required Helm repositories, refreshes them, downloads + dependencies, and runs :command:`helm lint` on every combination of + application chart and configured environment. + + Returns + ------- + bool + Whether linting passed. + """ + self.add_helm_repositories() + self._helm.repo_update() + environments = { + e: self._config.load_environment(e) + for e in self._config.list_environments() + } + success = True + for app_name in self._config.list_applications(): + self._helm.dependency_update(app_name, quiet=True) + app_envs = self._config.get_application_environments(app_name) + for env_name in app_envs: + environment = environments[env_name] + values = self._build_injected_values(app_name, environment) + success &= self._helm.lint_application( + app_name, env_name, values + ) + return success + def template(self, app_name: str, env_name: str) -> str: """Expand the templates of an application chart. @@ -284,27 +314,3 @@ def _create_application_docs(self, name: str, description: str) -> None: template = self._templates.get_template("application-values.md.jinja") values = template.render({"name": name}) (docs_path / "values.md").write_text(values) - - def _lint_application_environment( - self, application: str, environment: Environment - ) -> bool: - """Lint an application Helm chart for a specific environment. 
- - Output is printed to standard output and standard error. - - Parameters - ---------- - application - Name of the application. - environment - Environment to use for linting. - - Returns - ------- - bool - Whether the lint passes. - """ - extra_values = self._build_injected_values(application, environment) - return self._helm.lint_application( - application, environment.name, extra_values - ) diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 70d611b47d..063fc06cbb 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -258,6 +258,17 @@ def get_starter_path(self, starter: HelmStarter) -> Path: """ return self._path / "starters" / starter.value + def list_applications(self) -> list[str]: + """List all available applications. + + Returns + ------- + list of str + Names of all applications. + """ + path = self._path / "applications" + return sorted(v.name for v in path.iterdir() if v.is_dir()) + def list_environments(self) -> list[str]: """List all of the available environments. @@ -266,10 +277,10 @@ def list_environments(self) -> list[str]: list of str Names of all available environments. """ - environments_path = self._path / "environments" + path = self._path / "environments" return [ v.stem.removeprefix("values-") - for v in sorted(environments_path.glob("values-*.yaml")) + for v in sorted(path.glob("values-*.yaml")) ] def load_environment(self, environment_name: str) -> Environment: diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index 7eb321139b..b08b80e972 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -104,6 +104,17 @@ def lint_application( Whether linting passed. """ application_path = self._config.get_application_chart_path(application) + + # helm lint complains about any chart without a templates directory, + # but many of our charts are wrappers around third-party charts and + # intentionally don't have such a directory. 
To silence the warning, + # create an empty templates directory if needed. Git ignores empty + # directories, so this is essentially a no-op in a Git checkout. + if not (application_path / "templates").exists(): + (application_path / "templates").mkdir() + + # Run helm lint with the appropriate flag for the environment in which + # the chart is being linted. set_arg = ",".join(f"{k}={v}" for k, v in values.items()) try: result = self._capture_helm( diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index 5945b055cf..d8fdb26cdb 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -296,6 +296,14 @@ def callback_error(*command: str) -> subprocess.CompletedProcess: assert result.exit_code == 1 +def test_lint_all(mock_helm: MockHelm) -> None: + result = run_cli("application", "lint-all") + assert result.output == "" + assert result.exit_code == 0 + expected_calls = read_output_json("idfdev", "lint-all-calls") + assert mock_helm.call_args_list == expected_calls + + def test_template(mock_helm: MockHelm) -> None: test_path = phalanx_test_path() diff --git a/tests/config_test.py b/tests/config_test.py index 41ecd190b5..ca5df3dba8 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -52,11 +52,11 @@ def test_enviroments() -> None: factory = Factory(Path.cwd()) config_storage = factory.create_config_storage() environments = set(config_storage.list_environments()) - for path in all_charts("applications"): - app_envs = set(config_storage.get_application_environments(path.name)) + for app_name in config_storage.list_applications(): + app_envs = set(config_storage.get_application_environments(app_name)) if not app_envs <= environments: unknown = ", ".join(sorted(app_envs - environments)) - msg = f"{path.name} configured for unknown environments: {unknown}" + msg = f"{app_name} configured for unknown environments: {unknown}" raise AssertionError(msg) diff --git a/tests/data/output/idfdev/lint-all-calls.json 
b/tests/data/output/idfdev/lint-all-calls.json new file mode 100644 index 0000000000..fb8df20422 --- /dev/null +++ b/tests/data/output/idfdev/lint-all-calls.json @@ -0,0 +1,149 @@ +[ + [ + "repo", + "add", + "argoproj", + "https://argoproj.github.io/argo-helm" + ], + [ + "repo", + "add", + "jupyterhub", + "https://jupyterhub.github.io/helm-chart/" + ], + [ + "repo", + "add", + "lsst-sqre", + "https://lsst-sqre.github.io/charts/" + ], + [ + "repo", + "update" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-minikube.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-usdfdev-prompt-processing.yaml", + "--set", + "global.enabledServices=@argocd,global.host=usdf-prompt-processing-dev.slac.stanford.edu,global.baseUrl=https://usdf-prompt-processing-dev.slac.stanford.edu,global.vaultSecretsPath=secret/rubin/usdf-prompt-processing-dev" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-minikube.yaml", + "--set", + 
"global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ], + [ + "lint", + "--strict", + "--values", + "values.yaml", + "--values", + "values-minikube.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ], + [ + "dependency", + "update", + "--skip-refresh" + ] +] From c14b1a4c71fc89cc6c2d1c74df4df68b5e1e341b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 16:58:38 -0700 Subject: [PATCH 090/588] Support linting only changed applications Add a --git flag to phalanx application lint-all that compares against the origin/main branch and only lints 
application and environment pairs that could have changed based on the changes between those Git branches. Pass a path argument to helm lint to make it easier to analyze the test results. --- src/phalanx/cli.py | 9 +- src/phalanx/services/application.py | 28 +++-- src/phalanx/storage/config.py | 105 +++++++++++++++++- src/phalanx/storage/helm.py | 9 +- tests/cli/application_test.py | 64 ++++++++++- .../portal/templates/_helpers.tpl | 51 +++++++++ .../portal/templates/vault-secrets.yaml | 19 ++++ tests/data/output/idfdev/lint-all-calls.json | 55 +++++---- tests/data/output/idfdev/lint-git-calls.json | 83 ++++++++++++++ 9 files changed, 374 insertions(+), 49 deletions(-) create mode 100644 tests/data/input/applications/portal/templates/_helpers.tpl create mode 100644 tests/data/input/applications/portal/templates/vault-secrets.yaml create mode 100644 tests/data/output/idfdev/lint-git-calls.json diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 9503457bf9..d099c63d12 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -201,7 +201,12 @@ def application_lint( default=None, help="Path to root of Phalanx configuration.", ) -def application_lint_all(*, config: Path | None) -> None: +@click.option( + "--git", + is_flag=True, + help="Only lint applications changed relative to origin/main.", +) +def application_lint_all(*, config: Path | None, git: bool = False) -> None: """Lint the Helm charts for every application and environment. 
Update and download any third-party dependency charts and then lint the @@ -211,7 +216,7 @@ def application_lint_all(*, config: Path | None) -> None: config = _find_config() factory = Factory(config) application_service = factory.create_application_service() - if not application_service.lint_all(): + if not application_service.lint_all(git=git): sys.exit(1) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index 815789ea28..38bb3348c0 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -155,13 +155,19 @@ def lint(self, app_name: str, env_name: str | None) -> bool: success &= self._helm.lint_application(app_name, name, values) return success - def lint_all(self) -> bool: + def lint_all(self, *, git: bool = False) -> bool: """Lint all applications with Helm. Registers any required Helm repositories, refreshes them, downloads dependencies, and runs :command:`helm lint` on every combination of application chart and configured environment. + Parameters + ---------- + git + Whether to only lint application and environment pairs that may + have been affected by Git changes relative to the main branch. 
+ Returns ------- bool @@ -169,16 +175,22 @@ def lint_all(self) -> bool: """ self.add_helm_repositories() self._helm.repo_update() - environments = { - e: self._config.load_environment(e) - for e in self._config.list_environments() - } + if git: + to_lint = self._config.get_modified_applications("origin/main") + else: + to_lint = self._config.list_application_environments() + environments: dict[str, Environment] = {} success = True - for app_name in self._config.list_applications(): + for app_name, app_envs in sorted(to_lint.items()): + if not app_envs: + continue self._helm.dependency_update(app_name, quiet=True) - app_envs = self._config.get_application_environments(app_name) for env_name in app_envs: - environment = environments[env_name] + if env_name in environments: + environment = environments[env_name] + else: + environment = self._config.load_environment(env_name) + environments[env_name] = environment values = self._build_injected_values(app_name, environment) success &= self._helm.lint_application( app_name, env_name, values diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 063fc06cbb..3a8ae637e6 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -5,10 +5,13 @@ import re from collections import defaultdict from contextlib import suppress +from dataclasses import dataclass from pathlib import Path -from typing import Any +from typing import Any, Self import yaml +from git import Diff +from git.repo import Repo from pydantic import ValidationError from ..constants import HELM_DOCLINK_ANNOTATION @@ -67,6 +70,60 @@ def _merge_overrides( return new +@dataclass +class _ApplicationChange: + """Holds the analysis of a diff affecting a Phalanx application chart.""" + + application: str + """Name of the affected application.""" + + path: str + """Path of changed file relative to the top of the chart.""" + + is_delete: bool + """Whether this change is a file deletion.""" + + @classmethod + def 
from_diff(cls, diff: Diff) -> Self: + """Create a change based on a Git diff. + + Parameters + ---------- + diff + One Git diff affecting a single file. + + Returns + ------- + _ApplicationChange + Corresponding parsed change. + + Raises + ------ + ValueError + Raised if this is not a change to an application chart. + """ + full_path = diff.b_path or diff.a_path + if not full_path: + raise ValueError("Not a change to an application") + m = re.match("applications/([^/]+)/(.+)", full_path) + if not m: + raise ValueError("Not a change to an application") + return cls( + application=m.group(1), + path=m.group(2), + is_delete=diff.change_type == "D", + ) + + @property + def affects_all_envs(self) -> bool: + """Whether this change may affect any environment.""" + if self.path in ("Chart.yaml", "values.yaml"): + return True + if self.path.startswith(("crds/", "templates/")): + return True + return False + + class ConfigStorage: """Analyze Phalanx configuration and convert it to models. @@ -243,6 +300,38 @@ def get_dependency_repositories(self, application: str) -> set[str]: repo_urls.add(repository) return repo_urls + def get_modified_applications(self, branch: str) -> dict[str, list[str]]: + """Get all modified application and environment pairs. + + Parameters + ---------- + branch + Git branch against which to compare to see what modifications + have been made. + + Returns + ------- + dict of list of str + Dictionary of all modified applications to the list of + environments configured for that application that may have been + affected. 
+ """ + result: defaultdict[str, list[str]] = defaultdict(list) + repo = Repo(str(self._path)) + diffs = repo.head.commit.diff(branch, paths=["applications"], R=True) + for diff in diffs: + try: + change = _ApplicationChange.from_diff(diff) + except ValueError: + continue + if change.affects_all_envs: + envs = self.get_application_environments(change.application) + result[change.application] = envs + if not change.is_delete: + if m := re.match("values-([^.]+).yaml$", change.path): + result[change.application].append(m.group(1)) + return result + def get_starter_path(self, starter: HelmStarter) -> Path: """Determine the path to a Helm starter template. @@ -258,6 +347,20 @@ def get_starter_path(self, starter: HelmStarter) -> Path: """ return self._path / "starters" / starter.value + def list_application_environments(self) -> dict[str, list[str]]: + """List all available applications and their environments. + + Returns + ------- + dict of list of str + Dictionary of all applications to lists of environments for which + that application has a configuration. + """ + return { + a: self.get_application_environments(a) + for a in self.list_applications() + } + def list_applications(self) -> list[str]: """List all available applications. diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index b08b80e972..e3cd4a9d8f 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -119,14 +119,15 @@ def lint_application( try: result = self._capture_helm( "lint", + application, "--strict", "--values", - "values.yaml", + f"{application}/values.yaml", "--values", - f"values-{environment}.yaml", + f"{application}/values-{environment}.yaml", "--set", set_arg, - cwd=application_path, + cwd=application_path.parent, ) except HelmFailedError as e: self._print_lint_output(application, environment, e.stdout) @@ -309,7 +310,7 @@ def _print_lint_output( continue if "1 chart(s) linted" in line: continue - if "==> Linting ." 
in line: + if line.startswith("==> Linting"): print(f"==> Linting {application} (environment {environment})") else: print(line) diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index d8fdb26cdb..363b2f27f7 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -8,6 +8,8 @@ from unittest.mock import ANY import yaml +from git.repo import Repo +from git.util import Actor from phalanx.factory import Factory @@ -234,11 +236,12 @@ def callback(*command: str) -> subprocess.CompletedProcess: ["dependency", "update", "--skip-refresh"], [ "lint", + "gafaelfawr", "--strict", "--values", - "values.yaml", + "gafaelfawr/values.yaml", "--values", - "values-idfdev.yaml", + "gafaelfawr/values-idfdev.yaml", "--set", ",".join(set_args), ], @@ -258,21 +261,23 @@ def callback(*command: str) -> subprocess.CompletedProcess: ["dependency", "update", "--skip-refresh"], [ "lint", + "gafaelfawr", "--strict", "--values", - "values.yaml", + "gafaelfawr/values.yaml", "--values", - "values-idfdev.yaml", + "gafaelfawr/values-idfdev.yaml", "--set", ",".join(set_args), ], [ "lint", + "gafaelfawr", "--strict", "--values", - "values.yaml", + "gafaelfawr/values.yaml", "--values", - "values-minikube.yaml", + "gafaelfawr/values-minikube.yaml", "--set", ANY, ], @@ -304,6 +309,53 @@ def test_lint_all(mock_helm: MockHelm) -> None: assert mock_helm.call_args_list == expected_calls +def test_lint_all_git(tmp_path: Path, mock_helm: MockHelm) -> None: + upstream_path = tmp_path / "upstream" + shutil.copytree(str(phalanx_test_path()), str(upstream_path)) + upstream_repo = Repo.init(str(upstream_path), initial_branch="main") + upstream_repo.index.add(["applications", "environments"]) + actor = Actor("Someone", "someone@example.com") + upstream_repo.index.commit("Initial commit", author=actor, committer=actor) + change_path = tmp_path / "change" + repo = Repo.clone_from(str(upstream_path), str(change_path)) + + # Now, make a few changes that should trigger 
linting. + # + # - argocd (only idfdev) + # - gafaelfawr (values change so all environments) + # - portal (templates deletion so all environments) + # - postgres (irrelevant change, no linting) + path = change_path / "applications" / "argocd" / "values-idfdev.yaml" + with path.open("a") as fh: + fh.write("foo: bar\n") + path = change_path / "applications" / "gafaelfawr" / "values.yaml" + with path.open("a") as fh: + fh.write("foo: bar\n") + repo.index.remove( + "applications/portal/templates/vault-secrets.yaml", working_tree=True + ) + repo.index.remove( + "applications/postgres/values-idfdev.yaml", working_tree=True + ) + repo.index.add(["applications"]) + repo.index.commit("Some changes", author=actor, committer=actor) + + # Okay, now we can run the lint and check the helm commands that were run + # against the expected output. + result = run_cli( + "application", + "lint-all", + "--git", + "--config", + str(change_path), + needs_config=False, + ) + assert result.output == "" + assert result.exit_code == 0 + expected_calls = read_output_json("idfdev", "lint-git-calls") + assert mock_helm.call_args_list == expected_calls + + def test_template(mock_helm: MockHelm) -> None: test_path = phalanx_test_path() diff --git a/tests/data/input/applications/portal/templates/_helpers.tpl b/tests/data/input/applications/portal/templates/_helpers.tpl new file mode 100644 index 0000000000..114b6681fe --- /dev/null +++ b/tests/data/input/applications/portal/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "portal.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "portal.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "portal.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "portal.labels" -}} +helm.sh/chart: {{ include "portal.chart" . }} +{{ include "portal.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "portal.selectorLabels" -}} +app.kubernetes.io/name: {{ include "portal.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/tests/data/input/applications/portal/templates/vault-secrets.yaml b/tests/data/input/applications/portal/templates/vault-secrets.yaml new file mode 100644 index 0000000000..c3bbbb8046 --- /dev/null +++ b/tests/data/input/applications/portal/templates/vault-secrets.yaml @@ -0,0 +1,19 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ template "portal.fullname" . }}-secret + labels: + {{- include "portal.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/portal" + type: "Opaque" +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: "pull-secret" + labels: + {{- include "portal.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/pull-secret" + type: "kubernetes.io/dockerconfigjson" diff --git a/tests/data/output/idfdev/lint-all-calls.json b/tests/data/output/idfdev/lint-all-calls.json index fb8df20422..3fd9997730 100644 --- a/tests/data/output/idfdev/lint-all-calls.json +++ b/tests/data/output/idfdev/lint-all-calls.json @@ -28,31 +28,34 @@ ], [ "lint", + "argocd", "--strict", "--values", - "values.yaml", + "argocd/values.yaml", "--values", - "values-idfdev.yaml", + "argocd/values-idfdev.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" ], [ "lint", + "argocd", "--strict", "--values", - "values.yaml", + "argocd/values.yaml", "--values", - "values-minikube.yaml", + "argocd/values-minikube.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" ], [ "lint", + "argocd", "--strict", "--values", - "values.yaml", + "argocd/values.yaml", "--values", - "values-usdfdev-prompt-processing.yaml", + "argocd/values-usdfdev-prompt-processing.yaml", "--set", "global.enabledServices=@argocd,global.host=usdf-prompt-processing-dev.slac.stanford.edu,global.baseUrl=https://usdf-prompt-processing-dev.slac.stanford.edu,global.vaultSecretsPath=secret/rubin/usdf-prompt-processing-dev" ], @@ -63,21 +66,23 @@ ], [ "lint", + "gafaelfawr", "--strict", "--values", - "values.yaml", + "gafaelfawr/values.yaml", "--values", - "values-idfdev.yaml", + "gafaelfawr/values-idfdev.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" ], [ "lint", + "gafaelfawr", "--strict", "--values", - "values.yaml", + 
"gafaelfawr/values.yaml", "--values", - "values-minikube.yaml", + "gafaelfawr/values-minikube.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" ], @@ -86,18 +91,14 @@ "update", "--skip-refresh" ], - [ - "dependency", - "update", - "--skip-refresh" - ], [ "lint", + "nublado", "--strict", "--values", - "values.yaml", + "nublado/values.yaml", "--values", - "values-idfdev.yaml", + "nublado/values-idfdev.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" ], @@ -108,11 +109,12 @@ ], [ "lint", + "portal", "--strict", "--values", - "values.yaml", + "portal/values.yaml", "--values", - "values-idfdev.yaml", + "portal/values-idfdev.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" ], @@ -123,27 +125,24 @@ ], [ "lint", + "postgres", "--strict", "--values", - "values.yaml", + "postgres/values.yaml", "--values", - "values-idfdev.yaml", + "postgres/values-idfdev.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" ], [ "lint", + "postgres", "--strict", "--values", - "values.yaml", + "postgres/values.yaml", "--values", - "values-minikube.yaml", + "postgres/values-minikube.yaml", "--set", "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" - ], - [ - "dependency", - "update", - "--skip-refresh" ] ] diff --git 
a/tests/data/output/idfdev/lint-git-calls.json b/tests/data/output/idfdev/lint-git-calls.json new file mode 100644 index 0000000000..58f97b63f5 --- /dev/null +++ b/tests/data/output/idfdev/lint-git-calls.json @@ -0,0 +1,83 @@ +[ + [ + "repo", + "add", + "argoproj", + "https://argoproj.github.io/argo-helm" + ], + [ + "repo", + "add", + "jupyterhub", + "https://jupyterhub.github.io/helm-chart/" + ], + [ + "repo", + "add", + "lsst-sqre", + "https://lsst-sqre.github.io/charts/" + ], + [ + "repo", + "update" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "argocd", + "--strict", + "--values", + "argocd/values.yaml", + "--values", + "argocd/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-idfdev.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ], + [ + "lint", + "gafaelfawr", + "--strict", + "--values", + "gafaelfawr/values.yaml", + "--values", + "gafaelfawr/values-minikube.yaml", + "--set", + "global.enabledServices=@argocd@gafaelfawr@mobu@postgres,global.host=minikube.lsst.cloud,global.baseUrl=https://minikube.lsst.cloud,global.vaultSecretsPath=secret/phalanx/minikube" + ], + [ + "dependency", + "update", + "--skip-refresh" + ], + [ + "lint", + "portal", + "--strict", + "--values", + "portal/values.yaml", + "--values", + "portal/values-idfdev.yaml", + "--set", + 
"global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + ] +] From a91566044ce554234cfe015addbd464d6b7aa65f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 10 Oct 2023 17:29:31 -0700 Subject: [PATCH 091/588] Replace chart linting with new Phalanx tool Get rid of the chart testing action and the complicated machinery that we used to expand charts and that was no longer working. Instead, run phalanx application lint-all --git. Do this via tox, so that it can share an installation cache with Python tests. --- .github/workflows/ci.yaml | 22 +++---- ct.yaml | 13 ----- pyproject.toml | 1 - src/phalanx/testing/__init__.py | 0 src/phalanx/testing/expandcharts.py | 90 ----------------------------- tox.ini | 5 ++ 6 files changed, 13 insertions(+), 118 deletions(-) delete mode 100644 ct.yaml delete mode 100644 src/phalanx/testing/__init__.py delete mode 100644 src/phalanx/testing/expandcharts.py diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index aa416af16a..51dbad3218 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -56,22 +56,16 @@ jobs: with: fetch-depth: 0 - - name: Set up Python - uses: actions/setup-python@v4 + - uses: azure/setup-helm@v3 with: - python-version: "3.11" - - - name: Install test dependencies - run: make init - - - name: Expand modified charts - run: expand-charts + # Used to query GitHub for the latest Helm release. + token: ${{ secrets.GITHUB_TOKEN }} - - name: Set up chart-testing - uses: helm/chart-testing-action@v2.4.0 - - - name: Run chart-testing (lint) - run: ct lint --all --config ct.yaml + - uses: lsst-sqre/run-tox@v1 + with: + python-version: "3.11" + tox-envs: phalanx-lint-change + cache-key-prefix: test # The minikube job always runs, but it quickly does nothing if no files that # would affect minikube were changed. 
This unfortunately requires a lot of diff --git a/ct.yaml b/ct.yaml deleted file mode 100644 index 794eff7ca4..0000000000 --- a/ct.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Configuration for helm/cert-testing, run via chart-testing-action in -# GitHub Actions. https://github.com/helm/chart-testing#configuration -# -# Intended to be run after tests/expand-services, which generates the -# services-expanded directory of charts for every combination of -# environment and base chart. - -chart-dirs: - - "applications-expanded" - - "environments" -check-version-increment: false -validate-maintainers: false -validate-chart-schema: false diff --git a/pyproject.toml b/pyproject.toml index eb71c49f1a..6583388dd1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,6 @@ classifiers = [ requires-python = ">=3.11" [project.scripts] -expand-charts = "phalanx.testing.expandcharts:main" phalanx = "phalanx.cli:main" [project.urls] diff --git a/src/phalanx/testing/__init__.py b/src/phalanx/testing/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/phalanx/testing/expandcharts.py b/src/phalanx/testing/expandcharts.py deleted file mode 100644 index 6e6714ae96..0000000000 --- a/src/phalanx/testing/expandcharts.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Expand Helm charts for testing. - -Discover the list of supported environments, find all charts that have changed -relative to main, and then expand those charts into directories for each -chart and environment pair and a values.yaml file for that environment. - -This is a workaround for limitations in the helm/chart-testing tool, which -doesn't understand multi-environment patterns. 
-""" - -from __future__ import annotations - -import shutil -from pathlib import Path -from typing import TYPE_CHECKING - -from git import DiffIndex -from git.repo import Repo - -if TYPE_CHECKING: - from collections.abc import Sequence - - -def get_changed_charts() -> list[str]: - """Get a list of charts that have changed relative to main.""" - repo = Repo(str(Path.cwd())) - - charts = [] - for path in (Path.cwd() / "applications").iterdir(): - if (path / "Chart.yaml").exists(): - diff = repo.head.commit.diff("origin/main", paths=[str(path)]) - for change_type in DiffIndex.change_type: - changes = diff.iter_change_type( - change_type # type: ignore[arg-type] - ) - if any(changes): - print("Found changed chart", path.name) - charts.append(path.name) - break - - return charts - - -def get_environments() -> list[str]: - """Get the list of supported environments.""" - science_platform_path = Path.cwd() / "environments" - - environments = [] - for path in science_platform_path.iterdir(): - name = path.name - if not name.startswith("values-"): - continue - environment = name[len("values-") : -len(".yaml")] - print("Found environment", environment) - environments.append(environment) - - return environments - - -def expand_chart(chart: str, environments: Sequence[str]) -> None: - """Expand charts from applications into applications-expanded.""" - chart_path = Path.cwd() / "applications" / chart - expanded_path = Path.cwd() / "applications-expanded" - expanded_path.mkdir(exist_ok=True) - - if (chart_path / "values.yaml").exists(): - print("Copying simple chart", chart) - shutil.copytree(chart_path, expanded_path / chart) - else: - for environment in environments: - values_path = chart_path / f"values-{environment}.yaml" - if not values_path.exists(): - continue - print("Expanding chart", chart, "for environment", environment) - chart_expanded_path = expanded_path / f"{chart}-{environment}" - shutil.copytree(chart_path, chart_expanded_path) - shutil.copyfile(values_path, 
chart_expanded_path / "values.yaml") - - -def main() -> None: - """Entry point for expand-charts command.""" - expanded_path = Path.cwd() / "applications-expanded" - if expanded_path.exists(): - shutil.rmtree(expanded_path) - expanded_path.mkdir() - - charts = get_changed_charts() - environments = get_environments() - for chart in charts: - expand_chart(chart, environments) diff --git a/tox.ini b/tox.ini index a695e6949b..76726511f6 100644 --- a/tox.ini +++ b/tox.ini @@ -46,6 +46,11 @@ deps = neophile commands = neophile update {posargs} +[testenv:phalanx-lint-change] +description = Lint application chart changes determined by Git +commands = + phalanx application lint-all --git + [testenv:py] description = Run pytest commands = From 42a529d830ee2fa03b4d2c2590d70c750a43c864 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 08:58:45 -0700 Subject: [PATCH 092/588] Add support for linting the top-level chart The top-level chart is a bit different (and simpler), but could also have lint problems if people change the application templates. Add support for linting it, either for a specific environment or for all environments, with a new phalanx environment lint command. 
--- src/phalanx/cli.py | 27 ++++++++ src/phalanx/factory.py | 13 ++++ src/phalanx/services/environment.py | 46 ++++++++++++++ src/phalanx/storage/config.py | 10 +++ src/phalanx/storage/helm.py | 54 +++++++++++++++- tests/cli/environment_test.py | 95 +++++++++++++++++++++++++++++ 6 files changed, 242 insertions(+), 3 deletions(-) create mode 100644 src/phalanx/services/environment.py diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index d099c63d12..eb404d8efe 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -28,6 +28,7 @@ "application_lint_all", "application_template", "environment", + "environment_lint", "environment_schema", "secrets", "secrets_audit", @@ -251,6 +252,32 @@ def environment() -> None: """Commands for Phalanx environment configuration.""" +@environment.command("lint") +@click.argument("environment", required=False) +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def environment_lint( + environment: str | None = None, *, config: Path | None, git: bool = False +) -> None: + """Lint the top-level Helm chart for an environment. + + Lint the parent Argo CD Helm chart that installs the Argo CD applications + for an environment. If the environment is not given, lints the + instantiation of that chart for each environment. 
+ """ + if not config: + config = _find_config() + factory = Factory(config) + environment_service = factory.create_environment_service() + if not environment_service.lint(environment): + sys.exit(1) + + @environment.command("schema") @click.option( "-o", diff --git a/src/phalanx/factory.py b/src/phalanx/factory.py index bc681f22db..4e03ed0cf9 100644 --- a/src/phalanx/factory.py +++ b/src/phalanx/factory.py @@ -5,6 +5,7 @@ from pathlib import Path from .services.application import ApplicationService +from .services.environment import EnvironmentService from .services.secrets import SecretsService from .services.vault import VaultService from .storage.config import ConfigStorage @@ -49,6 +50,18 @@ def create_config_storage(self) -> ConfigStorage: """ return ConfigStorage(self._path) + def create_environment_service(self) -> EnvironmentService: + """Create service for manipulating Phalanx environments. + + Returns + ------- + EnvironmentService + Service for manipulating environments. + """ + config_storage = self.create_config_storage() + helm_storage = HelmStorage(config_storage) + return EnvironmentService(config_storage, helm_storage) + def create_secrets_service(self) -> SecretsService: """Create service for manipulating Phalanx secrets. diff --git a/src/phalanx/services/environment.py b/src/phalanx/services/environment.py new file mode 100644 index 0000000000..1868f57bd2 --- /dev/null +++ b/src/phalanx/services/environment.py @@ -0,0 +1,46 @@ +"""Service for manipulating Phalanx environments.""" + +from __future__ import annotations + +from ..storage.config import ConfigStorage +from ..storage.helm import HelmStorage + +__all__ = ["EnvironmentService"] + + +class EnvironmentService: + """Service for manipulating Phalanx environments. + + Parameters + ---------- + config_storage + Storage object for the Phalanx configuration. + helm_storage + Interface to Helm actions. 
+ """ + + def __init__( + self, config_storage: ConfigStorage, helm_storage: HelmStorage + ) -> None: + self._config = config_storage + self._helm = helm_storage + + def lint(self, environment: str | None = None) -> bool: + """Lint the Helm chart for environments. + + Parameters + ---------- + environment + If given, lint only the specified environment. + + Returns + ------- + bool + Whether linting passed. + """ + if environment: + return self._helm.lint_environment(environment) + success = True + for environment in self._config.list_environments(): + success &= self._helm.lint_environment(environment) + return success diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 3a8ae637e6..67c03d81f6 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -300,6 +300,16 @@ def get_dependency_repositories(self, application: str) -> set[str]: repo_urls.add(repository) return repo_urls + def get_environment_chart_path(self) -> Path: + """Determine the path to the top-level environment chart. + + Returns + ------- + pathlib.Path + Path to the top-level environment chart. + """ + return self._path / "environments" + def get_modified_applications(self, branch: str) -> dict[str, list[str]]: """Get all modified application and environment pairs. diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index e3cd4a9d8f..557ecfed0e 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -145,6 +145,50 @@ def lint_application( sys.stderr.write(result.stderr) return True + def lint_environment(self, environment: str) -> bool: + """Lint the top-level chart for an environment with Helm. + + Any output is sent to standard output and standard error, and if Helm + fails, a failure message will be printed to standard error. + + Parameters + ---------- + environment + Name of the environment. + + Returns + ------- + bool + Whether linting passed. 
+ """ + path = self._config.get_environment_chart_path() + try: + result = self._capture_helm( + "lint", + path.name, + "--strict", + "--values", + f"{path.name}/values.yaml", + "--values", + f"{path.name}/values-{environment}.yaml", + cwd=path.parent, + ) + except HelmFailedError as e: + self._print_lint_output(None, environment, e.stdout) + if e.stderr: + sys.stderr.write(e.stderr) + msg = ( + f"Error: Top-level chart for environment {environment} has" + " errors\n" + ) + sys.stderr.write(msg) + return False + else: + self._print_lint_output(None, environment, result.stdout) + if result.stderr: + sys.stderr.write(result.stderr) + return True + def repo_add(self, url: str, *, quiet: bool = False) -> None: """Add a Helm chart repository to Helm's cache. @@ -284,7 +328,7 @@ def _capture_helm( return result def _print_lint_output( - self, application: str, environment: str, output: str | None + self, application: str | None, environment: str, output: str | None ) -> None: """Print filtered output from Helm's lint. @@ -295,7 +339,7 @@ def _print_lint_output( Parameters ---------- application - Name of the application. + Name of the application, or `None` if linting the top-level chart. 
environment Name of the environment in which to lint that application chart, output @@ -303,6 +347,10 @@ def _print_lint_output( """ if not output: return + if application: + prelude = f"==> Linting {application} (environment {environment})" + else: + prelude = f"==> Linting top-level chart for {environment}" for line in output.removesuffix("\n").split("\n"): if "icon is recommended" in line: continue @@ -311,7 +359,7 @@ def _print_lint_output( if "1 chart(s) linted" in line: continue if line.startswith("==> Linting"): - print(f"==> Linting {application} (environment {environment})") + print(prelude) else: print(line) diff --git a/tests/cli/environment_test.py b/tests/cli/environment_test.py index 0d1daf2e93..554fb03012 100644 --- a/tests/cli/environment_test.py +++ b/tests/cli/environment_test.py @@ -2,9 +2,104 @@ from __future__ import annotations +import subprocess from pathlib import Path from ..support.cli import run_cli +from ..support.helm import MockHelm + + +def test_lint(mock_helm: MockHelm) -> None: + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "lint": + output = ( + "==> Linting .\n" + "[INFO] Chart.yaml: icon is recommended\n" + "\n" + "1 chart(s) linted, 0 chart(s) failed\n" + ) + return subprocess.CompletedProcess( + returncode=0, + args=command, + stdout=output, + stderr=None, + ) + + # Lint a single environment and check that the output is filtered. + mock_helm.set_capture_callback(callback) + result = run_cli("environment", "lint", "idfdev") + expected = "==> Linting top-level chart for idfdev\n" + assert result.output == expected + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-idfdev.yaml", + ] + ] + + # Lint all environments. 
+ mock_helm.reset_mock() + result = run_cli("environment", "lint") + expected += ( + "==> Linting top-level chart for minikube\n" + "==> Linting top-level chart for usdfdev-prompt-processing\n" + ) + assert result.output == expected + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-idfdev.yaml", + ], + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-minikube.yaml", + ], + [ + "lint", + "environments", + "--strict", + "--values", + "environments/values.yaml", + "--values", + "environments/values-usdfdev-prompt-processing.yaml", + ], + ] + + def callback_error(*command: str) -> subprocess.CompletedProcess: + return subprocess.CompletedProcess( + returncode=1, + args=command, + stdout="", + stderr="Some error\n", + ) + + # Test with an error. + mock_helm.reset_mock() + mock_helm.set_capture_callback(callback_error) + result = run_cli("environment", "lint", "idfdev") + assert result.output == ( + "Some error\n" + "Error: Top-level chart for environment idfdev has errors\n" + ) + assert result.exit_code == 1 def test_schema() -> None: From 5b48a1525ca4b516ed4447a156905b1a65b63735 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 08:59:44 -0700 Subject: [PATCH 093/588] Lint the top-level chart on any change In the phalanx-lint-change environment, also lint the top-level chart. Since this is fast, do it for all environments without bothering to do analysis of what may have changed. 
--- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 76726511f6..838af0bb31 100644 --- a/tox.ini +++ b/tox.ini @@ -50,6 +50,7 @@ commands = neophile update {posargs} description = Lint application chart changes determined by Git commands = phalanx application lint-all --git + phalanx environment lint [testenv:py] description = Run pytest From 3b75827757d985715a9f6c8b20b9a643c1d20c20 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 09:19:16 -0700 Subject: [PATCH 094/588] Allow branch for lint-all to be configured Add a command-line flag to configure the branch against which to compare for phalanx application lint-all --git, and set the value from GITHUB_BASE_REF if it is set, since this will be set for GitHub PRs. This will allow merge requests against non-main branches to be properly tested. --- src/phalanx/cli.py | 19 ++++++++++++++++--- src/phalanx/services/application.py | 12 ++++++------ 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index eb404d8efe..0df877b5b0 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -205,9 +205,21 @@ def application_lint( @click.option( "--git", is_flag=True, - help="Only lint applications changed relative to origin/main.", + help="Only lint applications changed relative to a Git branch.", ) -def application_lint_all(*, config: Path | None, git: bool = False) -> None: +@click.option( + "--git-branch", + type=str, + metavar="BRANCH", + default="origin/main", + show_default=True, + show_envvar=True, + envvar="GITHUB_BASE_REF", + help="Base Git branch against which to compare.", +) +def application_lint_all( + *, config: Path | None, git: bool = False, git_branch: str +) -> None: """Lint the Helm charts for every application and environment. 
Update and download any third-party dependency charts and then lint the @@ -217,7 +229,8 @@ def application_lint_all(*, config: Path | None, git: bool = False) -> None: config = _find_config() factory = Factory(config) application_service = factory.create_application_service() - if not application_service.lint_all(git=git): + changes_vs_branch = git_branch if git else None + if not application_service.lint_all(changes_vs_branch=changes_vs_branch): sys.exit(1) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index 38bb3348c0..db911aeb8c 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -155,7 +155,7 @@ def lint(self, app_name: str, env_name: str | None) -> bool: success &= self._helm.lint_application(app_name, name, values) return success - def lint_all(self, *, git: bool = False) -> bool: + def lint_all(self, *, changes_vs_branch: str | None = None) -> bool: """Lint all applications with Helm. Registers any required Helm repositories, refreshes them, downloads @@ -164,9 +164,9 @@ def lint_all(self, *, git: bool = False) -> bool: Parameters ---------- - git - Whether to only lint application and environment pairs that may - have been affected by Git changes relative to the main branch. + changes_vs_branch + If given, only lint application and environment pairs that may + have been affected by Git changes relative to the given branch. 
Returns ------- @@ -175,8 +175,8 @@ def lint_all(self, *, git: bool = False) -> bool: """ self.add_helm_repositories() self._helm.repo_update() - if git: - to_lint = self._config.get_modified_applications("origin/main") + if changes_vs_branch: + to_lint = self._config.get_modified_applications(changes_vs_branch) else: to_lint = self._config.list_application_environments() environments: dict[str, Environment] = {} From ca506894f477725cc8e457f195049126490307f7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 09:30:59 -0700 Subject: [PATCH 095/588] Only add required dependency repos in lint-all We know all of the applications that we'll be checking when calling lint-all with the --git flag, so use that list to limit which dependency repositories we configure. Downloading the index takes a noticable amount of time when doing incremental lint checks. --- src/phalanx/cli.py | 2 +- src/phalanx/services/application.py | 23 ++++++++++++-------- tests/data/output/idfdev/lint-git-calls.json | 6 ----- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 0df877b5b0..9320834249 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -119,7 +119,7 @@ def application_add_helm_repos( config = _find_config() factory = Factory(config) application_service = factory.create_application_service() - application_service.add_helm_repositories(name) + application_service.add_helm_repositories([name] if name else None) @application.command("create") diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index db911aeb8c..224ecbb6d5 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -2,6 +2,7 @@ from __future__ import annotations +from collections.abc import Iterable from pathlib import Path import jinja2 @@ -45,7 +46,7 @@ def __init__( ) def add_helm_repositories( - self, application: str | None = None, *, quiet: bool = False + self, 
applications: Iterable[str] | None = None, *, quiet: bool = False ) -> None: """Add all Helm repositories used by any application to Helm's cache. @@ -59,13 +60,17 @@ def add_helm_repositories( Parameters ---------- - application - If given, only add Helm repositories required by this application. + applications + If given, only add Helm repositories required by these + applications. quiet Whether to suppress Helm's standard output. """ - if application: - repo_urls = self._config.get_dependency_repositories(application) + if applications: + repo_urls = set() + for application in applications: + urls = self._config.get_dependency_repositories(application) + repo_urls.update(urls) else: repo_urls = self._config.get_all_dependency_repositories() for url in sorted(repo_urls): @@ -138,7 +143,7 @@ def lint(self, app_name: str, env_name: str | None) -> bool: bool Whether linting passed. """ - self.add_helm_repositories(app_name) + self.add_helm_repositories([app_name]) self._helm.repo_update() self._helm.dependency_update(app_name) if env_name: @@ -173,12 +178,12 @@ def lint_all(self, *, changes_vs_branch: str | None = None) -> bool: bool Whether linting passed. """ - self.add_helm_repositories() - self._helm.repo_update() if changes_vs_branch: to_lint = self._config.get_modified_applications(changes_vs_branch) else: to_lint = self._config.list_application_environments() + self.add_helm_repositories(to_lint.keys()) + self._helm.repo_update() environments: dict[str, Environment] = {} success = True for app_name, app_envs in sorted(to_lint.items()): @@ -221,7 +226,7 @@ def template(self, app_name: str, env_name: str) -> str: HelmFailedError Raised if Helm fails. 
""" - self.add_helm_repositories(app_name, quiet=True) + self.add_helm_repositories([app_name], quiet=True) self._helm.repo_update(quiet=True) self._helm.dependency_update(app_name, quiet=True) environment = self._config.load_environment(env_name) diff --git a/tests/data/output/idfdev/lint-git-calls.json b/tests/data/output/idfdev/lint-git-calls.json index 58f97b63f5..5a67fedca2 100644 --- a/tests/data/output/idfdev/lint-git-calls.json +++ b/tests/data/output/idfdev/lint-git-calls.json @@ -5,12 +5,6 @@ "argoproj", "https://argoproj.github.io/argo-helm" ], - [ - "repo", - "add", - "jupyterhub", - "https://jupyterhub.github.io/helm-chart/" - ], [ "repo", "add", From f3e96d2954e8475e2e8284c1cf13e04f3638a606 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 10:25:53 -0700 Subject: [PATCH 096/588] Add phalanx environment template command Add a command similar to phalanx application template that expands the top-level chart for a specific environment. --- src/phalanx/cli.py | 24 ++++++++++++++++ src/phalanx/services/environment.py | 23 +++++++++++++++ src/phalanx/storage/helm.py | 43 +++++++++++++++++++++++++++++ tests/cli/application_test.py | 3 +- tests/cli/environment_test.py | 28 +++++++++++++++++++ 5 files changed, 119 insertions(+), 2 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 9320834249..aa00e399ef 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -30,6 +30,7 @@ "environment", "environment_lint", "environment_schema", + "environment_template", "secrets", "secrets_audit", "secrets_list", @@ -318,6 +319,29 @@ def environment_schema(*, output: Path | None) -> None: sys.stdout.write(json_schema) +@environment.command("template") +@click.argument("environment") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def environment_template(environment: str, *, config: Path | None) -> None: + """Expand the top-level chart for an 
environment. + + Print the expanded Kubernetes resources for the top-level chart configured + for the given environment. This is intended for testing and debugging + purposes; normally, charts should be installed with Argo CD. + """ + if not config: + config = _find_config() + factory = Factory(config) + environment_service = factory.create_environment_service() + sys.stdout.write(environment_service.template(environment)) + + @main.group() def secrets() -> None: """Secret manipulation commands.""" diff --git a/src/phalanx/services/environment.py b/src/phalanx/services/environment.py index 1868f57bd2..edf5f67d59 100644 --- a/src/phalanx/services/environment.py +++ b/src/phalanx/services/environment.py @@ -44,3 +44,26 @@ def lint(self, environment: str | None = None) -> bool: for environment in self._config.list_environments(): success &= self._helm.lint_environment(environment) return success + + def template(self, environment: str) -> str: + """Expand the templates of the top-level chart. + + Run :command:`helm template` for a top-level chart, passing in the + appropriate parameters for the given environment. + + Parameters + ---------- + environment + Environment for which to expand the top-level chart. + + Returns + ------- + str + Output from :command:`helm template`. + + Raises + ------ + HelmFailedError + Raised if Helm fails. + """ + return self._helm.template_environment(environment) diff --git a/src/phalanx/storage/helm.py b/src/phalanx/storage/helm.py index 557ecfed0e..edefa3fe6b 100644 --- a/src/phalanx/storage/helm.py +++ b/src/phalanx/storage/helm.py @@ -289,6 +289,49 @@ def template_application( sys.stderr.write(result.stderr) return result.stdout + def template_environment(self, environment: str) -> str: + """Expand the top-level chart into its Kubernetes resources. + + Runs :command:`helm template` to expand the top-level chart into its + Kubernetes resources for a given environment. Any output to standard + error is passed along. 
+ + Parameters + ---------- + environment + Name of the environment for which to expand the chart. + + Returns + ------- + str + Kubernetes resources created by the chart. + + Raises + ------ + HelmFailedError + Raised if Helm fails. + """ + path = self._config.get_environment_chart_path() + try: + result = self._capture_helm( + "template", + "science-platform", + str(path), + "--include-crds", + "--values", + "environments/values.yaml", + "--values", + f"environments/values-{environment}.yaml", + cwd=path.parent, + ) + except HelmFailedError as e: + if e.stderr: + sys.stderr.write(e.stderr) + raise + if result.stderr: + sys.stderr.write(result.stderr) + return result.stdout + def _capture_helm( self, command: str, *args: str, cwd: Path | None = None ) -> subprocess.CompletedProcess: diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index 363b2f27f7..3caabf6b92 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -369,8 +369,7 @@ def callback(*command: str) -> subprocess.CompletedProcess: mock_helm.set_capture_callback(callback) result = run_cli("application", "template", "gafaelfawr", "idfdev") - expected = "this is some template\n" - assert result.output == expected + assert result.output == "this is some template\n" assert result.exit_code == 0 set_args = read_output_json("idfdev", "lint-set-values") assert mock_helm.call_args_list == [ diff --git a/tests/cli/environment_test.py b/tests/cli/environment_test.py index 554fb03012..eec241358c 100644 --- a/tests/cli/environment_test.py +++ b/tests/cli/environment_test.py @@ -6,6 +6,7 @@ from pathlib import Path from ..support.cli import run_cli +from ..support.data import phalanx_test_path from ..support.helm import MockHelm @@ -113,3 +114,30 @@ def test_schema() -> None: / "environment.json" ) assert result.output == current.read_text() + + +def test_template(mock_helm: MockHelm) -> None: + def callback(*command: str) -> subprocess.CompletedProcess: + output = 
None + if command[0] == "template": + output = "this is some template\n" + return subprocess.CompletedProcess( + returncode=0, args=command, stdout=output, stderr=None + ) + + mock_helm.set_capture_callback(callback) + result = run_cli("environment", "template", "idfdev") + assert result.output == "this is some template\n" + assert result.exit_code == 0 + assert mock_helm.call_args_list == [ + [ + "template", + "science-platform", + str(phalanx_test_path() / "environments"), + "--include-crds", + "--values", + "environments/values.yaml", + "--values", + "environments/values-idfdev.yaml", + ], + ] From 501beca629565c453d9fcf1167210b9cb866964b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 10:33:25 -0700 Subject: [PATCH 097/588] Add documentation for manually linting charts Tell developers about phalanx application lint and phalanx application template in the documentation about writing Helm charts. --- docs/developers/write-a-helm-chart.rst | 31 ++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/docs/developers/write-a-helm-chart.rst b/docs/developers/write-a-helm-chart.rst index 3e1ead63de..808c14e2e9 100644 --- a/docs/developers/write-a-helm-chart.rst +++ b/docs/developers/write-a-helm-chart.rst @@ -253,6 +253,37 @@ For example: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .ChartAppVersion }}" +Checking the chart +================== + +Most of the testing of your chart will have to be done by deploying it in a test Kubernetes environment. +See :doc:`add-application` for more details about how to do that. +However, you can check the chart for basic syntax and some errors in Helm templating before deploying it. + +To check your chart, run: + +.. prompt:: bash + + phalanx application lint + +Replace ```` with the name of your new application. 
+This will run :command:`helm lint` on the chart with the appropriate values files and injected settings for each environment for which it has a configuration and report any errors. +:command:`helm lint` does not check resources against their schemas, alas, but it will at least diagnose YAML and Helm templating syntax errors. + +You can limit the linting to a specific environment by additional passing the name of the environment after the name of the application. + +This lint check will also be done via GitHub Actions when you create a Phalanx PR, and the PR cannot be merged until this lint check passes. + +You can also ask for the fully-expanded Kubernetes resources that would be installed in the cluster when the chart is installed. +Do this with: + +.. prompt:: bash + + phalanx application template + +As above, replace ```` with the name of your application and ```` with the name of the environment for which you want to generate its resources. +This will print to standard output the expanded YAML Kubernetes resources that would be created in the cluster by this chart. + Examples ======== From cb18cf43253a8ff5bfce3f616f5c8aef350c9efb Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 10:34:48 -0700 Subject: [PATCH 098/588] Improve the helm installation instructions The installation instructions for helm were a bit duplicative and didn't clearly tell people to go to the Helm guide for installation instructions. --- docs/about/local-environment-setup.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/about/local-environment-setup.rst b/docs/about/local-environment-setup.rst index 7b566b282e..f480087128 100644 --- a/docs/about/local-environment-setup.rst +++ b/docs/about/local-environment-setup.rst @@ -79,9 +79,7 @@ Install helm Some Phalanx commands require Helm (v3 or later) to be available on your PATH. Any version of Helm after v3 should be okay. -You therefore must have it installed on your PATH. 
- -See the `Helm installation guide `__ for more details. +See the `Helm installation guide `__ for installation instructions. If you don't want to (or don't have access to) install helm globally on your system, you can put the binary in the :file:`bin` directory of the virtual environment you created in :ref:`about-venv`. From 65afe43fcd3ea38cf8bb1b86b0d569f7d6469b3b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 11 Oct 2023 11:28:15 -0700 Subject: [PATCH 099/588] Clarify use of application add-helm-repos Make it clear that this command is not required and this will be done by the lint commands. The command is mostly there to make it easier to test it independently and because it might be convenient. --- src/phalanx/cli.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index aa00e399ef..a60aef1361 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -111,10 +111,12 @@ def application_add_helm_repos( ) -> None: """Configure dependency Helm repositories in Helm. - In order to perform other Helm operations, such as linting, all - third-party Helm chart repositories used by Phalanx applications have to - be added to the local Helm cache. This command does that for every Phalanx - application. + Add all third-party Helm chart repositories used by Phalanx applications + to the local Helm cache. + + This will also be done as necessary by lint commands, so using this + command is not necessary. It is provided as a convenience for helping to + manage your local Helm configuration. 
""" if not config: config = _find_config() From b028a353f3aba2d1709735fad59c77c3e46295df Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 12 Oct 2023 13:08:24 -0700 Subject: [PATCH 100/588] Rework the syntax for phalanx application lint Rather than taking the environment as an optional second argument, allow multiple applications to be specified so that they can all be linted at the same time, and move the environment to a flag. This makes more sense since usually linting for all configured environments is the desired behavior (it's very fast, so usually there's no reason to save the time). --- docs/developers/write-a-helm-chart.rst | 6 +++-- src/phalanx/cli.py | 25 ++++++++++++++----- src/phalanx/services/application.py | 33 ++++++++++++++------------ tests/cli/application_test.py | 29 ++++++++++++++++------ 4 files changed, 63 insertions(+), 30 deletions(-) diff --git a/docs/developers/write-a-helm-chart.rst b/docs/developers/write-a-helm-chart.rst index 808c14e2e9..641cb1bf7e 100644 --- a/docs/developers/write-a-helm-chart.rst +++ b/docs/developers/write-a-helm-chart.rst @@ -267,10 +267,12 @@ To check your chart, run: phalanx application lint Replace ```` with the name of your new application. +Multiple applications may be listed to lint all of them. + This will run :command:`helm lint` on the chart with the appropriate values files and injected settings for each environment for which it has a configuration and report any errors. :command:`helm lint` does not check resources against their schemas, alas, but it will at least diagnose YAML and Helm templating syntax errors. -You can limit the linting to a specific environment by additional passing the name of the environment after the name of the application. +You can limit the linting to a specific environment by specifying an environment with the ``--environment`` (or ``-e`` or ``--env``) flag. 
This lint check will also be done via GitHub Actions when you create a Phalanx PR, and the PR cannot be merged until this lint check passes. @@ -281,7 +283,7 @@ Do this with: phalanx application template -As above, replace ```` with the name of your application and ```` with the name of the environment for which you want to generate its resources. +Replace ```` with the name of your application and ```` with the name of the environment for which you want to generate its resources. This will print to standard output the expanded YAML Kubernetes resources that would be created in the cluster by this chart. Examples diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index a60aef1361..6ab158f15a 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -172,8 +172,7 @@ def application_create( @application.command("lint") -@click.argument("name") -@click.argument("environment", required=False) +@click.argument("applications", metavar="APPLICATION ...", nargs=-1) @click.option( "-c", "--config", @@ -181,19 +180,33 @@ def application_create( default=None, help="Path to root of Phalanx configuration.", ) +@click.option( + "-e", + "--environment", + "--env", + type=str, + metavar="ENV", + default=None, + help="Only lint this environment.", +) def application_lint( - name: str, environment: str | None = None, *, config: Path | None + applications: list[str], + *, + environment: str | None = None, + config: Path | None, ) -> None: - """Lint the Helm chart for an application. + """Lint the Helm charts for applications. Update and download any third-party dependency charts and then lint the - Helm chart for an application as configured for the given environment. + Helm chart for the given applications. If no environment is specified, + each chart is linted for all environments for which it has a + configuration. 
""" if not config: config = _find_config() factory = Factory(config) application_service = factory.create_application_service() - if not application_service.lint(name, environment): + if not application_service.lint(applications, environment): sys.exit(1) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index 224ecbb6d5..51e76aa699 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -123,7 +123,7 @@ def create( # Add the documentation. self._create_application_docs(name, description) - def lint(self, app_name: str, env_name: str | None) -> bool: + def lint(self, app_names: list[str], env_name: str | None) -> bool: """Lint an application with Helm. Registers any required Helm repositories, refreshes them, downloads @@ -132,8 +132,8 @@ def lint(self, app_name: str, env_name: str | None) -> bool: Parameters ---------- - app_name - Name of the application. + app_names + Names of the applications to lint. env_name Name of the environment. If not given, lint all environments for which this application has a configuration. @@ -143,21 +143,24 @@ def lint(self, app_name: str, env_name: str | None) -> bool: bool Whether linting passed. 
""" - self.add_helm_repositories([app_name]) + self.add_helm_repositories(app_names) self._helm.repo_update() - self._helm.dependency_update(app_name) + environments: dict[str, Environment] = {} if env_name: - environments = [self._config.load_environment(env_name)] - else: - env_names = self._config.get_application_environments(app_name) - environments = [ - self._config.load_environment(e) for e in env_names - ] + environments[env_name] = self._config.load_environment(env_name) success = True - for environment in environments: - name = environment.name - values = self._build_injected_values(app_name, environment) - success &= self._helm.lint_application(app_name, name, values) + for app_name in app_names: + self._helm.dependency_update(app_name) + if env_name: + app_envs = [env_name] + else: + app_envs = self._config.get_application_environments(app_name) + for env in app_envs: + if env not in environments: + environments[env] = self._config.load_environment(env) + environment = environments[env] + values = self._build_injected_values(app_name, environment) + success &= self._helm.lint_application(app_name, env, values) return success def lint_all(self, *, changes_vs_branch: str | None = None) -> bool: diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index 3caabf6b92..7efd2ea2b7 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -225,7 +225,7 @@ def callback(*command: str) -> subprocess.CompletedProcess: # Lint a single application that will succeed, and check that the icon # line is filtered out of the output. 
mock_helm.set_capture_callback(callback) - result = run_cli("application", "lint", "gafaelfawr", "idfdev") + result = run_cli("application", "lint", "gafaelfawr", "-e", "idfdev") expected = "==> Linting gafaelfawr (environment idfdev)\n" assert result.output == expected assert result.exit_code == 0 @@ -247,12 +247,15 @@ def callback(*command: str) -> subprocess.CompletedProcess: ], ] - # Lint the same application for both environmments. We won't bother to - # check the --set flag for the second environment. The important part is - # that we call helm lint twice, but all of the setup is only called once. + # Lint both gafaelfawr and portal for all configured environmments. We + # won't bother to check the --set flag again. The important part is that + # we call helm lint twice, but all of the setup is only called once. mock_helm.reset_mock() - result = run_cli("application", "lint", "gafaelfawr") - expected += "==> Linting gafaelfawr (environment minikube)\n" + result = run_cli("application", "lint", "gafaelfawr", "portal") + expected += ( + "==> Linting gafaelfawr (environment minikube)\n" + "==> Linting portal (environment idfdev)\n" + ) assert result.output == expected assert result.exit_code == 0 assert mock_helm.call_args_list == [ @@ -281,6 +284,18 @@ def callback(*command: str) -> subprocess.CompletedProcess: "--set", ANY, ], + ["dependency", "update", "--skip-refresh"], + [ + "lint", + "portal", + "--strict", + "--values", + "portal/values.yaml", + "--values", + "portal/values-idfdev.yaml", + "--set", + ",".join(set_args), + ], ] def callback_error(*command: str) -> subprocess.CompletedProcess: @@ -293,7 +308,7 @@ def callback_error(*command: str) -> subprocess.CompletedProcess: mock_helm.reset_mock() mock_helm.set_capture_callback(callback_error) - result = run_cli("application", "lint", "gafaelfawr", "idfdev") + result = run_cli("application", "lint", "gafaelfawr", "--env", "idfdev") assert result.output == ( "Some error\n" "Error: Application gafaelfawr 
in environment idfdev has errors\n" From eff3f836e7ae9b7b1a84ee1bfbf7aa825f87e4c2 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 12 Oct 2023 13:12:00 -0700 Subject: [PATCH 101/588] Use better variable names for lint-all --git Try to use really explicit variable names in the API when linting only changes relative to some branch. --- src/phalanx/cli.py | 4 ++-- src/phalanx/services/application.py | 12 ++++++++---- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 6ab158f15a..220a9bf50c 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -245,8 +245,8 @@ def application_lint_all( config = _find_config() factory = Factory(config) application_service = factory.create_application_service() - changes_vs_branch = git_branch if git else None - if not application_service.lint_all(changes_vs_branch=changes_vs_branch): + branch = git_branch if git else None + if not application_service.lint_all(only_changes_from_branch=branch): sys.exit(1) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index 51e76aa699..e59ddfd34a 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -163,7 +163,7 @@ def lint(self, app_names: list[str], env_name: str | None) -> bool: success &= self._helm.lint_application(app_name, env, values) return success - def lint_all(self, *, changes_vs_branch: str | None = None) -> bool: + def lint_all(self, *, only_changes_from_branch: str | None = None) -> bool: """Lint all applications with Helm. Registers any required Helm repositories, refreshes them, downloads @@ -172,17 +172,21 @@ def lint_all(self, *, changes_vs_branch: str | None = None) -> bool: Parameters ---------- - changes_vs_branch + only_changes_from_branch If given, only lint application and environment pairs that may have been affected by Git changes relative to the given branch. 
+ In other words, assume all application chart configurations + identical to the given branch are uninteresting, and only lint the + ones that have changed. Returns ------- bool Whether linting passed. """ - if changes_vs_branch: - to_lint = self._config.get_modified_applications(changes_vs_branch) + if only_changes_from_branch: + branch = only_changes_from_branch + to_lint = self._config.get_modified_applications(branch) else: to_lint = self._config.list_application_environments() self.add_helm_repositories(to_lint.keys()) From 0d0a0602798a0fc8ba35af29db863881c3301668 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 12 Oct 2023 13:19:30 -0700 Subject: [PATCH 102/588] Add phalanx.services.environment to API docs --- docs/internals/api.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/internals/api.rst b/docs/internals/api.rst index 0f400a4fd2..6166bcd1f8 100644 --- a/docs/internals/api.rst +++ b/docs/internals/api.rst @@ -46,6 +46,9 @@ This API is only intended for use within the Phalanx code itself. .. automodapi:: phalanx.services.application :include-all-objects: +.. automodapi:: phalanx.services.environment + :include-all-objects: + .. automodapi:: phalanx.services.secrets :include-all-objects: From fba8e3fd31d0a473b52f13a5c297d47f2a6f1940 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 12 Oct 2023 10:59:13 -0700 Subject: [PATCH 103/588] Bump JupyterHub chart to 3.1.0 --- applications/nublado/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index b649507bd9..11de173acf 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -12,7 +12,7 @@ dependencies: - name: jupyterhub # This is the Zero To Jupyterhub version, *not* the version of the # Jupyterhub package itself. 
- version: "2.0.0" + version: "3.1.0" repository: https://jupyterhub.github.io/helm-chart/ annotations: From decce1f990dccf5e40e77bb5ca284992054f245c Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 12 Oct 2023 12:18:53 -0700 Subject: [PATCH 104/588] Use 0.4.0 restspawner for data-dev --- applications/nublado/values-idfdev.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 90fb492f09..2802bc096d 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -68,8 +68,11 @@ controller: server: "10.87.86.26" jupyterhub: hub: + image: + tag: "0.4.0" db: url: "postgresql://nublado3@postgres.postgres/nublado3" + upgrade: true secrets: templateSecrets: true From f2fba02073b7e393acb72cb65bee4981c2ccf6c1 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 12 Oct 2023 15:37:19 -0700 Subject: [PATCH 105/588] Bump Gafaelfawr Redis limit to 40MiB USDF dev was using 32MiB of memory for Gafaelfawr Redis for reasons that we don't understand, but this limit is low and there's no reason not to make it larger. 
--- applications/gafaelfawr/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 8001052992..c710993c43 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -434,7 +434,7 @@ redis: resources: limits: cpu: "1" - memory: "20Mi" + memory: "40Mi" requests: cpu: "50m" memory: "6Mi" From be31e192e7fe7a8366f34ed9ec866ef1cede1668 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 12 Oct 2023 14:45:04 -0700 Subject: [PATCH 106/588] disable nublado2, cachemachine, moneypenny on usdfprod remove config files related to nublado2 --- applications/cachemachine/values-usdfdev.yaml | 20 - .../cachemachine/values-usdfprod.yaml | 17 - applications/moneypenny/values-usdfdev.yaml | 3 - applications/moneypenny/values-usdfprod.yaml | 3 - applications/nublado2/values-usdfdev.yaml | 410 ------------------ applications/nublado2/values-usdfprod.yaml | 410 ------------------ environments/values-usdfprod.yaml | 3 - 7 files changed, 866 deletions(-) delete mode 100644 applications/cachemachine/values-usdfdev.yaml delete mode 100644 applications/cachemachine/values-usdfprod.yaml delete mode 100644 applications/moneypenny/values-usdfdev.yaml delete mode 100644 applications/moneypenny/values-usdfprod.yaml delete mode 100644 applications/nublado2/values-usdfdev.yaml delete mode 100644 applications/nublado2/values-usdfprod.yaml diff --git a/applications/cachemachine/values-usdfdev.yaml b/applications/cachemachine/values-usdfdev.yaml deleted file mode 100644 index 155360e916..0000000000 --- a/applications/cachemachine/values-usdfdev.yaml +++ /dev/null @@ -1,20 +0,0 @@ -image: - tag: "1.2.3" - -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "docker-registry.slac.stanford.edu", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": 
"recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/cachemachine/values-usdfprod.yaml b/applications/cachemachine/values-usdfprod.yaml deleted file mode 100644 index d9693daab3..0000000000 --- a/applications/cachemachine/values-usdfprod.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "docker-registry.slac.stanford.edu", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/moneypenny/values-usdfdev.yaml b/applications/moneypenny/values-usdfdev.yaml deleted file mode 100644 index fe9848cc82..0000000000 --- a/applications/moneypenny/values-usdfdev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -orders: - commission: [] - retire: [] diff --git a/applications/moneypenny/values-usdfprod.yaml b/applications/moneypenny/values-usdfprod.yaml deleted file mode 100644 index fe9848cc82..0000000000 --- a/applications/moneypenny/values-usdfprod.yaml +++ /dev/null @@ -1,3 +0,0 @@ -orders: - commission: [] - retire: [] diff --git a/applications/nublado2/values-usdfdev.yaml b/applications/nublado2/values-usdfdev.yaml deleted file mode 100644 index 77d80bc08e..0000000000 --- a/applications/nublado2/values-usdfdev.yaml +++ /dev/null @@ -1,410 +0,0 @@ -jupyterhub: - - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["usdf-rsp-dev.slac.stanford.edu"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://usdf-rsp-dev.slac.stanford.edu/login" - nginx.ingress.kubernetes.io/auth-url: "https://usdf-rsp-dev.slac.stanford.edu/auth?scope=exec:notebook¬ebook=true" - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30s" - 
nginx.ingress.kubernetes.io/proxy-read-timeout: "20s" - nginx.ingress.kubernetes.io/client-max-body-size: "50m" - nginx.ingress.kubernetes.io/proxy-body-size: "50m" - -config: - base_url: "https://usdf-rsp-dev.slac.stanford.edu" - butler_secret_path: "secret/rubin/usdf-rsp-dev/butler-secret" - pull_secret_path: "secret/rubin/usdf-rsp-dev/pull-secret" - cachemachine_image_policy: "desired" - - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "rubin" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_KERNEL_CONNECTED: "True" - CULL_KERNEL_INTERVAL: "300" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - CULL_TERMINAL_INTERVAL: "300" - http_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 - https_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 - no_proxy: hub.nublado2,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1 - - volumes: - - name: home - persistentVolumeClaim: - claimName: sdf-home - - name: sdf-group-rubin - persistentVolumeClaim: - claimName: sdf-group-rubin - - name: sdf-data-rubin - persistentVolumeClaim: - claimName: sdf-data-rubin - - name: fs-ddn-sdf-group-rubin - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-rubin - - name: sdf-scratch - persistentVolumeClaim: - claimName: sdf-scratch - - name: fs-ddn-sdf-group-lsst - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-lsst - volume_mounts: - - name: home - mountPath: "/home/" - - name: sdf-data-rubin - mountPath: /repo - subPath: repo - - name: 
sdf-group-rubin - mountPath: /project - subPath: g - - name: sdf-group-rubin - mountPath: /sdf/group/rubin - - name: sdf-data-rubin - mountPath: /sdf/data/rubin - - name: sdf-scratch - mountPath: /scratch - - name: fs-ddn-sdf-group-rubin - mountPath: /fs/ddn/sdf/group/rubin - - name: fs-ddn-sdf-group-lsst - mountPath: /fs/ddn/sdf/group/lsst - - - # Workaround to impose resource quotas at IDF - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - tss:x:59: - cgred:x:997: - screen:x:84: - provisionator:x:769: - rubin_users:x:4085:{% for group in groups %} - {{ group.name }}:x:{{ group.id }}:{{ user }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: gshadow - namespace: "{{ user_namespace }}" - data: - gshadow: | - root:!:: - bin:!:: - daemon:!:: - sys:!:: - adm:!:: - tty:!:: - disk:!:: - lp:!:: - mem:!:: - kmem:!:: - wheel:!:: - cdrom:!:: - mail:!:: - man:!:: - dialout:!:: - floppy:!:: - games:!:: - tape:!:: - video:!:: - ftp:!:: - lock:!:: - audio:!:: - nobody:!:: - users:!:: - utmp:!:: - utempter:!:: - input:!:: - systemd-journal:!:: - systemd-network:!:: - dbus:!:: - ssh_keys:!:: - tss:!:: - cgred:!:: - screen:!:: - provisionator:!:: - rubin_users:!::{% for g in groups %} - {{ g.name }}:!::{{ user }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - 
root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - provisionator:x:769:769:Lab provisioning user:/home/provisionator:/bin/bash - {{ user }}:x:{{ uid }}:4085:rubin_users:/home/{{ user[0] }}/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: shadow - namespace: "{{ user_namespace }}" - data: - shadow: | - root:*:18000:0:99999:7::: - bin:*:18000:0:99999:7::: - daemon:*:18000:0:99999:7::: - adm:*:18000:0:99999:7::: - lp:*:18000:0:99999:7::: - sync:*:18000:0:99999:7::: - shutdown:*:18000:0:99999:7::: - halt:*:18000:0:99999:7::: - mail:*:18000:0:99999:7::: - operator:*:18000:0:99999:7::: - games:*:18000:0:99999:7::: - ftp:*:18000:0:99999:7::: - nobody:*:18000:0:99999:7::: - systemd-network:*:18000:0:99999:7::: - dbus:*:18000:0:99999:7::: - lsst_lcl:*:18000:0:99999:7::: - tss:*:18000:0:99999:7::: - provisionator:*:18000:0:99999:7::: - {{user}}:*:18000:0:99999:7::: - - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. 
- - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: 
Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - - apiVersion: v1 - kind: ResourceQuota - metadata: - name: user-quota - namespace: "{{ user_namespace }}" - spec: - hard: - limits.cpu: 9 - limits.memory: 27Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-data-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-data-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-home - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-home - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-lsst - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-lsst - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-scratch - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-scratch - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - -vault_secret_path: "secret/rubin/usdf-rsp-dev/nublado2" - -pull-secret: - enabled: true - path: "secret/rubin/usdf-rsp-dev/pull-secret" diff --git 
a/applications/nublado2/values-usdfprod.yaml b/applications/nublado2/values-usdfprod.yaml deleted file mode 100644 index 690fd7c7bd..0000000000 --- a/applications/nublado2/values-usdfprod.yaml +++ /dev/null @@ -1,410 +0,0 @@ -jupyterhub: - - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["usdf-rsp.slac.stanford.edu"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://usdf-rsp.slac.stanford.edu/login" - nginx.ingress.kubernetes.io/auth-url: "https://usdf-rsp.slac.stanford.edu/auth?scope=exec:notebook¬ebook=true" - nginx.ingress.kubernetes.io/proxy-connect-timeout: "30s" - nginx.ingress.kubernetes.io/proxy-read-timeout: "20s" - nginx.ingress.kubernetes.io/client-max-body-size: "50m" - nginx.ingress.kubernetes.io/proxy-body-size: "50m" - -config: - base_url: "https://usdf-rsp.slac.stanford.edu" - butler_secret_path: "secret/rubin/usdf-rsp/butler-secret" - pull_secret_path: "secret/rubin/usdf-rsp/pull-secret" - cachemachine_image_policy: "desired" - - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "rubin" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_KERNEL_CONNECTED: "True" - CULL_KERNEL_INTERVAL: "300" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - CULL_TERMINAL_INTERVAL: "300" - http_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 
- https_proxy: http://sdfproxy.sdf.slac.stanford.edu:3128 - no_proxy: hub.nublado2,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1 - - volumes: - - name: home - persistentVolumeClaim: - claimName: sdf-home - - name: sdf-group-rubin - persistentVolumeClaim: - claimName: sdf-group-rubin - - name: sdf-data-rubin - persistentVolumeClaim: - claimName: sdf-data-rubin - - name: fs-ddn-sdf-group-rubin - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-rubin - - name: sdf-scratch - persistentVolumeClaim: - claimName: sdf-scratch - - name: fs-ddn-sdf-group-lsst - persistentVolumeClaim: - claimName: fs-ddn-sdf-group-lsst - volume_mounts: - - name: home - mountPath: "/home/" - - name: sdf-data-rubin - mountPath: /repo - subPath: repo - - name: sdf-group-rubin - mountPath: /project - subPath: g - - name: sdf-group-rubin - mountPath: /sdf/group/rubin - - name: sdf-data-rubin - mountPath: /sdf/data/rubin - - name: sdf-scratch - mountPath: /scratch - - name: fs-ddn-sdf-group-rubin - mountPath: /fs/ddn/sdf/group/rubin - - name: fs-ddn-sdf-group-lsst - mountPath: /fs/ddn/sdf/group/lsst - - - # Workaround to impose resource quotas at IDF - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - tss:x:59: - cgred:x:997: - screen:x:84: - provisionator:x:769: - rubin_users:x:4085:{% for group in groups %} - {{ group.name }}:x:{{ group.id }}:{{ user }}{% endfor %} - - 
apiVersion: v1 - kind: ConfigMap - metadata: - name: gshadow - namespace: "{{ user_namespace }}" - data: - gshadow: | - root:!:: - bin:!:: - daemon:!:: - sys:!:: - adm:!:: - tty:!:: - disk:!:: - lp:!:: - mem:!:: - kmem:!:: - wheel:!:: - cdrom:!:: - mail:!:: - man:!:: - dialout:!:: - floppy:!:: - games:!:: - tape:!:: - video:!:: - ftp:!:: - lock:!:: - audio:!:: - nobody:!:: - users:!:: - utmp:!:: - utempter:!:: - input:!:: - systemd-journal:!:: - systemd-network:!:: - dbus:!:: - ssh_keys:!:: - tss:!:: - cgred:!:: - screen:!:: - provisionator:!:: - rubin_users:!::{% for g in groups %} - {{ g.name }}:!::{{ user }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - provisionator:x:769:769:Lab provisioning user:/home/provisionator:/bin/bash - {{ user }}:x:{{ uid }}:4085:rubin_users:/home/{{ user[0] }}/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: shadow - namespace: "{{ user_namespace }}" - data: - shadow: | - root:*:18000:0:99999:7::: - bin:*:18000:0:99999:7::: - daemon:*:18000:0:99999:7::: - adm:*:18000:0:99999:7::: - lp:*:18000:0:99999:7::: - sync:*:18000:0:99999:7::: - shutdown:*:18000:0:99999:7::: - 
halt:*:18000:0:99999:7::: - mail:*:18000:0:99999:7::: - operator:*:18000:0:99999:7::: - games:*:18000:0:99999:7::: - ftp:*:18000:0:99999:7::: - nobody:*:18000:0:99999:7::: - systemd-network:*:18000:0:99999:7::: - dbus:*:18000:0:99999:7::: - lsst_lcl:*:18000:0:99999:7::: - tss:*:18000:0:99999:7::: - provisionator:*:18000:0:99999:7::: - {{user}}:*:18000:0:99999:7::: - - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. 
- # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - - apiVersion: v1 - kind: ResourceQuota - metadata: - name: user-quota - namespace: "{{ user_namespace }}" - spec: - hard: - limits.cpu: 9 - limits.memory: 27Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: 
PersistentVolumeClaim - metadata: - name: sdf-data-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-data-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-home - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-home - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-rubin - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-rubin - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-ddn-sdf-group-lsst - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-ddn-sdf-group-lsst - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: sdf-scratch - namespace: "{{ user_namespace }}" - spec: - storageClassName: sdf-scratch - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi - -vault_secret_path: "secret/rubin/usdf-rsp/nublado2" - -pull-secret: - enabled: true - path: "secret/rubin/usdf-rsp/pull-secret" diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index c4919839d4..b32ee742bc 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -10,13 +10,10 @@ applications: cert-manager: false ingress-nginx: false - cachemachine: true datalinker: true livetap: true mobu: true - moneypenny: true nublado: true - nublado2: true plot-navigator: true portal: true postgres: true From 366d88513167f2eb002f1f8c86431e9c3e852d21 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 12 Oct 2023 16:16:26 -0700 Subject: [PATCH 107/588] temp fix: increase gafaelfawr redis memory limit --- 
applications/gafaelfawr/values-usdfdev.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index 74d3c872c6..4eb80ac833 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -4,6 +4,13 @@ replicaCount: 2 redis: persistence: storageClass: "wekafs--sdf-k8s01" + resources: + limits: + cpu: "1" + memory: "200Mi" + requests: + cpu: "50m" + memory: "200Mi" config: internalDatabase: true From ec5866f7aa76fd655e56bc5df74077b537be06a8 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 12 Oct 2023 16:28:14 -0700 Subject: [PATCH 108/588] change request to 50Mi --- applications/gafaelfawr/values-usdfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index 4eb80ac833..d44dcdb055 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -10,7 +10,7 @@ redis: memory: "200Mi" requests: cpu: "50m" - memory: "200Mi" + memory: "50Mi" config: internalDatabase: true From 95001108b72a235eb57c0ee1085aa42e57830b57 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Fri, 29 Sep 2023 12:51:41 -0700 Subject: [PATCH 109/588] update sso for usdf environments --- applications/argocd/values-usdf-tel-rsp.yaml | 4 ++-- applications/argocd/values-usdfdev-alert-stream-broker.yaml | 4 ++-- applications/argocd/values-usdfdev-prompt-processing.yaml | 4 ++-- applications/argocd/values-usdfprod-prompt-processing.yaml | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/applications/argocd/values-usdf-tel-rsp.yaml b/applications/argocd/values-usdf-tel-rsp.yaml index 280ffe7033..8399177d91 100644 --- a/applications/argocd/values-usdf-tel-rsp.yaml +++ b/applications/argocd/values-usdf-tel-rsp.yaml 
@@ -30,8 +30,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-tel-rsp + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. diff --git a/applications/argocd/values-usdfdev-alert-stream-broker.yaml b/applications/argocd/values-usdfdev-alert-stream-broker.yaml index 61287b465c..120229e94d 100644 --- a/applications/argocd/values-usdfdev-alert-stream-broker.yaml +++ b/applications/argocd/values-usdfdev-alert-stream-broker.yaml @@ -32,8 +32,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-alert-stream-broker-dev + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. diff --git a/applications/argocd/values-usdfdev-prompt-processing.yaml b/applications/argocd/values-usdfdev-prompt-processing.yaml index 2be48eed63..6cbf7a6d7a 100644 --- a/applications/argocd/values-usdfdev-prompt-processing.yaml +++ b/applications/argocd/values-usdfdev-prompt-processing.yaml @@ -31,8 +31,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-prompt-processing-dev + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. 
diff --git a/applications/argocd/values-usdfprod-prompt-processing.yaml b/applications/argocd/values-usdfprod-prompt-processing.yaml index 8aff78959e..a2267e17f8 100644 --- a/applications/argocd/values-usdfprod-prompt-processing.yaml +++ b/applications/argocd/values-usdfprod-prompt-processing.yaml @@ -31,8 +31,8 @@ argo-cd: oidc.config: | name: SLAC issuer: https://dex.slac.stanford.edu - clientID: $oidc.clientId - clientSecret: $oidc.clientSecret + clientID: vcluster--usdf-prompt-processing + clientSecret: $dex.clientSecret # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] requestedScopes: ["openid", "profile", "email", "groups"] # Optional set of OIDC claims to request on the ID token. From 5bf7eab55f0bc598409618a051ed000014453fda Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:20:23 +0000 Subject: [PATCH 110/588] Update Helm release argo-cd to v5.46.8 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 15fff6ccad..a36043c030 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.46.7 + version: 5.46.8 repository: https://argoproj.github.io/argo-helm From e536c482f7a176aa3fbe043e5c186fb0b79b68b4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 10:20:27 +0000 Subject: [PATCH 111/588] Update Helm release ingress-nginx to v4.8.2 --- applications/ingress-nginx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index f8df8e3666..176ded64f3 100644 --- a/applications/ingress-nginx/Chart.yaml +++ 
b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.8.1 + version: 4.8.2 repository: https://kubernetes.github.io/ingress-nginx From cc946d4a6a010b5127899ea57f44738f395e2edf Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:32:29 +0000 Subject: [PATCH 112/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 449 +++++++++++++++++++++--------------------- requirements/main.txt | 8 +- 2 files changed, 232 insertions(+), 225 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 949852017a..4ac33c4f12 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -265,10 +265,12 @@ distlib==0.3.7 \ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -documenteer[guide]==1.0.0a9 \ - --hash=sha256:3661510f3acec78fa07fb20d2eb82677c302b66a283ed3911ebc54b451cd51c8 \ - --hash=sha256:53c4e5e697abe366bcff3a33a437060413627239e31a827a7cfbe254758633c6 - # via -r requirements/dev.in +documenteer[guide]==1.0.0a10 \ + --hash=sha256:01d56f716e9b8b303eb71fb25f354ba5554adbdc03f60b8bf0d155367b661c0f \ + --hash=sha256:c43505700b99873d431571249e0ded9c59faeb76860e099bc362b6ed8fe3e43e + # via + # -r requirements/dev.in + # documenteer docutils==0.19 \ --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc @@ -604,34 +606,34 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.5.1 \ - 
--hash=sha256:159aa9acb16086b79bbb0016145034a1a05360626046a929f84579ce1666b315 \ - --hash=sha256:258b22210a4a258ccd077426c7a181d789d1121aca6db73a83f79372f5569ae0 \ - --hash=sha256:26f71b535dfc158a71264e6dc805a9f8d2e60b67215ca0bfa26e2e1aa4d4d373 \ - --hash=sha256:26fb32e4d4afa205b24bf645eddfbb36a1e17e995c5c99d6d00edb24b693406a \ - --hash=sha256:2fc3a600f749b1008cc75e02b6fb3d4db8dbcca2d733030fe7a3b3502902f161 \ - --hash=sha256:32cb59609b0534f0bd67faebb6e022fe534bdb0e2ecab4290d683d248be1b275 \ - --hash=sha256:330857f9507c24de5c5724235e66858f8364a0693894342485e543f5b07c8693 \ - --hash=sha256:361da43c4f5a96173220eb53340ace68cda81845cd88218f8862dfb0adc8cddb \ - --hash=sha256:4a465ea2ca12804d5b34bb056be3a29dc47aea5973b892d0417c6a10a40b2d65 \ - --hash=sha256:51cb1323064b1099e177098cb939eab2da42fea5d818d40113957ec954fc85f4 \ - --hash=sha256:57b10c56016adce71fba6bc6e9fd45d8083f74361f629390c556738565af8eeb \ - --hash=sha256:596fae69f2bfcb7305808c75c00f81fe2829b6236eadda536f00610ac5ec2243 \ - --hash=sha256:5d627124700b92b6bbaa99f27cbe615c8ea7b3402960f6372ea7d65faf376c14 \ - --hash=sha256:6ac9c21bfe7bc9f7f1b6fae441746e6a106e48fc9de530dea29e8cd37a2c0cc4 \ - --hash=sha256:82cb6193de9bbb3844bab4c7cf80e6227d5225cc7625b068a06d005d861ad5f1 \ - --hash=sha256:8f772942d372c8cbac575be99f9cc9d9fb3bd95c8bc2de6c01411e2c84ebca8a \ - --hash=sha256:9fece120dbb041771a63eb95e4896791386fe287fefb2837258925b8326d6160 \ - --hash=sha256:a156e6390944c265eb56afa67c74c0636f10283429171018446b732f1a05af25 \ - --hash=sha256:a9ec1f695f0c25986e6f7f8778e5ce61659063268836a38c951200c57479cc12 \ - --hash=sha256:abed92d9c8f08643c7d831300b739562b0a6c9fcb028d211134fc9ab20ccad5d \ - --hash=sha256:b031b9601f1060bf1281feab89697324726ba0c0bae9d7cd7ab4b690940f0b92 \ - --hash=sha256:c543214ffdd422623e9fedd0869166c2f16affe4ba37463975043ef7d2ea8770 \ - --hash=sha256:d28ddc3e3dfeab553e743e532fb95b4e6afad51d4706dd22f28e1e5e664828d2 \ - --hash=sha256:f33592ddf9655a4894aef22d134de7393e95fcbdc2d15c1ab65828eee5c66c70 \ - 
--hash=sha256:f6b0e77db9ff4fda74de7df13f30016a0a663928d669c9f2c057048ba44f09bb \ - --hash=sha256:f757063a83970d67c444f6e01d9550a7402322af3557ce7630d3c957386fa8f5 \ - --hash=sha256:ff0cedc84184115202475bbb46dd99f8dcb87fe24d5d0ddfc0fe6b8575c88d2f +mypy==1.6.0 \ + --hash=sha256:091f53ff88cb093dcc33c29eee522c087a438df65eb92acd371161c1f4380ff0 \ + --hash=sha256:1a69db3018b87b3e6e9dd28970f983ea6c933800c9edf8c503c3135b3274d5ad \ + --hash=sha256:24f3de8b9e7021cd794ad9dfbf2e9fe3f069ff5e28cb57af6f873ffec1cb0425 \ + --hash=sha256:31eba8a7a71f0071f55227a8057468b8d2eb5bf578c8502c7f01abaec8141b2f \ + --hash=sha256:3c8835a07b8442da900db47ccfda76c92c69c3a575872a5b764332c4bacb5a0a \ + --hash=sha256:3df87094028e52766b0a59a3e46481bb98b27986ed6ded6a6cc35ecc75bb9182 \ + --hash=sha256:49499cf1e464f533fc45be54d20a6351a312f96ae7892d8e9f1708140e27ce41 \ + --hash=sha256:4c192445899c69f07874dabda7e931b0cc811ea055bf82c1ababf358b9b2a72c \ + --hash=sha256:4f3d27537abde1be6d5f2c96c29a454da333a2a271ae7d5bc7110e6d4b7beb3f \ + --hash=sha256:7469545380dddce5719e3656b80bdfbb217cfe8dbb1438532d6abc754b828fed \ + --hash=sha256:7807a2a61e636af9ca247ba8494031fb060a0a744b9fee7de3a54bed8a753323 \ + --hash=sha256:856bad61ebc7d21dbc019b719e98303dc6256cec6dcc9ebb0b214b81d6901bd8 \ + --hash=sha256:89513ddfda06b5c8ebd64f026d20a61ef264e89125dc82633f3c34eeb50e7d60 \ + --hash=sha256:8e0db37ac4ebb2fee7702767dfc1b773c7365731c22787cb99f507285014fcaf \ + --hash=sha256:971104bcb180e4fed0d7bd85504c9036346ab44b7416c75dd93b5c8c6bb7e28f \ + --hash=sha256:9e1589ca150a51d9d00bb839bfeca2f7a04f32cd62fad87a847bc0818e15d7dc \ + --hash=sha256:9f8464ed410ada641c29f5de3e6716cbdd4f460b31cf755b2af52f2d5ea79ead \ + --hash=sha256:ab98b8f6fdf669711f3abe83a745f67f50e3cbaea3998b90e8608d2b459fd566 \ + --hash=sha256:b19006055dde8a5425baa5f3b57a19fa79df621606540493e5e893500148c72f \ + --hash=sha256:c69051274762cccd13498b568ed2430f8d22baa4b179911ad0c1577d336ed849 \ + 
--hash=sha256:d2dad072e01764823d4b2f06bc7365bb1d4b6c2f38c4d42fade3c8d45b0b4b67 \ + --hash=sha256:dccd850a2e3863891871c9e16c54c742dba5470f5120ffed8152956e9e0a5e13 \ + --hash=sha256:e28d7b221898c401494f3b77db3bac78a03ad0a0fff29a950317d87885c655d2 \ + --hash=sha256:e4b7a99275a61aa22256bab5839c35fe8a6887781862471df82afb4b445daae6 \ + --hash=sha256:eb7ff4007865833c470a601498ba30462b7374342580e2346bf7884557e40531 \ + --hash=sha256:f8598307150b5722854f035d2e70a1ad9cc3c72d392c34fffd8c66d888c90f17 \ + --hash=sha256:fea451a3125bf0bfe716e5d7ad4b92033c471e4b5b3e154c67525539d14dc15a # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -645,39 +647,39 @@ nodeenv==1.8.0 \ --hash=sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2 \ --hash=sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec # via pre-commit -numpy==1.26.0 \ - --hash=sha256:020cdbee66ed46b671429c7265cf00d8ac91c046901c55684954c3958525dab2 \ - --hash=sha256:0621f7daf973d34d18b4e4bafb210bbaf1ef5e0100b5fa750bd9cde84c7ac292 \ - --hash=sha256:0792824ce2f7ea0c82ed2e4fecc29bb86bee0567a080dacaf2e0a01fe7654369 \ - --hash=sha256:09aaee96c2cbdea95de76ecb8a586cb687d281c881f5f17bfc0fb7f5890f6b91 \ - --hash=sha256:166b36197e9debc4e384e9c652ba60c0bacc216d0fc89e78f973a9760b503388 \ - --hash=sha256:186ba67fad3c60dbe8a3abff3b67a91351100f2661c8e2a80364ae6279720299 \ - --hash=sha256:306545e234503a24fe9ae95ebf84d25cba1fdc27db971aa2d9f1ab6bba19a9dd \ - --hash=sha256:436c8e9a4bdeeee84e3e59614d38c3dbd3235838a877af8c211cfcac8a80b8d3 \ - --hash=sha256:4a873a8180479bc829313e8d9798d5234dfacfc2e8a7ac188418189bb8eafbd2 \ - --hash=sha256:4acc65dd65da28060e206c8f27a573455ed724e6179941edb19f97e58161bb69 \ - --hash=sha256:51be5f8c349fdd1a5568e72713a21f518e7d6707bcf8503b528b88d33b57dc68 \ - --hash=sha256:546b7dd7e22f3c6861463bebb000646fa730e55df5ee4a0224408b5694cc6148 \ - 
--hash=sha256:5671338034b820c8d58c81ad1dafc0ed5a00771a82fccc71d6438df00302094b \ - --hash=sha256:637c58b468a69869258b8ae26f4a4c6ff8abffd4a8334c830ffb63e0feefe99a \ - --hash=sha256:767254ad364991ccfc4d81b8152912e53e103ec192d1bb4ea6b1f5a7117040be \ - --hash=sha256:7d484292eaeb3e84a51432a94f53578689ffdea3f90e10c8b203a99be5af57d8 \ - --hash=sha256:7f6bad22a791226d0a5c7c27a80a20e11cfe09ad5ef9084d4d3fc4a299cca505 \ - --hash=sha256:86f737708b366c36b76e953c46ba5827d8c27b7a8c9d0f471810728e5a2fe57c \ - --hash=sha256:8c6adc33561bd1d46f81131d5352348350fc23df4d742bb246cdfca606ea1208 \ - --hash=sha256:914b28d3215e0c721dc75db3ad6d62f51f630cb0c277e6b3bcb39519bed10bd8 \ - --hash=sha256:b44e6a09afc12952a7d2a58ca0a2429ee0d49a4f89d83a0a11052da696440e49 \ - --hash=sha256:bb0d9a1aaf5f1cb7967320e80690a1d7ff69f1d47ebc5a9bea013e3a21faec95 \ - --hash=sha256:c0b45c8b65b79337dee5134d038346d30e109e9e2e9d43464a2970e5c0e93229 \ - --hash=sha256:c2e698cb0c6dda9372ea98a0344245ee65bdc1c9dd939cceed6bb91256837896 \ - --hash=sha256:c78a22e95182fb2e7874712433eaa610478a3caf86f28c621708d35fa4fd6e7f \ - --hash=sha256:e062aa24638bb5018b7841977c360d2f5917268d125c833a686b7cbabbec496c \ - --hash=sha256:e5e18e5b14a7560d8acf1c596688f4dfd19b4f2945b245a71e5af4ddb7422feb \ - --hash=sha256:eae430ecf5794cb7ae7fa3808740b015aa80747e5266153128ef055975a72b99 \ - --hash=sha256:ee84ca3c58fe48b8ddafdeb1db87388dce2c3c3f701bf447b05e4cfcc3679112 \ - --hash=sha256:f042f66d0b4ae6d48e70e28d487376204d3cbf43b84c03bac57e28dac6151581 \ - --hash=sha256:f8db2f125746e44dce707dd44d4f4efeea8d7e2b43aace3f8d1f235cfa2733dd \ - --hash=sha256:f93fc78fe8bf15afe2b8d6b6499f1c73953169fad1e9a8dd086cdff3190e7fdf +numpy==1.26.1 \ + --hash=sha256:06934e1a22c54636a059215d6da99e23286424f316fddd979f5071093b648668 \ + --hash=sha256:1c59c046c31a43310ad0199d6299e59f57a289e22f0f36951ced1c9eac3665b9 \ + --hash=sha256:1d1bd82d539607951cac963388534da3b7ea0e18b149a53cf883d8f699178c0f \ + 
--hash=sha256:1e11668d6f756ca5ef534b5be8653d16c5352cbb210a5c2a79ff288e937010d5 \ + --hash=sha256:3649d566e2fc067597125428db15d60eb42a4e0897fc48d28cb75dc2e0454e53 \ + --hash=sha256:59227c981d43425ca5e5c01094d59eb14e8772ce6975d4b2fc1e106a833d5ae2 \ + --hash=sha256:6081aed64714a18c72b168a9276095ef9155dd7888b9e74b5987808f0dd0a974 \ + --hash=sha256:6965888d65d2848e8768824ca8288db0a81263c1efccec881cb35a0d805fcd2f \ + --hash=sha256:76ff661a867d9272cd2a99eed002470f46dbe0943a5ffd140f49be84f68ffc42 \ + --hash=sha256:78ca54b2f9daffa5f323f34cdf21e1d9779a54073f0018a3094ab907938331a2 \ + --hash=sha256:82e871307a6331b5f09efda3c22e03c095d957f04bf6bc1804f30048d0e5e7af \ + --hash=sha256:8ab9163ca8aeb7fd32fe93866490654d2f7dda4e61bc6297bf72ce07fdc02f67 \ + --hash=sha256:9696aa2e35cc41e398a6d42d147cf326f8f9d81befcb399bc1ed7ffea339b64e \ + --hash=sha256:97e5d6a9f0702c2863aaabf19f0d1b6c2628fbe476438ce0b5ce06e83085064c \ + --hash=sha256:9f42284ebf91bdf32fafac29d29d4c07e5e9d1af862ea73686581773ef9e73a7 \ + --hash=sha256:a03fb25610ef560a6201ff06df4f8105292ba56e7cdd196ea350d123fc32e24e \ + --hash=sha256:a5b411040beead47a228bde3b2241100454a6abde9df139ed087bd73fc0a4908 \ + --hash=sha256:af22f3d8e228d84d1c0c44c1fbdeb80f97a15a0abe4f080960393a00db733b66 \ + --hash=sha256:afd5ced4e5a96dac6725daeb5242a35494243f2239244fad10a90ce58b071d24 \ + --hash=sha256:b9d45d1dbb9de84894cc50efece5b09939752a2d75aab3a8b0cef6f3a35ecd6b \ + --hash=sha256:bb894accfd16b867d8643fc2ba6c8617c78ba2828051e9a69511644ce86ce83e \ + --hash=sha256:c8c6c72d4a9f831f328efb1312642a1cafafaa88981d9ab76368d50d07d93cbe \ + --hash=sha256:cd7837b2b734ca72959a1caf3309457a318c934abef7a43a14bb984e574bbb9a \ + --hash=sha256:cdd9ec98f0063d93baeb01aad472a1a0840dee302842a2746a7a8e92968f9575 \ + --hash=sha256:d1cfc92db6af1fd37a7bb58e55c8383b4aa1ba23d012bdbba26b4bcca45ac297 \ + --hash=sha256:d1d2c6b7dd618c41e202c59c1413ef9b2c8e8a15f5039e344af64195459e3104 \ + --hash=sha256:d2984cb6caaf05294b8466966627e80bf6c7afd273279077679cb010acb0e5ab \ + 
--hash=sha256:d58e8c51a7cf43090d124d5073bc29ab2755822181fcad978b12e144e5e5a4b3 \ + --hash=sha256:d78f269e0c4fd365fc2992c00353e4530d274ba68f15e968d8bc3c69ce5f5244 \ + --hash=sha256:dcfaf015b79d1f9f9c9fd0731a907407dc3e45769262d657d754c3a028586124 \ + --hash=sha256:e44ccb93f30c75dfc0c3aa3ce38f33486a75ec9abadabd4e59f114994a9c4617 \ + --hash=sha256:e509cbc488c735b43b5ffea175235cec24bbc57b227ef1acc691725beb230d1c # via # contourpy # matplotlib @@ -689,61 +691,61 @@ packaging==23.2 \ # pydata-sphinx-theme # pytest # sphinx -pillow==10.0.1 \ - --hash=sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff \ - --hash=sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f \ - --hash=sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21 \ - --hash=sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635 \ - --hash=sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a \ - --hash=sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f \ - --hash=sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1 \ - --hash=sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d \ - --hash=sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db \ - --hash=sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849 \ - --hash=sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7 \ - --hash=sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876 \ - --hash=sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3 \ - --hash=sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317 \ - --hash=sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91 \ - --hash=sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d \ - --hash=sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b \ - 
--hash=sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd \ - --hash=sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed \ - --hash=sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500 \ - --hash=sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7 \ - --hash=sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a \ - --hash=sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a \ - --hash=sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0 \ - --hash=sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf \ - --hash=sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f \ - --hash=sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1 \ - --hash=sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088 \ - --hash=sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971 \ - --hash=sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a \ - --hash=sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205 \ - --hash=sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54 \ - --hash=sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08 \ - --hash=sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21 \ - --hash=sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d \ - --hash=sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08 \ - --hash=sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e \ - --hash=sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf \ - --hash=sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b \ - --hash=sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145 \ - --hash=sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2 \ - 
--hash=sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d \ - --hash=sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d \ - --hash=sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf \ - --hash=sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad \ - --hash=sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d \ - --hash=sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1 \ - --hash=sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4 \ - --hash=sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2 \ - --hash=sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19 \ - --hash=sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37 \ - --hash=sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4 \ - --hash=sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68 \ - --hash=sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1 +pillow==10.1.0 \ + --hash=sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d \ + --hash=sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de \ + --hash=sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616 \ + --hash=sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839 \ + --hash=sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099 \ + --hash=sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a \ + --hash=sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219 \ + --hash=sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106 \ + --hash=sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b \ + --hash=sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412 \ + 
--hash=sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b \ + --hash=sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7 \ + --hash=sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2 \ + --hash=sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7 \ + --hash=sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14 \ + --hash=sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f \ + --hash=sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27 \ + --hash=sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57 \ + --hash=sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262 \ + --hash=sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28 \ + --hash=sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610 \ + --hash=sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172 \ + --hash=sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273 \ + --hash=sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e \ + --hash=sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d \ + --hash=sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818 \ + --hash=sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f \ + --hash=sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9 \ + --hash=sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01 \ + --hash=sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7 \ + --hash=sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651 \ + --hash=sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312 \ + --hash=sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80 \ + --hash=sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666 \ + 
--hash=sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061 \ + --hash=sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b \ + --hash=sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992 \ + --hash=sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593 \ + --hash=sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4 \ + --hash=sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db \ + --hash=sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba \ + --hash=sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd \ + --hash=sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e \ + --hash=sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212 \ + --hash=sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb \ + --hash=sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2 \ + --hash=sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34 \ + --hash=sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256 \ + --hash=sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f \ + --hash=sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2 \ + --hash=sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38 \ + --hash=sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996 \ + --hash=sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a \ + --hash=sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793 # via matplotlib platformdirs==3.11.0 \ --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ @@ -753,9 +755,9 @@ pluggy==1.3.0 \ --hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \ --hash=sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7 # via pytest 
-pre-commit==3.4.0 \ - --hash=sha256:6bbd5129a64cad4c0dfaeeb12cd8f7ea7e15b77028d985341478c8af3c759522 \ - --hash=sha256:96d529a951f8b677f730a7212442027e8ba53f9b04d217c4c67dc56c393ad945 +pre-commit==3.5.0 \ + --hash=sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32 \ + --hash=sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660 # via -r requirements/dev.in pybtex==0.24.0 \ --hash=sha256:818eae35b61733e5c007c3fcd2cfb75ed1bc8b4173c1f70b56cc4c0802d34755 \ @@ -1005,106 +1007,106 @@ rich==13.6.0 \ --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef # via pytest-pretty -rpds-py==0.10.4 \ - --hash=sha256:00a88003db3cc953f8656b59fc9af9d0637a1fb93c235814007988f8c153b2f2 \ - --hash=sha256:049098dabfe705e9638c55a3321137a821399c50940041a6fcce267a22c70db2 \ - --hash=sha256:08f07150c8ebbdbce1d2d51b8e9f4d588749a2af6a98035485ebe45c7ad9394e \ - --hash=sha256:125776d5db15162fdd9135372bef7fe4fb7c5f5810cf25898eb74a06a0816aec \ - --hash=sha256:13cbd79ccedc6b39c279af31ebfb0aec0467ad5d14641ddb15738bf6e4146157 \ - --hash=sha256:18d5ff7fbd305a1d564273e9eb22de83ae3cd9cd6329fddc8f12f6428a711a6a \ - --hash=sha256:1c27942722cd5039bbf5098c7e21935a96243fed00ea11a9589f3c6c6424bd84 \ - --hash=sha256:255a23bded80605e9f3997753e3a4b89c9aec9efb07ec036b1ca81440efcc1a9 \ - --hash=sha256:2573ec23ad3a59dd2bc622befac845695972f3f2d08dc1a4405d017d20a6c225 \ - --hash=sha256:2603e084054351cc65097da326570102c4c5bd07426ba8471ceaefdb0b642cc9 \ - --hash=sha256:28b4942ec7d9d6114c1e08cace0157db92ef674636a38093cab779ace5742d3a \ - --hash=sha256:28e29dac59df890972f73c511948072897f512974714a803fe793635b80ff8c7 \ - --hash=sha256:2a97406d5e08b7095428f01dac0d3c091dc072351151945a167e7968d2755559 \ - --hash=sha256:2a9e864ec051a58fdb6bb2e6da03942adb20273897bc70067aee283e62bbac4d \ - --hash=sha256:2e0e2e01c5f61ddf47e3ed2d1fe1c9136e780ca6222d57a2517b9b02afd4710c \ - 
--hash=sha256:2e79eeeff8394284b09577f36316d410525e0cf0133abb3de10660e704d3d38e \ - --hash=sha256:2f2ac8bb01f705c5caaa7fe77ffd9b03f92f1b5061b94228f6ea5eaa0fca68ad \ - --hash=sha256:32819b662e3b4c26355a4403ea2f60c0a00db45b640fe722dd12db3d2ef807fb \ - --hash=sha256:3507c459767cf24c11e9520e2a37c89674266abe8e65453e5cb66398aa47ee7b \ - --hash=sha256:362faeae52dc6ccc50c0b6a01fa2ec0830bb61c292033f3749a46040b876f4ba \ - --hash=sha256:3650eae998dc718960e90120eb45d42bd57b18b21b10cb9ee05f91bff2345d48 \ - --hash=sha256:36ff30385fb9fb3ac23a28bffdd4a230a5229ed5b15704b708b7c84bfb7fce51 \ - --hash=sha256:3bc561c183684636c0099f9c3fbab8c1671841942edbce784bb01b4707d17924 \ - --hash=sha256:3bd38b80491ef9686f719c1ad3d24d14fbd0e069988fdd4e7d1a6ffcdd7f4a13 \ - --hash=sha256:3e37f1f134037601eb4b1f46854194f0cc082435dac2ee3de11e51529f7831f2 \ - --hash=sha256:40f6e53461b19ddbb3354fe5bcf3d50d4333604ae4bf25b478333d83ca68002c \ - --hash=sha256:49db6c0a0e6626c2b97f5e7f8f7074da21cbd8ec73340c25e839a2457c007efa \ - --hash=sha256:4bcb1abecd998a72ad4e36a0fca93577fd0c059a6aacc44f16247031b98f6ff4 \ - --hash=sha256:4cb55454a20d1b935f9eaab52e6ceab624a2efd8b52927c7ae7a43e02828dbe0 \ - --hash=sha256:4f92d2372ec992c82fd7c74aa21e2a1910b3dcdc6a7e6392919a138f21d528a3 \ - --hash=sha256:576d48e1e45c211e99fc02655ade65c32a75d3e383ccfd98ce59cece133ed02c \ - --hash=sha256:58bae860d1d116e6b4e1aad0cdc48a187d5893994f56d26db0c5534df7a47afd \ - --hash=sha256:5bb3f3cb6072c73e6ec1f865d8b80419b599f1597acf33f63fbf02252aab5a03 \ - --hash=sha256:5db93f9017b384a4f194e1d89e1ce82d0a41b1fafdbbd3e0c8912baf13f2950f \ - --hash=sha256:5e41d5b334e8de4bc3f38843f31b2afa9a0c472ebf73119d3fd55cde08974bdf \ - --hash=sha256:60018626e637528a1fa64bb3a2b3e46ab7bf672052316d61c3629814d5e65052 \ - --hash=sha256:6090ba604ea06b525a231450ae5d343917a393cbf50423900dea968daf61d16f \ - --hash=sha256:628fbb8be71a103499d10b189af7764996ab2634ed7b44b423f1e19901606e0e \ - --hash=sha256:6baea8a4f6f01e69e75cfdef3edd4a4d1c4b56238febbdf123ce96d09fbff010 \ - 
--hash=sha256:6c5ca3eb817fb54bfd066740b64a2b31536eb8fe0b183dc35b09a7bd628ed680 \ - --hash=sha256:70563a1596d2e0660ca2cebb738443437fc0e38597e7cbb276de0a7363924a52 \ - --hash=sha256:7089d8bfa8064b28b2e39f5af7bf12d42f61caed884e35b9b4ea9e6fb1175077 \ - --hash=sha256:72e9b1e92830c876cd49565d8404e4dcc9928302d348ea2517bc3f9e3a873a2a \ - --hash=sha256:7c7ca791bedda059e5195cf7c6b77384657a51429357cdd23e64ac1d4973d6dc \ - --hash=sha256:7f050ceffd8c730c1619a16bbf0b9cd037dcdb94b54710928ba38c7bde67e4a4 \ - --hash=sha256:83da147124499fe41ed86edf34b4e81e951b3fe28edcc46288aac24e8a5c8484 \ - --hash=sha256:86e8d6ff15fa7a9590c0addaf3ce52fb58bda4299cab2c2d0afa404db6848dab \ - --hash=sha256:8709eb4ab477c533b7d0a76cd3065d7d95c9e25e6b9f6e27caeeb8c63e8799c9 \ - --hash=sha256:8e69bbe0ede8f7fe2616e779421bbdb37f025c802335a90f6416e4d98b368a37 \ - --hash=sha256:8f90fc6dd505867514c8b8ef68a712dc0be90031a773c1ae2ad469f04062daef \ - --hash=sha256:9123ba0f3f98ff79780eebca9984a2b525f88563844b740f94cffb9099701230 \ - --hash=sha256:927e3461dae0c09b1f2e0066e50c1a9204f8a64a3060f596e9a6742d3b307785 \ - --hash=sha256:94876c21512535955a960f42a155213315e6ab06a4ce8ce372341a2a1b143eeb \ - --hash=sha256:98c0aecf661c175ce9cb17347fc51a5c98c3e9189ca57e8fcd9348dae18541db \ - --hash=sha256:9c7e7bd1fa1f535af71dfcd3700fc83a6dc261a1204f8f5327d8ffe82e52905d \ - --hash=sha256:9e7b3ad9f53ea9e085b3d27286dd13f8290969c0a153f8a52c8b5c46002c374b \ - --hash=sha256:9f9184744fb800c9f28e155a5896ecb54816296ee79d5d1978be6a2ae60f53c4 \ - --hash=sha256:a3628815fd170a64624001bfb4e28946fd515bd672e68a1902d9e0290186eaf3 \ - --hash=sha256:a5c330cb125983c5d380fef4a4155248a276297c86d64625fdaf500157e1981c \ - --hash=sha256:aa45cc71bf23a3181b8aa62466b5a2b7b7fb90fdc01df67ca433cd4fce7ec94d \ - --hash=sha256:aab24b9bbaa3d49e666e9309556591aa00748bd24ea74257a405f7fed9e8b10d \ - --hash=sha256:ac83f5228459b84fa6279e4126a53abfdd73cd9cc183947ee5084153880f65d7 \ - --hash=sha256:ad21c60fc880204798f320387164dcacc25818a7b4ec2a0bf6b6c1d57b007d23 \ - 
--hash=sha256:ae8a32ab77a84cc870bbfb60645851ca0f7d58fd251085ad67464b1445d632ca \ - --hash=sha256:b0f1d336786cb62613c72c00578c98e5bb8cd57b49c5bae5d4ab906ca7872f98 \ - --hash=sha256:b28b9668a22ca2cfca4433441ba9acb2899624a323787a509a3dc5fbfa79c49d \ - --hash=sha256:b953d11b544ca5f2705bb77b177d8e17ab1bfd69e0fd99790a11549d2302258c \ - --hash=sha256:b9d8884d58ea8801e5906a491ab34af975091af76d1a389173db491ee7e316bb \ - --hash=sha256:ba3246c60303eab3d0e562addf25a983d60bddc36f4d1edc2510f056d19df255 \ - --hash=sha256:bd0ad98c7d72b0e4cbfe89cdfa12cd07d2fd6ed22864341cdce12b318a383442 \ - --hash=sha256:bf032367f921201deaecf221d4cc895ea84b3decf50a9c73ee106f961885a0ad \ - --hash=sha256:c31ecfc53ac03dad4928a1712f3a2893008bfba1b3cde49e1c14ff67faae2290 \ - --hash=sha256:cbec8e43cace64e63398155dc585dc479a89fef1e57ead06c22d3441e1bd09c3 \ - --hash=sha256:cc688a59c100f038fa9fec9e4ab457c2e2d1fca350fe7ea395016666f0d0a2dc \ - --hash=sha256:cd7da2adc721ccf19ac7ec86cae3a4fcaba03d9c477d5bd64ded6e9bb817bf3f \ - --hash=sha256:cd7e62e7d5bcfa38a62d8397fba6d0428b970ab7954c2197501cd1624f7f0bbb \ - --hash=sha256:d0f7f77a77c37159c9f417b8dd847f67a29e98c6acb52ee98fc6b91efbd1b2b6 \ - --hash=sha256:d230fddc60caced271cc038e43e6fb8f4dd6b2dbaa44ac9763f2d76d05b0365a \ - --hash=sha256:d37f27ad80f742ef82796af3fe091888864958ad0bc8bab03da1830fa00c6004 \ - --hash=sha256:d5ad7b1a1f6964d19b1a8acfc14bf7864f39587b3e25c16ca04f6cd1815026b3 \ - --hash=sha256:d81359911c3bb31c899c6a5c23b403bdc0279215e5b3bc0d2a692489fed38632 \ - --hash=sha256:d98802b78093c7083cc51f83da41a5be5a57d406798c9f69424bd75f8ae0812a \ - --hash=sha256:db0589e0bf41ff6ce284ab045ca89f27be1adf19e7bce26c2e7de6739a70c18b \ - --hash=sha256:ddbd113a37307638f94be5ae232a325155fd24dbfae2c56455da8724b471e7be \ - --hash=sha256:e3ece9aa6d07e18c966f14b4352a4c6f40249f6174d3d2c694c1062e19c6adbb \ - --hash=sha256:e3f9c9e5dd8eba4768e15f19044e1b5e216929a43a54b4ab329e103aed9f3eda \ - --hash=sha256:e41824343c2c129599645373992b1ce17720bb8a514f04ff9567031e1c26951e \ - 
--hash=sha256:e5dba1c11e089b526379e74f6c636202e4c5bad9a48c7416502b8a5b0d026c91 \ - --hash=sha256:e791e3d13b14d0a7921804d0efe4d7bd15508bbcf8cb7a0c1ee1a27319a5f033 \ - --hash=sha256:ec001689402b9104700b50a005c2d3d0218eae90eaa8bdbbd776fe78fe8a74b7 \ - --hash=sha256:efffa359cc69840c8793f0c05a7b663de6afa7b9078fa6c80309ee38b9db677d \ - --hash=sha256:f1f191befea279cb9669b57be97ab1785781c8bab805900e95742ebfaa9cbf1d \ - --hash=sha256:f3331a3684192659fa1090bf2b448db928152fcba08222e58106f44758ef25f7 \ - --hash=sha256:f40413d2859737ce6d95c29ce2dde0ef7cdc3063b5830ae4342fef5922c3bba7 \ - --hash=sha256:f7ea49ddf51d5ec0c3cbd95190dd15e077a3153c8d4b22a33da43b5dd2b3c640 \ - --hash=sha256:f82abb5c5b83dc30e96be99ce76239a030b62a73a13c64410e429660a5602bfd \ - --hash=sha256:fc20dadb102140dff63529e08ce6f9745dbd36e673ebb2b1c4a63e134bca81c2 \ - --hash=sha256:fd37ab9a24021821b715478357af1cf369d5a42ac7405e83e5822be00732f463 \ - --hash=sha256:ffd539d213c1ea2989ab92a5b9371ae7159c8c03cf2bcb9f2f594752f755ecd3 +rpds-py==0.10.6 \ + --hash=sha256:023574366002bf1bd751ebaf3e580aef4a468b3d3c216d2f3f7e16fdabd885ed \ + --hash=sha256:031f76fc87644a234883b51145e43985aa2d0c19b063e91d44379cd2786144f8 \ + --hash=sha256:052a832078943d2b2627aea0d19381f607fe331cc0eb5df01991268253af8417 \ + --hash=sha256:0699ab6b8c98df998c3eacf51a3b25864ca93dab157abe358af46dc95ecd9801 \ + --hash=sha256:0713631d6e2d6c316c2f7b9320a34f44abb644fc487b77161d1724d883662e31 \ + --hash=sha256:0774a46b38e70fdde0c6ded8d6d73115a7c39d7839a164cc833f170bbf539116 \ + --hash=sha256:0898173249141ee99ffcd45e3829abe7bcee47d941af7434ccbf97717df020e5 \ + --hash=sha256:09586f51a215d17efdb3a5f090d7cbf1633b7f3708f60a044757a5d48a83b393 \ + --hash=sha256:102eac53bb0bf0f9a275b438e6cf6904904908562a1463a6fc3323cf47d7a532 \ + --hash=sha256:10f32b53f424fc75ff7b713b2edb286fdbfc94bf16317890260a81c2c00385dc \ + --hash=sha256:150eec465dbc9cbca943c8e557a21afdcf9bab8aaabf386c44b794c2f94143d2 \ + 
--hash=sha256:1d7360573f1e046cb3b0dceeb8864025aa78d98be4bb69f067ec1c40a9e2d9df \ + --hash=sha256:1f36a9d751f86455dc5278517e8b65580eeee37d61606183897f122c9e51cef3 \ + --hash=sha256:24656dc36f866c33856baa3ab309da0b6a60f37d25d14be916bd3e79d9f3afcf \ + --hash=sha256:25860ed5c4e7f5e10c496ea78af46ae8d8468e0be745bd233bab9ca99bfd2647 \ + --hash=sha256:26857f0f44f0e791f4a266595a7a09d21f6b589580ee0585f330aaccccb836e3 \ + --hash=sha256:2bb2e4826be25e72013916eecd3d30f66fd076110de09f0e750163b416500721 \ + --hash=sha256:2f6da6d842195fddc1cd34c3da8a40f6e99e4a113918faa5e60bf132f917c247 \ + --hash=sha256:30adb75ecd7c2a52f5e76af50644b3e0b5ba036321c390b8e7ec1bb2a16dd43c \ + --hash=sha256:3339eca941568ed52d9ad0f1b8eb9fe0958fa245381747cecf2e9a78a5539c42 \ + --hash=sha256:34ad87a831940521d462ac11f1774edf867c34172010f5390b2f06b85dcc6014 \ + --hash=sha256:3777cc9dea0e6c464e4b24760664bd8831738cc582c1d8aacf1c3f546bef3f65 \ + --hash=sha256:3953c6926a63f8ea5514644b7afb42659b505ece4183fdaaa8f61d978754349e \ + --hash=sha256:3c4eff26eddac49d52697a98ea01b0246e44ca82ab09354e94aae8823e8bda02 \ + --hash=sha256:40578a6469e5d1df71b006936ce95804edb5df47b520c69cf5af264d462f2cbb \ + --hash=sha256:40f93086eef235623aa14dbddef1b9fb4b22b99454cb39a8d2e04c994fb9868c \ + --hash=sha256:4134aa2342f9b2ab6c33d5c172e40f9ef802c61bb9ca30d21782f6e035ed0043 \ + --hash=sha256:442626328600bde1d09dc3bb00434f5374948838ce75c41a52152615689f9403 \ + --hash=sha256:4a5ee600477b918ab345209eddafde9f91c0acd931f3776369585a1c55b04c57 \ + --hash=sha256:4ce5a708d65a8dbf3748d2474b580d606b1b9f91b5c6ab2a316e0b0cf7a4ba50 \ + --hash=sha256:516a611a2de12fbea70c78271e558f725c660ce38e0006f75139ba337d56b1f6 \ + --hash=sha256:52c215eb46307c25f9fd2771cac8135d14b11a92ae48d17968eda5aa9aaf5071 \ + --hash=sha256:53c43e10d398e365da2d4cc0bcaf0854b79b4c50ee9689652cdc72948e86f487 \ + --hash=sha256:5752b761902cd15073a527b51de76bbae63d938dc7c5c4ad1e7d8df10e765138 \ + --hash=sha256:5e8a78bd4879bff82daef48c14d5d4057f6856149094848c3ed0ecaf49f5aec2 \ + 
--hash=sha256:5ed505ec6305abd2c2c9586a7b04fbd4baf42d4d684a9c12ec6110deefe2a063 \ + --hash=sha256:5ee97c683eaface61d38ec9a489e353d36444cdebb128a27fe486a291647aff6 \ + --hash=sha256:61fa268da6e2e1cd350739bb61011121fa550aa2545762e3dc02ea177ee4de35 \ + --hash=sha256:64ccc28683666672d7c166ed465c09cee36e306c156e787acef3c0c62f90da5a \ + --hash=sha256:66414dafe4326bca200e165c2e789976cab2587ec71beb80f59f4796b786a238 \ + --hash=sha256:68fe9199184c18d997d2e4293b34327c0009a78599ce703e15cd9a0f47349bba \ + --hash=sha256:6a555ae3d2e61118a9d3e549737bb4a56ff0cec88a22bd1dfcad5b4e04759175 \ + --hash=sha256:6bdc11f9623870d75692cc33c59804b5a18d7b8a4b79ef0b00b773a27397d1f6 \ + --hash=sha256:6cf4393c7b41abbf07c88eb83e8af5013606b1cdb7f6bc96b1b3536b53a574b8 \ + --hash=sha256:6eef672de005736a6efd565577101277db6057f65640a813de6c2707dc69f396 \ + --hash=sha256:734c41f9f57cc28658d98270d3436dba65bed0cfc730d115b290e970150c540d \ + --hash=sha256:73e0a78a9b843b8c2128028864901f55190401ba38aae685350cf69b98d9f7c9 \ + --hash=sha256:775049dfa63fb58293990fc59473e659fcafd953bba1d00fc5f0631a8fd61977 \ + --hash=sha256:7854a207ef77319ec457c1eb79c361b48807d252d94348305db4f4b62f40f7f3 \ + --hash=sha256:78ca33811e1d95cac8c2e49cb86c0fb71f4d8409d8cbea0cb495b6dbddb30a55 \ + --hash=sha256:79edd779cfc46b2e15b0830eecd8b4b93f1a96649bcb502453df471a54ce7977 \ + --hash=sha256:7bf347b495b197992efc81a7408e9a83b931b2f056728529956a4d0858608b80 \ + --hash=sha256:7fde6d0e00b2fd0dbbb40c0eeec463ef147819f23725eda58105ba9ca48744f4 \ + --hash=sha256:81de24a1c51cfb32e1fbf018ab0bdbc79c04c035986526f76c33e3f9e0f3356c \ + --hash=sha256:879fb24304ead6b62dbe5034e7b644b71def53c70e19363f3c3be2705c17a3b4 \ + --hash=sha256:8e7f2219cb72474571974d29a191714d822e58be1eb171f229732bc6fdedf0ac \ + --hash=sha256:9164ec8010327ab9af931d7ccd12ab8d8b5dc2f4c6a16cbdd9d087861eaaefa1 \ + --hash=sha256:945eb4b6bb8144909b203a88a35e0a03d22b57aefb06c9b26c6e16d72e5eb0f0 \ + --hash=sha256:99a57006b4ec39dbfb3ed67e5b27192792ffb0553206a107e4aadb39c5004cd5 \ + 
--hash=sha256:9e9184fa6c52a74a5521e3e87badbf9692549c0fcced47443585876fcc47e469 \ + --hash=sha256:9ff93d3aedef11f9c4540cf347f8bb135dd9323a2fc705633d83210d464c579d \ + --hash=sha256:a360cfd0881d36c6dc271992ce1eda65dba5e9368575663de993eeb4523d895f \ + --hash=sha256:a5d7ed104d158c0042a6a73799cf0eb576dfd5fc1ace9c47996e52320c37cb7c \ + --hash=sha256:ac17044876e64a8ea20ab132080ddc73b895b4abe9976e263b0e30ee5be7b9c2 \ + --hash=sha256:ad857f42831e5b8d41a32437f88d86ead6c191455a3499c4b6d15e007936d4cf \ + --hash=sha256:b2039f8d545f20c4e52713eea51a275e62153ee96c8035a32b2abb772b6fc9e5 \ + --hash=sha256:b455492cab07107bfe8711e20cd920cc96003e0da3c1f91297235b1603d2aca7 \ + --hash=sha256:b4a9fe992887ac68256c930a2011255bae0bf5ec837475bc6f7edd7c8dfa254e \ + --hash=sha256:b5a53f5998b4bbff1cb2e967e66ab2addc67326a274567697379dd1e326bded7 \ + --hash=sha256:b788276a3c114e9f51e257f2a6f544c32c02dab4aa7a5816b96444e3f9ffc336 \ + --hash=sha256:bddd4f91eede9ca5275e70479ed3656e76c8cdaaa1b354e544cbcf94c6fc8ac4 \ + --hash=sha256:c0503c5b681566e8b722fe8c4c47cce5c7a51f6935d5c7012c4aefe952a35eed \ + --hash=sha256:c1b3cd23d905589cb205710b3988fc8f46d4a198cf12862887b09d7aaa6bf9b9 \ + --hash=sha256:c48f3fbc3e92c7dd6681a258d22f23adc2eb183c8cb1557d2fcc5a024e80b094 \ + --hash=sha256:c63c3ef43f0b3fb00571cff6c3967cc261c0ebd14a0a134a12e83bdb8f49f21f \ + --hash=sha256:c6c45a2d2b68c51fe3d9352733fe048291e483376c94f7723458cfd7b473136b \ + --hash=sha256:caa1afc70a02645809c744eefb7d6ee8fef7e2fad170ffdeacca267fd2674f13 \ + --hash=sha256:cc435d059f926fdc5b05822b1be4ff2a3a040f3ae0a7bbbe672babb468944722 \ + --hash=sha256:cf693eb4a08eccc1a1b636e4392322582db2a47470d52e824b25eca7a3977b53 \ + --hash=sha256:cf71343646756a072b85f228d35b1d7407da1669a3de3cf47f8bbafe0c8183a4 \ + --hash=sha256:d08f63561c8a695afec4975fae445245386d645e3e446e6f260e81663bfd2e38 \ + --hash=sha256:d29ddefeab1791e3c751e0189d5f4b3dbc0bbe033b06e9c333dca1f99e1d523e \ + --hash=sha256:d7f5e15c953ace2e8dde9824bdab4bec50adb91a5663df08d7d994240ae6fa31 \ + 
--hash=sha256:d858532212f0650be12b6042ff4378dc2efbb7792a286bee4489eaa7ba010586 \ + --hash=sha256:d97dd44683802000277bbf142fd9f6b271746b4846d0acaf0cefa6b2eaf2a7ad \ + --hash=sha256:dcdc88b6b01015da066da3fb76545e8bb9a6880a5ebf89e0f0b2e3ca557b3ab7 \ + --hash=sha256:dd609fafdcdde6e67a139898196698af37438b035b25ad63704fd9097d9a3482 \ + --hash=sha256:defa2c0c68734f4a82028c26bcc85e6b92cced99866af118cd6a89b734ad8e0d \ + --hash=sha256:e22260a4741a0e7a206e175232867b48a16e0401ef5bce3c67ca5b9705879066 \ + --hash=sha256:e225a6a14ecf44499aadea165299092ab0cba918bb9ccd9304eab1138844490b \ + --hash=sha256:e3df0bc35e746cce42579826b89579d13fd27c3d5319a6afca9893a9b784ff1b \ + --hash=sha256:e6fcc026a3f27c1282c7ed24b7fcac82cdd70a0e84cc848c0841a3ab1e3dea2d \ + --hash=sha256:e782379c2028a3611285a795b89b99a52722946d19fc06f002f8b53e3ea26ea9 \ + --hash=sha256:e8cdd52744f680346ff8c1ecdad5f4d11117e1724d4f4e1874f3a67598821069 \ + --hash=sha256:e9616f5bd2595f7f4a04b67039d890348ab826e943a9bfdbe4938d0eba606971 \ + --hash=sha256:e98c4c07ee4c4b3acf787e91b27688409d918212dfd34c872201273fdd5a0e18 \ + --hash=sha256:ebdab79f42c5961682654b851f3f0fc68e6cc7cd8727c2ac4ffff955154123c1 \ + --hash=sha256:f0f17f2ce0f3529177a5fff5525204fad7b43dd437d017dd0317f2746773443d \ + --hash=sha256:f4e56860a5af16a0fcfa070a0a20c42fbb2012eed1eb5ceeddcc7f8079214281 # via # jsonschema # referencing @@ -1312,6 +1314,7 @@ urllib3==2.0.6 \ --hash=sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564 # via # -c requirements/main.txt + # documenteer # requests virtualenv==20.24.5 \ --hash=sha256:b80039f280f4919c77b30f1c23294ae357c4c8701042086e3fc005963e4e537b \ diff --git a/requirements/main.txt b/requirements/main.txt index f2b53e33ab..986bf09cf5 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -462,7 +462,9 @@ pyhcl==0.4.5 \ pyjwt[crypto]==2.8.0 \ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ 
--hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 - # via gidgethub + # via + # gidgethub + # pyjwt python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 @@ -526,7 +528,9 @@ requests==2.31.0 \ rfc3986[idna2008]==1.5.0 \ --hash=sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835 \ --hash=sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97 - # via httpx + # via + # httpx + # rfc3986 safir==5.0.0a2 \ --hash=sha256:a13ac781a345d67ae43fd8a0a2434904e5dfca9f9321c15547e4d18b50144fe4 \ --hash=sha256:c8ab7f043e0e65ccda4fef2a15697802224b2c42876991c1a12d0b41115d0bc5 From 2794c6262e94f04bca0f8c25733a74123591f053 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:32:51 +0000 Subject: [PATCH 113/588] Update Helm release argo-workflows to v0.35.0 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index ce1ac7671f..af8b6e7d40 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.33.3 + version: 0.35.0 repository: https://argoproj.github.io/argo-helm From 808248866a2103c638dedb100499993e3603ff2b Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 16 Oct 2023 08:43:06 -0700 Subject: [PATCH 114/588] BTS: Update nublado to Cycle 33. 
--- applications/nublado/values-base.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index e1d3af2ad2..556ea84102 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -8,8 +8,8 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 32 - recommended_tag: "recommended_c0032" + cycle: 33 + recommended_tag: "recommended_c0033" lab: pullSecret: "pull-secret" extraAnnotations: From 94fd72d2de6bf67eba67a81332f27b2f2d922a23 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 16 Oct 2023 10:14:26 -0700 Subject: [PATCH 115/588] BTS: Add application annotation for nublado. --- applications/nublado/values-base.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 556ea84102..80e404f2fc 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -11,6 +11,7 @@ controller: cycle: 33 recommended_tag: "recommended_c0033" lab: + application: "nublado-users" pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" From 56da305f17369db6b17765c93774cfcd4419840a Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 16 Oct 2023 10:17:18 -0700 Subject: [PATCH 116/588] Add default ArgoCD application for nublado user objects --- applications/nublado/README.md | 1 + applications/nublado/values.yaml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index b4e666b5f9..2af83051cb 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -28,6 +28,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.images.pin | list | `[]` | List of additional image tags to prepull. 
Listing the image tagged as recommended here is recommended when using a Docker image source to ensure its name can be expanded properly in the menu. | | controller.config.images.recommendedTag | string | `"recommended"` | Tag marking the recommended image (shown first in the menu) | | controller.config.images.source | object | None, must be specified | Source for prepulled images. For Docker, set `type` to `docker`, `registry` to the hostname and `repository` to the name of the repository. For Google Artifact Repository, set `type` to `google`, `location` to the region, `projectId` to the Google project, `repository` to the name of the repository, and `image` to the name of the image. | +| controller.config.lab.application | string | See `values.yaml` | ArgcoCD application in which to collect user lab objects. | | controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab. | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | | controller.config.lab.initcontainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image reference), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with `allowPrivilegeEscalation` true. Otherwise it will, run as UID 1000. | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index e23d3133d9..3befd435d3 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -97,6 +97,9 @@ controller: aliasTags: [] lab: + # -- ArgcoCD application in which to collect user lab objects. + # @default -- See `values.yaml` + application: "nublado-users" # -- Environment variables to set for every user lab. 
# @default -- See `values.yaml` env: From 2ddf42ae4891914df0b0e318777117aed19d1038 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 16 Oct 2023 12:55:49 -0700 Subject: [PATCH 117/588] Remove idf-dev n2 configuration --- applications/nublado2/secrets-idfdev.yaml | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 applications/nublado2/secrets-idfdev.yaml diff --git a/applications/nublado2/secrets-idfdev.yaml b/applications/nublado2/secrets-idfdev.yaml deleted file mode 100644 index 97d5af3ca8..0000000000 --- a/applications/nublado2/secrets-idfdev.yaml +++ /dev/null @@ -1,15 +0,0 @@ -"aws-credentials.ini": - description: >- - Google Cloud Storage credentials to the Butler data store, formatted using - AWS syntax for use with boto. -"butler-gcs-idf-creds.json": - description: >- - Google Cloud Storage credentials to the Butler data store in the native - Google syntax, containing the private asymmetric key. -"butler-hmac-idf-creds.json": - description: >- - Google Cloud Storage credentials to the Butler data store in the private - key syntax used for HMACs. -"postgres-credentials.txt": - description: >- - PostgreSQL credentials in its pgpass format for the Butler database. 
From 444ff2f6881c939aa3b81bab7be2c43937201cf6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 07:27:34 +0000 Subject: [PATCH 118/588] Update Helm release vault-secrets-operator to v2.5.3 --- applications/vault-secrets-operator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml index b85db7d0ca..db63283353 100644 --- a/applications/vault-secrets-operator/Chart.yaml +++ b/applications/vault-secrets-operator/Chart.yaml @@ -5,7 +5,7 @@ sources: - https://github.com/ricoberger/vault-secrets-operator dependencies: - name: vault-secrets-operator - version: 2.5.1 + version: 2.5.3 repository: https://ricoberger.github.io/helm-charts/ annotations: phalanx.lsst.io/docs: | From 59181de1704f1cc4d3fb682668380ad75f2cc73b Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 18 Oct 2023 12:44:02 -0700 Subject: [PATCH 119/588] Disable Summit EFD replication at the base - Summit EFD replication is not currently working at the base. Disable MM2 and related source deployments for the moment. The need for having two SR deployments will be reviewed when we replace the InfluxDB Sink connectors with the telegraf-based connectors. --- applications/sasquatch/values-base.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 907f25f040..9fb57217c7 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -1,6 +1,6 @@ strimzi-kafka: mirrormaker2: - enabled: true + enabled: false source: bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 topicsPattern: "lsst.sal.*, registry-schemas" @@ -9,10 +9,10 @@ strimzi-kafka: separator: "." 
class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy" sourceRegistry: - enabled: true + enabled: false schemaTopic: source.registry-schemas sourceConnect: - enabled: true + enabled: false resources: requests: cpu: 2 @@ -61,7 +61,7 @@ influxdb-staging: hostname: base-lsp.lsst.codes source-influxdb: - enabled: true + enabled: false persistence: storageClass: rook-ceph-block size: 10Ti @@ -320,7 +320,7 @@ telegraf-kafka-consumer: # environment where data is replicated from. # We need to remove the "source." prefix from the topic name before writing to InfluxDB. source-kafka-connect-manager: - enabled: true + enabled: false influxdbSink: connectInfluxUrl: "http://sasquatch-influxdb-staging.sasquatch:8086" connectInfluxDb: "efd" @@ -438,7 +438,7 @@ chronograf: STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/base.json source-kapacitor: - enabled: true + enabled: false persistence: storageClass: rook-ceph-block From 1c9fb8c845f5b183abdf410e20b99ed669a47a79 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 18 Oct 2023 13:08:20 -0700 Subject: [PATCH 120/588] Avoid helm update if there are no dependencies If we didn't configure any external Helm repositories, also don't run helm update. Not only does this save some time, helm update has the obnoxious behavior of failing if no remote repositories are configured, causing CI tests of charts with no dependencies to always fail. 
--- src/phalanx/services/application.py | 18 +++++++++++---- tests/cli/application_test.py | 36 +++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index e59ddfd34a..7b0890c578 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -47,7 +47,7 @@ def __init__( def add_helm_repositories( self, applications: Iterable[str] | None = None, *, quiet: bool = False - ) -> None: + ) -> bool: """Add all Helm repositories used by any application to Helm's cache. To perform other Helm operations, such as downloading third-party @@ -65,6 +65,13 @@ def add_helm_repositories( applications. quiet Whether to suppress Helm's standard output. + + Returns + ------- + bool + Whether any Helm repositories were added. If there were none, the + caller should not call :command:`helm update`, because it fails + if there are no repositories. """ if applications: repo_urls = set() @@ -75,6 +82,7 @@ def add_helm_repositories( repo_urls = self._config.get_all_dependency_repositories() for url in sorted(repo_urls): self._helm.repo_add(url, quiet=quiet) + return bool(repo_urls) def create( self, name: str, starter: HelmStarter, description: str @@ -143,8 +151,8 @@ def lint(self, app_names: list[str], env_name: str | None) -> bool: bool Whether linting passed. 
""" - self.add_helm_repositories(app_names) - self._helm.repo_update() + if self.add_helm_repositories(app_names): + self._helm.repo_update() environments: dict[str, Environment] = {} if env_name: environments[env_name] = self._config.load_environment(env_name) @@ -189,8 +197,8 @@ def lint_all(self, *, only_changes_from_branch: str | None = None) -> bool: to_lint = self._config.get_modified_applications(branch) else: to_lint = self._config.list_application_environments() - self.add_helm_repositories(to_lint.keys()) - self._helm.repo_update() + if self.add_helm_repositories(to_lint.keys()): + self._helm.repo_update() environments: dict[str, Environment] = {} success = True for app_name, app_envs in sorted(to_lint.items()): diff --git a/tests/cli/application_test.py b/tests/cli/application_test.py index 7efd2ea2b7..2e5c60225f 100644 --- a/tests/cli/application_test.py +++ b/tests/cli/application_test.py @@ -316,6 +316,42 @@ def callback_error(*command: str) -> subprocess.CompletedProcess: assert result.exit_code == 1 +def test_lint_no_repos(mock_helm: MockHelm) -> None: + def callback(*command: str) -> subprocess.CompletedProcess: + output = None + if command[0] == "lint": + output = "==> Linting .\n" + return subprocess.CompletedProcess( + returncode=0, + args=command, + stdout=output, + stderr=None, + ) + + # Lint a single application that has no dependency charts, and make sure + # we don't try to run repo update, which may fail. 
+ mock_helm.set_capture_callback(callback) + result = run_cli("application", "lint", "postgres", "-e", "idfdev") + expected = "==> Linting postgres (environment idfdev)\n" + assert result.output == expected + assert result.exit_code == 0 + set_args = read_output_json("idfdev", "lint-set-values") + assert mock_helm.call_args_list == [ + ["dependency", "update", "--skip-refresh"], + [ + "lint", + "postgres", + "--strict", + "--values", + "postgres/values.yaml", + "--values", + "postgres/values-idfdev.yaml", + "--set", + ",".join(set_args), + ], + ] + + def test_lint_all(mock_helm: MockHelm) -> None: result = run_cli("application", "lint-all") assert result.output == "" From 31a35b176720c6c0d4b97ddae63c5bbc0616d540 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 13 Oct 2023 10:44:55 -0700 Subject: [PATCH 121/588] Add cloudsql to nublado (idf-dev) --- applications/nublado/README.md | 11 ++++ .../templates/cloudsql-deployment.yaml | 63 +++++++++++++++++++ .../templates/cloudsql-networkpolicy.yaml | 26 ++++++++ .../nublado/templates/cloudsql-service.yaml | 16 +++++ .../templates/cloudsql-serviceaccount.yaml | 10 +++ applications/nublado/values-idfdev.yaml | 5 +- applications/nublado/values.yaml | 49 +++++++++++++++ 7 files changed, 179 insertions(+), 1 deletion(-) create mode 100644 applications/nublado/templates/cloudsql-deployment.yaml create mode 100644 applications/nublado/templates/cloudsql-networkpolicy.yaml create mode 100644 applications/nublado/templates/cloudsql-service.yaml create mode 100644 applications/nublado/templates/cloudsql-serviceaccount.yaml diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 2af83051cb..6fbb761a63 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -13,6 +13,17 @@ JupyterHub and custom spawner for the Rubin Science Platform | Key | Type | Default | Description | |-----|------|---------|-------------| +| cloudsql.affinity | object | `{}` | Affinity rules for the Cloud SQL 
Proxy pod | +| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a separate service, because shoehorning it into Zero to Jupyterhub's extraContainers looks messy, and it's not necessary that it be very performant. | +| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | +| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | +| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | +| cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | +| cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | +| cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy pod | +| cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | +| cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | | controller.affinity | object | `{}` | Affinity rules for the lab controller pod | | controller.config.fileserver.enabled | bool | `false` | Enable fileserver management | | controller.config.fileserver.image | string | `"ghcr.io/lsst-sqre/worblehat"` | Image for fileserver container | diff --git a/applications/nublado/templates/cloudsql-deployment.yaml b/applications/nublado/templates/cloudsql-deployment.yaml new file mode 100644 index 0000000000..f0eb9449b4 --- /dev/null +++ b/applications/nublado/templates/cloudsql-deployment.yaml @@ -0,0 +1,63 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: apps/v1 +kind: 
Deployment +metadata: + name: cloud-sql-proxy + labels: + {{- include "nublado.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.cloudsql.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/component: "cloud-sql-proxy" + template: + metadata: + {{- with .Values.cloudsql.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + app.kubernetes.io/component: "cloud-sql-proxy" + spec: + serviceAccountName: "cloud-sql-proxy" + containers: + - name: "cloud-sql-proxy" + command: + - "/cloud_sql_proxy" + - "-ip_address_types=PRIVATE" + - "-log_debug_stdout=true" + - "-structured_logs=true" + - "-instances={{ required "cloudsql.instanceConnectionName must be specified" .Values.cloudsql.instanceConnectionName }}=tcp:0.0.0.0:5432" + image: "{{ .Values.cloudsql.image.repository }}:{{ .Values.cloudsql.image.tag }}" + imagePullPolicy: {{ .Values.cloudsql.image.pullPolicy | quote }} + ports: + - containerPort: 5432 + name: "http" + protocol: "TCP" + {{- with .Values.cloudsql.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + {{- with .Values.cloudsql.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.cloudsql.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.cloudsql.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/nublado/templates/cloudsql-networkpolicy.yaml b/applications/nublado/templates/cloudsql-networkpolicy.yaml new file mode 100644 index 0000000000..114540980c --- /dev/null +++ b/applications/nublado/templates/cloudsql-networkpolicy.yaml @@ -0,0 +1,26 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "cloud-sql-proxy" + labels: + {{- include "nublado.labels" . | nindent 4 }} +spec: + podSelector: + # This policy controls inbound and outbound access to the Cloud SQL Proxy. + matchLabels: + {{- include "nublado.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "cloud-sql-proxy" + policyTypes: + - Ingress + ingress: + # Allow inbound access to the Cloud SQL Proxy from the Hub. + - from: + - podSelector: + matchLabels: + app: "jupyterhub" + component: "hub" + ports: + - protocol: "TCP" + port: 5432 +{{- end }} diff --git a/applications/nublado/templates/cloudsql-service.yaml b/applications/nublado/templates/cloudsql-service.yaml new file mode 100644 index 0000000000..3c29083064 --- /dev/null +++ b/applications/nublado/templates/cloudsql-service.yaml @@ -0,0 +1,16 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: "cloud-sql-proxy" + labels: + {{- include "nublado.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - protocol: "TCP" + port: 5432 + targetPort: "http" + selector: + app.kubernetes.io/component: "cloud-sql-proxy" +{{- end }} diff --git a/applications/nublado/templates/cloudsql-serviceaccount.yaml b/applications/nublado/templates/cloudsql-serviceaccount.yaml new file mode 100644 index 0000000000..69cd1acc71 --- /dev/null +++ b/applications/nublado/templates/cloudsql-serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.cloudsql.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-sql-proxy + labels: + {{- include "nublado.labels" . 
| nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: {{ required "cloudsql.serviceAccount must be set to a valid Google service account" .Values.cloudsql.serviceAccount | quote }} +{{- end }} diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 2802bc096d..4229d0a089 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -73,6 +73,9 @@ jupyterhub: db: url: "postgresql://nublado3@postgres.postgres/nublado3" upgrade: true - +cloudsql: + enabled: true + instanceConnectionName: "FIXME" + serviceAccount: "nublado@science-platform-dev-7696.iam.gserviceaccount.com" secrets: templateSecrets: true diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 3befd435d3..d1537389dd 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -457,6 +457,55 @@ jupyterhub: # autoscaling in advance of running out of resources enabled: false +cloudsql: + # -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google + # Cloud. This will be run as a separate service, because shoehorning + # it into Zero to Jupyterhub's extraContainers looks messy, and it's + # not necessary that it be very performant. 
+ enabled: false + + image: + # -- Cloud SQL Auth Proxy image to use + repository: "gcr.io/cloudsql-docker/gce-proxy" + + # -- Cloud SQL Auth Proxy tag to use + tag: "1.33.11" + + # -- Pull policy for Cloud SQL Auth Proxy images + pullPolicy: "IfNotPresent" + + # -- Instance connection name for a CloudSQL PostgreSQL instance + # @default -- None, must be set if Cloud SQL Auth Proxy is enabled + instanceConnectionName: "" + + # -- The Google service account that has an IAM binding to the `gafaelfawr` + # Kubernetes service account and has the `cloudsql.client` role + # @default -- None, must be set if Cloud SQL Auth Proxy is enabled + serviceAccount: "" + + # -- Resource limits and requests for the Cloud SQL Proxy pod + # @default -- See `values.yaml` + resources: + limits: + cpu: "100m" + memory: "20Mi" + requests: + cpu: "5m" + memory: "7Mi" + + # -- Annotations for the Cloud SQL Proxy pod + podAnnotations: {} + + # -- Node selection rules for the Cloud SQL Proxy pod + nodeSelector: {} + + # -- Tolerations for the Cloud SQL Proxy pod + tolerations: [] + + # -- Affinity rules for the Cloud SQL Proxy pod + affinity: {} + + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. 
global: From ebfe4e4a6cb7eea123b67e6ac255537fe3822134 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 13 Oct 2023 11:45:44 -0700 Subject: [PATCH 122/588] update DB connection information for nublado/idf-dev --- applications/nublado/values-idfdev.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 4229d0a089..fdf4b5675e 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -71,11 +71,11 @@ jupyterhub: image: tag: "0.4.0" db: - url: "postgresql://nublado3@postgres.postgres/nublado3" + url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" upgrade: true cloudsql: enabled: true - instanceConnectionName: "FIXME" + instanceConnectionName: "science-platform-dev-7696:us-central1:science-platform-dev-e9e11de2" serviceAccount: "nublado@science-platform-dev-7696.iam.gserviceaccount.com" secrets: templateSecrets: true From d095ad9b17d16a4343ea9a1bfad1de53c0cb7f51 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 13 Oct 2023 11:50:01 -0700 Subject: [PATCH 123/588] Set hub.internalDatabase false for idf-dev nublado --- applications/nublado/values-idfdev.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index fdf4b5675e..8d5b33ebba 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -73,6 +73,8 @@ jupyterhub: db: url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" upgrade: true +hub: + internalDatabase: false cloudsql: enabled: true instanceConnectionName: "science-platform-dev-7696:us-central1:science-platform-dev-e9e11de2" From f716ee0daa05e5cc9c15c4015e0292f1a2db5ce5 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 16 Oct 2023 09:57:37 -0700 Subject: [PATCH 124/588] Add application back to config --- applications/nublado/values-idfdev.yaml | 1 + 1 file changed, 1 insertion(+) 
diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 8d5b33ebba..44f7955423 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -19,6 +19,7 @@ controller: numWeeklies: 2 numDailies: 3 lab: + application: "nublado-users" env: AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" From 2babfe2637cbdfb446b9aff7592f1509272b1e24 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 16 Oct 2023 12:46:44 -0700 Subject: [PATCH 125/588] Remove postgres from idf-dev --- applications/nublado/values-idfdev.yaml | 2 +- applications/postgres/values-idfdev.yaml | 3 --- environments/values-idfdev.yaml | 1 - 3 files changed, 1 insertion(+), 5 deletions(-) delete mode 100644 applications/postgres/values-idfdev.yaml diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 44f7955423..33414aec8a 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -70,7 +70,7 @@ controller: jupyterhub: hub: image: - tag: "0.4.0" + tag: "tickets-DM-41198B" db: url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" upgrade: true diff --git a/applications/postgres/values-idfdev.yaml b/applications/postgres/values-idfdev.yaml deleted file mode 100644 index 20c336e86a..0000000000 --- a/applications/postgres/values-idfdev.yaml +++ /dev/null @@ -1,3 +0,0 @@ -nublado3_db: - user: "nublado3" - db: "nublado3" diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index c5938ef961..975200f4f2 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -14,7 +14,6 @@ applications: noteburst: true nublado: true portal: true - postgres: true sasquatch: true semaphore: true sherlock: true From 
3281e0355b62bb6519bd51aeae52522cd375e00b Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 17 Oct 2023 10:37:00 -0700 Subject: [PATCH 126/588] Move back to tagged version --- applications/nublado/values-idfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 33414aec8a..478d45ab8f 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -70,7 +70,7 @@ controller: jupyterhub: hub: image: - tag: "tickets-DM-41198B" + tag: "0.4.1" db: url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" upgrade: true From 3ba2ffe978add98125891be8cdb89823eae10fad Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Tue, 12 Sep 2023 09:28:24 -0700 Subject: [PATCH 127/588] Update LATISS dev configs in prompt processing This supports the tester `upload.py` to run LATISS data. --- .../values-usdfdev-prompt-processing.yaml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index b55cdc8572..d462eb56fc 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -8,10 +8,11 @@ prompt-proto-service: repository: ghcr.io/lsst-dm/prompt-proto-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. 
- tag: AuxTel20230524-w_2023_20 + tag: latest instrument: - calibRepo: s3://rubin-summit-users/ + pipelines: (survey="SURVEY")=[${PROMPT_PROTOTYPE_DIR}/pipelines/${RUBIN_INSTRUMENT}/ApPipe.yaml] + calibRepo: s3://rubin-pp-users/central_repo/ s3: imageBucket: rubin-pp @@ -26,6 +27,8 @@ prompt-proto-service: ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 registry: # TODO: remove on DM-40839 - ip: usdf-butler.slac.stanford.edu:5432 + ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 + db: ppcentralbutler + user: pp fullnameOverride: "prompt-proto-service-latiss" From 57354f4485827a0b21b62589cdcab028f3233804 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 19 Oct 2023 09:20:54 -0700 Subject: [PATCH 128/588] BTS: Update gafaelfawr permissions to use argo-workflows. --- applications/gafaelfawr/values-base.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/gafaelfawr/values-base.yaml b/applications/gafaelfawr/values-base.yaml index 586d34c848..ceef9a995f 100644 --- a/applications/gafaelfawr/values-base.yaml +++ b/applications/gafaelfawr/values-base.yaml @@ -23,6 +23,9 @@ config: - github: organization: "lsst-sqre" team: "square" + - github: + organization: "lsst-ts" + team: "integration-testing-team" "exec:internal-tools": - github: organization: "lsst-sqre" From 08aba65c58d1f6675974b503e6992ec496502e24 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 20 Oct 2023 14:27:16 -0700 Subject: [PATCH 129/588] Summit: Update nublado and cachemachine to Cycle 33. 
--- applications/cachemachine/values-summit.yaml | 4 ++-- applications/nublado/values-summit.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/cachemachine/values-summit.yaml b/applications/cachemachine/values-summit.yaml index 215f2b1988..703f6e6a50 100644 --- a/applications/cachemachine/values-summit.yaml +++ b/applications/cachemachine/values-summit.yaml @@ -8,11 +8,11 @@ autostart: "type": "RubinRepoMan", "registry_url": "ts-dockerhub.lsst.org", "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0032", + "recommended_tag": "recommended_c0033", "num_releases": 0, "num_weeklies": 3, "num_dailies": 2, - "cycle": 32, + "cycle": 33, "alias_tags": [ "latest", "latest_daily", diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index ac9585fefb..6c225d716c 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -8,8 +8,8 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 32 - recommended_tag: "recommended_c0032" + cycle: 33 + recommended_tag: "recommended_c0033" lab: pullSecret: "pull-secret" extraAnnotations: From a1b6b87027353205259c4de628a11921e056af15 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 09:36:18 +0000 Subject: [PATCH 130/588] Update gcr.io/cloudsql-docker/gce-proxy Docker tag to v1.33.12 --- applications/gafaelfawr/values.yaml | 2 +- applications/nublado/values.yaml | 2 +- applications/sqlproxy-cross-project/values.yaml | 2 +- applications/times-square/values.yaml | 2 +- applications/vo-cutouts/values.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index c710993c43..33e2b3f945 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -304,7 +304,7 @@ cloudsql: repository: 
"gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.11" + tag: "1.33.12" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index d1537389dd..94e6d5bc8a 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -469,7 +469,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.11" + tag: "1.33.12" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index 67b1d9e1e4..6f0f0f421d 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.33.11" + tag: "1.33.12" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 3bf8abb1cb..eace86d016 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -126,7 +126,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.11" + tag: "1.33.12" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index 9ee9b42d2e..a3251b6057 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.11" + tag: "1.33.12" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" From 4369e8dc25001a25c3ba4134cadb55949e596302 Mon Sep 
17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:31:39 +0000 Subject: [PATCH 131/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 292 +++++++++++++++++++++--------------------- requirements/main.txt | 216 +++++++++++++++---------------- 2 files changed, 252 insertions(+), 256 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 4ac33c4f12..724bf6fb04 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -42,97 +42,97 @@ cfgv==3.4.0 \ --hash=sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9 \ --hash=sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560 # via pre-commit -charset-normalizer==3.3.0 \ - --hash=sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843 \ - --hash=sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786 \ - --hash=sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e \ - --hash=sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8 \ - --hash=sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4 \ - --hash=sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa \ - --hash=sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d \ - --hash=sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82 \ - --hash=sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7 \ - --hash=sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895 \ - --hash=sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d \ - --hash=sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a \ - --hash=sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382 \ - --hash=sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678 \ - 
--hash=sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b \ - --hash=sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e \ - --hash=sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741 \ - --hash=sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4 \ - --hash=sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596 \ - --hash=sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9 \ - --hash=sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69 \ - --hash=sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c \ - --hash=sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77 \ - --hash=sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13 \ - --hash=sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459 \ - --hash=sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e \ - --hash=sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7 \ - --hash=sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908 \ - --hash=sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a \ - --hash=sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f \ - --hash=sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8 \ - --hash=sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482 \ - --hash=sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d \ - --hash=sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d \ - --hash=sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545 \ - --hash=sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34 \ - --hash=sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86 \ - --hash=sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6 \ - 
--hash=sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe \ - --hash=sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e \ - --hash=sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc \ - --hash=sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7 \ - --hash=sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd \ - --hash=sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c \ - --hash=sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557 \ - --hash=sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a \ - --hash=sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89 \ - --hash=sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078 \ - --hash=sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e \ - --hash=sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4 \ - --hash=sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403 \ - --hash=sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0 \ - --hash=sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89 \ - --hash=sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115 \ - --hash=sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9 \ - --hash=sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05 \ - --hash=sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a \ - --hash=sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec \ - --hash=sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56 \ - --hash=sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38 \ - --hash=sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479 \ - --hash=sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c \ - 
--hash=sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e \ - --hash=sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd \ - --hash=sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186 \ - --hash=sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455 \ - --hash=sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c \ - --hash=sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65 \ - --hash=sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78 \ - --hash=sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287 \ - --hash=sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df \ - --hash=sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43 \ - --hash=sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1 \ - --hash=sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7 \ - --hash=sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989 \ - --hash=sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a \ - --hash=sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63 \ - --hash=sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884 \ - --hash=sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649 \ - --hash=sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810 \ - --hash=sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828 \ - --hash=sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4 \ - --hash=sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2 \ - --hash=sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd \ - --hash=sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5 \ - --hash=sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe \ - 
--hash=sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293 \ - --hash=sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e \ - --hash=sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e \ - --hash=sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8 +charset-normalizer==3.3.1 \ + --hash=sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5 \ + --hash=sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93 \ + --hash=sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a \ + --hash=sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d \ + --hash=sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c \ + --hash=sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1 \ + --hash=sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58 \ + --hash=sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2 \ + --hash=sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557 \ + --hash=sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147 \ + --hash=sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041 \ + --hash=sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2 \ + --hash=sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2 \ + --hash=sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7 \ + --hash=sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296 \ + --hash=sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690 \ + --hash=sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67 \ + --hash=sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57 \ + --hash=sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597 \ + 
--hash=sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846 \ + --hash=sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b \ + --hash=sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97 \ + --hash=sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c \ + --hash=sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62 \ + --hash=sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa \ + --hash=sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f \ + --hash=sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e \ + --hash=sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821 \ + --hash=sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3 \ + --hash=sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4 \ + --hash=sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb \ + --hash=sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727 \ + --hash=sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514 \ + --hash=sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d \ + --hash=sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761 \ + --hash=sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55 \ + --hash=sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f \ + --hash=sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c \ + --hash=sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034 \ + --hash=sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6 \ + --hash=sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae \ + --hash=sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1 \ + --hash=sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14 \ + 
--hash=sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1 \ + --hash=sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228 \ + --hash=sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708 \ + --hash=sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48 \ + --hash=sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f \ + --hash=sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5 \ + --hash=sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f \ + --hash=sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4 \ + --hash=sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8 \ + --hash=sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff \ + --hash=sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61 \ + --hash=sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b \ + --hash=sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97 \ + --hash=sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b \ + --hash=sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605 \ + --hash=sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728 \ + --hash=sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d \ + --hash=sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c \ + --hash=sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf \ + --hash=sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673 \ + --hash=sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1 \ + --hash=sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b \ + --hash=sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41 \ + --hash=sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8 \ + 
--hash=sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f \ + --hash=sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4 \ + --hash=sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008 \ + --hash=sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9 \ + --hash=sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5 \ + --hash=sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f \ + --hash=sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e \ + --hash=sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273 \ + --hash=sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45 \ + --hash=sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e \ + --hash=sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656 \ + --hash=sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e \ + --hash=sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c \ + --hash=sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2 \ + --hash=sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72 \ + --hash=sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056 \ + --hash=sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397 \ + --hash=sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42 \ + --hash=sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd \ + --hash=sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3 \ + --hash=sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213 \ + --hash=sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf \ + --hash=sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67 # via # -c requirements/main.txt # requests @@ -330,15 +330,15 @@ fonttools==4.43.1 \ 
--hash=sha256:f7a58eb5e736d7cf198eee94844b81c9573102ae5989ebcaa1d1a37acd04b33d \ --hash=sha256:fe9b1ec799b6086460a7480e0f55c447b1aca0a4eecc53e444f639e967348896 # via matplotlib -gitdb==4.0.10 \ - --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ - --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 +gitdb==4.0.11 \ + --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ + --hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b # via # -c requirements/main.txt # gitpython -gitpython==3.1.37 \ - --hash=sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33 \ - --hash=sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54 +gitpython==3.1.40 \ + --hash=sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4 \ + --hash=sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a # via # -c requirements/main.txt # documenteer @@ -606,34 +606,34 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.6.0 \ - --hash=sha256:091f53ff88cb093dcc33c29eee522c087a438df65eb92acd371161c1f4380ff0 \ - --hash=sha256:1a69db3018b87b3e6e9dd28970f983ea6c933800c9edf8c503c3135b3274d5ad \ - --hash=sha256:24f3de8b9e7021cd794ad9dfbf2e9fe3f069ff5e28cb57af6f873ffec1cb0425 \ - --hash=sha256:31eba8a7a71f0071f55227a8057468b8d2eb5bf578c8502c7f01abaec8141b2f \ - --hash=sha256:3c8835a07b8442da900db47ccfda76c92c69c3a575872a5b764332c4bacb5a0a \ - --hash=sha256:3df87094028e52766b0a59a3e46481bb98b27986ed6ded6a6cc35ecc75bb9182 \ - --hash=sha256:49499cf1e464f533fc45be54d20a6351a312f96ae7892d8e9f1708140e27ce41 \ - --hash=sha256:4c192445899c69f07874dabda7e931b0cc811ea055bf82c1ababf358b9b2a72c \ - --hash=sha256:4f3d27537abde1be6d5f2c96c29a454da333a2a271ae7d5bc7110e6d4b7beb3f \ - 
--hash=sha256:7469545380dddce5719e3656b80bdfbb217cfe8dbb1438532d6abc754b828fed \ - --hash=sha256:7807a2a61e636af9ca247ba8494031fb060a0a744b9fee7de3a54bed8a753323 \ - --hash=sha256:856bad61ebc7d21dbc019b719e98303dc6256cec6dcc9ebb0b214b81d6901bd8 \ - --hash=sha256:89513ddfda06b5c8ebd64f026d20a61ef264e89125dc82633f3c34eeb50e7d60 \ - --hash=sha256:8e0db37ac4ebb2fee7702767dfc1b773c7365731c22787cb99f507285014fcaf \ - --hash=sha256:971104bcb180e4fed0d7bd85504c9036346ab44b7416c75dd93b5c8c6bb7e28f \ - --hash=sha256:9e1589ca150a51d9d00bb839bfeca2f7a04f32cd62fad87a847bc0818e15d7dc \ - --hash=sha256:9f8464ed410ada641c29f5de3e6716cbdd4f460b31cf755b2af52f2d5ea79ead \ - --hash=sha256:ab98b8f6fdf669711f3abe83a745f67f50e3cbaea3998b90e8608d2b459fd566 \ - --hash=sha256:b19006055dde8a5425baa5f3b57a19fa79df621606540493e5e893500148c72f \ - --hash=sha256:c69051274762cccd13498b568ed2430f8d22baa4b179911ad0c1577d336ed849 \ - --hash=sha256:d2dad072e01764823d4b2f06bc7365bb1d4b6c2f38c4d42fade3c8d45b0b4b67 \ - --hash=sha256:dccd850a2e3863891871c9e16c54c742dba5470f5120ffed8152956e9e0a5e13 \ - --hash=sha256:e28d7b221898c401494f3b77db3bac78a03ad0a0fff29a950317d87885c655d2 \ - --hash=sha256:e4b7a99275a61aa22256bab5839c35fe8a6887781862471df82afb4b445daae6 \ - --hash=sha256:eb7ff4007865833c470a601498ba30462b7374342580e2346bf7884557e40531 \ - --hash=sha256:f8598307150b5722854f035d2e70a1ad9cc3c72d392c34fffd8c66d888c90f17 \ - --hash=sha256:fea451a3125bf0bfe716e5d7ad4b92033c471e4b5b3e154c67525539d14dc15a +mypy==1.6.1 \ + --hash=sha256:19f905bcfd9e167159b3d63ecd8cb5e696151c3e59a1742e79bc3bcb540c42c7 \ + --hash=sha256:21a1ad938fee7d2d96ca666c77b7c494c3c5bd88dff792220e1afbebb2925b5e \ + --hash=sha256:40b1844d2e8b232ed92e50a4bd11c48d2daa351f9deee6c194b83bf03e418b0c \ + --hash=sha256:41697773aa0bf53ff917aa077e2cde7aa50254f28750f9b88884acea38a16169 \ + --hash=sha256:49ae115da099dcc0922a7a895c1eec82c1518109ea5c162ed50e3b3594c71208 \ + 
--hash=sha256:4c46b51de523817a0045b150ed11b56f9fff55f12b9edd0f3ed35b15a2809de0 \ + --hash=sha256:4cbe68ef919c28ea561165206a2dcb68591c50f3bcf777932323bc208d949cf1 \ + --hash=sha256:4d01c00d09a0be62a4ca3f933e315455bde83f37f892ba4b08ce92f3cf44bcc1 \ + --hash=sha256:59a0d7d24dfb26729e0a068639a6ce3500e31d6655df8557156c51c1cb874ce7 \ + --hash=sha256:68351911e85145f582b5aa6cd9ad666c8958bcae897a1bfda8f4940472463c45 \ + --hash=sha256:7274b0c57737bd3476d2229c6389b2ec9eefeb090bbaf77777e9d6b1b5a9d143 \ + --hash=sha256:81af8adaa5e3099469e7623436881eff6b3b06db5ef75e6f5b6d4871263547e5 \ + --hash=sha256:82e469518d3e9a321912955cc702d418773a2fd1e91c651280a1bda10622f02f \ + --hash=sha256:8b27958f8c76bed8edaa63da0739d76e4e9ad4ed325c814f9b3851425582a3cd \ + --hash=sha256:8c223fa57cb154c7eab5156856c231c3f5eace1e0bed9b32a24696b7ba3c3245 \ + --hash=sha256:8f57e6b6927a49550da3d122f0cb983d400f843a8a82e65b3b380d3d7259468f \ + --hash=sha256:925cd6a3b7b55dfba252b7c4561892311c5358c6b5a601847015a1ad4eb7d332 \ + --hash=sha256:a43ef1c8ddfdb9575691720b6352761f3f53d85f1b57d7745701041053deff30 \ + --hash=sha256:a8032e00ce71c3ceb93eeba63963b864bf635a18f6c0c12da6c13c450eedb183 \ + --hash=sha256:b96ae2c1279d1065413965c607712006205a9ac541895004a1e0d4f281f2ff9f \ + --hash=sha256:bb8ccb4724f7d8601938571bf3f24da0da791fe2db7be3d9e79849cb64e0ae85 \ + --hash=sha256:bbaf4662e498c8c2e352da5f5bca5ab29d378895fa2d980630656178bd607c46 \ + --hash=sha256:cfd13d47b29ed3bbaafaff7d8b21e90d827631afda134836962011acb5904b71 \ + --hash=sha256:d4473c22cc296425bbbce7e9429588e76e05bc7342da359d6520b6427bf76660 \ + --hash=sha256:d8fbb68711905f8912e5af474ca8b78d077447d8f3918997fecbf26943ff3cbb \ + --hash=sha256:e5012e5cc2ac628177eaac0e83d622b2dd499e28253d4107a08ecc59ede3fc2c \ + --hash=sha256:eb4f18589d196a4cbe5290b435d135dee96567e07c2b2d43b5c4621b6501531a # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -1110,24 +1110,24 @@ rpds-py==0.10.6 \ 
# via # jsonschema # referencing -ruff==0.0.292 \ - --hash=sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96 \ - --hash=sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac \ - --hash=sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade \ - --hash=sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205 \ - --hash=sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4 \ - --hash=sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68 \ - --hash=sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0 \ - --hash=sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9 \ - --hash=sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81 \ - --hash=sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8 \ - --hash=sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0 \ - --hash=sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016 \ - --hash=sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7 \ - --hash=sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003 \ - --hash=sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a \ - --hash=sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c \ - --hash=sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4 +ruff==0.1.1 \ + --hash=sha256:2a909d3930afdbc2e9fd893b0034479e90e7981791879aab50ce3d9f55205bd6 \ + --hash=sha256:2d68367d1379a6b47e61bc9de144a47bcdb1aad7903bbf256e4c3d31f11a87ae \ + --hash=sha256:3305d1cb4eb8ff6d3e63a48d1659d20aab43b49fe987b3ca4900528342367145 \ + --hash=sha256:3521bf910104bf781e6753282282acc145cbe3eff79a1ce6b920404cd756075a \ + --hash=sha256:3ff3006c97d9dc396b87fb46bb65818e614ad0181f059322df82bbfe6944e264 \ + --hash=sha256:620d4b34302538dbd8bbbe8fdb8e8f98d72d29bd47e972e2b59ce6c1e8862257 \ + 
--hash=sha256:6aa7e63c3852cf8fe62698aef31e563e97143a4b801b57f920012d0e07049a8d \ + --hash=sha256:8f5b24daddf35b6c207619301170cae5d2699955829cda77b6ce1e5fc69340df \ + --hash=sha256:b7cdc893aef23ccc14c54bd79a8109a82a2c527e11d030b62201d86f6c2b81c5 \ + --hash=sha256:ba3208543ab91d3e4032db2652dcb6c22a25787b85b8dc3aeff084afdc612e5c \ + --hash=sha256:bc11955f6ce3398d2afe81ad7e49d0ebf0a581d8bcb27b8c300281737735e3a3 \ + --hash=sha256:c34ae501d0ec71acf19ee5d4d889e379863dcc4b796bf8ce2934a9357dc31db7 \ + --hash=sha256:c90461ae4abec261609e5ea436de4a4b5f2822921cf04c16d2cc9327182dbbcc \ + --hash=sha256:cbbd8eead88ea83a250499074e2a8e9d80975f0b324b1e2e679e4594da318c25 \ + --hash=sha256:d3f9ac658ba29e07b95c80fa742b059a55aefffa8b1e078bc3c08768bdd4b11a \ + --hash=sha256:e140bd717c49164c8feb4f65c644046fe929c46f42493672853e3213d7bdbce2 \ + --hash=sha256:f4780e2bb52f3863a565ec3f699319d3493b83ff95ebbb4993e59c62aaf6e75e # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -1309,9 +1309,9 @@ uc-micro-py==1.0.2 \ --hash=sha256:30ae2ac9c49f39ac6dce743bd187fcd2b574b16ca095fa74cd9396795c954c54 \ --hash=sha256:8c9110c309db9d9e87302e2f4ad2c3152770930d88ab385cd544e7a7e75f3de0 # via linkify-it-py -urllib3==2.0.6 \ - --hash=sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2 \ - --hash=sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564 +urllib3==2.0.7 \ + --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ + --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e # via # -c requirements/main.txt # documenteer diff --git a/requirements/main.txt b/requirements/main.txt index 986bf09cf5..5815e59056 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -99,97 +99,97 @@ cffi==1.16.0 \ --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ 
--hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography -charset-normalizer==3.3.0 \ - --hash=sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843 \ - --hash=sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786 \ - --hash=sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e \ - --hash=sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8 \ - --hash=sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4 \ - --hash=sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa \ - --hash=sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d \ - --hash=sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82 \ - --hash=sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7 \ - --hash=sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895 \ - --hash=sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d \ - --hash=sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a \ - --hash=sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382 \ - --hash=sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678 \ - --hash=sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b \ - --hash=sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e \ - --hash=sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741 \ - --hash=sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4 \ - --hash=sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596 \ - --hash=sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9 \ - --hash=sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69 \ - --hash=sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c \ - 
--hash=sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77 \ - --hash=sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13 \ - --hash=sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459 \ - --hash=sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e \ - --hash=sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7 \ - --hash=sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908 \ - --hash=sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a \ - --hash=sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f \ - --hash=sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8 \ - --hash=sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482 \ - --hash=sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d \ - --hash=sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d \ - --hash=sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545 \ - --hash=sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34 \ - --hash=sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86 \ - --hash=sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6 \ - --hash=sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe \ - --hash=sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e \ - --hash=sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc \ - --hash=sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7 \ - --hash=sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd \ - --hash=sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c \ - --hash=sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557 \ - --hash=sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a \ - 
--hash=sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89 \ - --hash=sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078 \ - --hash=sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e \ - --hash=sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4 \ - --hash=sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403 \ - --hash=sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0 \ - --hash=sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89 \ - --hash=sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115 \ - --hash=sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9 \ - --hash=sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05 \ - --hash=sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a \ - --hash=sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec \ - --hash=sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56 \ - --hash=sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38 \ - --hash=sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479 \ - --hash=sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c \ - --hash=sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e \ - --hash=sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd \ - --hash=sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186 \ - --hash=sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455 \ - --hash=sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c \ - --hash=sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65 \ - --hash=sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78 \ - --hash=sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287 \ - 
--hash=sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df \ - --hash=sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43 \ - --hash=sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1 \ - --hash=sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7 \ - --hash=sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989 \ - --hash=sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a \ - --hash=sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63 \ - --hash=sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884 \ - --hash=sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649 \ - --hash=sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810 \ - --hash=sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828 \ - --hash=sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4 \ - --hash=sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2 \ - --hash=sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd \ - --hash=sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5 \ - --hash=sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe \ - --hash=sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293 \ - --hash=sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e \ - --hash=sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e \ - --hash=sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8 +charset-normalizer==3.3.1 \ + --hash=sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5 \ + --hash=sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93 \ + --hash=sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a \ + 
--hash=sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d \ + --hash=sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c \ + --hash=sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1 \ + --hash=sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58 \ + --hash=sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2 \ + --hash=sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557 \ + --hash=sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147 \ + --hash=sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041 \ + --hash=sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2 \ + --hash=sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2 \ + --hash=sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7 \ + --hash=sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296 \ + --hash=sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690 \ + --hash=sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67 \ + --hash=sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57 \ + --hash=sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597 \ + --hash=sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846 \ + --hash=sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b \ + --hash=sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97 \ + --hash=sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c \ + --hash=sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62 \ + --hash=sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa \ + --hash=sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f \ + --hash=sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e \ + 
--hash=sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821 \ + --hash=sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3 \ + --hash=sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4 \ + --hash=sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb \ + --hash=sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727 \ + --hash=sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514 \ + --hash=sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d \ + --hash=sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761 \ + --hash=sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55 \ + --hash=sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f \ + --hash=sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c \ + --hash=sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034 \ + --hash=sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6 \ + --hash=sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae \ + --hash=sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1 \ + --hash=sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14 \ + --hash=sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1 \ + --hash=sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228 \ + --hash=sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708 \ + --hash=sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48 \ + --hash=sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f \ + --hash=sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5 \ + --hash=sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f \ + --hash=sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4 \ + 
--hash=sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8 \ + --hash=sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff \ + --hash=sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61 \ + --hash=sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b \ + --hash=sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97 \ + --hash=sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b \ + --hash=sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605 \ + --hash=sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728 \ + --hash=sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d \ + --hash=sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c \ + --hash=sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf \ + --hash=sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673 \ + --hash=sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1 \ + --hash=sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b \ + --hash=sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41 \ + --hash=sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8 \ + --hash=sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f \ + --hash=sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4 \ + --hash=sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008 \ + --hash=sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9 \ + --hash=sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5 \ + --hash=sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f \ + --hash=sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e \ + --hash=sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273 \ + 
--hash=sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45 \ + --hash=sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e \ + --hash=sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656 \ + --hash=sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e \ + --hash=sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c \ + --hash=sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2 \ + --hash=sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72 \ + --hash=sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056 \ + --hash=sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397 \ + --hash=sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42 \ + --hash=sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd \ + --hash=sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3 \ + --hash=sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213 \ + --hash=sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf \ + --hash=sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67 # via requests click==8.1.7 \ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ @@ -225,21 +225,21 @@ cryptography==41.0.4 \ # -r requirements/main.in # pyjwt # safir -fastapi==0.103.2 \ - --hash=sha256:3270de872f0fe9ec809d4bd3d4d890c6d5cc7b9611d721d6438f9dacc8c4ef2e \ - --hash=sha256:75a11f6bfb8fc4d2bec0bd710c2d5f2829659c0e8c0afd5560fdda6ce25ec653 +fastapi==0.104.0 \ + --hash=sha256:456482c1178fb7beb2814b88e1885bc49f9a81f079665016feffe3e1c6a7663e \ + --hash=sha256:9c44de45693ae037b0c6914727a29c49a40668432b67c859a87851fc6a7b74c6 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ --hash=sha256:9ece7d37fbceb819b80560e7ed58f936e48a65d37ec5f56db79145156b426a25 # 
via safir -gitdb==4.0.10 \ - --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ - --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 +gitdb==4.0.11 \ + --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ + --hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b # via gitpython -gitpython==3.1.37 \ - --hash=sha256:5f4c4187de49616d710a77e98ddf17b4782060a1788df441846bddefbb89ab33 \ - --hash=sha256:f9b9ddc0761c125d5780eab2d64be4873fc6817c2899cbcb34b02344bdc7bc54 +gitpython==3.1.40 \ + --hash=sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4 \ + --hash=sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a # via -r requirements/main.in h11==0.14.0 \ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ @@ -255,9 +255,9 @@ httpx==0.23.3 \ # via # onepasswordconnectsdk # safir -hvac==1.2.1 \ - --hash=sha256:c786e3dfa1f35239810e5317cccadbe358f49b8c9001a1f2f68b79a250b9f8a1 \ - --hash=sha256:cb87f5724be8fd5f57507f5d5a94e6c42d2675128b460bf3186f966e07d4db78 +hvac==2.0.0 \ + --hash=sha256:3b14d0979b98ea993eca73b7dac7161b5547ede369a9b28f4fa40f18e74ec3f3 \ + --hash=sha256:6a51cb9a0d22fe13e824cb0b0a1ce2eeacb9ce6af68b7d1b6689e25ec1becaf5 # via -r requirements/main.in idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ @@ -455,10 +455,6 @@ pydantic-core==2.10.1 \ --hash=sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6 \ --hash=sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521 # via pydantic -pyhcl==0.4.5 \ - --hash=sha256:30ee337d330d1f90c9f5ed8f49c468f66c8e6e43192bdc7c6ece1420beb3070c \ - --hash=sha256:c47293a51ccdd25e18bb5c8c0ab0ffe355b37c87f8d6f9d3280dc41efd4740bc - # via hvac pyjwt[crypto]==2.8.0 \ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ 
--hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 @@ -571,7 +567,7 @@ uritemplate==4.1.1 \ --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e # via gidgethub -urllib3==2.0.6 \ - --hash=sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2 \ - --hash=sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564 +urllib3==2.0.7 \ + --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ + --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e # via requests From 9ae07584b931c8c4c0e79d336aa256d3f3033aac Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:31:56 +0000 Subject: [PATCH 132/588] Update Helm release strimzi-kafka-operator to v0.38.0 --- applications/strimzi/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index 6308212ce0..ef2f43b9bc 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -7,5 +7,5 @@ home: https://strimzi.io appVersion: "0.26.0" dependencies: - name: strimzi-kafka-operator - version: "0.37.0" + version: "0.38.0" repository: https://strimzi.io/charts/ From 1c574ba68344fde4b266190c8ddc8e055c866538 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 24 Oct 2023 15:50:40 +0000 Subject: [PATCH 133/588] Update Helm release argo-workflows to v0.37.0 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index af8b6e7d40..d7eb85d18f 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - 
https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.35.0 + version: 0.37.0 repository: https://argoproj.github.io/argo-helm From 13836cae6fd0d67c48c9ad2ccb07ee5355bdb923 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 17 Oct 2023 11:37:04 -0700 Subject: [PATCH 134/588] Migrate IDF int to new secret schema --- .../vault-secrets-operator/values-idfint.yaml | 14 ++++++++++++++ environments/values-idfint.yaml | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/applications/vault-secrets-operator/values-idfint.yaml b/applications/vault-secrets-operator/values-idfint.yaml index e69de29bb2..1e40e6f933 100644 --- a/applications/vault-secrets-operator/values-idfint.yaml +++ b/applications/vault-secrets-operator/values-idfint.yaml @@ -0,0 +1,14 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_SECRET_ID + vault: + authMethod: approle diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index f07106e224..43d1493443 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -1,7 +1,7 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" fqdn: data-int.lsst.cloud name: idfint -vaultPathPrefix: secret/k8s_operator/data-int.lsst.cloud +vaultPathPrefix: secret/phalanx/idfint applications: alert-stream-broker: true From bcdd6a2bf714f5f0177f8253922bf3d35dd55078 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 18 Oct 2023 13:41:58 -0700 Subject: [PATCH 135/588] Completed through 'Switch to the new secrets tree' step 2 --- applications/datalinker/values-idfint.yaml | 2 ++ applications/nublado/secrets-idfint.yaml | 19 ++++++++++++++++++ applications/nublado/values-idfint.yaml | 3 +++ .../plot-navigator/values-idfint.yaml | 2 ++ 
.../production-tools/values-idfint.yaml | 2 ++ applications/vo-cutouts/secrets-idfint.yaml | 20 +++++++++++++++++++ environments/values-idfint.yaml | 3 +++ 7 files changed, 51 insertions(+) create mode 100644 applications/nublado/secrets-idfint.yaml create mode 100644 applications/vo-cutouts/secrets-idfint.yaml diff --git a/applications/datalinker/values-idfint.yaml b/applications/datalinker/values-idfint.yaml index e69de29bb2..288a3da54a 100644 --- a/applications/datalinker/values-idfint.yaml +++ b/applications/datalinker/values-idfint.yaml @@ -0,0 +1,2 @@ +config: + separateSecrets: true diff --git a/applications/nublado/secrets-idfint.yaml b/applications/nublado/secrets-idfint.yaml new file mode 100644 index 0000000000..6f66967c08 --- /dev/null +++ b/applications/nublado/secrets-idfint.yaml @@ -0,0 +1,19 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + onepassword: + encoded: true +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. +"butler-hmac-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the private + key syntax used for HMACs. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. 
+ onepassword: + encoded: true diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 7c794d2cd1..3be1eac95c 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -102,3 +102,6 @@ jupyterhub: timeout: 432000 every: 300 maxAge: 2160000 + +secrets: + templateSecrets: true diff --git a/applications/plot-navigator/values-idfint.yaml b/applications/plot-navigator/values-idfint.yaml index 4dc30dc478..2a8515e988 100644 --- a/applications/plot-navigator/values-idfint.yaml +++ b/applications/plot-navigator/values-idfint.yaml @@ -3,3 +3,5 @@ environment: PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" S3_ENDPOINT_URL: "https://storage.googleapis.com" +config: + separateSecrets: true diff --git a/applications/production-tools/values-idfint.yaml b/applications/production-tools/values-idfint.yaml index b89176b204..c846f462a2 100644 --- a/applications/production-tools/values-idfint.yaml +++ b/applications/production-tools/values-idfint.yaml @@ -3,3 +3,5 @@ environment: LOG_BUCKET: "drp-us-central1-logging" LOG_PREFIX: "Panda-RubinLog" WEB_CONCURRENCY: "4" +config: + separateSecrets: true diff --git a/applications/vo-cutouts/secrets-idfint.yaml b/applications/vo-cutouts/secrets-idfint.yaml new file mode 100644 index 0000000000..57998942f8 --- /dev/null +++ b/applications/vo-cutouts/secrets-idfint.yaml @@ -0,0 +1,20 @@ +aws-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +google-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. 
+ copy: + application: nublado + key: "butler-gcs-idf-creds.json" +postgres-credentials: + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 43d1493443..73bb271cbd 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -1,6 +1,9 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" fqdn: data-int.lsst.cloud name: idfint +onepassword: + connectUrl: "https://roundtable.lsst.cloud/1password" + vaultTitle: "RSP data-int.lsst.cloud" vaultPathPrefix: secret/phalanx/idfint applications: From ff0293b7c6a8dce7c92c5210b6ee8277216217da Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 18 Oct 2023 15:01:33 -0700 Subject: [PATCH 136/588] Add secrets placeholders --- .../alert-stream-broker/secrets-idfint.yaml | 18 ++++++++++++++++++ applications/alert-stream-broker/secrets.yaml | 2 ++ applications/sasquatch/secrets-idfint.yaml | 16 ++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 applications/alert-stream-broker/secrets-idfint.yaml create mode 100644 applications/alert-stream-broker/secrets.yaml create mode 100644 applications/sasquatch/secrets-idfint.yaml diff --git a/applications/alert-stream-broker/secrets-idfint.yaml b/applications/alert-stream-broker/secrets-idfint.yaml new file mode 100644 index 0000000000..9522a0504a --- /dev/null +++ b/applications/alert-stream-broker/secrets-idfint.yaml @@ -0,0 +1,18 @@ +"alerce-idfint-password": + description: "?" +"ampel-idfint-password": + description: "?" +"antares-idfint-password": + description: "?" +"babamul-idfint-password": + description: "?" +"fink-idfint-password": + description: "?" +"lasair-idfint-password": + description: "?" +"pittgoogle-idfint-password": + description: "?" +"rubin-communitybroker-idfint-password": + description: "?" 
+"rubin-devel-idfint-password": + description: "?" diff --git a/applications/alert-stream-broker/secrets.yaml b/applications/alert-stream-broker/secrets.yaml new file mode 100644 index 0000000000..cab38f58c0 --- /dev/null +++ b/applications/alert-stream-broker/secrets.yaml @@ -0,0 +1,2 @@ +"kafka-admin-password": + description: "?" diff --git a/applications/sasquatch/secrets-idfint.yaml b/applications/sasquatch/secrets-idfint.yaml new file mode 100644 index 0000000000..1ab5fdb169 --- /dev/null +++ b/applications/sasquatch/secrets-idfint.yaml @@ -0,0 +1,16 @@ +"kafka-connect-manager-password": + description: "?" +"prompt-processing-password": + description: "?" +"rest-proxy-password": + description: "?" +"rest-proxy-sasl-jass-config": + description: "?" +"sasquatch-test-kafka-properties": + description: "?" +"sasquatch-test-password": + description: "?" +"telegraf-password": + description: "?" +"ts-salkafka-password": + description: "?" From a2bf20b53a09677267b11b97609833499a073189 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 24 Oct 2023 13:56:15 -0700 Subject: [PATCH 137/588] [DM-41251] Bump tap schema image to 2.1.2 This contains a foreign key in the tap schema tables. --- charts/cadc-tap/README.md | 2 +- charts/cadc-tap/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index 8d7e4cf370..58e8faf771 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -56,7 +56,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| -| tapSchema.image.tag | string | `"2.1.1"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"2.1.2"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 7a0027c5b4..c4518a5ba5 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -165,7 +165,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.1.1" + tag: "2.1.2" # -- Resource limits and requests for the TAP schema database pod resources: {} From e569a9497777d3a9d6bad23cfcc76b979e509b8d Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 24 Oct 2023 14:56:47 -0700 Subject: [PATCH 138/588] point all apps to branch --- environments/values-idfint.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 73bb271cbd..7ac4c20379 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -1,6 +1,8 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" fqdn: data-int.lsst.cloud name: idfint +# FIXME undo this change when we merge everything +targetRevision: "tickets/DM-41237" onepassword: connectUrl: "https://roundtable.lsst.cloud/1password" vaultTitle: "RSP data-int.lsst.cloud" From f90b916c201aff09d05c9a51b05cc747b947a30f Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 24 Oct 2023 14:59:15 -0700 Subject: [PATCH 139/588] Actually don't reset branch for all apps; just do VSO separately --- environments/values-idfint.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 7ac4c20379..73bb271cbd 100644 --- 
a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -1,8 +1,6 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" fqdn: data-int.lsst.cloud name: idfint -# FIXME undo this change when we merge everything -targetRevision: "tickets/DM-41237" onepassword: connectUrl: "https://roundtable.lsst.cloud/1password" vaultTitle: "RSP data-int.lsst.cloud" From 74d3f7d97ae733b1eab3c3ddbf87ea8a5387451b Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 24 Oct 2023 15:18:30 -0700 Subject: [PATCH 140/588] manually update alert-stream-broker secret path --- applications/alert-stream-broker/values-idfint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/alert-stream-broker/values-idfint.yaml b/applications/alert-stream-broker/values-idfint.yaml index 3b94677a96..11c1c2a2a7 100644 --- a/applications/alert-stream-broker/values-idfint.yaml +++ b/applications/alert-stream-broker/values-idfint.yaml @@ -39,7 +39,7 @@ alert-stream-broker: - key: kafka value: ok effect: NoSchedule - vaultSecretsPath: "secret/k8s_operator/data-int.lsst.cloud/alert-stream-broker" + vaultSecretsPath: "secret/phalanx/idfint/alert-stream-broker" users: # A user for development purposes by the Rubin team, with access to all From 3c4987e8249a6edf9cdd8ad144d0cbe2e35bc6ad Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 4 Oct 2023 12:57:35 -0700 Subject: [PATCH 141/588] [DM-40944] Add siav2 application --- applications/siav2/.helmignore | 23 +++++++ applications/siav2/Chart.yaml | 8 +++ applications/siav2/README.md | 29 +++++++++ applications/siav2/templates/_helpers.tpl | 26 ++++++++ applications/siav2/templates/deployment.yaml | 59 +++++++++++++++++ applications/siav2/templates/hpa.yaml | 28 ++++++++ applications/siav2/templates/ingress.yaml | 32 ++++++++++ .../siav2/templates/networkpolicy.yaml | 21 ++++++ applications/siav2/templates/service.yaml | 15 +++++ applications/siav2/values.yaml | 64 +++++++++++++++++++ 
docs/applications/siav2/index.rst | 16 +++++ docs/applications/siav2/values.md | 12 ++++ environments/README.md | 1 + environments/templates/siav2-application.yaml | 34 ++++++++++ environments/values.yaml | 3 + 15 files changed, 371 insertions(+) create mode 100644 applications/siav2/.helmignore create mode 100644 applications/siav2/Chart.yaml create mode 100644 applications/siav2/README.md create mode 100644 applications/siav2/templates/_helpers.tpl create mode 100644 applications/siav2/templates/deployment.yaml create mode 100644 applications/siav2/templates/hpa.yaml create mode 100644 applications/siav2/templates/ingress.yaml create mode 100644 applications/siav2/templates/networkpolicy.yaml create mode 100644 applications/siav2/templates/service.yaml create mode 100644 applications/siav2/values.yaml create mode 100644 docs/applications/siav2/index.rst create mode 100644 docs/applications/siav2/values.md create mode 100644 environments/templates/siav2-application.yaml diff --git a/applications/siav2/.helmignore b/applications/siav2/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/siav2/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/siav2/Chart.yaml b/applications/siav2/Chart.yaml new file mode 100644 index 0000000000..fb6b8b378a --- /dev/null +++ b/applications/siav2/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: Simple Image Access v2 service +name: siav2 +sources: +- https://github.com/lsst-sqre/siav2 +type: application +version: 1.0.0 diff --git a/applications/siav2/README.md b/applications/siav2/README.md new file mode 100644 index 0000000000..b6369c866c --- /dev/null +++ b/applications/siav2/README.md @@ -0,0 +1,29 @@ +# siav2 + +Simple Image Access v2 service + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the siav2 deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of siav2 deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of siav2 deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of siav2 deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of siav2 deployment pods | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the siav2 image | +| image.repository | string | `"ghcr.io/lsst-sqre/siav2"` | Image to use in the siav2 deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the siav2 deployment pod | +| podAnnotations | object | `{}` | Annotations for the siav2 deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the siav2 deployment pod | +| tolerations | list | `[]` | Tolerations for the siav2 deployment pod | diff --git a/applications/siav2/templates/_helpers.tpl b/applications/siav2/templates/_helpers.tpl new file mode 100644 index 0000000000..0cadb5116e --- /dev/null +++ b/applications/siav2/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "siav2.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "siav2.labels" -}} +helm.sh/chart: {{ include "siav2.chart" . }} +{{ include "siav2.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "siav2.selectorLabels" -}} +app.kubernetes.io/name: "siav2" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/siav2/templates/deployment.yaml b/applications/siav2/templates/deployment.yaml new file mode 100644 index 0000000000..63aeec9088 --- /dev/null +++ b/applications/siav2/templates/deployment.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "siav2" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "siav2.selectorLabels" . 
| nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "siav2.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/siav2/templates/hpa.yaml b/applications/siav2/templates/hpa.yaml new file mode 100644 index 0000000000..b1b59e6b4f --- /dev/null +++ b/applications/siav2/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "siav2" + labels: + {{- include "siav2.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "siav2" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/siav2/templates/ingress.yaml b/applications/siav2/templates/ingress.yaml new file mode 100644 index 0000000000..1017d39458 --- /dev/null +++ b/applications/siav2/templates/ingress.yaml @@ -0,0 +1,32 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "siav2" + labels: + {{- include "siav2.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + loginRedirect: true +template: + metadata: + name: "siav2" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + ingressClassName: "nginx" + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/siav2" + pathType: "Prefix" + backend: + service: + name: "siav2" + port: + number: 8080 diff --git a/applications/siav2/templates/networkpolicy.yaml b/applications/siav2/templates/networkpolicy.yaml new file mode 100644 index 0000000000..0ab737938e --- /dev/null +++ b/applications/siav2/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "siav2" +spec: + podSelector: + matchLabels: + {{- include "siav2.selectorLabels" . 
| nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/siav2/templates/service.yaml b/applications/siav2/templates/service.yaml new file mode 100644 index 0000000000..b05d2f791b --- /dev/null +++ b/applications/siav2/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "siav2" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "siav2.selectorLabels" . | nindent 4 }} diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml new file mode 100644 index 0000000000..f62dc55447 --- /dev/null +++ b/applications/siav2/values.yaml @@ -0,0 +1,64 @@ +# Default values for siav2. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the siav2 deployment + repository: "ghcr.io/lsst-sqre/siav2" + + # -- Pull policy for the siav2 image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of siav2 deployment + enabled: false + + # -- Minimum number of siav2 deployment pods + minReplicas: 1 + + # -- Maximum number of siav2 deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of siav2 deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the siav2 deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the siav2 deployment pod +resources: {} + +# -- Node selection rules for the siav2 deployment pod +nodeSelector: {} + +# -- Tolerations for the siav2 deployment pod +tolerations: [] + +# -- Affinity rules for the siav2 deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/docs/applications/siav2/index.rst b/docs/applications/siav2/index.rst new file mode 100644 index 0000000000..2621b3d49b --- /dev/null +++ b/docs/applications/siav2/index.rst @@ -0,0 +1,16 @@ +.. px-app:: siav2 + +###################################### +siav2 — Simple Image Access v2 service +###################################### + +.. jinja:: siav2 + :file: applications/_summary.rst.jinja + +Guides +====== + +.. 
toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/siav2/values.md b/docs/applications/siav2/values.md new file mode 100644 index 0000000000..b5230c4985 --- /dev/null +++ b/docs/applications/siav2/values.md @@ -0,0 +1,12 @@ +```{px-app-values} siav2 +``` + +# siav2 Helm values reference + +Helm values reference table for the {px-app}`siav2` application. + +```{include} ../../../applications/siav2/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/environments/README.md b/environments/README.md index 2673d51902..216d0b7c8b 100644 --- a/environments/README.md +++ b/environments/README.md @@ -41,6 +41,7 @@ | applications.sasquatch | bool | `false` | Enable the sasquatch application | | applications.semaphore | bool | `false` | Enable the semaphore application | | applications.sherlock | bool | `false` | Enable the sherlock application | +| applications.siav2 | bool | `false` | Enable the siav2 application | | applications.sqlproxy-cross-project | bool | `false` | Enable the sqlproxy-cross-project application | | applications.squarebot | bool | `false` | Enable the squarebot application | | applications.squareone | bool | `false` | Enable the squareone application | diff --git a/environments/templates/siav2-application.yaml b/environments/templates/siav2-application.yaml new file mode 100644 index 0000000000..115c44c25d --- /dev/null +++ b/environments/templates/siav2-application.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "siav2") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "siav2" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "siav2" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "siav2" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/siav2" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ 
.Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values.yaml b/environments/values.yaml index 0dbce119ba..e399ca8e7a 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -125,6 +125,9 @@ applications: # -- Enable the sasquatch application sasquatch: false + # -- Enable the siav2 application + siav2: false + # -- Enable the ssotap application ssotap: false From 96c97510a78302bf4eb598a495cd9c7389053bf1 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 13:18:08 -0700 Subject: [PATCH 142/588] [DM-40944] Finish up siav2 chart Add in a uws postgres db, set up the configs, and ingress. Also add full name to _helpers, which didn't seem to exist --- applications/siav2/README.md | 8 +++ applications/siav2/templates/_helpers.tpl | 18 +++++++ applications/siav2/templates/configmap.yaml | 31 +++++++++++ applications/siav2/templates/deployment.yaml | 19 +++++-- applications/siav2/templates/ingress.yaml | 2 +- .../siav2/templates/uws-db-deployment.yaml | 51 +++++++++++++++++++ .../siav2/templates/uws-db-networkpolicy.yaml | 23 +++++++++ .../siav2/templates/uws-db-service.yaml | 14 +++++ applications/siav2/values.yaml | 33 ++++++++++++ 9 files changed, 193 insertions(+), 6 deletions(-) create mode 100644 applications/siav2/templates/configmap.yaml create mode 100644 applications/siav2/templates/uws-db-deployment.yaml create mode 100644 applications/siav2/templates/uws-db-networkpolicy.yaml create mode 100644 applications/siav2/templates/uws-db-service.yaml diff --git a/applications/siav2/README.md b/applications/siav2/README.md index b6369c866c..f605d1c79f 100644 --- a/applications/siav2/README.md 
+++ b/applications/siav2/README.md @@ -27,3 +27,11 @@ Simple Image Access v2 service | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the siav2 deployment pod | | tolerations | list | `[]` | Tolerations for the siav2 deployment pod | +| uws.affinity | object | `{}` | Affinity rules for the UWS database pod | +| uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | +| uws.image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-uws-db"` | UWS database image to use | +| uws.image.tag | string | Version of QServ TAP image | Tag of UWS database image to use | +| uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | +| uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | +| uws.resources | object | `{"limits":{"cpu":2,"memory":"4Gi"},"requests":{"cpu":0.25,"memory":"1Gi"}}` | Resource limits and requests for the UWS database pod | +| uws.tolerations | list | `[]` | Tolerations for the UWS database pod | diff --git a/applications/siav2/templates/_helpers.tpl b/applications/siav2/templates/_helpers.tpl index 0cadb5116e..97f049c4e0 100644 --- a/applications/siav2/templates/_helpers.tpl +++ b/applications/siav2/templates/_helpers.tpl @@ -24,3 +24,21 @@ Selector labels app.kubernetes.io/name: "siav2" app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "siav2.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} diff --git a/applications/siav2/templates/configmap.yaml b/applications/siav2/templates/configmap.yaml new file mode 100644 index 0000000000..baa16470b7 --- /dev/null +++ b/applications/siav2/templates/configmap.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "siav2.fullname" . }}-config + labels: + {{- include "siav2.labels" . | nindent 4 }} +data: + cadc-registry.properties: | + ivo://ivoa.net/sso#OpenID = {{ .Values.global.baseUrl }}/auth/openid + catalina.properties: | + # tomcat properties + tomcat.connector.connectionTimeout=20000 + tomcat.connector.keepAliveTimeout=120000 + tomcat.connector.secure=false + tomcat.connector.scheme=http + tomcat.connector.proxyName=localhost + tomcat.connector.proxyPort=8080 + + # database connection pools for uws + org.opencadc.sia2.uws.maxActive=5 + org.opencadc.sia2.uws.username=postgres + org.opencadc.sia2.uws.password= + org.opencadc.sia2.uws.url=jdbc:postgresql://siav2-uws-db/uws + + # authentication provider + # ca.nrc.cadc.auth.IdentityManager=org.opencadc.auth.StandardIdentityManager + sia2.properties: | + # TAP service + org.opencadc.sia2.queryService = {{ .Values.global.baseUrl }}/api/tap + war-rename.conf: | + mv sia2.war api#siav2.war diff --git a/applications/siav2/templates/deployment.yaml b/applications/siav2/templates/deployment.yaml index 63aeec9088..51637c4a40 100644 --- a/applications/siav2/templates/deployment.yaml +++ b/applications/siav2/templates/deployment.yaml @@ -11,12 +11,14 @@ spec: selector: matchLabels: {{- include "siav2.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/component: "server" template: metadata: - {{- with .Values.podAnnotations }} annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} labels: {{- include "siav2.selectorLabels" . | nindent 8 }} spec: @@ -37,14 +39,21 @@ spec: protocol: "TCP" readinessProbe: httpGet: - path: "/" + path: "/api/sia2/availability" port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: config-volume + mountPath: /config + volumes: + - name: config-volume + configMap: + name: {{ template "siav2.fullname" . }}-config securityContext: runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 + runAsUser: 8675309 + runAsGroup: 8675309 {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/applications/siav2/templates/ingress.yaml b/applications/siav2/templates/ingress.yaml index 1017d39458..716c396a0e 100644 --- a/applications/siav2/templates/ingress.yaml +++ b/applications/siav2/templates/ingress.yaml @@ -23,7 +23,7 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/siav2" + - path: "/api/siav2" pathType: "Prefix" backend: service: diff --git a/applications/siav2/templates/uws-db-deployment.yaml b/applications/siav2/templates/uws-db-deployment.yaml new file mode 100644 index 0000000000..e7fb95b457 --- /dev/null +++ b/applications/siav2/templates/uws-db-deployment.yaml @@ -0,0 +1,51 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "siav2-uws-db" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "siav2.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "siav2-uws-db" + template: + metadata: + {{- with .Values.uws.podAnnotations }} + annotations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + {{- include "siav2.labels" . | nindent 8 }} + app.kubernetes.io/component: "siav2-uws-db" + spec: + automountServiceAccountToken: false + containers: + - name: "uws" + image: "{{ .Values.uws.image.repository }}:{{ .Values.uws.image.tag }}" + imagePullPolicy: {{ .Values.uws.image.pullPolicy | quote }} + ports: + - containerPort: 5432 + {{- with .Values.uws.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: "data" + mountPath: "/var/lib/postgresql/data" + volumes: + - name: "data" + emptyDir: {} + {{- with .Values.uws.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.uws.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.uws.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/siav2/templates/uws-db-networkpolicy.yaml b/applications/siav2/templates/uws-db-networkpolicy.yaml new file mode 100644 index 0000000000..a6c59dbb0c --- /dev/null +++ b/applications/siav2/templates/uws-db-networkpolicy.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "siav2-uws-db" +spec: + podSelector: + matchLabels: + {{- include "siav2.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "sia2-uws-db" + policyTypes: + - Ingress + # Deny all outbound access; PostgreSQL doesn't need to talk to anything. + - Egress + ingress: + # Allow inbound access to UWS database from the server. + - from: + - podSelector: + matchLabels: + {{- include "siav2.selectorLabels" . 
| nindent 14 }} + app.kubernetes.io/component: "siav2-uws-db" + ports: + - protocol: "TCP" + port: 5432 diff --git a/applications/siav2/templates/uws-db-service.yaml b/applications/siav2/templates/uws-db-service.yaml new file mode 100644 index 0000000000..97e1550881 --- /dev/null +++ b/applications/siav2/templates/uws-db-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: "siav2-uws-db" + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + ports: + - protocol: "TCP" + port: 5432 + targetPort: 5432 + selector: + {{- include "siav2.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: "siav2-uws-db" diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index f62dc55447..9e78c7b6e3 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -62,3 +62,36 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + +uws: + image: + # -- UWS database image to use + repository: "ghcr.io/lsst-sqre/lsst-tap-uws-db" + + # -- Pull policy for the UWS database image + pullPolicy: "IfNotPresent" + + # -- Tag of UWS database image to use + # @default -- Version of QServ TAP image + tag: "2.1.0" + + # -- Resource limits and requests for the UWS database pod + resources: + requests: + cpu: 0.25 + memory: "1Gi" + limits: + cpu: 2.0 + memory: "4Gi" + + # -- Annotations for the UWS databse pod + podAnnotations: {} + + # -- Node selection rules for the UWS database pod + nodeSelector: {} + + # -- Tolerations for the UWS database pod + tolerations: [] + + # -- Affinity rules for the UWS database pod + affinity: {} From 089601fd365ab811a4c6ae56082915cc86a9c47c Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 14:58:41 -0700 Subject: [PATCH 143/588] [DM-40944] Turn on siav2 for data-dev.lsst.cloud --- environments/values-idfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-idfdev.yaml 
b/environments/values-idfdev.yaml index 975200f4f2..d81d5dd034 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -17,6 +17,7 @@ applications: sasquatch: true semaphore: true sherlock: true + siav2: true ssotap: true squareone: true sqlproxy-cross-project: true From d61a8e87d10da585b0ff526cc78ec12e3eaf5c51 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 15:04:13 -0700 Subject: [PATCH 144/588] [DM-40944] Add values file for data-dev --- applications/siav2/values-idfdev.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 applications/siav2/values-idfdev.yaml diff --git a/applications/siav2/values-idfdev.yaml b/applications/siav2/values-idfdev.yaml new file mode 100644 index 0000000000..e69de29bb2 From ac8d477006f556c0972c6551b5d46ca255624115 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 15:14:43 -0700 Subject: [PATCH 145/588] [DM-40944] Fix siav2 deployment labels --- applications/siav2/templates/deployment.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/siav2/templates/deployment.yaml b/applications/siav2/templates/deployment.yaml index 51637c4a40..a7424ea237 100644 --- a/applications/siav2/templates/deployment.yaml +++ b/applications/siav2/templates/deployment.yaml @@ -21,6 +21,7 @@ spec: {{- end }} labels: {{- include "siav2.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: "server" spec: automountServiceAccountToken: false containers: @@ -39,7 +40,7 @@ spec: protocol: "TCP" readinessProbe: httpGet: - path: "/api/sia2/availability" + path: "/api/siav2/availability" port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} From 19a2709e51402a6ee21612a187866464b720379c Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 15:43:33 -0700 Subject: [PATCH 146/588] [DM-40944] Get rid of ingressClassName This shouldn't be there but it was put there by the starter --- applications/siav2/templates/ingress.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/siav2/templates/ingress.yaml b/applications/siav2/templates/ingress.yaml index 716c396a0e..f75b111866 100644 --- a/applications/siav2/templates/ingress.yaml +++ b/applications/siav2/templates/ingress.yaml @@ -18,7 +18,6 @@ template: {{- toYaml . | nindent 6 }} {{- end }} spec: - ingressClassName: "nginx" rules: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: From 7c7549b04257c8ecfd0bbd42753b0e6575abdcdc Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 15:46:59 -0700 Subject: [PATCH 147/588] [DM-40944] Get rid of fullname usage While fullname might be better if we're installing multiple instances into one namespace, we've just decided we're not doing that so we're trying to get rid of using fullname --- applications/siav2/templates/_helpers.tpl | 18 ------------------ applications/siav2/templates/configmap.yaml | 2 +- applications/siav2/templates/deployment.yaml | 2 +- 3 files changed, 2 insertions(+), 20 deletions(-) diff --git a/applications/siav2/templates/_helpers.tpl b/applications/siav2/templates/_helpers.tpl index 97f049c4e0..0cadb5116e 100644 --- a/applications/siav2/templates/_helpers.tpl +++ b/applications/siav2/templates/_helpers.tpl @@ -24,21 +24,3 @@ Selector labels app.kubernetes.io/name: "siav2" app.kubernetes.io/instance: {{ 
.Release.Name }} {{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "siav2.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} diff --git a/applications/siav2/templates/configmap.yaml b/applications/siav2/templates/configmap.yaml index baa16470b7..afa99c8c7c 100644 --- a/applications/siav2/templates/configmap.yaml +++ b/applications/siav2/templates/configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ template "siav2.fullname" . }}-config + name: siav2-config labels: {{- include "siav2.labels" . | nindent 4 }} data: diff --git a/applications/siav2/templates/deployment.yaml b/applications/siav2/templates/deployment.yaml index a7424ea237..17d9d89278 100644 --- a/applications/siav2/templates/deployment.yaml +++ b/applications/siav2/templates/deployment.yaml @@ -50,7 +50,7 @@ spec: volumes: - name: config-volume configMap: - name: {{ template "siav2.fullname" . 
}}-config + name: siav2-config securityContext: runAsNonRoot: true runAsUser: 8675309 From dad03a23b399cd25b7c9af4ae221ca60368a655c Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 5 Oct 2023 15:59:58 -0700 Subject: [PATCH 148/588] [DM-40944] Add a pull secret --- applications/siav2/templates/vault-secrets.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 applications/siav2/templates/vault-secrets.yaml diff --git a/applications/siav2/templates/vault-secrets.yaml b/applications/siav2/templates/vault-secrets.yaml new file mode 100644 index 0000000000..df02f02b05 --- /dev/null +++ b/applications/siav2/templates/vault-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: pull-secret + labels: + {{- include "siav2.labels" . | nindent 4 }} +spec: + path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" + type: kubernetes.io/dockerconfigjson From e5d94eeceb2c8817a0bb6c1996981c3463f093c6 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Fri, 6 Oct 2023 16:12:46 -0700 Subject: [PATCH 149/588] [DM-40944] Use my private docker image, don't use read only First, we don't have a repo building sia images yet, that will come after CADC gets things squared away. I'm using my built image off of Pat's repo until we can handle that. Next, the default for images is to keep things readonly, but tomcat doesn't like that with the rename war file, which allows us to not have to do a bunch of proxy rewriting.
--- applications/siav2/README.md | 2 +- applications/siav2/templates/deployment.yaml | 2 +- applications/siav2/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/siav2/README.md b/applications/siav2/README.md index f605d1c79f..4068b737fb 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -19,7 +19,7 @@ Simple Image Access v2 service | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the siav2 image | -| image.repository | string | `"ghcr.io/lsst-sqre/siav2"` | Image to use in the siav2 deployment | +| image.repository | string | `"docker.io/cbanek/siav2"` | Image to use in the siav2 deployment | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nodeSelector | object | `{}` | Node selection rules for the siav2 deployment pod | diff --git a/applications/siav2/templates/deployment.yaml b/applications/siav2/templates/deployment.yaml index 17d9d89278..823238e00e 100644 --- a/applications/siav2/templates/deployment.yaml +++ b/applications/siav2/templates/deployment.yaml @@ -31,7 +31,7 @@ spec: capabilities: drop: - "all" - readOnlyRootFilesystem: true + readOnlyRootFilesystem: false image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index 9e78c7b6e3..b647ba8d4d 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -7,7 +7,7 @@ replicaCount: 1 image: # -- Image to use in the siav2 deployment - repository: "ghcr.io/lsst-sqre/siav2" + repository: "docker.io/cbanek/siav2" # -- Pull policy for the siav2 image 
pullPolicy: "IfNotPresent" From 7f40da9594b910f24ebff9b7dc2e402a971810eb Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Fri, 6 Oct 2023 16:54:35 -0700 Subject: [PATCH 150/588] [DM-40944] Fix up network policies --- applications/siav2/templates/networkpolicy.yaml | 1 + applications/siav2/templates/uws-db-networkpolicy.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/siav2/templates/networkpolicy.yaml b/applications/siav2/templates/networkpolicy.yaml index 0ab737938e..8273676873 100644 --- a/applications/siav2/templates/networkpolicy.yaml +++ b/applications/siav2/templates/networkpolicy.yaml @@ -6,6 +6,7 @@ spec: podSelector: matchLabels: {{- include "siav2.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: "server" policyTypes: - Ingress ingress: diff --git a/applications/siav2/templates/uws-db-networkpolicy.yaml b/applications/siav2/templates/uws-db-networkpolicy.yaml index a6c59dbb0c..219b8a11fe 100644 --- a/applications/siav2/templates/uws-db-networkpolicy.yaml +++ b/applications/siav2/templates/uws-db-networkpolicy.yaml @@ -17,7 +17,7 @@ spec: - podSelector: matchLabels: {{- include "siav2.selectorLabels" . 
| nindent 14 }} - app.kubernetes.io/component: "siav2-uws-db" + app.kubernetes.io/component: "server" ports: - protocol: "TCP" port: 5432 From 3e738952e8d9e0bdcdaad57a60f9167c33932e48 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Fri, 6 Oct 2023 18:10:23 -0700 Subject: [PATCH 151/588] [DM-40944] Fix URLs in availability endpoint --- applications/siav2/templates/configmap.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/applications/siav2/templates/configmap.yaml b/applications/siav2/templates/configmap.yaml index afa99c8c7c..f1c373f905 100644 --- a/applications/siav2/templates/configmap.yaml +++ b/applications/siav2/templates/configmap.yaml @@ -13,14 +13,15 @@ data: tomcat.connector.keepAliveTimeout=120000 tomcat.connector.secure=false tomcat.connector.scheme=http - tomcat.connector.proxyName=localhost + tomcat.connector.proxyName={{ .Values.global.host }} tomcat.connector.proxyPort=8080 # database connection pools for uws org.opencadc.sia2.uws.maxActive=5 org.opencadc.sia2.uws.username=postgres org.opencadc.sia2.uws.password= - org.opencadc.sia2.uws.url=jdbc:postgresql://siav2-uws-db/uws + org.opencadc.sia2.uws.url=jdbc:postgresql://siav2-uws-db/ + org.opencadc.sia2.uws.db=uws # authentication provider # ca.nrc.cadc.auth.IdentityManager=org.opencadc.auth.StandardIdentityManager From 34064a7c12553fd7a7113a716f0d031657e1f930 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 10 Oct 2023 16:07:10 -0700 Subject: [PATCH 152/588] [DM-40944] Fix up uws database There's so much going on here. First off, don't tell it in the URL that it is a database named uws, that is assumed, and won't work if you put that in there. It's hardcoded to the name uws. So instead of the tap service uws database, which is behind, and it will try to be upgraded, let's use the library/postgres image, and the only thing I need to do is create the schema. 
So the configmap has this config script and mounts it in the postgres container in the spot where it runs the initial scripts. --- applications/siav2/README.md | 4 ++-- applications/siav2/templates/configmap.yaml | 1 - applications/siav2/templates/uws-configmap.yaml | 9 +++++++++ applications/siav2/templates/uws-db-deployment.yaml | 9 +++++++++ applications/siav2/values.yaml | 5 ++--- 5 files changed, 22 insertions(+), 6 deletions(-) create mode 100644 applications/siav2/templates/uws-configmap.yaml diff --git a/applications/siav2/README.md b/applications/siav2/README.md index 4068b737fb..836e918f1f 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -29,8 +29,8 @@ Simple Image Access v2 service | tolerations | list | `[]` | Tolerations for the siav2 deployment pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | -| uws.image.repository | string | `"ghcr.io/lsst-sqre/lsst-tap-uws-db"` | UWS database image to use | -| uws.image.tag | string | Version of QServ TAP image | Tag of UWS database image to use | +| uws.image.repository | string | `"library/postgres"` | UWS database image to use | +| uws.image.tag | string | `"16.0"` | Tag of UWS database image to use | | uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{"limits":{"cpu":2,"memory":"4Gi"},"requests":{"cpu":0.25,"memory":"1Gi"}}` | Resource limits and requests for the UWS database pod | diff --git a/applications/siav2/templates/configmap.yaml b/applications/siav2/templates/configmap.yaml index f1c373f905..a265670bef 100644 --- a/applications/siav2/templates/configmap.yaml +++ b/applications/siav2/templates/configmap.yaml @@ -21,7 +21,6 @@ data: org.opencadc.sia2.uws.username=postgres org.opencadc.sia2.uws.password= 
org.opencadc.sia2.uws.url=jdbc:postgresql://siav2-uws-db/ - org.opencadc.sia2.uws.db=uws # authentication provider # ca.nrc.cadc.auth.IdentityManager=org.opencadc.auth.StandardIdentityManager diff --git a/applications/siav2/templates/uws-configmap.yaml b/applications/siav2/templates/uws-configmap.yaml new file mode 100644 index 0000000000..4c1f532b78 --- /dev/null +++ b/applications/siav2/templates/uws-configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: siav2-uws-configmap + labels: + {{- include "siav2.labels" . | nindent 4 }} +data: + uws-schema-create.sql: | + CREATE SCHEMA uws; diff --git a/applications/siav2/templates/uws-db-deployment.yaml b/applications/siav2/templates/uws-db-deployment.yaml index e7fb95b457..ac7a416d97 100644 --- a/applications/siav2/templates/uws-db-deployment.yaml +++ b/applications/siav2/templates/uws-db-deployment.yaml @@ -14,6 +14,7 @@ spec: metadata: {{- with .Values.uws.podAnnotations }} annotations: + checksum/config: {{ include (print $.Template.BasePath "/uws-configmap.yaml") . | sha256sum }} {{- toYaml . | nindent 8 }} {{- end }} labels: @@ -25,6 +26,9 @@ spec: - name: "uws" image: "{{ .Values.uws.image.repository }}:{{ .Values.uws.image.tag }}" imagePullPolicy: {{ .Values.uws.image.pullPolicy | quote }} + env: + - name: POSTGRES_HOST_AUTH_METHOD + value: trust ports: - containerPort: 5432 {{- with .Values.uws.resources }} @@ -34,9 +38,14 @@ spec: volumeMounts: - name: "data" mountPath: "/var/lib/postgresql/data" + - name: "init-scripts" + mountPath: "/docker-entrypoint-initdb.d/" volumes: - name: "data" emptyDir: {} + - name: "init-scripts" + configMap: + name: siav2-uws-configmap {{- with .Values.uws.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index b647ba8d4d..4d18e875ea 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -66,14 +66,13 @@ global: uws: image: # -- UWS database image to use - repository: "ghcr.io/lsst-sqre/lsst-tap-uws-db" + repository: "library/postgres" # -- Pull policy for the UWS database image pullPolicy: "IfNotPresent" # -- Tag of UWS database image to use - # @default -- Version of QServ TAP image - tag: "2.1.0" + tag: "16.0" # -- Resource limits and requests for the UWS database pod resources: From 8c79bd2c89b749f866fb5060e4a53cea78671e0d Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Fri, 13 Oct 2023 16:44:07 -0700 Subject: [PATCH 153/588] [DM-40944] Get auth working --- applications/siav2/templates/configmap.yaml | 5 +++-- applications/siav2/templates/ingress.yaml | 9 ++++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/applications/siav2/templates/configmap.yaml b/applications/siav2/templates/configmap.yaml index a265670bef..404e0ba112 100644 --- a/applications/siav2/templates/configmap.yaml +++ b/applications/siav2/templates/configmap.yaml @@ -6,7 +6,7 @@ metadata: {{- include "siav2.labels" . 
| nindent 4 }} data: cadc-registry.properties: | - ivo://ivoa.net/sso#OpenID = {{ .Values.global.baseUrl }}/auth/openid + ivo://ivoa.net/sso#OpenID = {{ .Values.global.baseUrl }}/auth/cadc catalina.properties: | # tomcat properties tomcat.connector.connectionTimeout=20000 @@ -23,7 +23,8 @@ data: org.opencadc.sia2.uws.url=jdbc:postgresql://siav2-uws-db/ # authentication provider - # ca.nrc.cadc.auth.IdentityManager=org.opencadc.auth.StandardIdentityManager + ca.nrc.cadc.auth.IdentityManager=org.opencadc.auth.StandardIdentityManager + #ca.nrc.cadc.auth.PrincipalExtractor.allowBasicATP=true sia2.properties: | # TAP service org.opencadc.sia2.queryService = {{ .Values.global.baseUrl }}/api/tap diff --git a/applications/siav2/templates/ingress.yaml b/applications/siav2/templates/ingress.yaml index f75b111866..23e7e09805 100644 --- a/applications/siav2/templates/ingress.yaml +++ b/applications/siav2/templates/ingress.yaml @@ -6,9 +6,16 @@ metadata: {{- include "siav2.labels" . | nindent 4 }} config: baseUrl: {{ .Values.global.baseUrl | quote }} + delegate: + internal: + scopes: + - read:tap + service: siav2 + useAuthorization: true + loginRedirect: true scopes: all: - - "read:image" + - read:image loginRedirect: true template: metadata: From 3f88fb63167c7190c5b80c7d13c6e014918e78bc Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 24 Oct 2023 15:47:21 -0700 Subject: [PATCH 154/588] Move to CloudSQL in int --- applications/nublado/values-idfint.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 3be1eac95c..0a27c25b73 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -93,8 +93,8 @@ jupyterhub: ServerApp: shutdown_no_activity_timeout: 432000 db: - url: "postgresql://nublado3@postgres.postgres/nublado3" - + url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" + upgrade: true cull: enabled: true users: 
false @@ -103,5 +103,11 @@ jupyterhub: every: 300 maxAge: 2160000 +hub: + internalDatabase: false +cloudsql: + enabled: true + instanceConnectionName: "science-platform-int-dc5d:us-central1:science-platform-int-8f439af2" + serviceAccount: "nublado@science-platform-int-dc5d.iam.gserviceaccount.com" secrets: templateSecrets: true From 23842e608de370b1b8f3817f6c0cb456a15779c8 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 24 Oct 2023 15:53:04 -0700 Subject: [PATCH 155/588] Move to version of Hub with psql in it --- applications/nublado/values-idfint.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 0a27c25b73..d42692b18c 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -89,6 +89,8 @@ controller: jupyterhub: hub: + image: + tag: "0.4.1" config: ServerApp: shutdown_no_activity_timeout: 432000 From 23692c70c4ff2a5f46bb33f0108094950819e4ec Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 24 Oct 2023 16:44:17 -0700 Subject: [PATCH 156/588] [DM-40944] Enable siav2 on usdf-dev --- applications/siav2/README.md | 2 ++ applications/siav2/templates/configmap.yaml | 3 ++- applications/siav2/values-usdfdev.yaml | 2 ++ applications/siav2/values.yaml | 6 ++++++ environments/values-usdfdev.yaml | 1 + 5 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 applications/siav2/values-usdfdev.yaml diff --git a/applications/siav2/README.md b/applications/siav2/README.md index 836e918f1f..dd825755a2 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -23,9 +23,11 @@ Simple Image Access v2 service | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nodeSelector | object | `{}` | Node selection rules for the siav2 deployment pod | +| obsCoreTable | string | `"ivoa.ObsCore"` | ObsCore table on the TAP service to query | | podAnnotations | object | `{}` | Annotations for the siav2 deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the siav2 deployment pod | +| tapService | string | `"tap"` | Local TAP service endpoint to query | | tolerations | list | `[]` | Tolerations for the siav2 deployment pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | diff --git a/applications/siav2/templates/configmap.yaml b/applications/siav2/templates/configmap.yaml index 404e0ba112..dff5030498 100644 --- a/applications/siav2/templates/configmap.yaml +++ b/applications/siav2/templates/configmap.yaml @@ -27,6 +27,7 @@ data: #ca.nrc.cadc.auth.PrincipalExtractor.allowBasicATP=true sia2.properties: | # TAP service - org.opencadc.sia2.queryService = {{ .Values.global.baseUrl }}/api/tap + org.opencadc.sia2.queryService = {{ .Values.global.baseUrl }}/api/{{ .Values.tapService }} + org.opencadc.sia2.table = {{ .Values.obsCoreTable }} war-rename.conf: | mv sia2.war api#siav2.war diff --git a/applications/siav2/values-usdfdev.yaml b/applications/siav2/values-usdfdev.yaml new file mode 100644 index 0000000000..995d3094f2 --- /dev/null +++ b/applications/siav2/values-usdfdev.yaml @@ -0,0 +1,2 @@ +tapService: "live" +obsCoreTable: "oga.ObsCore" diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index 4d18e875ea..a6a3a7c58b 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -94,3 +94,9 @@ uws: # -- Affinity rules for the UWS database pod affinity: {} + +# -- Local TAP service 
endpoint to query +tapService: "tap" + +# -- ObsCore table on the TAP service to query +obsCoreTable: "ivoa.ObsCore" diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 3259dc1705..ff328c582a 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -23,6 +23,7 @@ applications: rubintv: true sasquatch: true semaphore: true + siav2: true ssotap: true squareone: true strimzi: true From 0fffae2d92c5c0ca7d39184c26bdd1b64237204c Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 24 Oct 2023 17:12:06 -0700 Subject: [PATCH 157/588] fix secrets path --- applications/nublado2/values-idfint.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml index c5812aedb8..13466360bf 100644 --- a/applications/nublado2/values-idfint.yaml +++ b/applications/nublado2/values-idfint.yaml @@ -19,8 +19,8 @@ jupyterhub: nginx.ingress.kubernetes.io/auth-signin: "https://data-int.lsst.cloud/login" config: base_url: "https://data-int.lsst.cloud" - butler_secret_path: "secret/k8s_operator/data-int.lsst.cloud/butler-secret" - pull_secret_path: "secret/k8s_operator/data-int.lsst.cloud/pull-secret" + butler_secret_path: "secret/phalanx/idfint/butler-secret" + pull_secret_path: "secret/phalanx/idfint/pull-secret" cachemachine_image_policy: "desired" lab_environment: PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" From 670afcdbb98a4660902de7f444b7b61b67128d43 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Fri, 28 Jul 2023 14:11:46 -0700 Subject: [PATCH 158/588] deploy usdfint environment Remove unused applications for nublado2 from usdfint --- applications/argocd/values-usdfint.yaml | 86 +++++++ applications/datalinker/values-usdfint.yaml | 0 applications/gafaelfawr/values-usdfint.yaml | 224 ++++++++++++++++++ applications/livetap/values-usdfint.yaml | 4 + 
applications/mobu/values-usdfdev.yaml | 22 +- applications/mobu/values-usdfint.yaml | 23 ++ applications/nublado/values-usdfint.yaml | 170 +++++++++++++ .../plot-navigator/values-usdfint.yaml | 26 ++ applications/portal/values-usdfint.yaml | 0 applications/postgres/values-usdfint.yaml | 11 + applications/sasquatch/values-usdfint.yaml | 165 +++++++++++++ applications/semaphore/values-usdfint.yaml | 9 + applications/squareone/values-usdfint.yaml | 5 + applications/ssotap/values-usdfint.yaml | 4 + applications/strimzi/values-usdfint.yaml | 9 + applications/tap/values-usdfint.yaml | 12 + .../values-usdfint.yaml | 22 ++ environments/values-usdfint.yaml | 25 ++ 18 files changed, 796 insertions(+), 21 deletions(-) create mode 100644 applications/argocd/values-usdfint.yaml create mode 100644 applications/datalinker/values-usdfint.yaml create mode 100644 applications/gafaelfawr/values-usdfint.yaml create mode 100644 applications/livetap/values-usdfint.yaml create mode 100644 applications/mobu/values-usdfint.yaml create mode 100644 applications/nublado/values-usdfint.yaml create mode 100644 applications/plot-navigator/values-usdfint.yaml create mode 100644 applications/portal/values-usdfint.yaml create mode 100644 applications/postgres/values-usdfint.yaml create mode 100644 applications/sasquatch/values-usdfint.yaml create mode 100644 applications/semaphore/values-usdfint.yaml create mode 100644 applications/squareone/values-usdfint.yaml create mode 100644 applications/ssotap/values-usdfint.yaml create mode 100644 applications/strimzi/values-usdfint.yaml create mode 100644 applications/tap/values-usdfint.yaml create mode 100644 applications/vault-secrets-operator/values-usdfint.yaml create mode 100644 environments/values-usdfint.yaml diff --git a/applications/argocd/values-usdfint.yaml b/applications/argocd/values-usdfint.yaml new file mode 100644 index 0000000000..90d4f43990 --- /dev/null +++ b/applications/argocd/values-usdfint.yaml @@ -0,0 +1,86 @@ +argo-cd: + redis: + 
enabled: true + + server: + ingress: + enabled: true + hosts: + - "usdf-rsp-int.slac.stanford.edu" + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + paths: + - /argo-cd(/|$)(.*) + + extraArgs: + - "--basehref=/argo-cd" + - "--insecure=true" + + env: + - name: HTTP_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + config: + url: https://usdf-rsp-int.slac.stanford.edu/argo-cd + oidc.config: | + name: SLAC + issuer: https://dex.slac.stanford.edu + clientID: "vcluster--usdf-rsp-int" + clientSecret: $dex.clientSecret + # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] + requestedScopes: ["openid", "profile", "email", "groups"] + # Optional set of OIDC claims to request on the ID token. + requestedIDTokenClaims: {"groups": {"essential": true}} + rbacConfig: + policy.csv: | + g, ytl@slac.stanford.edu, role:admin + g, ppascual@slac.stanford.edu, role:admin + g, pav@slac.stanford.edu, role:admin + g, dspeck@slac.stanford.edu, role:admin + g, afausti@slac.stanford.edu, role:admin + g, mfl@slac.stanford.edu, role:admin + g, cbanek@slac.stanford.edu, role:admin + g, frossie@slac.stanford.edu, role:admin + g, hchiang2@slac.stanford.edu, role:admin + g, athor@slac.stanford.edu, role:admin + g, jsick@slac.stanford.edu, role:admin + g, reinking@slac.stanford.edu, role:admin + g, smart@slac.stanford.edu, role:admin + g, omullan@slac.stanford.edu, role:admin + g, mreuter@slac.stanford.edu, role:admin + g, rra@slac.stanford.edu, role:admin + scopes: "[email]" + + helm.repositories: | + - url: https://lsst-sqre.github.io/charts/ + name: lsst-sqre + - url: https://charts.helm.sh/stable + name: stable + repoServer: + + env: + - name: HTTP_PROXY + value: 
http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + controller: + + env: + - name: HTTP_PROXY + value: http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://sdfproxy.sdf.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + configs: + secret: + createSecret: false diff --git a/applications/datalinker/values-usdfint.yaml b/applications/datalinker/values-usdfint.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/gafaelfawr/values-usdfint.yaml b/applications/gafaelfawr/values-usdfint.yaml new file mode 100644 index 0000000000..75695b8fab --- /dev/null +++ b/applications/gafaelfawr/values-usdfint.yaml @@ -0,0 +1,224 @@ +replicaCount: 2 + +# Use the CSI storage class so that we can use snapshots. 
+redis: + persistence: + storageClass: "wekafs--sdf-k8s01" + +config: + internalDatabase: true + + oidcServer: + enabled: true + + oidc: + clientId: vcluster--usdf-rsp-int + audience: "vcluster--usdf-rsp-int" + loginUrl: "https://dex.slac.stanford.edu/auth" + tokenUrl: "https://dex.slac.stanford.edu/token" + issuer: "https://dex.slac.stanford.edu" + scopes: + - "openid" + - "email" + - "groups" + - "profile" + usernameClaim: "name" + + ldap: + url: ldaps://ldap-unix.slac.stanford.edu:636 + groupBaseDn: ou=Group,dc=slac,dc=stanford,dc=edu + groupObjectClass: posixGroup + groupMemberAttr: memberUid + userBaseDn: ou=Accounts,dc=slac,dc=stanford,dc=edu + userSearchAttr: uid + addUserGroup: false + uidAttr: uidNumber + gidAttr: gidNumber + nameAttr: gecos + + groupMapping: + "admin:token": + - "rubinmgr" + - "unix-admin" + "exec:admin": + - "rubinmgr" + - "unix-admin" + "exec:notebook": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "exec:portal": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - 
rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "exec:user": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "read:tap": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "read:image": + - "lsst" + - lsst-ccs + - rubin_users + - rubin_users-a + - rubin_users-b + - rubin_users-c + - rubin_users-d + - rubin_users-e + - rubin_users-f + - rubin_users-g + - rubin_users-h + - rubin_users-i + - rubin_users-j + - rubin_users-k + - rubin_users-l + - rubin_users-m + - rubin_users-n + - rubin_users-o + - rubin_users-p + - rubin_users-q + - rubin_users-r + - rubin_users-s + - rubin_users-t + - rubin_users-u + - rubin_users-v + - rubin_users-w + - rubin_users-x + - rubin_users-y + - rubin_users-z + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "write:sasquatch": + - "rubinmgr" + - "unix-admin" + + initialAdmins: + - "afausti" + - "athor" + - "cbanek" + - "frossie" + - "jonathansick" + - "rra" + - "simonkrughoff" + - 
"ytl" + - "ppascual" diff --git a/applications/livetap/values-usdfint.yaml b/applications/livetap/values-usdfint.yaml new file mode 100644 index 0000000000..7d89dafb89 --- /dev/null +++ b/applications/livetap/values-usdfint.yaml @@ -0,0 +1,4 @@ +cadc-tap: + tapSchema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-livetap" diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml index 1a55036ea1..f2b77ee023 100644 --- a/applications/mobu/values-usdfdev.yaml +++ b/applications/mobu/values-usdfdev.yaml @@ -21,32 +21,12 @@ config: repo_branch: "prod" use_cachemachine: false restart: true - - name: "weekly" + - name: "tap" count: 1 users: - username: "bot-mobu02" uidnumber: 45693 gidnumber: 1126 - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - image: - image_class: "latest-weekly" - repo_url: "https://github.com/lsst-sqre/system-test.git" - repo_branch: "prod" - use_cachemachine: false - restart: true - - name: "tap" - count: 1 - users: - - username: "bot-mobu03" - uidnumber: 45694 - gidnumber: 1126 scopes: ["read:tap"] business: type: "TAPQueryRunner" diff --git a/applications/mobu/values-usdfint.yaml b/applications/mobu/values-usdfint.yaml new file mode 100644 index 0000000000..0bd165194f --- /dev/null +++ b/applications/mobu/values-usdfint.yaml @@ -0,0 +1,23 @@ +config: + debug: true + autostart: + - name: "firefighter" + count: 1 + users: + - username: "bot-mobu03" + uidnumber: 45694 + gidnumber: 1126 + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + image: + image_class: "latest-weekly" + repo_url: "https://github.com/lsst-sqre/system-test.git" + repo_branch: "prod" + use_cachemachine: false + restart: true diff --git a/applications/nublado/values-usdfint.yaml b/applications/nublado/values-usdfint.yaml new file mode 100644 index 0000000000..8b24353338 --- /dev/null 
+++ b/applications/nublado/values-usdfint.yaml @@ -0,0 +1,170 @@ +controller: + config: + safir: + logLevel: "DEBUG" + fileserver: + enabled: false + timeout: 21600 + + images: + source: + type: "docker" + registry: "docker-registry.slac.stanford.edu" + repository: "lsstsqre/sciplat-lab" + recommendedTag: "recommended" + numReleases: 1 + numWeeklies: 2 + numDailies: 3 + + lab: + pullSecret: "pull-secret" + + homedirSchema: "initialThenUsername" + + env: + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" + DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" + HUB_ROUTE: "/nb/hub" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" + PGUSER: "rubin" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" + https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" + no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" + + files: + # Add rubin_users group (there is not yet a simpler way to do this). 
+ /etc/group: + contents: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + + volumes: + - containerPath: "/home" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-home" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/project" + subPath: "g" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/sdf/group/rubin" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/sdf/data/rubin" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-data-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/scratch" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-scratch" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/fs/ddn/sdf/group/rubin" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "fs-ddn-sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/fs/ddn/sdf/group/lsst" + mode: "rw" + 
source: + type: "persistentVolumeClaim" + storageClassName: "fs-ddn-sdf-group-lsst" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + +proxy: + ingress: + annotations: + # proxy-body-size is temporary until USDF uses our normal ingress-nginx, + # which already configures a larger value. + nginx.ingress.kubernetes.io/proxy-body-size: "50m" + +jupyterhub: + hub: + baseUrl: "/nb" + db: + url: "postgresql://nublado3@postgres.postgres/nublado3" + cull: + timeout: 432000 + every: 300 + maxAge: 2160000 diff --git a/applications/plot-navigator/values-usdfint.yaml b/applications/plot-navigator/values-usdfint.yaml new file mode 100644 index 0000000000..72f5b541b2 --- /dev/null +++ b/applications/plot-navigator/values-usdfint.yaml @@ -0,0 +1,26 @@ +environment: + DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" + PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" + PGUSER: "rubin" + AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + BUTLER_DEFAULT_REPO: "/repo/main" + +config: + volumes: + - name: sdf-group-rubin + persistentVolumeClaim: + claimName: sdf-group-rubin + - name: sdf-data-rubin + persistentVolumeClaim: + claimName: sdf-data-rubin + volume_mounts: + - name: sdf-group-rubin + mountPath: /sdf/group/rubin + - name: sdf-data-rubin + mountPath: /sdf/data/rubin + persistentVolumeClaims: + - name: sdf-group-rubin + storageClassName: sdf-group-rubin + - name: sdf-data-rubin + storageClassName: sdf-data-rubin diff --git a/applications/portal/values-usdfint.yaml b/applications/portal/values-usdfint.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/postgres/values-usdfint.yaml b/applications/postgres/values-usdfint.yaml new file mode 100644 index 0000000000..dbc5324ac3 --- /dev/null +++ b/applications/postgres/values-usdfint.yaml @@ -0,0 +1,11 @@ +jupyterhub_db: + user: 'jovyan' + db: 'jupyterhub' +nublado3_db: + 
user: 'nublado3' + db: 'nublado3' +gafaelfawr_db: + user: 'gafaelfawr' + db: 'gafaelfawr' + +postgresStorageClass: 'wekafs--sdf-k8s01' diff --git a/applications/sasquatch/values-usdfint.yaml b/applications/sasquatch/values-usdfint.yaml new file mode 100644 index 0000000000..d0b57610d4 --- /dev/null +++ b/applications/sasquatch/values-usdfint.yaml @@ -0,0 +1,165 @@ +strimzi-kafka: + mirrormaker2: + enabled: false + source: + bootstrapServer: sasquatch-base-kafka-bootstrap.lsst.codes:9094 + topicsPattern: "registry-schemas, lsst.sal.*, lsst.dm.*" + resources: + requests: + cpu: 2 + memory: 4Gi + limits: + cpu: 4 + memory: 8Gi + users: + replicator: + enabled: true + +influxdb: + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + persistence: + enabled: true + size: 15Ti + +kafka-connect-manager: + influxdbSink: + # Based on the kafka producers configuration for the BTS + # https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-base-teststand.yaml + connectors: + auxtel: + enabled: false + topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" + maintel: + enabled: false + topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" + mtmount: + enabled: false + topicsRegex: ".*MTMount" + comcam: + enabled: false + topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" + eas: + enabled: false + topicsRegex: ".*DIMM|.*DSM|.*WeatherForecast|.*WeatherStation" + latiss: + enabled: false + topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" + m1m3: + enabled: false + topicsRegex: ".*MTM1M3" + m2: + enabled: false + topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" + obssys: + enabled: false + topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" + ocps: + enabled: false + topicsRegex: ".*OCPS" + test: + enabled: false + topicsRegex: ".*Test" + pmd: + enabled: false + topicsRegex: ".*PMD" + calsys: + enabled: false + topicsRegex: 
".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" + mtaircompressor: + enabled: false + topicsRegex: ".*MTAirCompressor" + authorize: + enabled: false + topicsRegex: ".*Authorize" + lasertracker: + enabled: false + topicsRegex: ".*LaserTracker" + genericcamera: + enabled: false + topicsRegex: ".*GCHeaderService|.*GenericCamera" + gis: + enabled: false + topicsRegex: ".*GIS" + lsstdm: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.dm" + topicsRegex: "lsst.dm.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + lsstdebug: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.debug" + topicsRegex: "lsst.debug.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + lsstexample: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.example" + topicsRegex: "lsst.example.*" + tags: band,instrument + lsstrubintv: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.rubintv" + topicsRegex: "lsst.rubintv.*" + tags: image_type,observation_reason,science_program,filter,disperser + lsstcamera: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.camera" + topicsRegex: "lsst.camera.*" + lsstverify: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.verify" + topicsRegex: "lsst.verify.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + lsstlf: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.lf" + topicsRegex: "lsst.lf.*" + tags: benchmark_env,module,benchmark_type + +kafdrop: + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + +rest-proxy: + enabled: false + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + kafka: + topics: + - test.next-visit + topicPrefixes: + - test + - lsst.dm + - lsst.debug + - lsst.example + - lsst.rubintv + - 
lsst.camera + - lsst.verify + - lsst.lf + +chronograf: + ingress: + enabled: true + hostname: usdf-rsp-int.slac.stanford.edu + + env: + GENERIC_NAME: "OIDC" + GENERIC_AUTH_URL: https://usdf-rsp-int.slac.stanford.edu/auth/openid/login + GENERIC_TOKEN_URL: https://usdf-rsp-int.slac.stanford.edu/auth/openid/token + USE_ID_TOKEN: 1 + JWKS_URL: https://usdf-rsp-int.slac.stanford.edu/.well-known/jwks.json + GENERIC_API_URL: https://usdf-rsp-int.slac.stanford.edu/auth/userinfo + GENERIC_SCOPES: openid + GENERIC_API_KEY: sub + PUBLIC_URL: https://usdf-rsp-int.slac.stanford.edu/ + STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/usdfint.json diff --git a/applications/semaphore/values-usdfint.yaml b/applications/semaphore/values-usdfint.yaml new file mode 100644 index 0000000000..0527b1efed --- /dev/null +++ b/applications/semaphore/values-usdfint.yaml @@ -0,0 +1,9 @@ +semaphore: + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx +config: + phalanx_env: "usdfint" + github_app_id: "337324" + enable_github_app: "True" diff --git a/applications/squareone/values-usdfint.yaml b/applications/squareone/values-usdfint.yaml new file mode 100644 index 0000000000..143b9b6ff0 --- /dev/null +++ b/applications/squareone/values-usdfint.yaml @@ -0,0 +1,5 @@ +replicaCount: 3 +config: + siteName: "Rubin Science Platform" + semaphoreUrl: "https://usdf-rsp-int.slac.stanford.edu/semaphore" + timesSquareUrl: "https://usdf-rsp-int.slac.stanford.edu/times-square/api" diff --git a/applications/ssotap/values-usdfint.yaml b/applications/ssotap/values-usdfint.yaml new file mode 100644 index 0000000000..7ed15d6fb4 --- /dev/null +++ b/applications/ssotap/values-usdfint.yaml @@ -0,0 +1,4 @@ +cadc-tap: + tapSchema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-sso" diff --git a/applications/strimzi/values-usdfint.yaml b/applications/strimzi/values-usdfint.yaml new file mode 100644 index 0000000000..1abe0d7c86 --- /dev/null +++ 
b/applications/strimzi/values-usdfint.yaml @@ -0,0 +1,9 @@ +strimzi-kafka-operator: + resources: + limits: + memory: "1Gi" + requests: + memory: "512Mi" + watchNamespaces: + - "sasquatch" + logLevel: "INFO" diff --git a/applications/tap/values-usdfint.yaml b/applications/tap/values-usdfint.yaml new file mode 100644 index 0000000000..9627e80cf9 --- /dev/null +++ b/applications/tap/values-usdfint.yaml @@ -0,0 +1,12 @@ +cadc-tap: + tapSchema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-tap" + + config: + qserv: + host: "172.24.49.51:4040" + + gcsBucket: "rubin:rubin-qserv" + gcsBucketUrl: "https://s3dfrgw.slac.stanford.edu" + gcsBucketType: "S3" diff --git a/applications/vault-secrets-operator/values-usdfint.yaml b/applications/vault-secrets-operator/values-usdfint.yaml new file mode 100644 index 0000000000..bfb0f3700f --- /dev/null +++ b/applications/vault-secrets-operator/values-usdfint.yaml @@ -0,0 +1,22 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_AUTH_METHOD + value: approle + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_SECRET_ID + - name: VAULT_TOKEN_MAX_TTL + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_TOKEN_MAX_TTL + vault: + address: "https://vault.slac.stanford.edu" + authMethod: approle diff --git a/environments/values-usdfint.yaml b/environments/values-usdfint.yaml new file mode 100644 index 0000000000..d0a1c79075 --- /dev/null +++ b/environments/values-usdfint.yaml @@ -0,0 +1,25 @@ +butlerRepositoryIndex: "s3://rubin-summit-users/data-repos.yaml" +fqdn: usdf-rsp-int.slac.stanford.edu +name: usdfint +vaultUrl: "https://vault.slac.stanford.edu" +vaultPathPrefix: secret/rubin/usdf-rsp-int + +applications: + # This environment uses an ingress managed in a separate Kubernetes cluster, + # despite that configuration not being officially supported by 
Phalanx. + cert-manager: false + ingress-nginx: false + + datalinker: true + livetap: true + mobu: true + nublado: true + plot-navigator: true + portal: true + postgres: true + sasquatch: true + semaphore: true + ssotap: true + squareone: true + strimzi: true + tap: true From 3270fa8b321802c8c01de0320b28eb32d7fd7f8b Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 25 Oct 2023 11:48:23 -0700 Subject: [PATCH 159/588] [DM-41251] tap schema 2.1.3 --- charts/cadc-tap/README.md | 2 +- charts/cadc-tap/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index 58e8faf771..69220b5372 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -56,7 +56,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| -| tapSchema.image.tag | string | `"2.1.2"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"2.1.3"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index c4518a5ba5..82f61b3465 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -165,7 +165,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.1.2" + tag: "2.1.3" # -- Resource limits and requests for the TAP schema database pod resources: {} From 7b16c1e57a7e8a9038081100b8946fde5ed4f486 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 25 Oct 2023 20:09:58 +0000 Subject: [PATCH 160/588] Update Helm release ingress-nginx to v4.8.3 --- applications/ingress-nginx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index 176ded64f3..ef223689ba 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.8.2 + version: 4.8.3 repository: https://kubernetes.github.io/ingress-nginx From cf3e225be32ca6669e29d298fdbece01a120f5e5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 25 Oct 2023 20:10:02 +0000 Subject: [PATCH 161/588] Update Helm release telegraf to v1.8.37 --- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 
c4310e44f2..d8dbea8f95 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.35 + version: 1.8.37 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From caed3d1d0dfb9d946ed5a85540370ee622d8991e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 25 Oct 2023 20:10:07 +0000 Subject: [PATCH 162/588] Update Helm release telegraf-ds to v1.1.19 --- applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 11c5998df4..3012197051 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.17 + version: 1.1.19 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 70d8d0f07d567a22df818d98bda7a8e8f27c7051 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 25 Oct 2023 13:12:10 -0700 Subject: [PATCH 163/588] Update Helm docs --- applications/gafaelfawr/README.md | 2 +- applications/nublado/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 38710700e0..bf2a6444ec 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,7 +17,7 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. 
This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 6fbb761a63..356b7da88b 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -17,7 +17,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a separate service, because shoehorning it into Zero to Jupyterhub's extraContainers looks messy, and it's not necessary that it be very performant. 
| | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index ecf1781446..62e46350bd 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.33.11"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.33.12"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/times-square/README.md b/applications/times-square/README.md index c2a2dd8e41..0c06f66b42 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index b0e8907118..b73bd138aa 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.11"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and 
ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | From 526dca6176ec3de8f268505cbf9b6b02c04d2a5b Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 25 Oct 2023 16:11:53 -0700 Subject: [PATCH 164/588] [DM-40944] Turn off login redirect This will get TOPCAT auth working, no service wants to get a client redirected to some login page. --- applications/siav2/templates/ingress.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/siav2/templates/ingress.yaml b/applications/siav2/templates/ingress.yaml index 23e7e09805..14e3f48b92 100644 --- a/applications/siav2/templates/ingress.yaml +++ b/applications/siav2/templates/ingress.yaml @@ -5,6 +5,7 @@ metadata: labels: {{- include "siav2.labels" . | nindent 4 }} config: + authType: basic baseUrl: {{ .Values.global.baseUrl | quote }} delegate: internal: @@ -12,11 +13,10 @@ config: - read:tap service: siav2 useAuthorization: true - loginRedirect: true + loginRedirect: false scopes: all: - read:image - loginRedirect: true template: metadata: name: "siav2" From 7dd16352105634d47dc1eef95244c010563a1fbe Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 25 Oct 2023 23:25:20 +0000 Subject: [PATCH 165/588] Update Helm release redis to v1.0.9 --- applications/gafaelfawr/Chart.yaml | 2 +- applications/noteburst/Chart.yaml | 2 +- applications/portal/Chart.yaml | 2 +- applications/rubintv/Chart.yaml | 2 +- applications/times-square/Chart.yaml | 2 +- applications/vo-cutouts/Chart.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index f182217a06..ea7237dd02 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -9,7 +9,7 @@ appVersion: 9.4.0 dependencies: - name: redis - version: 1.0.8 + version: 1.0.9 repository: 
https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index 2a7acc1739..fa6dcc2828 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -13,7 +13,7 @@ maintainers: dependencies: - name: redis - version: 1.0.8 + version: 1.0.9 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index a9004bb2c4..2505bb941e 100644 --- a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -9,7 +9,7 @@ appVersion: "suit-2023.2.3" dependencies: - name: redis - version: 1.0.8 + version: 1.0.9 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/rubintv/Chart.yaml b/applications/rubintv/Chart.yaml index fb493a331f..c147c27b91 100644 --- a/applications/rubintv/Chart.yaml +++ b/applications/rubintv/Chart.yaml @@ -7,5 +7,5 @@ sources: appVersion: 0.1.0 dependencies: - name: redis - version: 1.0.8 + version: 1.0.9 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 018f8c902e..e3ab8f8636 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -12,7 +12,7 @@ appVersion: "0.9.2" dependencies: - name: redis - version: 1.0.8 + version: 1.0.9 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index df72bdf189..3822de5097 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -8,7 +8,7 @@ appVersion: 1.0.0 dependencies: - name: redis - version: 1.0.8 + version: 1.0.9 repository: https://lsst-sqre.github.io/charts/ annotations: From 9ac35156e2c97186ff6227f9bac32eb50b03aead Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 13 Oct 2023 13:36:32 -0700 Subject: [PATCH 166/588] Add CADC auth 
support on IDF dev Use a test version of Gafaelfawr and add appropriate configuration to enable the CADC-specific user information endpoint on IDF dev for testing. --- applications/gafaelfawr/README.md | 1 + applications/gafaelfawr/templates/configmap.yaml | 3 +++ applications/gafaelfawr/values-idfdev.yaml | 7 +++++++ applications/gafaelfawr/values.yaml | 6 ++++++ 4 files changed, 17 insertions(+) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index bf2a6444ec..5ad40992fa 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -24,6 +24,7 @@ Authentication and identity system | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy pod | | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | +| config.cadcBaseUuid | string | Disabled | Whether to support the `/auth/cadc/userinfo` route. If set, this UUID is used as the namespace to generate UUID v5 `sub` claims returned by this route to meet the needs of CADC authentication code. | | config.cilogon.clientId | string | `""` | CILogon client ID. One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. 
| | config.cilogon.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | | config.cilogon.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | diff --git a/applications/gafaelfawr/templates/configmap.yaml b/applications/gafaelfawr/templates/configmap.yaml index e5c02c34d9..93c2f61976 100644 --- a/applications/gafaelfawr/templates/configmap.yaml +++ b/applications/gafaelfawr/templates/configmap.yaml @@ -27,6 +27,9 @@ {{- if .Values.config.slackAlerts }} slackWebhookFile: "/etc/gafaelfawr/secrets/slack-webhook" {{- end }} + {{- if .Values.config.cadcBaseUuid }} + cadcBaseUuid: {{ .Values.config.cadcBaseUuid | quote }} + {{- end }} {{- if .Values.config.github.clientId }} diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml index 91f77427e0..b5c5cca20c 100644 --- a/applications/gafaelfawr/values-idfdev.yaml +++ b/applications/gafaelfawr/values-idfdev.yaml @@ -1,3 +1,7 @@ +image: + tag: "tickets-DM-41186" + pullPolicy: "Always" + # Use the CSI storage class so that we can use snapshots. redis: persistence: @@ -30,6 +34,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "db8626e0-3b93-45c0-89ab-3058b0ed39fe" + # User quota settings for services. quota: default: diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 33e2b3f945..e0b94b3655 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -74,6 +74,12 @@ config: # `slack-webhook` secret must also be set. slackAlerts: false + # -- Whether to support the `/auth/cadc/userinfo` route. If set, this UUID + # is used as the namespace to generate UUID v5 `sub` claims returned by this + # route to meet the needs of CADC authentication code. 
+ # @default -- Disabled + cadcBaseUuid: "" + github: # -- GitHub client ID. One and only one of this, `config.cilogon.clientId`, # or `config.oidc.clientId` must be set. From 6df7fb44636551ff688e21eb5d173b2bef9d4fa2 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 25 Oct 2023 14:45:27 -0700 Subject: [PATCH 167/588] [DM-41186] Set up usdf-dev gafaelfawr for testing cadc auth --- applications/gafaelfawr/values-usdfdev.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index d44dcdb055..8845305956 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -1,3 +1,7 @@ +image: + tag: "tickets-DM-41186" + pullPolicy: "Always" + replicaCount: 2 # Use the CSI storage class so that we can use snapshots. @@ -18,6 +22,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "efa0a347-b648-4948-a987-055efbf6802a" + oidc: clientId: rubin-usdf-rsp-dev audience: "rubin-usdf-rsp-dev" From afe7cd5b805d1d476dd048bf811e359174aaedb9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 25 Oct 2023 16:31:01 -0700 Subject: [PATCH 168/588] Upgrade Gafaelfawr and enable CADC auth Update Gafaelfawr to the 9.5.0 release and enable the new endpoint for the current CADC authentication code. This endpoint will probably move to a different location later once CADC has updated their code to not require UUIDs. 
--- applications/gafaelfawr/Chart.yaml | 2 +- applications/gafaelfawr/values-idfdev.yaml | 4 ---- applications/gafaelfawr/values-idfint.yaml | 3 +++ applications/gafaelfawr/values-idfprod.yaml | 3 +++ applications/gafaelfawr/values-usdfdev.yaml | 4 ---- applications/gafaelfawr/values-usdfint.yaml | 3 +++ applications/gafaelfawr/values-usdfprod.yaml | 3 +++ 7 files changed, 13 insertions(+), 9 deletions(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index f182217a06..a01481e149 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,7 +5,7 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.4.0 +appVersion: 9.5.0 dependencies: - name: redis diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml index b5c5cca20c..c27d939d26 100644 --- a/applications/gafaelfawr/values-idfdev.yaml +++ b/applications/gafaelfawr/values-idfdev.yaml @@ -1,7 +1,3 @@ -image: - tag: "tickets-DM-41186" - pullPolicy: "Always" - # Use the CSI storage class so that we can use snapshots. redis: persistence: diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml index 9d85b88fe9..533e39f197 100644 --- a/applications/gafaelfawr/values-idfint.yaml +++ b/applications/gafaelfawr/values-idfint.yaml @@ -31,6 +31,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "dd5cd3ee-4239-48e4-b0e3-282f2328b9d1" + # User quota settings for services. 
quota: default: diff --git a/applications/gafaelfawr/values-idfprod.yaml b/applications/gafaelfawr/values-idfprod.yaml index dfdd5df5ea..a8cabefc8b 100644 --- a/applications/gafaelfawr/values-idfprod.yaml +++ b/applications/gafaelfawr/values-idfprod.yaml @@ -26,6 +26,9 @@ config: firestore: project: "rsp-firestore-stable-e8eb" + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "5f0eb655-0e72-4948-a6a5-a94c0be9019f" + # User quota settings for services. quota: default: diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index 8845305956..7056c16d50 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -1,7 +1,3 @@ -image: - tag: "tickets-DM-41186" - pullPolicy: "Always" - replicaCount: 2 # Use the CSI storage class so that we can use snapshots. diff --git a/applications/gafaelfawr/values-usdfint.yaml b/applications/gafaelfawr/values-usdfint.yaml index 75695b8fab..db082d66f5 100644 --- a/applications/gafaelfawr/values-usdfint.yaml +++ b/applications/gafaelfawr/values-usdfint.yaml @@ -11,6 +11,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. + cadcBaseUuid: "82c6fc76-b7d3-4368-92a9-6a468dfa23dc" + oidc: clientId: vcluster--usdf-rsp-int audience: "vcluster--usdf-rsp-int" diff --git a/applications/gafaelfawr/values-usdfprod.yaml b/applications/gafaelfawr/values-usdfprod.yaml index eb620aba7d..d7909f6996 100644 --- a/applications/gafaelfawr/values-usdfprod.yaml +++ b/applications/gafaelfawr/values-usdfprod.yaml @@ -11,6 +11,9 @@ config: oidcServer: enabled: true + # Support generating user metadata for CADC authentication code. 
+ cadcBaseUuid: "595f5a03-bef4-473b-8e5a-588d87f13799" + oidc: clientId: rubin-usdf-rsp audience: "rubin-usdf-rsp" From 8f2a043de0bbd8891963ea1a889b24ad3744d799 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Thu, 26 Oct 2023 12:37:56 -0300 Subject: [PATCH 169/588] narrativelog: update appVersion to 0.6.0 --- applications/narrativelog/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/narrativelog/Chart.yaml b/applications/narrativelog/Chart.yaml index 73f54a8182..cd72dcae0e 100644 --- a/applications/narrativelog/Chart.yaml +++ b/applications/narrativelog/Chart.yaml @@ -12,4 +12,4 @@ version: 1.0.0 # number should be incremented each time you make changes to the # application. Versions are not expected to follow Semantic Versioning. They # should reflect the version the application is using. -appVersion: 0.5.1 +appVersion: 0.6.0 From 6a1f91c9326469d94aae5226308c76334d9ac815 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 26 Oct 2023 13:30:08 -0700 Subject: [PATCH 170/588] [DM-40944] Set up anonymous and normal ingress Anonymous for the capabilities and availability endpoints Authenticated for the query endpoint (and anything else) --- .../siav2/templates/ingress-anonymous.yaml | 30 +++++++++++++++++++ applications/siav2/templates/ingress.yaml | 9 +++--- 2 files changed, 35 insertions(+), 4 deletions(-) create mode 100644 applications/siav2/templates/ingress-anonymous.yaml diff --git a/applications/siav2/templates/ingress-anonymous.yaml b/applications/siav2/templates/ingress-anonymous.yaml new file mode 100644 index 0000000000..6102d9611f --- /dev/null +++ b/applications/siav2/templates/ingress-anonymous.yaml @@ -0,0 +1,30 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "siav2-anonymous" + labels: + {{- include "siav2.labels" . 
| nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + anonymous: true +template: + metadata: + name: "siav2-anonymous" + {{- with .Values.ingress.annotations }} + annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/api/siav2/(availability|capabilities|swagger-ui.*)" + pathType: "ImplementationSpecific" + backend: + service: + name: "siav2" + port: + number: 8080 diff --git a/applications/siav2/templates/ingress.yaml b/applications/siav2/templates/ingress.yaml index 14e3f48b92..7f9fa4bd21 100644 --- a/applications/siav2/templates/ingress.yaml +++ b/applications/siav2/templates/ingress.yaml @@ -1,17 +1,17 @@ apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: - name: "siav2" + name: "siav2-authenticated" labels: {{- include "siav2.labels" . | nindent 4 }} config: - authType: basic + authType: "basic" baseUrl: {{ .Values.global.baseUrl | quote }} delegate: internal: scopes: - read:tap - service: siav2 + service: "siav2" useAuthorization: true loginRedirect: false scopes: @@ -19,9 +19,10 @@ config: - read:image template: metadata: - name: "siav2" + name: "siav2-authenticated" {{- with .Values.ingress.annotations }} annotations: + nginx.ingress.kubernetes.io/ssl-redirect: "true" {{- toYaml . | nindent 6 }} {{- end }} spec: From b5308915a1cf25800a6925b3c39a64a587bf1bdb Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 26 Oct 2023 14:06:05 -0700 Subject: [PATCH 171/588] Update version of Nublado Update all environments to a new version of the Nublado controller, and update IDF dev and IDF int to the latest version of the RSP REST spawner container, which also uses the new JupyterHub. 
--- applications/nublado/Chart.yaml | 2 +- applications/nublado/values-idfdev.yaml | 2 +- applications/nublado/values-idfint.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 11de173acf..72453e762f 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -6,7 +6,7 @@ sources: - https://github.com/lsst-sqre/jupyterlab-controller - https://github.com/lsst-sqre/rsp-restspawner home: https://github.com/lsst-sqre/jupyterlab-controller -appVersion: 0.8.0 +appVersion: 0.9.0 dependencies: - name: jupyterhub diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 478d45ab8f..207abf5468 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -70,7 +70,7 @@ controller: jupyterhub: hub: image: - tag: "0.4.1" + tag: "0.5.0" db: url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" upgrade: true diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index d42692b18c..f91a516f8e 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -90,7 +90,7 @@ controller: jupyterhub: hub: image: - tag: "0.4.1" + tag: "0.5.0" config: ServerApp: shutdown_no_activity_timeout: 432000 From 586c84345ca1efde894685cc1cc85cbc4824fc6f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 26 Oct 2023 15:10:56 -0700 Subject: [PATCH 172/588] Switch Nublado to new JupyterHub by default This includes the JupyterHub 4.x migration. Remove the override from IDF dev and IDF int. 
--- applications/nublado/README.md | 2 +- applications/nublado/values-idfdev.yaml | 2 -- applications/nublado/values-idfint.yaml | 2 -- applications/nublado/values.yaml | 2 +- 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 356b7da88b..4db75789bd 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -82,7 +82,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | | jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | | jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"0.3.2"` | Tag of image to use for JupyterHub | +| jupyterhub.hub.image.tag | string | `"0.5.0"` | Tag of image to use for JupyterHub | | jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | | jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | | jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 207abf5468..5ec2b3ba5f 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -69,8 +69,6 @@ controller: server: "10.87.86.26" jupyterhub: hub: - image: - tag: "0.5.0" db: url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" upgrade: true diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml 
index f91a516f8e..0a27c25b73 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -89,8 +89,6 @@ controller: jupyterhub: hub: - image: - tag: "0.5.0" config: ServerApp: shutdown_no_activity_timeout: 432000 diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 94e6d5bc8a..ca8e248481 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -304,7 +304,7 @@ jupyterhub: name: ghcr.io/lsst-sqre/rsp-restspawner # -- Tag of image to use for JupyterHub - tag: 0.3.2 + tag: 0.5.0 # -- Resource limits and requests resources: From d0e75fd0ae3e92f30d0a8e3e8fb91d769b542875 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Fri, 27 Oct 2023 12:15:15 -0700 Subject: [PATCH 173/588] [DM-40944] Enable the rest of the environments Since there's nothing there now, there's nothing wrong with getting it deployed everywhere, it's not going to break anything. --- applications/siav2/values-idfint.yaml | 0 applications/siav2/values-idfprod.yaml | 0 applications/siav2/values-usdfprod.yaml | 2 ++ environments/values-idfint.yaml | 1 + environments/values-idfprod.yaml | 1 + environments/values-usdfprod.yaml | 1 + 6 files changed, 5 insertions(+) create mode 100644 applications/siav2/values-idfint.yaml create mode 100644 applications/siav2/values-idfprod.yaml create mode 100644 applications/siav2/values-usdfprod.yaml diff --git a/applications/siav2/values-idfint.yaml b/applications/siav2/values-idfint.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/siav2/values-idfprod.yaml b/applications/siav2/values-idfprod.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/siav2/values-usdfprod.yaml b/applications/siav2/values-usdfprod.yaml new file mode 100644 index 0000000000..995d3094f2 --- /dev/null +++ b/applications/siav2/values-usdfprod.yaml @@ -0,0 +1,2 @@ +tapService: "live" +obsCoreTable: "oga.ObsCore" diff --git 
a/environments/values-idfint.yaml b/environments/values-idfint.yaml index f07106e224..acb57ee1ba 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -17,6 +17,7 @@ applications: portal: true postgres: true sasquatch: true + siav2: true ssotap: true production-tools: true semaphore: true diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 4ad3b30065..283ae161dc 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -12,6 +12,7 @@ applications: postgres: true semaphore: true sherlock: true + siav2: true squareone: true ssotap: true tap: true diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index b32ee742bc..6859d536c4 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -19,6 +19,7 @@ applications: postgres: true sasquatch: true semaphore: true + siav2: true ssotap: true squareone: true strimzi: true From 17aa1c059d391e296ffd5291642b35fc42a6dff6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:26:26 +0000 Subject: [PATCH 174/588] chore(deps): update gcr.io/cloudsql-docker/gce-proxy docker tag to v1.33.13 --- applications/gafaelfawr/values.yaml | 2 +- applications/nublado/values.yaml | 2 +- applications/sqlproxy-cross-project/values.yaml | 2 +- applications/times-square/values.yaml | 2 +- applications/vo-cutouts/values.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index e0b94b3655..1acaf0696a 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -310,7 +310,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.12" + tag: "1.33.13" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git 
a/applications/nublado/values.yaml b/applications/nublado/values.yaml index ca8e248481..a74e9796aa 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -469,7 +469,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.12" + tag: "1.33.13" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index 6f0f0f421d..d410ae7bc4 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.33.12" + tag: "1.33.13" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index eace86d016..694c677029 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -126,7 +126,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.12" + tag: "1.33.13" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index a3251b6057..68daf673e2 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.12" + tag: "1.33.13" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" From 784270828056aacea9bc83b8d455395e13890aed Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:31:12 +0000 Subject: [PATCH 175/588] [neophile] Update 
dependencies - Update frozen Python dependencies --- requirements/dev.txt | 460 ++++++------------------------------------ requirements/main.txt | 54 ++--- 2 files changed, 91 insertions(+), 423 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 724bf6fb04..062ab3d6ef 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -24,9 +24,9 @@ autodoc-pydantic==2.0.1 \ --hash=sha256:7a125a4ff18e4903e27be71e4ddb3269380860eacab4a584d6cc2e212fa96991 \ --hash=sha256:d3c302fdb6d37edb5b721f0f540252fa79cea7018bc1a9a85bf70f33a68b0ce4 # via -r requirements/dev.in -babel==2.13.0 \ - --hash=sha256:04c3e2d28d2b7681644508f836be388ae49e0cfe91465095340395b60d00f210 \ - --hash=sha256:fbfcae1575ff78e26c7449136f1abbefc3c13ce542eeb13d43d50d8b047216ec +babel==2.13.1 \ + --hash=sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900 \ + --hash=sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed # via sphinx beautifulsoup4==4.12.2 \ --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ @@ -143,60 +143,6 @@ click==8.1.7 \ # -c requirements/main.txt # documenteer # sphinx-click -contourpy==1.1.1 \ - --hash=sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6 \ - --hash=sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33 \ - --hash=sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8 \ - --hash=sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d \ - --hash=sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d \ - --hash=sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c \ - --hash=sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf \ - --hash=sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e \ - --hash=sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e \ - 
--hash=sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163 \ - --hash=sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532 \ - --hash=sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2 \ - --hash=sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8 \ - --hash=sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1 \ - --hash=sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b \ - --hash=sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9 \ - --hash=sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916 \ - --hash=sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23 \ - --hash=sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb \ - --hash=sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a \ - --hash=sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e \ - --hash=sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442 \ - --hash=sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684 \ - --hash=sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34 \ - --hash=sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d \ - --hash=sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d \ - --hash=sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9 \ - --hash=sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45 \ - --hash=sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718 \ - --hash=sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab \ - --hash=sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3 \ - --hash=sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae \ - --hash=sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb \ - 
--hash=sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5 \ - --hash=sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba \ - --hash=sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0 \ - --hash=sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217 \ - --hash=sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887 \ - --hash=sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887 \ - --hash=sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62 \ - --hash=sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431 \ - --hash=sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b \ - --hash=sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce \ - --hash=sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b \ - --hash=sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f \ - --hash=sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85 \ - --hash=sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e \ - --hash=sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7 \ - --hash=sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251 \ - --hash=sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970 \ - --hash=sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0 \ - --hash=sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7 - # via matplotlib coverage[toml]==7.3.2 \ --hash=sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1 \ --hash=sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63 \ @@ -253,10 +199,6 @@ coverage[toml]==7.3.2 \ # via # -r requirements/dev.in # pytest-cov -cycler==0.12.1 \ - --hash=sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30 \ - 
--hash=sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c - # via matplotlib diagrams==0.23.3 \ --hash=sha256:543c707c36a2c896dfdf8f23e993a9c7ae48bb1a667f6baf19151eb98e57a134 \ --hash=sha256:c497094f9d3600a94bdcfb62b6daf331d2eb7f9b355246e548dae7a4b5c97be0 @@ -265,71 +207,29 @@ distlib==0.3.7 \ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -documenteer[guide]==1.0.0a10 \ - --hash=sha256:01d56f716e9b8b303eb71fb25f354ba5554adbdc03f60b8bf0d155367b661c0f \ - --hash=sha256:c43505700b99873d431571249e0ded9c59faeb76860e099bc362b6ed8fe3e43e +documenteer[guide]==1.0.0a12 \ + --hash=sha256:34d8d6358f5e30fc279c711dbce453ee100cbe0c89e8cd26e0a926d86e0e97ac \ + --hash=sha256:ac80a724b287b41d48e1f63cabc98fe91c0aee13ea48ebec710ec79c6295cfe2 # via # -r requirements/dev.in # documenteer -docutils==0.19 \ - --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ - --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc +docutils==0.20.1 \ + --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \ + --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b # via + # documenteer # myst-parser # pybtex-docutils # pydata-sphinx-theme # sphinx # sphinx-click # sphinx-jinja + # sphinx-prompt # sphinxcontrib-bibtex -filelock==3.12.4 \ - --hash=sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4 \ - --hash=sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd +filelock==3.13.0 \ + --hash=sha256:63c6052c82a1a24c873a549fbd39a26982e8f35a3016da231ead11a5be9dad44 \ + --hash=sha256:a552f4fde758f4eab33191e9548f671970f8b06d436d31388c9aa1e5861a710f # via virtualenv -fonttools==4.43.1 \ - --hash=sha256:10003ebd81fec0192c889e63a9c8c63f88c7d72ae0460b7ba0cd2a1db246e5ad \ - 
--hash=sha256:10b3922875ffcba636674f406f9ab9a559564fdbaa253d66222019d569db869c \ - --hash=sha256:13a9a185259ed144def3682f74fdcf6596f2294e56fe62dfd2be736674500dba \ - --hash=sha256:17dbc2eeafb38d5d0e865dcce16e313c58265a6d2d20081c435f84dc5a9d8212 \ - --hash=sha256:18a2477c62a728f4d6e88c45ee9ee0229405e7267d7d79ce1f5ce0f3e9f8ab86 \ - --hash=sha256:18eefac1b247049a3a44bcd6e8c8fd8b97f3cad6f728173b5d81dced12d6c477 \ - --hash=sha256:1952c89a45caceedf2ab2506d9a95756e12b235c7182a7a0fff4f5e52227204f \ - --hash=sha256:1cf9e974f63b1080b1d2686180fc1fbfd3bfcfa3e1128695b5de337eb9075cef \ - --hash=sha256:1e09da7e8519e336239fbd375156488a4c4945f11c4c5792ee086dd84f784d02 \ - --hash=sha256:2062542a7565091cea4cc14dd99feff473268b5b8afdee564f7067dd9fff5860 \ - --hash=sha256:25d3da8a01442cbc1106490eddb6d31d7dffb38c1edbfabbcc8db371b3386d72 \ - --hash=sha256:34f713dad41aa21c637b4e04fe507c36b986a40f7179dcc86402237e2d39dcd3 \ - --hash=sha256:360201d46165fc0753229afe785900bc9596ee6974833124f4e5e9f98d0f592b \ - --hash=sha256:3b7ad05b2beeebafb86aa01982e9768d61c2232f16470f9d0d8e385798e37184 \ - --hash=sha256:4c54466f642d2116686268c3e5f35ebb10e49b0d48d41a847f0e171c785f7ac7 \ - --hash=sha256:4d9740e3783c748521e77d3c397dc0662062c88fd93600a3c2087d3d627cd5e5 \ - --hash=sha256:4f88cae635bfe4bbbdc29d479a297bb525a94889184bb69fa9560c2d4834ddb9 \ - --hash=sha256:51669b60ee2a4ad6c7fc17539a43ffffc8ef69fd5dbed186a38a79c0ac1f5db7 \ - --hash=sha256:5db46659cfe4e321158de74c6f71617e65dc92e54980086823a207f1c1c0e24b \ - --hash=sha256:5f37e31291bf99a63328668bb83b0669f2688f329c4c0d80643acee6e63cd933 \ - --hash=sha256:6bb5ea9076e0e39defa2c325fc086593ae582088e91c0746bee7a5a197be3da0 \ - --hash=sha256:748015d6f28f704e7d95cd3c808b483c5fb87fd3eefe172a9da54746ad56bfb6 \ - --hash=sha256:7bbbf8174501285049e64d174e29f9578495e1b3b16c07c31910d55ad57683d8 \ - --hash=sha256:884ef38a5a2fd47b0c1291647b15f4e88b9de5338ffa24ee52c77d52b4dfd09c \ - --hash=sha256:8da417431bfc9885a505e86ba706f03f598c85f5a9c54f67d63e84b9948ce590 \ - 
--hash=sha256:95e974d70238fc2be5f444fa91f6347191d0e914d5d8ae002c9aa189572cc215 \ - --hash=sha256:9648518ef687ba818db3fcc5d9aae27a369253ac09a81ed25c3867e8657a0680 \ - --hash=sha256:9a2f0aa6ca7c9bc1058a9d0b35483d4216e0c1bbe3962bc62ce112749954c7b8 \ - --hash=sha256:9c36da88422e0270fbc7fd959dc9749d31a958506c1d000e16703c2fce43e3d0 \ - --hash=sha256:9c60ecfa62839f7184f741d0509b5c039d391c3aff71dc5bc57b87cc305cff3b \ - --hash=sha256:9f727c3e3d08fd25352ed76cc3cb61486f8ed3f46109edf39e5a60fc9fecf6ca \ - --hash=sha256:a7a06f8d95b7496e53af80d974d63516ffb263a468e614978f3899a6df52d4b3 \ - --hash=sha256:ad0b3f6342cfa14be996971ea2b28b125ad681c6277c4cd0fbdb50340220dfb6 \ - --hash=sha256:b2adca1b46d69dce4a37eecc096fe01a65d81a2f5c13b25ad54d5430ae430b13 \ - --hash=sha256:b84a1c00f832feb9d0585ca8432fba104c819e42ff685fcce83537e2e7e91204 \ - --hash=sha256:bb6d2f8ef81ea076877d76acfb6f9534a9c5f31dc94ba70ad001267ac3a8e56f \ - --hash=sha256:bf11e2cca121df35e295bd34b309046c29476ee739753bc6bc9d5050de319273 \ - --hash=sha256:d21099b411e2006d3c3e1f9aaf339e12037dbf7bf9337faf0e93ec915991f43b \ - --hash=sha256:d4071bd1c183b8d0b368cc9ed3c07a0f6eb1bdfc4941c4c024c49a35429ac7cd \ - --hash=sha256:e117a92b07407a061cde48158c03587ab97e74e7d73cb65e6aadb17af191162a \ - --hash=sha256:f7a58eb5e736d7cf198eee94844b81c9573102ae5989ebcaa1d1a37acd04b33d \ - --hash=sha256:fe9b1ec799b6086460a7480e0f55c447b1aca0a4eecc53e444f639e967348896 - # via matplotlib gitdb==4.0.11 \ --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ --hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b @@ -346,9 +246,9 @@ graphviz==0.20.1 \ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8 # via diagrams -identify==2.5.30 \ - --hash=sha256:afe67f26ae29bab007ec21b03d4114f41316ab9dd15aa8736a167481e108da54 \ - --hash=sha256:f302a4256a15c849b91cfcdcec052a8ce914634b2f77ae87dad29cd749f2d88d 
+identify==2.5.31 \ + --hash=sha256:7736b3c7a28233637e3c36550646fc6389bedd74ae84cb788200cc8e2dd60b75 \ + --hash=sha256:90199cb9e7bd3c5407a9b7e81b4abec4bb9d249991c79439ec8af740afc6293d # via pre-commit idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ @@ -382,112 +282,6 @@ jsonschema-specifications==2023.7.1 \ --hash=sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1 \ --hash=sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb # via jsonschema -kiwisolver==1.4.5 \ - --hash=sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf \ - --hash=sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e \ - --hash=sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af \ - --hash=sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f \ - --hash=sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046 \ - --hash=sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3 \ - --hash=sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5 \ - --hash=sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71 \ - --hash=sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee \ - --hash=sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3 \ - --hash=sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9 \ - --hash=sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b \ - --hash=sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985 \ - --hash=sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea \ - --hash=sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16 \ - --hash=sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89 \ - --hash=sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c \ - 
--hash=sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9 \ - --hash=sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712 \ - --hash=sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342 \ - --hash=sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a \ - --hash=sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958 \ - --hash=sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d \ - --hash=sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a \ - --hash=sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130 \ - --hash=sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff \ - --hash=sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898 \ - --hash=sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b \ - --hash=sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f \ - --hash=sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265 \ - --hash=sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93 \ - --hash=sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929 \ - --hash=sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635 \ - --hash=sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709 \ - --hash=sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b \ - --hash=sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb \ - --hash=sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a \ - --hash=sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920 \ - --hash=sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e \ - --hash=sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544 \ - --hash=sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45 \ - 
--hash=sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390 \ - --hash=sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77 \ - --hash=sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355 \ - --hash=sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff \ - --hash=sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4 \ - --hash=sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7 \ - --hash=sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20 \ - --hash=sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c \ - --hash=sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162 \ - --hash=sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228 \ - --hash=sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437 \ - --hash=sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc \ - --hash=sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a \ - --hash=sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901 \ - --hash=sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4 \ - --hash=sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770 \ - --hash=sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525 \ - --hash=sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad \ - --hash=sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a \ - --hash=sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29 \ - --hash=sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90 \ - --hash=sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250 \ - --hash=sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d \ - --hash=sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3 \ - 
--hash=sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54 \ - --hash=sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f \ - --hash=sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1 \ - --hash=sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da \ - --hash=sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238 \ - --hash=sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa \ - --hash=sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523 \ - --hash=sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0 \ - --hash=sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205 \ - --hash=sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3 \ - --hash=sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4 \ - --hash=sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac \ - --hash=sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9 \ - --hash=sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb \ - --hash=sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced \ - --hash=sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd \ - --hash=sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0 \ - --hash=sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da \ - --hash=sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18 \ - --hash=sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9 \ - --hash=sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276 \ - --hash=sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333 \ - --hash=sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b \ - --hash=sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db \ - 
--hash=sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126 \ - --hash=sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9 \ - --hash=sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09 \ - --hash=sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0 \ - --hash=sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec \ - --hash=sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7 \ - --hash=sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff \ - --hash=sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9 \ - --hash=sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192 \ - --hash=sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8 \ - --hash=sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d \ - --hash=sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6 \ - --hash=sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797 \ - --hash=sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892 \ - --hash=sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f - # via matplotlib latexcodec==2.0.1 \ --hash=sha256:2aa2551c373261cefe2ad3a8953a6d6533e68238d180eb4bb91d7964adb3fe9a \ --hash=sha256:c277a193638dc7683c4c30f6684e3db728a06efb0dc9cf346db8bd0aa6c5d271 @@ -568,36 +362,6 @@ markupsafe==2.1.3 \ # via # -c requirements/main.txt # jinja2 -matplotlib==3.8.0 \ - --hash=sha256:061ee58facb3580cd2d046a6d227fb77e9295599c5ec6ad069f06b5821ad1cfc \ - --hash=sha256:0b11f354aae62a2aa53ec5bb09946f5f06fc41793e351a04ff60223ea9162955 \ - --hash=sha256:0d5ee602ef517a89d1f2c508ca189cfc395dd0b4a08284fb1b97a78eec354644 \ - --hash=sha256:0e723f5b96f3cd4aad99103dc93e9e3cdc4f18afdcc76951f4857b46f8e39d2d \ - --hash=sha256:23ed11654fc83cd6cfdf6170b453e437674a050a452133a064d47f2f1371f8d3 \ - 
--hash=sha256:2ea6886e93401c22e534bbfd39201ce8931b75502895cfb115cbdbbe2d31f287 \ - --hash=sha256:31e793c8bd4ea268cc5d3a695c27b30650ec35238626961d73085d5e94b6ab68 \ - --hash=sha256:36eafe2128772195b373e1242df28d1b7ec6c04c15b090b8d9e335d55a323900 \ - --hash=sha256:3cc3776836d0f4f22654a7f2d2ec2004618d5cf86b7185318381f73b80fd8a2d \ - --hash=sha256:5dc945a9cb2deb7d197ba23eb4c210e591d52d77bf0ba27c35fc82dec9fa78d4 \ - --hash=sha256:5de39dc61ca35342cf409e031f70f18219f2c48380d3886c1cf5ad9f17898e06 \ - --hash=sha256:60a6e04dfd77c0d3bcfee61c3cd335fff1b917c2f303b32524cd1235e194ef99 \ - --hash=sha256:6c49a2bd6981264bddcb8c317b6bd25febcece9e2ebfcbc34e7f4c0c867c09dc \ - --hash=sha256:6f25ffb6ad972cdffa7df8e5be4b1e3cadd2f8d43fc72085feb1518006178394 \ - --hash=sha256:7b37b74f00c4cb6af908cb9a00779d97d294e89fd2145ad43f0cdc23f635760c \ - --hash=sha256:7f54b9fb87ca5acbcdd0f286021bedc162e1425fa5555ebf3b3dfc167b955ad9 \ - --hash=sha256:87df75f528020a6299f76a1d986c0ed4406e3b2bd44bc5e306e46bca7d45e53e \ - --hash=sha256:90d74a95fe055f73a6cd737beecc1b81c26f2893b7a3751d52b53ff06ca53f36 \ - --hash=sha256:a33bd3045c7452ca1fa65676d88ba940867880e13e2546abb143035fa9072a9d \ - --hash=sha256:c3499c312f5def8f362a2bf761d04fa2d452b333f3a9a3f58805273719bf20d9 \ - --hash=sha256:c4940bad88a932ddc69734274f6fb047207e008389489f2b6f77d9ca485f0e7a \ - --hash=sha256:d670b9348e712ec176de225d425f150dc8e37b13010d85233c539b547da0be39 \ - --hash=sha256:dae97fdd6996b3a25da8ee43e3fc734fff502f396801063c6b76c20b56683196 \ - --hash=sha256:dd386c80a98b5f51571b9484bf6c6976de383cd2a8cd972b6a9562d85c6d2087 \ - --hash=sha256:df8505e1c19d5c2c26aff3497a7cbd3ccfc2e97043d1e4db3e76afa399164b69 \ - --hash=sha256:eee482731c8c17d86d9ddb5194d38621f9b0f0d53c99006275a12523ab021732 \ - --hash=sha256:f691b4ef47c7384d0936b2e8ebdeb5d526c81d004ad9403dfb9d4c76b9979a93 \ - --hash=sha256:f8b5a1bf27d078453aa7b5b27f52580e16360d02df6d3dc9504f3d2ce11f6309 - # via sphinxext-opengraph mdit-py-plugins==0.4.0 \ 
--hash=sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9 \ --hash=sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b @@ -647,106 +411,13 @@ nodeenv==1.8.0 \ --hash=sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2 \ --hash=sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec # via pre-commit -numpy==1.26.1 \ - --hash=sha256:06934e1a22c54636a059215d6da99e23286424f316fddd979f5071093b648668 \ - --hash=sha256:1c59c046c31a43310ad0199d6299e59f57a289e22f0f36951ced1c9eac3665b9 \ - --hash=sha256:1d1bd82d539607951cac963388534da3b7ea0e18b149a53cf883d8f699178c0f \ - --hash=sha256:1e11668d6f756ca5ef534b5be8653d16c5352cbb210a5c2a79ff288e937010d5 \ - --hash=sha256:3649d566e2fc067597125428db15d60eb42a4e0897fc48d28cb75dc2e0454e53 \ - --hash=sha256:59227c981d43425ca5e5c01094d59eb14e8772ce6975d4b2fc1e106a833d5ae2 \ - --hash=sha256:6081aed64714a18c72b168a9276095ef9155dd7888b9e74b5987808f0dd0a974 \ - --hash=sha256:6965888d65d2848e8768824ca8288db0a81263c1efccec881cb35a0d805fcd2f \ - --hash=sha256:76ff661a867d9272cd2a99eed002470f46dbe0943a5ffd140f49be84f68ffc42 \ - --hash=sha256:78ca54b2f9daffa5f323f34cdf21e1d9779a54073f0018a3094ab907938331a2 \ - --hash=sha256:82e871307a6331b5f09efda3c22e03c095d957f04bf6bc1804f30048d0e5e7af \ - --hash=sha256:8ab9163ca8aeb7fd32fe93866490654d2f7dda4e61bc6297bf72ce07fdc02f67 \ - --hash=sha256:9696aa2e35cc41e398a6d42d147cf326f8f9d81befcb399bc1ed7ffea339b64e \ - --hash=sha256:97e5d6a9f0702c2863aaabf19f0d1b6c2628fbe476438ce0b5ce06e83085064c \ - --hash=sha256:9f42284ebf91bdf32fafac29d29d4c07e5e9d1af862ea73686581773ef9e73a7 \ - --hash=sha256:a03fb25610ef560a6201ff06df4f8105292ba56e7cdd196ea350d123fc32e24e \ - --hash=sha256:a5b411040beead47a228bde3b2241100454a6abde9df139ed087bd73fc0a4908 \ - --hash=sha256:af22f3d8e228d84d1c0c44c1fbdeb80f97a15a0abe4f080960393a00db733b66 \ - --hash=sha256:afd5ced4e5a96dac6725daeb5242a35494243f2239244fad10a90ce58b071d24 \ - 
--hash=sha256:b9d45d1dbb9de84894cc50efece5b09939752a2d75aab3a8b0cef6f3a35ecd6b \ - --hash=sha256:bb894accfd16b867d8643fc2ba6c8617c78ba2828051e9a69511644ce86ce83e \ - --hash=sha256:c8c6c72d4a9f831f328efb1312642a1cafafaa88981d9ab76368d50d07d93cbe \ - --hash=sha256:cd7837b2b734ca72959a1caf3309457a318c934abef7a43a14bb984e574bbb9a \ - --hash=sha256:cdd9ec98f0063d93baeb01aad472a1a0840dee302842a2746a7a8e92968f9575 \ - --hash=sha256:d1cfc92db6af1fd37a7bb58e55c8383b4aa1ba23d012bdbba26b4bcca45ac297 \ - --hash=sha256:d1d2c6b7dd618c41e202c59c1413ef9b2c8e8a15f5039e344af64195459e3104 \ - --hash=sha256:d2984cb6caaf05294b8466966627e80bf6c7afd273279077679cb010acb0e5ab \ - --hash=sha256:d58e8c51a7cf43090d124d5073bc29ab2755822181fcad978b12e144e5e5a4b3 \ - --hash=sha256:d78f269e0c4fd365fc2992c00353e4530d274ba68f15e968d8bc3c69ce5f5244 \ - --hash=sha256:dcfaf015b79d1f9f9c9fd0731a907407dc3e45769262d657d754c3a028586124 \ - --hash=sha256:e44ccb93f30c75dfc0c3aa3ce38f33486a75ec9abadabd4e59f114994a9c4617 \ - --hash=sha256:e509cbc488c735b43b5ffea175235cec24bbc57b227ef1acc691725beb230d1c - # via - # contourpy - # matplotlib packaging==23.2 \ --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 # via - # matplotlib # pydata-sphinx-theme # pytest # sphinx -pillow==10.1.0 \ - --hash=sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d \ - --hash=sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de \ - --hash=sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616 \ - --hash=sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839 \ - --hash=sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099 \ - --hash=sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a \ - --hash=sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219 \ - 
--hash=sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106 \ - --hash=sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b \ - --hash=sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412 \ - --hash=sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b \ - --hash=sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7 \ - --hash=sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2 \ - --hash=sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7 \ - --hash=sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14 \ - --hash=sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f \ - --hash=sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27 \ - --hash=sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57 \ - --hash=sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262 \ - --hash=sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28 \ - --hash=sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610 \ - --hash=sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172 \ - --hash=sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273 \ - --hash=sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e \ - --hash=sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d \ - --hash=sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818 \ - --hash=sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f \ - --hash=sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9 \ - --hash=sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01 \ - --hash=sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7 \ - --hash=sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651 \ - 
--hash=sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312 \ - --hash=sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80 \ - --hash=sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666 \ - --hash=sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061 \ - --hash=sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b \ - --hash=sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992 \ - --hash=sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593 \ - --hash=sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4 \ - --hash=sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db \ - --hash=sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba \ - --hash=sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd \ - --hash=sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e \ - --hash=sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212 \ - --hash=sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb \ - --hash=sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2 \ - --hash=sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34 \ - --hash=sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256 \ - --hash=sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f \ - --hash=sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2 \ - --hash=sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38 \ - --hash=sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996 \ - --hash=sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a \ - --hash=sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793 - # via matplotlib platformdirs==3.11.0 \ 
--hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e @@ -903,13 +574,9 @@ pygments==2.16.1 \ # rich # sphinx # sphinx-prompt -pyparsing==3.1.1 \ - --hash=sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb \ - --hash=sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db - # via matplotlib -pytest==7.4.2 \ - --hash=sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002 \ - --hash=sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069 +pytest==7.4.3 \ + --hash=sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac \ + --hash=sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5 # via # -r requirements/dev.in # pytest-cov @@ -922,12 +589,6 @@ pytest-pretty==1.2.0 \ --hash=sha256:105a355f128e392860ad2c478ae173ff96d2f03044692f9818ff3d49205d3a60 \ --hash=sha256:6f79122bf53864ae2951b6c9e94d7a06a87ef753476acd4588aeac018f062036 # via -r requirements/dev.in -python-dateutil==2.8.2 \ - --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ - --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 - # via - # -c requirements/main.txt - # matplotlib python-dotenv==1.0.0 \ --hash=sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba \ --hash=sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a @@ -1110,24 +771,24 @@ rpds-py==0.10.6 \ # via # jsonschema # referencing -ruff==0.1.1 \ - --hash=sha256:2a909d3930afdbc2e9fd893b0034479e90e7981791879aab50ce3d9f55205bd6 \ - --hash=sha256:2d68367d1379a6b47e61bc9de144a47bcdb1aad7903bbf256e4c3d31f11a87ae \ - --hash=sha256:3305d1cb4eb8ff6d3e63a48d1659d20aab43b49fe987b3ca4900528342367145 \ - --hash=sha256:3521bf910104bf781e6753282282acc145cbe3eff79a1ce6b920404cd756075a \ - 
--hash=sha256:3ff3006c97d9dc396b87fb46bb65818e614ad0181f059322df82bbfe6944e264 \ - --hash=sha256:620d4b34302538dbd8bbbe8fdb8e8f98d72d29bd47e972e2b59ce6c1e8862257 \ - --hash=sha256:6aa7e63c3852cf8fe62698aef31e563e97143a4b801b57f920012d0e07049a8d \ - --hash=sha256:8f5b24daddf35b6c207619301170cae5d2699955829cda77b6ce1e5fc69340df \ - --hash=sha256:b7cdc893aef23ccc14c54bd79a8109a82a2c527e11d030b62201d86f6c2b81c5 \ - --hash=sha256:ba3208543ab91d3e4032db2652dcb6c22a25787b85b8dc3aeff084afdc612e5c \ - --hash=sha256:bc11955f6ce3398d2afe81ad7e49d0ebf0a581d8bcb27b8c300281737735e3a3 \ - --hash=sha256:c34ae501d0ec71acf19ee5d4d889e379863dcc4b796bf8ce2934a9357dc31db7 \ - --hash=sha256:c90461ae4abec261609e5ea436de4a4b5f2822921cf04c16d2cc9327182dbbcc \ - --hash=sha256:cbbd8eead88ea83a250499074e2a8e9d80975f0b324b1e2e679e4594da318c25 \ - --hash=sha256:d3f9ac658ba29e07b95c80fa742b059a55aefffa8b1e078bc3c08768bdd4b11a \ - --hash=sha256:e140bd717c49164c8feb4f65c644046fe929c46f42493672853e3213d7bdbce2 \ - --hash=sha256:f4780e2bb52f3863a565ec3f699319d3493b83ff95ebbb4993e59c62aaf6e75e +ruff==0.1.3 \ + --hash=sha256:0b6c55f5ef8d9dd05b230bb6ab80bc4381ecb60ae56db0330f660ea240cb0d4a \ + --hash=sha256:0f75e670d529aa2288cd00fc0e9b9287603d95e1536d7a7e0cafe00f75e0dd9d \ + --hash=sha256:12fd53696c83a194a2db7f9a46337ce06445fb9aa7d25ea6f293cf75b21aca9f \ + --hash=sha256:1c595193881922cc0556a90f3af99b1c5681f0c552e7a2a189956141d8666fe8 \ + --hash=sha256:2e3de9ed2e39160800281848ff4670e1698037ca039bda7b9274f849258d26ce \ + --hash=sha256:3ba6145369a151401d5db79f0a47d50e470384d0d89d0d6f7fab0b589ad07c34 \ + --hash=sha256:3e7afcbdcfbe3399c34e0f6370c30f6e529193c731b885316c5a09c9e4317eef \ + --hash=sha256:4874c165f96c14a00590dcc727a04dca0cfd110334c24b039458c06cf78a672e \ + --hash=sha256:76dd49f6cd945d82d9d4a9a6622c54a994689d8d7b22fa1322983389b4892e20 \ + --hash=sha256:7a18df6638cec4a5bd75350639b2bb2a2366e01222825562c7346674bdceb7ea \ + 
--hash=sha256:918b454bc4f8874a616f0d725590277c42949431ceb303950e87fef7a7d94cb3 \ + --hash=sha256:b46d43d51f7061652eeadb426a9e3caa1e0002470229ab2fc19de8a7b0766901 \ + --hash=sha256:b8afeb9abd26b4029c72adc9921b8363374f4e7edb78385ffaa80278313a15f9 \ + --hash=sha256:ca3cf365bf32e9ba7e6db3f48a4d3e2c446cd19ebee04f05338bc3910114528b \ + --hash=sha256:d8859605e729cd5e53aa38275568dbbdb4fe882d2ea2714c5453b678dca83784 \ + --hash=sha256:dc3ec4edb3b73f21b4aa51337e16674c752f1d76a4a543af56d7d04e97769613 \ + --hash=sha256:eec2dd31eed114e48ea42dbffc443e9b7221976554a504767ceaee3dd38edeb8 # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -1136,7 +797,6 @@ six==1.16.0 \ # -c requirements/main.txt # latexcodec # pybtex - # python-dateutil # sphinxcontrib-redoc smmap==5.0.1 \ --hash=sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62 \ @@ -1152,9 +812,9 @@ soupsieve==2.5 \ --hash=sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690 \ --hash=sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7 # via beautifulsoup4 -sphinx==6.2.1 \ - --hash=sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b \ - --hash=sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912 +sphinx==7.2.6 \ + --hash=sha256:1e09160a40b956dc623c910118fa636da93bd3ca0b9876a7b3df90f07d691560 \ + --hash=sha256:9a5160e1ea90688d5963ba09a2dcd8bdd526620edbb65c328728f1b2228d5ab5 # via # autodoc-pydantic # documenteer @@ -1176,9 +836,10 @@ sphinx==6.2.1 \ # sphinxcontrib-redoc # sphinxcontrib-serializinghtml # sphinxext-opengraph -sphinx-autodoc-typehints==1.22 \ - --hash=sha256:71fca2d5eee9b034204e4c686ab20b4d8f5eb9409396216bcae6c87c38e18ea6 \ - --hash=sha256:ef4a8b9d52de66065aa7d3adfabf5a436feb8a2eff07c2ddc31625d8807f2b69 + # sphinxext-rediraffe +sphinx-autodoc-typehints==1.24.0 \ + --hash=sha256:6a73c0c61a9144ce2ed5ef2bed99d615254e5005c1cc32002017d72d69fb70e6 \ + 
--hash=sha256:94e440066941bb237704bb880785e2d05e8ae5406c88674feefbb938ad0dc6af # via documenteer sphinx-automodapi==0.16.0 \ --hash=sha256:68fc47064804604b90aa27c047016e86aaf970981d90a0082d5b5dd2e9d38afd \ @@ -1203,17 +864,20 @@ sphinx-diagrams==0.4.0 \ sphinx-jinja==2.0.2 \ --hash=sha256:705ebeb9b7a6018ca3f93724315a7c1effa6ba3db44d630e7eaaa15e4ac081a8 \ --hash=sha256:c6232b59a894139770be1dc6d0b00a379e4288ce78157904e1f8473dea3e0718 - # via -r requirements/dev.in -sphinx-prompt==1.5.0 \ - --hash=sha256:fa4e90d8088b5a996c76087d701fc7e31175f8b9dc4aab03a507e45051067162 + # via + # -r requirements/dev.in + # documenteer +sphinx-prompt==1.8.0 \ + --hash=sha256:369ecc633f0711886f9b3a078c83264245be1adf46abeeb9b88b5519e4b51007 \ + --hash=sha256:47482f86fcec29662fdfd23e7c04ef03582714195d01f5d565403320084372ed # via documenteer sphinxcontrib-applehelp==1.0.7 \ --hash=sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d \ --hash=sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa # via sphinx -sphinxcontrib-bibtex==2.5.0 \ - --hash=sha256:71b42e5db0e2e284f243875326bf9936aa9a763282277d75048826fef5b00eaa \ - --hash=sha256:748f726eaca6efff7731012103417ef130ecdcc09501b4d0c54283bf5f059f76 +sphinxcontrib-bibtex==2.6.1 \ + --hash=sha256:046b49f070ae5276af34c1b8ddb9bc9562ef6de2f7a52d37a91cb8e53f54b863 \ + --hash=sha256:094c772098fe6b030cda8618c45722b2957cad0c04f328ba2b154aa08dfe720a # via documenteer sphinxcontrib-devhelp==1.0.5 \ --hash=sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212 \ @@ -1246,9 +910,13 @@ sphinxcontrib-serializinghtml==1.1.9 \ --hash=sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54 \ --hash=sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1 # via sphinx -sphinxext-opengraph==0.8.2 \ - --hash=sha256:45a693b6704052c426576f0a1f630649c55b4188bc49eb63e9587e24a923db39 \ - --hash=sha256:6a05bdfe5176d9dd0a1d58a504f17118362ab976631213cd36fb44c4c40544c9 
+sphinxext-opengraph==0.9.0 \ + --hash=sha256:4e57e25b6d56f47b9c06a5a5d68a2a00ed3577c8a39e459b52118c6bfe5e8c8b \ + --hash=sha256:ab1eb2ffb531fb85b695e719dba7b0245b0643f6b6c0d1cc258d15a81e72a9f1 + # via documenteer +sphinxext-rediraffe==0.2.7 \ + --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ + --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer typed-ast==1.5.5 \ --hash=sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10 \ @@ -1316,9 +984,9 @@ urllib3==2.0.7 \ # -c requirements/main.txt # documenteer # requests -virtualenv==20.24.5 \ - --hash=sha256:b80039f280f4919c77b30f1c23294ae357c4c8701042086e3fc005963e4e537b \ - --hash=sha256:e8361967f6da6fbdf1426483bfe9fca8287c242ac0bc30429905721cefbff752 +virtualenv==20.24.6 \ + --hash=sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af \ + --hash=sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381 # via pre-commit # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/main.txt b/requirements/main.txt index 5815e59056..e9b390bdb0 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -197,37 +197,37 @@ click==8.1.7 \ # via # -r requirements/main.in # safir -cryptography==41.0.4 \ - --hash=sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67 \ - --hash=sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311 \ - --hash=sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8 \ - --hash=sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13 \ - --hash=sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143 \ - --hash=sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f \ - --hash=sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829 \ - --hash=sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd \ 
- --hash=sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397 \ - --hash=sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac \ - --hash=sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d \ - --hash=sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a \ - --hash=sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839 \ - --hash=sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e \ - --hash=sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6 \ - --hash=sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9 \ - --hash=sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860 \ - --hash=sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca \ - --hash=sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91 \ - --hash=sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d \ - --hash=sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714 \ - --hash=sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb \ - --hash=sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f +cryptography==41.0.5 \ + --hash=sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf \ + --hash=sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84 \ + --hash=sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e \ + --hash=sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8 \ + --hash=sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7 \ + --hash=sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1 \ + --hash=sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88 \ + --hash=sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86 \ + 
--hash=sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179 \ + --hash=sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81 \ + --hash=sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20 \ + --hash=sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548 \ + --hash=sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d \ + --hash=sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d \ + --hash=sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5 \ + --hash=sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1 \ + --hash=sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147 \ + --hash=sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936 \ + --hash=sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797 \ + --hash=sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696 \ + --hash=sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72 \ + --hash=sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da \ + --hash=sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723 # via # -r requirements/main.in # pyjwt # safir -fastapi==0.104.0 \ - --hash=sha256:456482c1178fb7beb2814b88e1885bc49f9a81f079665016feffe3e1c6a7663e \ - --hash=sha256:9c44de45693ae037b0c6914727a29c49a40668432b67c859a87851fc6a7b74c6 +fastapi==0.104.1 \ + --hash=sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241 \ + --hash=sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ From c943bd6873bc4f4bc530580df8bca35c98c10d40 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 15:19:15 +0000 Subject: [PATCH 176/588] chore(deps): update helm 
release argo-cd to v5.49.0 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index a36043c030..15c53df09e 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.46.8 + version: 5.49.0 repository: https://argoproj.github.io/argo-helm From bc91660ae5ae223d13bb52efd0b33fc1d138dd0c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 15:38:42 +0000 Subject: [PATCH 177/588] chore(deps): update helm release cert-manager to v1.13.2 --- applications/cert-manager/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml index fb4aac4cea..5d9d661070 100644 --- a/applications/cert-manager/Chart.yaml +++ b/applications/cert-manager/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/cert-manager/cert-manager dependencies: - name: cert-manager - version: v1.13.1 + version: v1.13.2 repository: https://charts.jetstack.io From a2643f1cabc7f0a9ad888e504c09be9f69528994 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 30 Oct 2023 08:39:48 -0700 Subject: [PATCH 178/588] Update Helm docs --- applications/gafaelfawr/README.md | 2 +- applications/nublado/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 5ad40992fa..602e3e7995 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,7 +17,7 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, 
used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 4db75789bd..8cd1df9b55 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -17,7 +17,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a separate service, because shoehorning it into Zero to Jupyterhub's extraContainers looks messy, and it's not necessary that it be very performant. 
| | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 62e46350bd..f25abc80ce 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.33.12"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.33.13"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 0c06f66b42..b03ab53b4c 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index b73bd138aa..7542c448f0 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.12"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and 
ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | From f624db79b7191bb8e8cf4e0012c215878e11775a Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 30 Oct 2023 16:13:04 -0700 Subject: [PATCH 179/588] Bump version of Gafaelfawr Will hopefully recover more smoothly from Redis outages. --- applications/gafaelfawr/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index 1a50d2ec1f..ded5a49d2a 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,7 +5,7 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.5.0 +appVersion: 9.5.1 dependencies: - name: redis From 4e2ea0dff714fd3cd19045a975cb2564f09f5326 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 12:32:02 +0000 Subject: [PATCH 180/588] Update quay.io/influxdb/chronograf Docker tag to v1.10.2 --- applications/sasquatch/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index e79f893e22..fba996135b 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -271,7 +271,7 @@ chronograf: # -- Chronograf image tag. image: repository: "quay.io/influxdb/chronograf" - tag: 1.9.4 + tag: 1.10.2 # -- Chronograf data persistence configuration. 
persistence: enabled: true From b238810fe4f8df44f92cd5a5ac9996b836dcd427 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 31 Oct 2023 15:40:02 -0700 Subject: [PATCH 181/588] Update Helm docs --- applications/sasquatch/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index aec807a257..ddaa0e1eb3 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -15,7 +15,7 @@ Rubin Observatory's telemetry service. | chronograf.enabled | bool | `true` | Enable Chronograf. | | chronograf.env | object | `{"BASE_PATH":"/chronograf","CUSTOM_AUTO_REFRESH":"1s=1000","HOST_PAGE_DISABLED":true}` | Chronograf environment variables. | | chronograf.envFromSecret | string | `"sasquatch"` | Chronograf secrets, expected keys generic_client_id, generic_client_secret and token_secret. | -| chronograf.image | object | `{"repository":"quay.io/influxdb/chronograf","tag":"1.9.4"}` | Chronograf image tag. | +| chronograf.image | object | `{"repository":"quay.io/influxdb/chronograf","tag":"1.10.2"}` | Chronograf image tag. | | chronograf.ingress | object | disabled | Chronograf ingress configuration. | | chronograf.persistence | object | `{"enabled":true,"size":"100Gi"}` | Chronograf data persistence configuration. 
| | chronograf.resources.limits.cpu | int | `4` | | From c7e17234c6c0438095bdb8a1d07f9d18264e6054 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 12:33:47 +0000 Subject: [PATCH 182/588] Update obsidiandynamics/kafdrop Docker tag to v4 --- applications/sasquatch/charts/kafdrop/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/kafdrop/values.yaml b/applications/sasquatch/charts/kafdrop/values.yaml index 41fc5e93c6..1a47532bf9 100644 --- a/applications/sasquatch/charts/kafdrop/values.yaml +++ b/applications/sasquatch/charts/kafdrop/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafdrop image version. - tag: 3.31.0 + tag: 4.0.0 kafka: # -- Bootstrap list of Kafka host/port pairs From 8a42253da8c6d8c0580543dd6e0e476466e8f5bf Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 31 Oct 2023 15:49:59 -0700 Subject: [PATCH 183/588] Update Helm docs --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/kafdrop/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index aec807a257..1ecf8642da 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -118,7 +118,7 @@ Rubin Observatory's telemetry service. | kafdrop.host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). | | kafdrop.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | kafdrop.image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. | -| kafdrop.image.tag | string | `"3.31.0"` | Kafdrop image version. | +| kafdrop.image.tag | string | `"4.0.0"` | Kafdrop image version. | | kafdrop.ingress.annotations | object | `{}` | Ingress annotations. | | kafdrop.ingress.enabled | bool | `false` | Enable Ingress. 
This should be true to create an ingress rule for the application. | | kafdrop.ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/kafdrop/README.md b/applications/sasquatch/charts/kafdrop/README.md index dea43e6e00..c29f57a87c 100644 --- a/applications/sasquatch/charts/kafdrop/README.md +++ b/applications/sasquatch/charts/kafdrop/README.md @@ -16,7 +16,7 @@ A subchart to deploy the Kafdrop UI for Sasquatch. | host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. | -| image.tag | string | `"3.31.0"` | Kafdrop image version. | +| image.tag | string | `"4.0.0"` | Kafdrop image version. | | ingress.annotations | object | `{}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | From 81521f11cd9c45d48ceefbccfaa2e21e2290217b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 09:44:04 +0000 Subject: [PATCH 184/588] Update confluentinc/cp-kafka-rest Docker tag to v7.5.1 --- applications/sasquatch/charts/rest-proxy/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml index 67305a2e9d..88cec0ae6d 100644 --- a/applications/sasquatch/charts/rest-proxy/values.yaml +++ b/applications/sasquatch/charts/rest-proxy/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafka REST proxy image tag. 
- tag: 7.4.1 + tag: 7.5.1 service: # -- Kafka REST proxy service port From 0f8e8abb964dc6759a60e274284d7543d218439e Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 31 Oct 2023 15:57:54 -0700 Subject: [PATCH 185/588] Update Helm docs --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/rest-proxy/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index ddaa0e1eb3..1aa414937d 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -206,7 +206,7 @@ Rubin Observatory's telemetry service. | rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| rest-proxy.image.tag | string | `"7.4.1"` | Kafka REST proxy image tag. | +| rest-proxy.image.tag | string | `"7.5.1"` | Kafka REST proxy image tag. | | rest-proxy.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | rest-proxy.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | rest-proxy.ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index acd9f5a244..d5c3a77706 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch. | heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. 
| | image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| image.tag | string | `"7.4.1"` | Kafka REST proxy image tag. | +| image.tag | string | `"7.5.1"` | Kafka REST proxy image tag. | | ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | From f545e083b6f32f9f3e41f6206ca0d09797d38df1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:26:30 +0000 Subject: [PATCH 186/588] chore(deps): update kapacitor docker tag to v1.7.1 --- applications/sasquatch/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index fba996135b..a6c1fe3da8 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -305,7 +305,7 @@ source-kapacitor: # -- Kapacitor image tag. image: repository: kapacitor - tag: 1.7.0 + tag: 1.7.1 # -- Chronograf data persistence configuration. persistence: enabled: true @@ -331,7 +331,7 @@ kapacitor: # -- Kapacitor image tag. image: repository: kapacitor - tag: 1.7.0 + tag: 1.7.1 # -- Chronograf data persistence configuration. persistence: enabled: true From 56a23a95fef5e87dc417e0cebe66623165d6bb9a Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 31 Oct 2023 15:53:39 -0700 Subject: [PATCH 187/588] Update Helm docs --- applications/sasquatch/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 2caadbb5ff..e122361ac6 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -77,7 +77,7 @@ Rubin Observatory's telemetry service. 
| kapacitor.enabled | bool | `true` | Enable Kapacitor. | | kapacitor.envVars | object | `{"KAPACITOR_SLACK_ENABLED":true}` | Kapacitor environment variables. | | kapacitor.existingSecret | string | `"sasquatch"` | InfluxDB credentials, use influxdb-user and influxdb-password keys from secret. | -| kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.0"}` | Kapacitor image tag. | +| kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.1"}` | Kapacitor image tag. | | kapacitor.influxURL | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB connection URL. | | kapacitor.persistence | object | `{"enabled":true,"size":"100Gi"}` | Chronograf data persistence configuration. | | kapacitor.resources.limits.cpu | int | `4` | | @@ -101,7 +101,7 @@ Rubin Observatory's telemetry service. | source-kapacitor.enabled | bool | `false` | Enable Kapacitor. | | source-kapacitor.envVars | object | `{"KAPACITOR_SLACK_ENABLED":true}` | Kapacitor environment variables. | | source-kapacitor.existingSecret | string | `"sasquatch"` | InfluxDB credentials, use influxdb-user and influxdb-password keys from secret. | -| source-kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.0"}` | Kapacitor image tag. | +| source-kapacitor.image | object | `{"repository":"kapacitor","tag":"1.7.1"}` | Kapacitor image tag. | | source-kapacitor.influxURL | string | `"http://sasquatch-influxdb-staging.sasquatch:8086"` | InfluxDB connection URL. | | source-kapacitor.persistence | object | `{"enabled":true,"size":"100Gi"}` | Chronograf data persistence configuration. 
| | source-kapacitor.resources.limits.cpu | int | `4` | | From f53642e72acf9d04ce9b10c614b4baf9fd632a23 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 1 Nov 2023 16:42:25 -0700 Subject: [PATCH 188/588] [DM-41493] Rollout tap-postgres 1.14.0 --- charts/cadc-tap/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 82f61b3465..28f09526f9 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -77,7 +77,7 @@ config: # -- Tag of tap image to use # @default -- Latest release - tag: "1.13.0" + tag: "1.14.0" qserv: # -- QServ hostname:port to connect to From fd50638b5830c22a8f7688f9949d8304b04b98e5 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 2 Nov 2023 12:48:16 -0700 Subject: [PATCH 189/588] Use upstream Telegraf nightly images --- applications/sasquatch/README.md | 4 ++-- .../sasquatch/charts/telegraf-kafka-consumer/README.md | 4 ++-- .../sasquatch/charts/telegraf-kafka-consumer/values.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index e122361ac6..e83c175355 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -353,8 +353,8 @@ Rubin Observatory's telemetry service. | telegraf-kafka-consumer.env[3].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | telegraf-kafka-consumer.env[3].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | telegraf-kafka-consumer.image.pullPolicy | string | IfNotPresent | Image pull policy. | -| telegraf-kafka-consumer.image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. | -| telegraf-kafka-consumer.image.tag | string | `"avrounions"` | Telegraf image tag. | +| telegraf-kafka-consumer.image.repo | string | `"quay.io/influxdb/telegraf-nightly:latest"` | Telegraf image repository. 
| +| telegraf-kafka-consumer.image.tag | string | `"latest"` | Telegraf image tag. | | telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. | | telegraf-kafka-consumer.influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index 50cff0795c..bd40c0c75e 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -23,8 +23,8 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | env[3].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | env[3].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | image.pullPolicy | string | IfNotPresent | Image pull policy. | -| image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. | -| image.tag | string | `"avrounions"` | Telegraf image tag. | +| image.repo | string | `"quay.io/influxdb/telegraf-nightly:latest"` | Telegraf image repository. | +| image.tag | string | `"latest"` | Telegraf image tag. | | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. | | influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. 
| diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 1c261665e5..b716e36d35 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -4,9 +4,9 @@ enabled: false image: # -- Telegraf image repository. - repo: "lsstsqre/telegraf" + repo: "quay.io/influxdb/telegraf-nightly:latest" # -- Telegraf image tag. - tag: "avrounions" + tag: "latest" # -- Image pull policy. # @default -- IfNotPresent pullPolicy: "Always" From afc9c18445f38041ddc9b7c6f8ef97fd371715bc Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 31 Oct 2023 11:42:52 -0700 Subject: [PATCH 190/588] Change over to new secret framework for IDF prod --- applications/datalinker/values-idfprod.yaml | 2 ++ applications/nublado/secrets-idfprod.yaml | 19 ++++++++++++++++++ applications/nublado/values-idfprod.yaml | 12 +++++++++-- .../values-idfprod.yaml | 14 +++++++++++++ applications/vo-cutouts/secrets-idfprod.yaml | 20 +++++++++++++++++++ environments/values-idfprod.yaml | 6 ++++-- 6 files changed, 69 insertions(+), 4 deletions(-) create mode 100644 applications/nublado/secrets-idfprod.yaml create mode 100644 applications/vo-cutouts/secrets-idfprod.yaml diff --git a/applications/datalinker/values-idfprod.yaml b/applications/datalinker/values-idfprod.yaml index e69de29bb2..288a3da54a 100644 --- a/applications/datalinker/values-idfprod.yaml +++ b/applications/datalinker/values-idfprod.yaml @@ -0,0 +1,2 @@ +config: + separateSecrets: true diff --git a/applications/nublado/secrets-idfprod.yaml b/applications/nublado/secrets-idfprod.yaml new file mode 100644 index 0000000000..6f66967c08 --- /dev/null +++ b/applications/nublado/secrets-idfprod.yaml @@ -0,0 +1,19 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. 
+ onepassword: + encoded: true +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. +"butler-hmac-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the private + key syntax used for HMACs. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + onepassword: + encoded: true diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index d7b2704849..8ca6f99f67 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -81,8 +81,8 @@ jupyterhub: ServerApp: shutdown_no_activity_timeout: 432000 db: - url: "postgresql://nublado3@postgres.postgres/nublado3" - + url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" + upgrade: true cull: enabled: true users: false @@ -90,3 +90,11 @@ jupyterhub: timeout: 432000 every: 300 maxAge: 2160000 +hub: + internalDatabase: false +cloudsql: + enabled: true + instanceConnectionName: "science-platform-stable-0c29612b:us-central1:science-platform-int-8f439af2" + serviceAccount: "nublado@science-platform-stable-0c29612b.iam.gserviceaccount.com" +secrets: + templateSecrets: true diff --git a/applications/vault-secrets-operator/values-idfprod.yaml b/applications/vault-secrets-operator/values-idfprod.yaml index e69de29bb2..1e40e6f933 100644 --- a/applications/vault-secrets-operator/values-idfprod.yaml +++ b/applications/vault-secrets-operator/values-idfprod.yaml @@ -0,0 +1,14 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-credentials + key: VAULT_SECRET_ID + vault: + authMethod: approle diff --git a/applications/vo-cutouts/secrets-idfprod.yaml 
b/applications/vo-cutouts/secrets-idfprod.yaml new file mode 100644 index 0000000000..57998942f8 --- /dev/null +++ b/applications/vo-cutouts/secrets-idfprod.yaml @@ -0,0 +1,20 @@ +aws-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +google-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +postgres-credentials: + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 283ae161dc..2ff2dfe78a 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -1,7 +1,10 @@ butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-repos.yaml" fqdn: data.lsst.cloud name: idfprod -vaultPathPrefix: secret/k8s_operator/data.lsst.cloud +onepassword: + connectUrl: "https://roundtable.lsst.cloud/1password" + vaultTitle: "RSP data.lsst.cloud" +vaultPathPrefix: secret/phalanx/idfprod applications: datalinker: true @@ -9,7 +12,6 @@ applications: mobu: true nublado: true portal: true - postgres: true semaphore: true sherlock: true siav2: true From 6e8e944e087496ca426a9630a94a710dc12a8fc8 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 2 Nov 2023 15:34:34 -0700 Subject: [PATCH 191/588] enable schema update for nublado upgrade --- applications/nublado/values-usdfdev.yaml | 1 + applications/nublado/values-usdfint.yaml | 1 + applications/nublado/values-usdfprod.yaml | 1 + 3 files changed, 3 insertions(+) diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 
61d980448d..ea70b8ec7d 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -165,6 +165,7 @@ jupyterhub: baseUrl: "/nb" db: url: "postgresql://nublado3@postgres.postgres/nublado3" + upgrade: true cull: timeout: 432000 every: 300 diff --git a/applications/nublado/values-usdfint.yaml b/applications/nublado/values-usdfint.yaml index 8b24353338..66c9726d34 100644 --- a/applications/nublado/values-usdfint.yaml +++ b/applications/nublado/values-usdfint.yaml @@ -164,6 +164,7 @@ jupyterhub: baseUrl: "/nb" db: url: "postgresql://nublado3@postgres.postgres/nublado3" + upgrade: true cull: timeout: 432000 every: 300 diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index f8cc1660dc..e5e31c34e3 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -164,6 +164,7 @@ jupyterhub: hub: db: url: "postgresql://nublado3@postgres.postgres/nublado3" + upgrade: true cull: timeout: 432000 every: 300 From 885720d70d7a09a8295de8c7224a2c3c4e6e96d3 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 2 Nov 2023 15:36:51 -0700 Subject: [PATCH 192/588] Fix stable cloudsql uri --- applications/nublado/values-idfprod.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 8ca6f99f67..d11d632476 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -94,7 +94,7 @@ hub: internalDatabase: false cloudsql: enabled: true - instanceConnectionName: "science-platform-stable-0c29612b:us-central1:science-platform-int-8f439af2" - serviceAccount: "nublado@science-platform-stable-0c29612b.iam.gserviceaccount.com" + instanceConnectionName: "science-platform-stable-6994:us-central1:science-platform-stable-0c29612b" + serviceAccount: "nublado@science-platform-stable-6994.iam.gserviceaccount.com" secrets: 
templateSecrets: true From 5fb4ab37c7c32734c315dc9a7d5b82c03d3c2c39 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 3 Nov 2023 10:20:50 -0700 Subject: [PATCH 193/588] Ignore usdf-int URLs for link checking --- docs/documenteer.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/documenteer.toml b/docs/documenteer.toml index 8ebf66682a..b688c5b5e9 100644 --- a/docs/documenteer.toml +++ b/docs/documenteer.toml @@ -63,6 +63,7 @@ ignore = [ '^https://usdf-prompt-processing-dev.slac.stanford.edu', '^https://usdf-rsp.slac.stanford.edu', '^https://usdf-rsp-dev.slac.stanford.edu', + '^https://usdf-rsp-int.slac.stanford.edu', '^https://usdf-tel-rsp.slac.stanford.edu', '^https://github.com/orgs/', ] From 8d9caf3f9c09af99426f6cd1374af24394bd7124 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 3 Nov 2023 10:21:06 -0700 Subject: [PATCH 194/588] Inject butlerRepositoryIndex during linting Some applications inject butlerRepositoryIndex as a global. Add that to the globals injected via linting. --- docs/extras/schemas/environment.json | 28 ++++++++++++++-------------- src/phalanx/models/environments.py | 16 ++++++++++------ src/phalanx/services/application.py | 3 +++ 3 files changed, 27 insertions(+), 20 deletions(-) diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index df92ef86a8..bf0a22509e 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -26,7 +26,7 @@ }, "$id": "https://phalanx.lsst.io/schemas/environment.json", "additionalProperties": false, - "description": "Configuration for a Phalanx environment.\n\nThis is a model for the :file:`values-{environment}.yaml` files for each\nenvironment and is also used to validate those files. 
For the complete\nconfiguration for an environment, initialize this model with the merger of\n:file:`values.yaml` and :file:`values-{environment}.yaml`.", + "description": "Configuration for a Phalanx environment.\n\nThis is a model for the :file:`values-{environment}.yaml` files for each\nenvironment and is also used to validate those files. For the complete\nconfiguration for an environment, initialize this model with the merger of\n:file:`values.yaml` and :file:`values-{environment}.yaml`.\n\nFields listed here are not available to application linting. If the field\nvalue has to be injected during linting, the field needs to be defined in\n`EnvironmentBaseConfig` instead.", "properties": { "name": { "description": "Name of the environment", @@ -38,6 +38,19 @@ "title": "Domain name", "type": "string" }, + "butlerRepositoryIndex": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "URL to Butler repository index", + "title": "Butler repository index URL" + }, "onepassword": { "anyOf": [ { @@ -79,19 +92,6 @@ "title": "Enabled applications", "type": "object" }, - "butlerRepositoryIndex": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "URL to Butler repository index", - "title": "Butler repository index URL" - }, "repoUrl": { "anyOf": [ { diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index c7d7a3b346..cb0180af15 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -64,6 +64,12 @@ class EnvironmentBaseConfig(CamelCaseModel): ), ) + butler_repository_index: str | None = Field( + None, + title="Butler repository index URL", + description="URL to Butler repository index", + ) + onepassword: OnepasswordConfig | None = Field( None, title="1Password configuration", @@ -153,6 +159,10 @@ class EnvironmentConfig(EnvironmentBaseConfig): environment and is also used to validate those 
files. For the complete configuration for an environment, initialize this model with the merger of :file:`values.yaml` and :file:`values-{environment}.yaml`. + + Fields listed here are not available to application linting. If the field + value has to be injected during linting, the field needs to be defined in + `EnvironmentBaseConfig` instead. """ applications: dict[str, bool] = Field( @@ -161,12 +171,6 @@ class EnvironmentConfig(EnvironmentBaseConfig): description="List of applications and whether they are enabled", ) - butler_repository_index: str | None = Field( - None, - title="Butler repository index URL", - description="URL to Butler repository index", - ) - repo_url: str | None = Field( None, title="URL of Git repository", diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index 7b0890c578..eb1b63c684 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -286,6 +286,9 @@ def _build_injected_values( "global.baseUrl": f"https://{environment.fqdn}", "global.vaultSecretsPath": environment.vault_path_prefix, } + if environment.butler_repository_index: + butler_index = environment.butler_repository_index + values["global.butlerRepositoryIndex"] = butler_index # vault-secrets-operator gets the Vault host injected into it. 
Use the # existence of its subchart configuration tree as a cue to inject the From f20f447fee13c667d4baf9c49ecc8c5aefd53931 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 3 Nov 2023 10:22:33 -0700 Subject: [PATCH 195/588] Add siav2 to the application index --- docs/applications/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 18602ea46e..8a851c3267 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -36,6 +36,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde portal/index semaphore/index sherlock/index + siav2/index sqlproxy-cross-project/index squareone/index ssotap/index From 190aa3405e280b5fcb0ec9c74fbbab7890279c6d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 3 Nov 2023 10:25:13 -0700 Subject: [PATCH 196/588] Add usdfint environment documentation --- docs/environments/index.rst | 1 + docs/environments/usdfint/index.rst | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 docs/environments/usdfint/index.rst diff --git a/docs/environments/index.rst b/docs/environments/index.rst index c3018b31d2..fdc7accfc1 100644 --- a/docs/environments/index.rst +++ b/docs/environments/index.rst @@ -24,6 +24,7 @@ To learn more about operating a Phalanx environment, see the :doc:`/admin/index` summit/index tucson-teststand/index usdfdev/index + usdfint/index usdfprod/index usdfdev-alert-stream-broker/index usdfdev-prompt-processing/index diff --git a/docs/environments/usdfint/index.rst b/docs/environments/usdfint/index.rst new file mode 100644 index 0000000000..62fb9d5713 --- /dev/null +++ b/docs/environments/usdfint/index.rst @@ -0,0 +1,11 @@ +.. 
px-env:: usdfint + +############################################################### +usdfint — usdf-rsp-int.slac.stanford.edu (Integration for USDF) +############################################################### + +``usdfint`` is the integration environment for the Rubin Science Platform at the United States Data Facility (USDF) hosted at SLAC. +The primary use of ``usdfint`` is for Rubin construction and operations teams to integrate applications into the Rubin Science Platform that need to run at the USDF. + +.. jinja:: usdfint + :file: environments/_summary.rst.jinja From 48daf4638c7833545941e68827d4a78251462ee5 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 3 Nov 2023 10:43:11 -0700 Subject: [PATCH 197/588] Add more documentation tests Test that all environments are documented and linked in the index file, and that all applications are linked in the index file. These will probably be caught once we can re-enable fatal Sphinx warnings, but for right now PRs were merged failing these tests. 
--- tests/docs/applications_test.py | 19 +++++++++++++++++++ tests/docs/environments_test.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 tests/docs/environments_test.py diff --git a/tests/docs/applications_test.py b/tests/docs/applications_test.py index a3ec54fc13..de80c446de 100644 --- a/tests/docs/applications_test.py +++ b/tests/docs/applications_test.py @@ -23,3 +23,22 @@ def test_descriptions() -> None: description = m.group(1) m = re.match("[A-Z0-9]", description) assert m, f"Description must start with capital letter in {index_path}" + + +def test_applications_index() -> None: + """Ensure all applications are mentioned in the index.""" + doc_root = Path(__file__).parent.parent.parent / "docs" / "applications" + seen = set() + with (doc_root / "index.rst").open() as fh: + for line in fh: + if m := re.match("^ ([^/]+)/index$", line): + seen.add(m.group(1)) + root_path = Path(__file__).parent.parent.parent / "applications" + for application in root_path.iterdir(): + if not application.is_dir(): + continue + if application.name in ("fileservers", "nublado-users"): + continue + assert ( + application.name in seen + ), f"{application.name} not linked in docs/applications/index.rst" diff --git a/tests/docs/environments_test.py b/tests/docs/environments_test.py new file mode 100644 index 0000000000..fabbb52fab --- /dev/null +++ b/tests/docs/environments_test.py @@ -0,0 +1,32 @@ +"""Tests for the environment documentation pages.""" + +from __future__ import annotations + +import re +from pathlib import Path + + +def test_environments() -> None: + """Ensure all environments are documented.""" + doc_root = Path(__file__).parent.parent.parent / "docs" / "environments" + seen_dir = set() + for environment in doc_root.iterdir(): + if environment.is_dir(): + seen_dir.add(environment.name) + seen_index = set() + with (doc_root / "index.rst").open() as fh: + for line in fh: + if m := re.match("^ ([^/]+)/index$", line): + 
seen_index.add(m.group(1)) + root_path = Path(__file__).parent.parent.parent / "environments" + environments = [ + v.stem.removeprefix("values-") + for v in sorted(root_path.glob("values-*.yaml")) + ] + for environment_name in environments: + assert ( + environment_name in seen_dir + ), f"{environment_name} not documented in docs/environments" + assert ( + environment_name in seen_index + ), f"{environment_name} not linked in docs/environments/index.rst" From ebe99770e190ddc04d6017679161c32f398dc8d5 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Mon, 30 Oct 2023 13:14:20 -0700 Subject: [PATCH 198/588] Add Butler service from template --- applications/butler/.helmignore | 23 +++++++ applications/butler/Chart.yaml | 8 +++ applications/butler/README.md | 29 +++++++++ applications/butler/templates/_helpers.tpl | 26 ++++++++ applications/butler/templates/deployment.yaml | 59 +++++++++++++++++ applications/butler/templates/hpa.yaml | 28 ++++++++ applications/butler/templates/ingress.yaml | 31 +++++++++ .../butler/templates/networkpolicy.yaml | 21 ++++++ applications/butler/templates/service.yaml | 15 +++++ applications/butler/values.yaml | 64 +++++++++++++++++++ docs/applications/butler/index.rst | 16 +++++ docs/applications/butler/values.md | 12 ++++ environments/README.md | 1 + .../templates/butler-application.yaml | 34 ++++++++++ environments/values.yaml | 3 + 15 files changed, 370 insertions(+) create mode 100644 applications/butler/.helmignore create mode 100644 applications/butler/Chart.yaml create mode 100644 applications/butler/README.md create mode 100644 applications/butler/templates/_helpers.tpl create mode 100644 applications/butler/templates/deployment.yaml create mode 100644 applications/butler/templates/hpa.yaml create mode 100644 applications/butler/templates/ingress.yaml create mode 100644 applications/butler/templates/networkpolicy.yaml create mode 100644 applications/butler/templates/service.yaml create mode 100644 
applications/butler/values.yaml create mode 100644 docs/applications/butler/index.rst create mode 100644 docs/applications/butler/values.md create mode 100644 environments/templates/butler-application.yaml diff --git a/applications/butler/.helmignore b/applications/butler/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/butler/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml new file mode 100644 index 0000000000..b4b3e711ad --- /dev/null +++ b/applications/butler/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: Server for Butler data abstraction service +name: butler +sources: +- https://github.com/lsst-sqre/butler +type: application +version: 1.0.0 diff --git a/applications/butler/README.md b/applications/butler/README.md new file mode 100644 index 0000000000..3277ce8544 --- /dev/null +++ b/applications/butler/README.md @@ -0,0 +1,29 @@ +# butler + +Server for Butler data abstraction service + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the butler deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of butler deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of butler deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | +| global.baseUrl | string | 
Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the butler image | +| image.repository | string | `"ghcr.io/lsst-sqre/butler"` | Image to use in the butler deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the butler deployment pod | +| podAnnotations | object | `{}` | Annotations for the butler deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the butler deployment pod | +| tolerations | list | `[]` | Tolerations for the butler deployment pod | diff --git a/applications/butler/templates/_helpers.tpl b/applications/butler/templates/_helpers.tpl new file mode 100644 index 0000000000..01f242bfc7 --- /dev/null +++ b/applications/butler/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "butler.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "butler.labels" -}} +helm.sh/chart: {{ include "butler.chart" . }} +{{ include "butler.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "butler.selectorLabels" -}} +app.kubernetes.io/name: "butler" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml new file mode 100644 index 0000000000..da7c36b487 --- /dev/null +++ b/applications/butler/templates/deployment.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "butler" + labels: + {{- include "butler.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "butler.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "butler.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/applications/butler/templates/hpa.yaml b/applications/butler/templates/hpa.yaml new file mode 100644 index 0000000000..9eab162305 --- /dev/null +++ b/applications/butler/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "butler" + labels: + {{- include "butler.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "butler" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/butler/templates/ingress.yaml b/applications/butler/templates/ingress.yaml new file mode 100644 index 0000000000..74d54bed06 --- /dev/null +++ b/applications/butler/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "butler" + labels: + {{- include "butler.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + loginRedirect: true +template: + metadata: + name: "butler" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/butler" + pathType: "Prefix" + backend: + service: + name: "butler" + port: + number: 8080 diff --git a/applications/butler/templates/networkpolicy.yaml b/applications/butler/templates/networkpolicy.yaml new file mode 100644 index 0000000000..ebe6c44067 --- /dev/null +++ b/applications/butler/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "butler" +spec: + podSelector: + matchLabels: + {{- include "butler.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/butler/templates/service.yaml b/applications/butler/templates/service.yaml new file mode 100644 index 0000000000..4906d7b6d3 --- /dev/null +++ b/applications/butler/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "butler" + labels: + {{- include "butler.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "butler.selectorLabels" . | nindent 4 }} diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml new file mode 100644 index 0000000000..91e93b2648 --- /dev/null +++ b/applications/butler/values.yaml @@ -0,0 +1,64 @@ +# Default values for butler. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the butler deployment + repository: "ghcr.io/lsst-sqre/butler" + + # -- Pull policy for the butler image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of butler deployment + enabled: false + + # -- Minimum number of butler deployment pods + minReplicas: 1 + + # -- Maximum number of butler deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of butler deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the butler deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the butler deployment pod +resources: {} + +# -- Node selection rules for the butler deployment pod +nodeSelector: {} + +# -- Tolerations for the butler deployment pod +tolerations: [] + +# -- Affinity rules for the butler deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/docs/applications/butler/index.rst b/docs/applications/butler/index.rst new file mode 100644 index 0000000000..09f9332e15 --- /dev/null +++ b/docs/applications/butler/index.rst @@ -0,0 +1,16 @@ +.. px-app:: butler + +################################################### +butler — Server for Butler data abstraction service +################################################### + +.. jinja:: butler + :file: applications/_summary.rst.jinja + +Guides +====== + +.. 
toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/butler/values.md b/docs/applications/butler/values.md new file mode 100644 index 0000000000..ba64489d92 --- /dev/null +++ b/docs/applications/butler/values.md @@ -0,0 +1,12 @@ +```{px-app-values} butler +``` + +# butler Helm values reference + +Helm values reference table for the {px-app}`butler` application. + +```{include} ../../../applications/butler/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/environments/README.md b/environments/README.md index 216d0b7c8b..3d3edff57e 100644 --- a/environments/README.md +++ b/environments/README.md @@ -7,6 +7,7 @@ | applications.alert-stream-broker | bool | `false` | Enable the alert-stream-broker application | | applications.argo-workflows | bool | `false` | Enable the argo-workflows application | | applications.argocd | bool | `true` | Enable the Argo CD application. This must be enabled for all environments and is present here only because it makes parsing easier | +| applications.butler | bool | `false` | Enable the butler application | | applications.cachemachine | bool | `false` | Enable the cachemachine application (required by nublado2) | | applications.cert-manager | bool | `true` | Enable the cert-manager application, required unless the environment makes separate arrangements to inject a current TLS certificate | | applications.datalinker | bool | `false` | Eanble the datalinker application | diff --git a/environments/templates/butler-application.yaml b/environments/templates/butler-application.yaml new file mode 100644 index 0000000000..e1bdb6050e --- /dev/null +++ b/environments/templates/butler-application.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "butler") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "butler" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "butler" + namespace: "argocd" + finalizers: + - 
"resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "butler" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/butler" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values.yaml b/environments/values.yaml index e399ca8e7a..55d6dcac79 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -36,6 +36,9 @@ applications: # environments and is present here only because it makes parsing easier argocd: true + # -- Enable the butler application + butler: false + # -- Enable the cachemachine application (required by nublado2) cachemachine: false From 53719ce6b72ae86cd7c8ebd40cdc6dfa4cbac022 Mon Sep 17 00:00:00 2001 From: "David H. 
Irving" Date: Tue, 31 Oct 2023 14:47:19 -0700 Subject: [PATCH 199/588] Set application name for butler --- applications/butler/Chart.yaml | 2 +- applications/butler/README.md | 4 ++-- applications/butler/values.yaml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml index b4b3e711ad..b1b9f2052b 100644 --- a/applications/butler/Chart.yaml +++ b/applications/butler/Chart.yaml @@ -3,6 +3,6 @@ appVersion: 0.1.0 description: Server for Butler data abstraction service name: butler sources: -- https://github.com/lsst-sqre/butler +- https://github.com/lsst/daf_butler type: application version: 1.0.0 diff --git a/applications/butler/README.md b/applications/butler/README.md index 3277ce8544..f434685428 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -4,7 +4,7 @@ Server for Butler data abstraction service ## Source Code -* +* ## Values @@ -19,7 +19,7 @@ Server for Butler data abstraction service | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the butler image | -| image.repository | string | `"ghcr.io/lsst-sqre/butler"` | Image to use in the butler deployment | +| image.repository | string | `"ghcr.io/lsst/daf_butler"` | Image to use in the butler deployment | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nodeSelector | object | `{}` | Node selection rules for the butler deployment pod | diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index 91e93b2648..f3c4657c48 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -7,7 +7,7 @@ replicaCount: 1 image: # -- Image to use in the butler deployment - repository: "ghcr.io/lsst-sqre/butler" + repository: "ghcr.io/lsst/daf_butler" # -- Pull policy for the butler image pullPolicy: "IfNotPresent" From dcd722c46ca33dbc3126642eb9c3f09d8a2bd3a1 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Tue, 31 Oct 2023 14:48:41 -0700 Subject: [PATCH 200/588] Add secrets needed by butler --- applications/butler/secrets.yml | 20 +++++++ applications/butler/templates/_helpers.tpl | 18 +++++++ applications/butler/templates/deployment.yaml | 54 +++++++++++++++++++ 3 files changed, 92 insertions(+) create mode 100644 applications/butler/secrets.yml diff --git a/applications/butler/secrets.yml b/applications/butler/secrets.yml new file mode 100644 index 0000000000..1b2d88511e --- /dev/null +++ b/applications/butler/secrets.yml @@ -0,0 +1,20 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. 
+ copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/butler/templates/_helpers.tpl b/applications/butler/templates/_helpers.tpl index 01f242bfc7..636ac425d9 100644 --- a/applications/butler/templates/_helpers.tpl +++ b/applications/butler/templates/_helpers.tpl @@ -24,3 +24,21 @@ Selector labels app.kubernetes.io/name: "butler" app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "butler.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml index da7c36b487..447ce546c2 100644 --- a/applications/butler/templates/deployment.yaml +++ b/applications/butler/templates/deployment.yaml @@ -41,6 +41,60 @@ spec: port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} + env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: "/opt/lsst/butler/secrets/aws-credentials.ini" + - name: PGPASSFILE + value: "/opt/lsst/butler/secrets/postgres-credentials.txt" + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/opt/lsst/butler/secrets/butler-gcs-creds.json" + - name: S3_ENDPOINT_URL + value: "https://storage.googleapis.com" + volumeMounts: + - name: "butler-secrets" + mountPath: "/opt/lsst/butler/secrets" + readOnly: true + volumes: + # butler-secrets-raw pulls in the secrets from the vault as files. 
+ # These files are owned by root and group/world readable. + # This volume is not used directly by the container running the actual + # Butler application. + - name: "butler-secrets-raw" + secret: + secretName: {{ include "butler.fullname" . }} + # Postgres will not use a pgpass file (postgres-credentials.txt in the + # vault) if it is group/world writeable or owned by a different user. + # So the initContainer below copies the files from butler-secrets-raw + # to butler-secrets, changing the owner and permissions. + # This volume is the one used by the container running the actual + # Butler application. + - name: "butler-secrets" + emptyDir: {} + initContainers: + # To deal with the Postgres file permission issue mentioned above, + # copy the secrets from butler-secrets-raw to butler-secrets. + # This initContainer definition is borrowed from obsloctap's + # deployment.yaml. + - name: fix-secret-permissions + image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/sh" + - "-c" + - | + cp -RL /tmp/butler-secrets-raw/* /opt/lsst/butler/secrets/ + chmod 0400 /opt/lsst/butler/secrets/* + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + volumeMounts: + - name: "butler-secrets" + mountPath: "/opt/lsst/butler/secrets" + - name: "butler-secrets-raw" + mountPath: "/tmp/butler-secrets-raw" + readOnly: true securityContext: runAsNonRoot: true runAsUser: 1000 runAsGroup: 1000 From d5b833d257a9ec11ee794cdb1afc8137387ca3a0 Mon Sep 17 00:00:00 2001 From: "David H.
Irving" Date: Tue, 31 Oct 2023 15:16:09 -0700 Subject: [PATCH 201/588] Add path to Butler configuration file --- applications/butler/README.md | 1 + applications/butler/templates/deployment.yaml | 2 ++ applications/butler/values-idfdev.yaml | 5 +++++ applications/butler/values.yaml | 5 +++++ 4 files changed, 13 insertions(+) create mode 100644 applications/butler/values-idfdev.yaml diff --git a/applications/butler/README.md b/applications/butler/README.md index f434685428..bc0ed146c8 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -15,6 +15,7 @@ Server for Butler data abstraction service | autoscaling.maxReplicas | int | `100` | Maximum number of butler deployment pods | | autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | +| config.configUri | string | `""` | URI to the file specifying the DirectButler configuration to be used by the butler server | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml index 447ce546c2..eb22fc1cc3 100644 --- a/applications/butler/templates/deployment.yaml +++ b/applications/butler/templates/deployment.yaml @@ -50,6 +50,8 @@ spec: value: "/opt/lsst/butler/secrets/butler-gcs-creds.json" - name: S3_ENDPOINT_URL value: "https://storage.googleapis.com" + - name: BUTLER_SERVER_CONFIG_URI + value: {{ .Values.config.configUri | quote }} volumeMounts: - name: "butler-secrets" mountPath: "/opt/lsst/butler/secrets" diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml new file mode 100644 index 0000000000..0e46edf219 --- /dev/null +++ 
b/applications/butler/values-idfdev.yaml @@ -0,0 +1,5 @@ +image: + pullPolicy: Always + +config: + configUri: "s3://butler-us-central1-panda-dev/dc2/butler-external-idfdev.yaml" diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index f3c4657c48..3ee5d74d8b 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -62,3 +62,8 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + +config: + # -- URI to the file specifying the DirectButler configuration to be used + # by the butler server + configUri: "" From 3851351a915b62e4fb768e4a5a0ddebf06c03ae4 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Tue, 31 Oct 2023 16:06:49 -0700 Subject: [PATCH 202/588] Add documentation for butler --- docs/applications/butler/index.rst | 9 ++++++++- docs/applications/index.rst | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/applications/butler/index.rst b/docs/applications/butler/index.rst index 09f9332e15..d050a3b5f0 100644 --- a/docs/applications/butler/index.rst +++ b/docs/applications/butler/index.rst @@ -4,6 +4,13 @@ butler — Server for Butler data abstraction service ################################################### +The Butler server provides a web service for querying the LSST data release +products and retrieving the associated images and data files. It is intended +to become the primary backend for the `Butler python library +`_ for community science use cases. + +This service is in early development and currently considered experimental. + .. jinja:: butler :file: applications/_summary.rst.jinja @@ -13,4 +20,4 @@ Guides .. 
toctree:: :maxdepth: 1 - values \ No newline at end of file + values diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 8a851c3267..b250024550 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -23,6 +23,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde :maxdepth: 1 :caption: Rubin Science Platform + butler/index cachemachine/index datalinker/index hips/index From 09e9fca252adeb566b613a132f7ee1ef0c3a631a Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Tue, 31 Oct 2023 16:13:03 -0700 Subject: [PATCH 203/588] Enable butler for idfdev --- environments/values-idfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index d81d5dd034..97eb0ca6f0 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -8,6 +8,7 @@ vaultPathPrefix: secret/phalanx/idfdev applications: argo-workflows: true + butler: true datalinker: true hips: true mobu: true From 3a59800d0c37da6a78ef25cd6ae4efad94ce6e3b Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Thu, 2 Nov 2023 15:38:13 -0700 Subject: [PATCH 204/588] Set butler appVersion to a real Docker tag --- applications/butler/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml index b1b9f2052b..15503e4066 100644 --- a/applications/butler/Chart.yaml +++ b/applications/butler/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.1.0 +appVersion: 0.0.1 description: Server for Butler data abstraction service name: butler sources: From a666b4a269370585392c8dbd6fc85415c2c1c989 Mon Sep 17 00:00:00 2001 From: "David H. 
Irving" Date: Fri, 3 Nov 2023 09:47:17 -0700 Subject: [PATCH 205/588] Configure GafaelfawrIngress for Butler --- applications/butler/templates/ingress.yaml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/applications/butler/templates/ingress.yaml b/applications/butler/templates/ingress.yaml index 74d54bed06..430c5067ca 100644 --- a/applications/butler/templates/ingress.yaml +++ b/applications/butler/templates/ingress.yaml @@ -9,7 +9,14 @@ config: scopes: all: - "read:image" - loginRedirect: true + loginRedirect: false + # Butler needs a delegated token so that we can query Gafaelfawr for the + # user's group membership + delegate: + internal: + service: "butler" + scopes: [] + template: metadata: name: "butler" @@ -22,7 +29,7 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/butler" + - path: "/api/butler" pathType: "Prefix" backend: service: From da09f30caf9eba1930a299ffaef506adbb5a6dda Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 09:50:24 +0000 Subject: [PATCH 206/588] chore(deps): update helm release vault-secrets-operator to v2.5.4 --- applications/vault-secrets-operator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml index db63283353..18b5b79169 100644 --- a/applications/vault-secrets-operator/Chart.yaml +++ b/applications/vault-secrets-operator/Chart.yaml @@ -5,7 +5,7 @@ sources: - https://github.com/ricoberger/vault-secrets-operator dependencies: - name: vault-secrets-operator - version: 2.5.3 + version: 2.5.4 repository: https://ricoberger.github.io/helm-charts/ annotations: phalanx.lsst.io/docs: | From 60fd76810e1e8fb8647f95a027042627f80a4c54 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 09:50:28 
+0000 Subject: [PATCH 207/588] chore(deps): update obsidiandynamics/kafdrop docker tag to v4.0.1 --- applications/sasquatch/charts/kafdrop/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/kafdrop/values.yaml b/applications/sasquatch/charts/kafdrop/values.yaml index 1a47532bf9..6ca6ecdb13 100644 --- a/applications/sasquatch/charts/kafdrop/values.yaml +++ b/applications/sasquatch/charts/kafdrop/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafdrop image version. - tag: 4.0.0 + tag: 4.0.1 kafka: # -- Bootstrap list of Kafka host/port pairs From bba424cd9ff38e4414615a64c9acfc6089d849d0 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 12:32:39 +0000 Subject: [PATCH 208/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 448 +++++++++++++++++++++--------------------- requirements/main.txt | 182 ++++++++--------- 2 files changed, 315 insertions(+), 315 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 062ab3d6ef..a456e28090 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -42,97 +42,97 @@ cfgv==3.4.0 \ --hash=sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9 \ --hash=sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560 # via pre-commit -charset-normalizer==3.3.1 \ - --hash=sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5 \ - --hash=sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93 \ - --hash=sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a \ - --hash=sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d \ - --hash=sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c \ - --hash=sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1 \ - 
--hash=sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58 \ - --hash=sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2 \ - --hash=sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557 \ - --hash=sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147 \ - --hash=sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041 \ - --hash=sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2 \ - --hash=sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2 \ - --hash=sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7 \ - --hash=sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296 \ - --hash=sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690 \ - --hash=sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67 \ - --hash=sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57 \ - --hash=sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597 \ - --hash=sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846 \ - --hash=sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b \ - --hash=sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97 \ - --hash=sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c \ - --hash=sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62 \ - --hash=sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa \ - --hash=sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f \ - --hash=sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e \ - --hash=sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821 \ - --hash=sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3 \ - --hash=sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4 \ - 
--hash=sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb \ - --hash=sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727 \ - --hash=sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514 \ - --hash=sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d \ - --hash=sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761 \ - --hash=sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55 \ - --hash=sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f \ - --hash=sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c \ - --hash=sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034 \ - --hash=sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6 \ - --hash=sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae \ - --hash=sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1 \ - --hash=sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14 \ - --hash=sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1 \ - --hash=sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228 \ - --hash=sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708 \ - --hash=sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48 \ - --hash=sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f \ - --hash=sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5 \ - --hash=sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f \ - --hash=sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4 \ - --hash=sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8 \ - --hash=sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff \ - --hash=sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61 \ - 
--hash=sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b \ - --hash=sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97 \ - --hash=sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b \ - --hash=sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605 \ - --hash=sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728 \ - --hash=sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d \ - --hash=sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c \ - --hash=sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf \ - --hash=sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673 \ - --hash=sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1 \ - --hash=sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b \ - --hash=sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41 \ - --hash=sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8 \ - --hash=sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f \ - --hash=sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4 \ - --hash=sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008 \ - --hash=sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9 \ - --hash=sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5 \ - --hash=sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f \ - --hash=sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e \ - --hash=sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273 \ - --hash=sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45 \ - --hash=sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e \ - --hash=sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656 \ - 
--hash=sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e \ - --hash=sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c \ - --hash=sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2 \ - --hash=sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72 \ - --hash=sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056 \ - --hash=sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397 \ - --hash=sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42 \ - --hash=sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd \ - --hash=sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3 \ - --hash=sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213 \ - --hash=sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf \ - --hash=sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67 +charset-normalizer==3.3.2 \ + --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ + --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ + --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ + --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ + --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ + --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ + --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ + --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ + --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ + --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ + --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ + 
--hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ + --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ + --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ + --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ + --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ + --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ + --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ + --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ + --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ + --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ + --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ + --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ + --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ + --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ + --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ + --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ + --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ + --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ + --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ + --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ + --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ + --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ + --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ + --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ + 
--hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ + --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ + --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ + --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ + --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ + --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ + --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ + --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ + --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ + --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ + --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ + --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ + --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ + --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ + --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ + --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ + --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ + --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ + --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ + --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ + --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ + --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ + --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ + --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ + 
--hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ + --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ + --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ + --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ + --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ + --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ + --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ + --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ + --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ + --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ + --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ + --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ + --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ + --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ + --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ + --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ + --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ + --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ + --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ + --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ + --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ + --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ + --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ + --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ + 
--hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ + --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ + --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ + --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ + --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ + --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ + --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 # via # -c requirements/main.txt # requests @@ -199,17 +199,17 @@ coverage[toml]==7.3.2 \ # via # -r requirements/dev.in # pytest-cov -diagrams==0.23.3 \ - --hash=sha256:543c707c36a2c896dfdf8f23e993a9c7ae48bb1a667f6baf19151eb98e57a134 \ - --hash=sha256:c497094f9d3600a94bdcfb62b6daf331d2eb7f9b355246e548dae7a4b5c97be0 +diagrams==0.23.4 \ + --hash=sha256:1ba69d98fcf8d768dbddf07d2c77aba6cc95c2e6f90f37146c04c96bc6765450 \ + --hash=sha256:b7ada0b119b5189dd021b1dc1467fad3704737452bb18b1e06d05e4d1fa48ed7 # via sphinx-diagrams distlib==0.3.7 \ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -documenteer[guide]==1.0.0a12 \ - --hash=sha256:34d8d6358f5e30fc279c711dbce453ee100cbe0c89e8cd26e0a926d86e0e97ac \ - --hash=sha256:ac80a724b287b41d48e1f63cabc98fe91c0aee13ea48ebec710ec79c6295cfe2 +documenteer[guide]==1.0.0a13 \ + --hash=sha256:3d2acd02110751166ea7e8dd7cebed723074afe4346079a816a3f9cd6297d24e \ + --hash=sha256:4dbac173d529d23127138fd45a187a426746ec5f11f94f0b8a02fe088a266381 # via # -r requirements/dev.in # documenteer @@ -226,9 +226,9 @@ docutils==0.20.1 \ # sphinx-jinja # sphinx-prompt # sphinxcontrib-bibtex -filelock==3.13.0 \ - --hash=sha256:63c6052c82a1a24c873a549fbd39a26982e8f35a3016da231ead11a5be9dad44 \ - 
--hash=sha256:a552f4fde758f4eab33191e9548f671970f8b06d436d31388c9aa1e5861a710f +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c # via virtualenv gitdb==4.0.11 \ --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ @@ -274,9 +274,9 @@ jinja2==3.1.2 \ # sphinx # sphinx-jinja # sphinxcontrib-redoc -jsonschema==4.19.1 \ - --hash=sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e \ - --hash=sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf +jsonschema==4.19.2 \ + --hash=sha256:c9ff4d7447eed9592c23a12ccee508baf0dd0d59650615e847feb6cdca74f392 \ + --hash=sha256:eee9e502c788e89cb166d4d37f43084e3b64ab405c795c03d343a4dbc2c810fc # via sphinxcontrib-redoc jsonschema-specifications==2023.7.1 \ --hash=sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1 \ @@ -668,127 +668,127 @@ rich==13.6.0 \ --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef # via pytest-pretty -rpds-py==0.10.6 \ - --hash=sha256:023574366002bf1bd751ebaf3e580aef4a468b3d3c216d2f3f7e16fdabd885ed \ - --hash=sha256:031f76fc87644a234883b51145e43985aa2d0c19b063e91d44379cd2786144f8 \ - --hash=sha256:052a832078943d2b2627aea0d19381f607fe331cc0eb5df01991268253af8417 \ - --hash=sha256:0699ab6b8c98df998c3eacf51a3b25864ca93dab157abe358af46dc95ecd9801 \ - --hash=sha256:0713631d6e2d6c316c2f7b9320a34f44abb644fc487b77161d1724d883662e31 \ - --hash=sha256:0774a46b38e70fdde0c6ded8d6d73115a7c39d7839a164cc833f170bbf539116 \ - --hash=sha256:0898173249141ee99ffcd45e3829abe7bcee47d941af7434ccbf97717df020e5 \ - --hash=sha256:09586f51a215d17efdb3a5f090d7cbf1633b7f3708f60a044757a5d48a83b393 \ - --hash=sha256:102eac53bb0bf0f9a275b438e6cf6904904908562a1463a6fc3323cf47d7a532 \ - 
--hash=sha256:10f32b53f424fc75ff7b713b2edb286fdbfc94bf16317890260a81c2c00385dc \ - --hash=sha256:150eec465dbc9cbca943c8e557a21afdcf9bab8aaabf386c44b794c2f94143d2 \ - --hash=sha256:1d7360573f1e046cb3b0dceeb8864025aa78d98be4bb69f067ec1c40a9e2d9df \ - --hash=sha256:1f36a9d751f86455dc5278517e8b65580eeee37d61606183897f122c9e51cef3 \ - --hash=sha256:24656dc36f866c33856baa3ab309da0b6a60f37d25d14be916bd3e79d9f3afcf \ - --hash=sha256:25860ed5c4e7f5e10c496ea78af46ae8d8468e0be745bd233bab9ca99bfd2647 \ - --hash=sha256:26857f0f44f0e791f4a266595a7a09d21f6b589580ee0585f330aaccccb836e3 \ - --hash=sha256:2bb2e4826be25e72013916eecd3d30f66fd076110de09f0e750163b416500721 \ - --hash=sha256:2f6da6d842195fddc1cd34c3da8a40f6e99e4a113918faa5e60bf132f917c247 \ - --hash=sha256:30adb75ecd7c2a52f5e76af50644b3e0b5ba036321c390b8e7ec1bb2a16dd43c \ - --hash=sha256:3339eca941568ed52d9ad0f1b8eb9fe0958fa245381747cecf2e9a78a5539c42 \ - --hash=sha256:34ad87a831940521d462ac11f1774edf867c34172010f5390b2f06b85dcc6014 \ - --hash=sha256:3777cc9dea0e6c464e4b24760664bd8831738cc582c1d8aacf1c3f546bef3f65 \ - --hash=sha256:3953c6926a63f8ea5514644b7afb42659b505ece4183fdaaa8f61d978754349e \ - --hash=sha256:3c4eff26eddac49d52697a98ea01b0246e44ca82ab09354e94aae8823e8bda02 \ - --hash=sha256:40578a6469e5d1df71b006936ce95804edb5df47b520c69cf5af264d462f2cbb \ - --hash=sha256:40f93086eef235623aa14dbddef1b9fb4b22b99454cb39a8d2e04c994fb9868c \ - --hash=sha256:4134aa2342f9b2ab6c33d5c172e40f9ef802c61bb9ca30d21782f6e035ed0043 \ - --hash=sha256:442626328600bde1d09dc3bb00434f5374948838ce75c41a52152615689f9403 \ - --hash=sha256:4a5ee600477b918ab345209eddafde9f91c0acd931f3776369585a1c55b04c57 \ - --hash=sha256:4ce5a708d65a8dbf3748d2474b580d606b1b9f91b5c6ab2a316e0b0cf7a4ba50 \ - --hash=sha256:516a611a2de12fbea70c78271e558f725c660ce38e0006f75139ba337d56b1f6 \ - --hash=sha256:52c215eb46307c25f9fd2771cac8135d14b11a92ae48d17968eda5aa9aaf5071 \ - --hash=sha256:53c43e10d398e365da2d4cc0bcaf0854b79b4c50ee9689652cdc72948e86f487 \ - 
--hash=sha256:5752b761902cd15073a527b51de76bbae63d938dc7c5c4ad1e7d8df10e765138 \ - --hash=sha256:5e8a78bd4879bff82daef48c14d5d4057f6856149094848c3ed0ecaf49f5aec2 \ - --hash=sha256:5ed505ec6305abd2c2c9586a7b04fbd4baf42d4d684a9c12ec6110deefe2a063 \ - --hash=sha256:5ee97c683eaface61d38ec9a489e353d36444cdebb128a27fe486a291647aff6 \ - --hash=sha256:61fa268da6e2e1cd350739bb61011121fa550aa2545762e3dc02ea177ee4de35 \ - --hash=sha256:64ccc28683666672d7c166ed465c09cee36e306c156e787acef3c0c62f90da5a \ - --hash=sha256:66414dafe4326bca200e165c2e789976cab2587ec71beb80f59f4796b786a238 \ - --hash=sha256:68fe9199184c18d997d2e4293b34327c0009a78599ce703e15cd9a0f47349bba \ - --hash=sha256:6a555ae3d2e61118a9d3e549737bb4a56ff0cec88a22bd1dfcad5b4e04759175 \ - --hash=sha256:6bdc11f9623870d75692cc33c59804b5a18d7b8a4b79ef0b00b773a27397d1f6 \ - --hash=sha256:6cf4393c7b41abbf07c88eb83e8af5013606b1cdb7f6bc96b1b3536b53a574b8 \ - --hash=sha256:6eef672de005736a6efd565577101277db6057f65640a813de6c2707dc69f396 \ - --hash=sha256:734c41f9f57cc28658d98270d3436dba65bed0cfc730d115b290e970150c540d \ - --hash=sha256:73e0a78a9b843b8c2128028864901f55190401ba38aae685350cf69b98d9f7c9 \ - --hash=sha256:775049dfa63fb58293990fc59473e659fcafd953bba1d00fc5f0631a8fd61977 \ - --hash=sha256:7854a207ef77319ec457c1eb79c361b48807d252d94348305db4f4b62f40f7f3 \ - --hash=sha256:78ca33811e1d95cac8c2e49cb86c0fb71f4d8409d8cbea0cb495b6dbddb30a55 \ - --hash=sha256:79edd779cfc46b2e15b0830eecd8b4b93f1a96649bcb502453df471a54ce7977 \ - --hash=sha256:7bf347b495b197992efc81a7408e9a83b931b2f056728529956a4d0858608b80 \ - --hash=sha256:7fde6d0e00b2fd0dbbb40c0eeec463ef147819f23725eda58105ba9ca48744f4 \ - --hash=sha256:81de24a1c51cfb32e1fbf018ab0bdbc79c04c035986526f76c33e3f9e0f3356c \ - --hash=sha256:879fb24304ead6b62dbe5034e7b644b71def53c70e19363f3c3be2705c17a3b4 \ - --hash=sha256:8e7f2219cb72474571974d29a191714d822e58be1eb171f229732bc6fdedf0ac \ - --hash=sha256:9164ec8010327ab9af931d7ccd12ab8d8b5dc2f4c6a16cbdd9d087861eaaefa1 \ - 
--hash=sha256:945eb4b6bb8144909b203a88a35e0a03d22b57aefb06c9b26c6e16d72e5eb0f0 \ - --hash=sha256:99a57006b4ec39dbfb3ed67e5b27192792ffb0553206a107e4aadb39c5004cd5 \ - --hash=sha256:9e9184fa6c52a74a5521e3e87badbf9692549c0fcced47443585876fcc47e469 \ - --hash=sha256:9ff93d3aedef11f9c4540cf347f8bb135dd9323a2fc705633d83210d464c579d \ - --hash=sha256:a360cfd0881d36c6dc271992ce1eda65dba5e9368575663de993eeb4523d895f \ - --hash=sha256:a5d7ed104d158c0042a6a73799cf0eb576dfd5fc1ace9c47996e52320c37cb7c \ - --hash=sha256:ac17044876e64a8ea20ab132080ddc73b895b4abe9976e263b0e30ee5be7b9c2 \ - --hash=sha256:ad857f42831e5b8d41a32437f88d86ead6c191455a3499c4b6d15e007936d4cf \ - --hash=sha256:b2039f8d545f20c4e52713eea51a275e62153ee96c8035a32b2abb772b6fc9e5 \ - --hash=sha256:b455492cab07107bfe8711e20cd920cc96003e0da3c1f91297235b1603d2aca7 \ - --hash=sha256:b4a9fe992887ac68256c930a2011255bae0bf5ec837475bc6f7edd7c8dfa254e \ - --hash=sha256:b5a53f5998b4bbff1cb2e967e66ab2addc67326a274567697379dd1e326bded7 \ - --hash=sha256:b788276a3c114e9f51e257f2a6f544c32c02dab4aa7a5816b96444e3f9ffc336 \ - --hash=sha256:bddd4f91eede9ca5275e70479ed3656e76c8cdaaa1b354e544cbcf94c6fc8ac4 \ - --hash=sha256:c0503c5b681566e8b722fe8c4c47cce5c7a51f6935d5c7012c4aefe952a35eed \ - --hash=sha256:c1b3cd23d905589cb205710b3988fc8f46d4a198cf12862887b09d7aaa6bf9b9 \ - --hash=sha256:c48f3fbc3e92c7dd6681a258d22f23adc2eb183c8cb1557d2fcc5a024e80b094 \ - --hash=sha256:c63c3ef43f0b3fb00571cff6c3967cc261c0ebd14a0a134a12e83bdb8f49f21f \ - --hash=sha256:c6c45a2d2b68c51fe3d9352733fe048291e483376c94f7723458cfd7b473136b \ - --hash=sha256:caa1afc70a02645809c744eefb7d6ee8fef7e2fad170ffdeacca267fd2674f13 \ - --hash=sha256:cc435d059f926fdc5b05822b1be4ff2a3a040f3ae0a7bbbe672babb468944722 \ - --hash=sha256:cf693eb4a08eccc1a1b636e4392322582db2a47470d52e824b25eca7a3977b53 \ - --hash=sha256:cf71343646756a072b85f228d35b1d7407da1669a3de3cf47f8bbafe0c8183a4 \ - --hash=sha256:d08f63561c8a695afec4975fae445245386d645e3e446e6f260e81663bfd2e38 \ - 
--hash=sha256:d29ddefeab1791e3c751e0189d5f4b3dbc0bbe033b06e9c333dca1f99e1d523e \ - --hash=sha256:d7f5e15c953ace2e8dde9824bdab4bec50adb91a5663df08d7d994240ae6fa31 \ - --hash=sha256:d858532212f0650be12b6042ff4378dc2efbb7792a286bee4489eaa7ba010586 \ - --hash=sha256:d97dd44683802000277bbf142fd9f6b271746b4846d0acaf0cefa6b2eaf2a7ad \ - --hash=sha256:dcdc88b6b01015da066da3fb76545e8bb9a6880a5ebf89e0f0b2e3ca557b3ab7 \ - --hash=sha256:dd609fafdcdde6e67a139898196698af37438b035b25ad63704fd9097d9a3482 \ - --hash=sha256:defa2c0c68734f4a82028c26bcc85e6b92cced99866af118cd6a89b734ad8e0d \ - --hash=sha256:e22260a4741a0e7a206e175232867b48a16e0401ef5bce3c67ca5b9705879066 \ - --hash=sha256:e225a6a14ecf44499aadea165299092ab0cba918bb9ccd9304eab1138844490b \ - --hash=sha256:e3df0bc35e746cce42579826b89579d13fd27c3d5319a6afca9893a9b784ff1b \ - --hash=sha256:e6fcc026a3f27c1282c7ed24b7fcac82cdd70a0e84cc848c0841a3ab1e3dea2d \ - --hash=sha256:e782379c2028a3611285a795b89b99a52722946d19fc06f002f8b53e3ea26ea9 \ - --hash=sha256:e8cdd52744f680346ff8c1ecdad5f4d11117e1724d4f4e1874f3a67598821069 \ - --hash=sha256:e9616f5bd2595f7f4a04b67039d890348ab826e943a9bfdbe4938d0eba606971 \ - --hash=sha256:e98c4c07ee4c4b3acf787e91b27688409d918212dfd34c872201273fdd5a0e18 \ - --hash=sha256:ebdab79f42c5961682654b851f3f0fc68e6cc7cd8727c2ac4ffff955154123c1 \ - --hash=sha256:f0f17f2ce0f3529177a5fff5525204fad7b43dd437d017dd0317f2746773443d \ - --hash=sha256:f4e56860a5af16a0fcfa070a0a20c42fbb2012eed1eb5ceeddcc7f8079214281 +rpds-py==0.12.0 \ + --hash=sha256:0525847f83f506aa1e28eb2057b696fe38217e12931c8b1b02198cfe6975e142 \ + --hash=sha256:05942656cb2cb4989cd50ced52df16be94d344eae5097e8583966a1d27da73a5 \ + --hash=sha256:0831d3ecdea22e4559cc1793f22e77067c9d8c451d55ae6a75bf1d116a8e7f42 \ + --hash=sha256:0853da3d5e9bc6a07b2486054a410b7b03f34046c123c6561b535bb48cc509e1 \ + --hash=sha256:08e6e7ff286254016b945e1ab632ee843e43d45e40683b66dd12b73791366dd1 \ + 
--hash=sha256:0a38612d07a36138507d69646c470aedbfe2b75b43a4643f7bd8e51e52779624 \ + --hash=sha256:0bedd91ae1dd142a4dc15970ed2c729ff6c73f33a40fa84ed0cdbf55de87c777 \ + --hash=sha256:0c5441b7626c29dbd54a3f6f3713ec8e956b009f419ffdaaa3c80eaf98ddb523 \ + --hash=sha256:0e9e976e0dbed4f51c56db10831c9623d0fd67aac02853fe5476262e5a22acb7 \ + --hash=sha256:0fadfdda275c838cba5102c7f90a20f2abd7727bf8f4a2b654a5b617529c5c18 \ + --hash=sha256:1096ca0bf2d3426cbe79d4ccc91dc5aaa73629b08ea2d8467375fad8447ce11a \ + --hash=sha256:171d9a159f1b2f42a42a64a985e4ba46fc7268c78299272ceba970743a67ee50 \ + --hash=sha256:188912b22b6c8225f4c4ffa020a2baa6ad8fabb3c141a12dbe6edbb34e7f1425 \ + --hash=sha256:1b4cf9ab9a0ae0cb122685209806d3f1dcb63b9fccdf1424fb42a129dc8c2faa \ + --hash=sha256:1e04581c6117ad9479b6cfae313e212fe0dfa226ac727755f0d539cd54792963 \ + --hash=sha256:1fa73ed22c40a1bec98d7c93b5659cd35abcfa5a0a95ce876b91adbda170537c \ + --hash=sha256:2124f9e645a94ab7c853bc0a3644e0ca8ffbe5bb2d72db49aef8f9ec1c285733 \ + --hash=sha256:240687b5be0f91fbde4936a329c9b7589d9259742766f74de575e1b2046575e4 \ + --hash=sha256:25740fb56e8bd37692ed380e15ec734be44d7c71974d8993f452b4527814601e \ + --hash=sha256:27ccc93c7457ef890b0dd31564d2a05e1aca330623c942b7e818e9e7c2669ee4 \ + --hash=sha256:281c8b219d4f4b3581b918b816764098d04964915b2f272d1476654143801aa2 \ + --hash=sha256:2d34a5450a402b00d20aeb7632489ffa2556ca7b26f4a63c35f6fccae1977427 \ + --hash=sha256:301bd744a1adaa2f6a5e06c98f1ac2b6f8dc31a5c23b838f862d65e32fca0d4b \ + --hash=sha256:30e5ce9f501fb1f970e4a59098028cf20676dee64fc496d55c33e04bbbee097d \ + --hash=sha256:33ab498f9ac30598b6406e2be1b45fd231195b83d948ebd4bd77f337cb6a2bff \ + --hash=sha256:35585a8cb5917161f42c2104567bb83a1d96194095fc54a543113ed5df9fa436 \ + --hash=sha256:389c0e38358fdc4e38e9995e7291269a3aead7acfcf8942010ee7bc5baee091c \ + --hash=sha256:3acadbab8b59f63b87b518e09c4c64b142e7286b9ca7a208107d6f9f4c393c5c \ + --hash=sha256:3b7a64d43e2a1fa2dd46b678e00cabd9a49ebb123b339ce799204c44a593ae1c \ + 
--hash=sha256:3c8c0226c71bd0ce9892eaf6afa77ae8f43a3d9313124a03df0b389c01f832de \ + --hash=sha256:429349a510da82c85431f0f3e66212d83efe9fd2850f50f339341b6532c62fe4 \ + --hash=sha256:466030a42724780794dea71eb32db83cc51214d66ab3fb3156edd88b9c8f0d78 \ + --hash=sha256:47aeceb4363851d17f63069318ba5721ae695d9da55d599b4d6fb31508595278 \ + --hash=sha256:48aa98987d54a46e13e6954880056c204700c65616af4395d1f0639eba11764b \ + --hash=sha256:4b2416ed743ec5debcf61e1242e012652a4348de14ecc7df3512da072b074440 \ + --hash=sha256:4d0a675a7acbbc16179188d8c6d0afb8628604fc1241faf41007255957335a0b \ + --hash=sha256:4eb74d44776b0fb0782560ea84d986dffec8ddd94947f383eba2284b0f32e35e \ + --hash=sha256:4f8a1d990dc198a6c68ec3d9a637ba1ce489b38cbfb65440a27901afbc5df575 \ + --hash=sha256:513ccbf7420c30e283c25c82d5a8f439d625a838d3ba69e79a110c260c46813f \ + --hash=sha256:5210a0018c7e09c75fa788648617ebba861ae242944111d3079034e14498223f \ + --hash=sha256:54cdfcda59251b9c2f87a05d038c2ae02121219a04d4a1e6fc345794295bdc07 \ + --hash=sha256:56dd500411d03c5e9927a1eb55621e906837a83b02350a9dc401247d0353717c \ + --hash=sha256:57ec6baec231bb19bb5fd5fc7bae21231860a1605174b11585660236627e390e \ + --hash=sha256:5f1519b080d8ce0a814f17ad9fb49fb3a1d4d7ce5891f5c85fc38631ca3a8dc4 \ + --hash=sha256:6174d6ad6b58a6bcf67afbbf1723420a53d06c4b89f4c50763d6fa0a6ac9afd2 \ + --hash=sha256:68172622a5a57deb079a2c78511c40f91193548e8ab342c31e8cb0764d362459 \ + --hash=sha256:6915fc9fa6b3ec3569566832e1bb03bd801c12cea030200e68663b9a87974e76 \ + --hash=sha256:6b75b912a0baa033350367a8a07a8b2d44fd5b90c890bfbd063a8a5f945f644b \ + --hash=sha256:6f5dcb658d597410bb7c967c1d24eaf9377b0d621358cbe9d2ff804e5dd12e81 \ + --hash=sha256:6f8d7fe73d1816eeb5378409adc658f9525ecbfaf9e1ede1e2d67a338b0c7348 \ + --hash=sha256:7036316cc26b93e401cedd781a579be606dad174829e6ad9e9c5a0da6e036f80 \ + --hash=sha256:7188ddc1a8887194f984fa4110d5a3d5b9b5cd35f6bafdff1b649049cbc0ce29 \ + --hash=sha256:761531076df51309075133a6bc1db02d98ec7f66e22b064b1d513bc909f29743 \ + 
--hash=sha256:7979d90ee2190d000129598c2b0c82f13053dba432b94e45e68253b09bb1f0f6 \ + --hash=sha256:8015835494b21aa7abd3b43fdea0614ee35ef6b03db7ecba9beb58eadf01c24f \ + --hash=sha256:81c4d1a3a564775c44732b94135d06e33417e829ff25226c164664f4a1046213 \ + --hash=sha256:81cf9d306c04df1b45971c13167dc3bad625808aa01281d55f3cf852dde0e206 \ + --hash=sha256:88857060b690a57d2ea8569bca58758143c8faa4639fb17d745ce60ff84c867e \ + --hash=sha256:8c567c664fc2f44130a20edac73e0a867f8e012bf7370276f15c6adc3586c37c \ + --hash=sha256:91bd2b7cf0f4d252eec8b7046fa6a43cee17e8acdfc00eaa8b3dbf2f9a59d061 \ + --hash=sha256:9620650c364c01ed5b497dcae7c3d4b948daeae6e1883ae185fef1c927b6b534 \ + --hash=sha256:9b007c2444705a2dc4a525964fd4dd28c3320b19b3410da6517cab28716f27d3 \ + --hash=sha256:9bf9acce44e967a5103fcd820fc7580c7b0ab8583eec4e2051aec560f7b31a63 \ + --hash=sha256:a239303acb0315091d54c7ff36712dba24554993b9a93941cf301391d8a997ee \ + --hash=sha256:a2baa6be130e8a00b6cbb9f18a33611ec150b4537f8563bddadb54c1b74b8193 \ + --hash=sha256:a54917b7e9cd3a67e429a630e237a90b096e0ba18897bfb99ee8bd1068a5fea0 \ + --hash=sha256:a689e1ded7137552bea36305a7a16ad2b40be511740b80748d3140614993db98 \ + --hash=sha256:a952ae3eb460c6712388ac2ec706d24b0e651b9396d90c9a9e0a69eb27737fdc \ + --hash=sha256:aa32205358a76bf578854bf31698a86dc8b2cb591fd1d79a833283f4a403f04b \ + --hash=sha256:b2287c09482949e0ca0c0eb68b2aca6cf57f8af8c6dfd29dcd3bc45f17b57978 \ + --hash=sha256:b6b0e17d39d21698185097652c611f9cf30f7c56ccec189789920e3e7f1cee56 \ + --hash=sha256:b710bf7e7ae61957d5c4026b486be593ed3ec3dca3e5be15e0f6d8cf5d0a4990 \ + --hash=sha256:b8e11715178f3608874508f08e990d3771e0b8c66c73eb4e183038d600a9b274 \ + --hash=sha256:b92aafcfab3d41580d54aca35a8057341f1cfc7c9af9e8bdfc652f83a20ced31 \ + --hash=sha256:bec29b801b4adbf388314c0d050e851d53762ab424af22657021ce4b6eb41543 \ + --hash=sha256:c694bee70ece3b232df4678448fdda245fd3b1bb4ba481fb6cd20e13bb784c46 \ + --hash=sha256:c6b52b7028b547866c2413f614ee306c2d4eafdd444b1ff656bf3295bf1484aa \ + 
--hash=sha256:cb41ad20064e18a900dd427d7cf41cfaec83bcd1184001f3d91a1f76b3fcea4e \ + --hash=sha256:cd316dbcc74c76266ba94eb021b0cc090b97cca122f50bd7a845f587ff4bf03f \ + --hash=sha256:ced40cdbb6dd47a032725a038896cceae9ce267d340f59508b23537f05455431 \ + --hash=sha256:d1c562a9bb72244fa767d1c1ab55ca1d92dd5f7c4d77878fee5483a22ffac808 \ + --hash=sha256:d389ff1e95b6e46ebedccf7fd1fadd10559add595ac6a7c2ea730268325f832c \ + --hash=sha256:d56b1cd606ba4cedd64bb43479d56580e147c6ef3f5d1c5e64203a1adab784a2 \ + --hash=sha256:d72a4315514e5a0b9837a086cb433b004eea630afb0cc129de76d77654a9606f \ + --hash=sha256:d9e7f29c00577aff6b318681e730a519b235af292732a149337f6aaa4d1c5e31 \ + --hash=sha256:dbc25baa6abb205766fb8606f8263b02c3503a55957fcb4576a6bb0a59d37d10 \ + --hash=sha256:e57919c32ee295a2fca458bb73e4b20b05c115627f96f95a10f9f5acbd61172d \ + --hash=sha256:e5bbe011a2cea9060fef1bb3d668a2fd8432b8888e6d92e74c9c794d3c101595 \ + --hash=sha256:e6aea5c0eb5b0faf52c7b5c4a47c8bb64437173be97227c819ffa31801fa4e34 \ + --hash=sha256:e888be685fa42d8b8a3d3911d5604d14db87538aa7d0b29b1a7ea80d354c732d \ + --hash=sha256:eebaf8c76c39604d52852366249ab807fe6f7a3ffb0dd5484b9944917244cdbe \ + --hash=sha256:efbe0b5e0fd078ed7b005faa0170da4f72666360f66f0bb2d7f73526ecfd99f9 \ + --hash=sha256:efddca2d02254a52078c35cadad34762adbae3ff01c6b0c7787b59d038b63e0d \ + --hash=sha256:f05450fa1cd7c525c0b9d1a7916e595d3041ac0afbed2ff6926e5afb6a781b7f \ + --hash=sha256:f12d69d568f5647ec503b64932874dade5a20255736c89936bf690951a5e79f5 \ + --hash=sha256:f45321224144c25a62052035ce96cbcf264667bcb0d81823b1bbc22c4addd194 \ + --hash=sha256:f62581d7e884dd01ee1707b7c21148f61f2febb7de092ae2f108743fcbef5985 \ + --hash=sha256:f8832a4f83d4782a8f5a7b831c47e8ffe164e43c2c148c8160ed9a6d630bc02a \ + --hash=sha256:fa35ad36440aaf1ac8332b4a4a433d4acd28f1613f0d480995f5cfd3580e90b7 # via # jsonschema # referencing -ruff==0.1.3 \ - --hash=sha256:0b6c55f5ef8d9dd05b230bb6ab80bc4381ecb60ae56db0330f660ea240cb0d4a \ - 
--hash=sha256:0f75e670d529aa2288cd00fc0e9b9287603d95e1536d7a7e0cafe00f75e0dd9d \ - --hash=sha256:12fd53696c83a194a2db7f9a46337ce06445fb9aa7d25ea6f293cf75b21aca9f \ - --hash=sha256:1c595193881922cc0556a90f3af99b1c5681f0c552e7a2a189956141d8666fe8 \ - --hash=sha256:2e3de9ed2e39160800281848ff4670e1698037ca039bda7b9274f849258d26ce \ - --hash=sha256:3ba6145369a151401d5db79f0a47d50e470384d0d89d0d6f7fab0b589ad07c34 \ - --hash=sha256:3e7afcbdcfbe3399c34e0f6370c30f6e529193c731b885316c5a09c9e4317eef \ - --hash=sha256:4874c165f96c14a00590dcc727a04dca0cfd110334c24b039458c06cf78a672e \ - --hash=sha256:76dd49f6cd945d82d9d4a9a6622c54a994689d8d7b22fa1322983389b4892e20 \ - --hash=sha256:7a18df6638cec4a5bd75350639b2bb2a2366e01222825562c7346674bdceb7ea \ - --hash=sha256:918b454bc4f8874a616f0d725590277c42949431ceb303950e87fef7a7d94cb3 \ - --hash=sha256:b46d43d51f7061652eeadb426a9e3caa1e0002470229ab2fc19de8a7b0766901 \ - --hash=sha256:b8afeb9abd26b4029c72adc9921b8363374f4e7edb78385ffaa80278313a15f9 \ - --hash=sha256:ca3cf365bf32e9ba7e6db3f48a4d3e2c446cd19ebee04f05338bc3910114528b \ - --hash=sha256:d8859605e729cd5e53aa38275568dbbdb4fe882d2ea2714c5453b678dca83784 \ - --hash=sha256:dc3ec4edb3b73f21b4aa51337e16674c752f1d76a4a543af56d7d04e97769613 \ - --hash=sha256:eec2dd31eed114e48ea42dbffc443e9b7221976554a504767ceaee3dd38edeb8 +ruff==0.1.4 \ + --hash=sha256:01206e361021426e3c1b7fba06ddcb20dbc5037d64f6841e5f2b21084dc51800 \ + --hash=sha256:1dfd6bf8f6ad0a4ac99333f437e0ec168989adc5d837ecd38ddb2cc4a2e3db8a \ + --hash=sha256:21520ecca4cc555162068d87c747b8f95e1e95f8ecfcbbe59e8dd00710586315 \ + --hash=sha256:58826efb8b3efbb59bb306f4b19640b7e366967a31c049d49311d9eb3a4c60cb \ + --hash=sha256:645591a613a42cb7e5c2b667cbefd3877b21e0252b59272ba7212c3d35a5819f \ + --hash=sha256:6bc02a480d4bfffd163a723698da15d1a9aec2fced4c06f2a753f87f4ce6969c \ + --hash=sha256:78e8db8ab6f100f02e28b3d713270c857d370b8d61871d5c7d1702ae411df683 \ + 
--hash=sha256:80fea754eaae06335784b8ea053d6eb8e9aac75359ebddd6fee0858e87c8d510 \ + --hash=sha256:864958706b669cce31d629902175138ad8a069d99ca53514611521f532d91495 \ + --hash=sha256:9862811b403063765b03e716dac0fda8fdbe78b675cd947ed5873506448acea4 \ + --hash=sha256:99908ca2b3b85bffe7e1414275d004917d1e0dfc99d497ccd2ecd19ad115fd0d \ + --hash=sha256:9fdd61883bb34317c788af87f4cd75dfee3a73f5ded714b77ba928e418d6e39e \ + --hash=sha256:a9a1301dc43cbf633fb603242bccd0aaa34834750a14a4c1817e2e5c8d60de17 \ + --hash=sha256:b4eaca8c9cc39aa7f0f0d7b8fe24ecb51232d1bb620fc4441a61161be4a17539 \ + --hash=sha256:d98ae9ebf56444e18a3e3652b3383204748f73e247dea6caaf8b52d37e6b32da \ + --hash=sha256:e8791482d508bd0b36c76481ad3117987301b86072158bdb69d796503e1c84a8 \ + --hash=sha256:fdfd453fc91d9d86d6aaa33b1bafa69d114cf7421057868f0b79104079d3e66e # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -837,9 +837,9 @@ sphinx==7.2.6 \ # sphinxcontrib-serializinghtml # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==1.24.0 \ - --hash=sha256:6a73c0c61a9144ce2ed5ef2bed99d615254e5005c1cc32002017d72d69fb70e6 \ - --hash=sha256:94e440066941bb237704bb880785e2d05e8ae5406c88674feefbb938ad0dc6af +sphinx-autodoc-typehints==1.24.1 \ + --hash=sha256:06683a2b76c3c7b1931b75e40e0211866fbb50ba4c4e802d0901d9b4e849add2 \ + --hash=sha256:4cc16c5545f2bf896ca52a854babefe3d8baeaaa033d13a7f179ac1d9feb02d5 # via documenteer sphinx-automodapi==0.16.0 \ --hash=sha256:68fc47064804604b90aa27c047016e86aaf970981d90a0082d5b5dd2e9d38afd \ diff --git a/requirements/main.txt b/requirements/main.txt index e9b390bdb0..05f2b1ca17 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -99,97 +99,97 @@ cffi==1.16.0 \ --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography -charset-normalizer==3.3.1 \ - 
--hash=sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5 \ - --hash=sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93 \ - --hash=sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a \ - --hash=sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d \ - --hash=sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c \ - --hash=sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1 \ - --hash=sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58 \ - --hash=sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2 \ - --hash=sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557 \ - --hash=sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147 \ - --hash=sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041 \ - --hash=sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2 \ - --hash=sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2 \ - --hash=sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7 \ - --hash=sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296 \ - --hash=sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690 \ - --hash=sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67 \ - --hash=sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57 \ - --hash=sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597 \ - --hash=sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846 \ - --hash=sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b \ - --hash=sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97 \ - --hash=sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c \ - --hash=sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62 \ - 
--hash=sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa \ - --hash=sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f \ - --hash=sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e \ - --hash=sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821 \ - --hash=sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3 \ - --hash=sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4 \ - --hash=sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb \ - --hash=sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727 \ - --hash=sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514 \ - --hash=sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d \ - --hash=sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761 \ - --hash=sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55 \ - --hash=sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f \ - --hash=sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c \ - --hash=sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034 \ - --hash=sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6 \ - --hash=sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae \ - --hash=sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1 \ - --hash=sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14 \ - --hash=sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1 \ - --hash=sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228 \ - --hash=sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708 \ - --hash=sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48 \ - --hash=sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f \ - 
--hash=sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5 \ - --hash=sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f \ - --hash=sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4 \ - --hash=sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8 \ - --hash=sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff \ - --hash=sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61 \ - --hash=sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b \ - --hash=sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97 \ - --hash=sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b \ - --hash=sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605 \ - --hash=sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728 \ - --hash=sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d \ - --hash=sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c \ - --hash=sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf \ - --hash=sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673 \ - --hash=sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1 \ - --hash=sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b \ - --hash=sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41 \ - --hash=sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8 \ - --hash=sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f \ - --hash=sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4 \ - --hash=sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008 \ - --hash=sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9 \ - --hash=sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5 \ - 
--hash=sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f \ - --hash=sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e \ - --hash=sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273 \ - --hash=sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45 \ - --hash=sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e \ - --hash=sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656 \ - --hash=sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e \ - --hash=sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c \ - --hash=sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2 \ - --hash=sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72 \ - --hash=sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056 \ - --hash=sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397 \ - --hash=sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42 \ - --hash=sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd \ - --hash=sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3 \ - --hash=sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213 \ - --hash=sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf \ - --hash=sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67 +charset-normalizer==3.3.2 \ + --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ + --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ + --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ + --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ + --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ + 
--hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ + --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ + --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ + --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ + --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ + --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ + --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ + --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ + --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ + --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ + --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ + --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ + --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ + --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ + --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ + --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ + --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ + --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ + --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ + --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ + --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ + --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ + --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ + --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ + 
--hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ + --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ + --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ + --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ + --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ + --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ + --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ + --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ + --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ + --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ + --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ + --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ + --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ + --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ + --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ + --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ + --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ + --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ + --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ + --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ + --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ + --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ + --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ + --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ + 
--hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ + --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ + --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ + --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ + --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ + --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ + --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ + --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ + --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ + --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ + --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ + --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ + --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ + --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ + --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ + --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ + --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ + --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ + --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ + --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ + --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ + --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ + --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ + --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ + 
--hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ + --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ + --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ + --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ + --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ + --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ + --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ + --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ + --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ + --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ + --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ + --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ + --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 # via requests click==8.1.7 \ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ From 473af8aa9df0af49f2a6ae78dee5095bf0d0442c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 12:38:08 +0000 Subject: [PATCH 209/588] Update Helm release argo-workflows to v0.38.0 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index d7eb85d18f..e7bc397581 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.37.0 + version: 0.38.0 repository: https://argoproj.github.io/argo-helm From 5e97aef0062f3d309fe92ab7ad53f6f1ab008923 Mon Sep 17 
00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 18:31:43 +0000 Subject: [PATCH 210/588] Update Helm release argo-cd to v5.51.0 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 15c53df09e..9060dd4930 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.49.0 + version: 5.51.0 repository: https://argoproj.github.io/argo-helm From fa39fb28774fd1b71d6ce14f0fb1e33cbf6b402a Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Mon, 6 Nov 2023 11:36:44 -0800 Subject: [PATCH 211/588] Update prompt processing to use w_2023_44 --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 9d69579cf4..f08c90674b 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -8,7 +8,7 @@ prompt-proto-service: repository: ghcr.io/lsst-dm/prompt-proto-service pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: d_2023_09_22 + tag: w_2023_44 instrument: pipelines: >- From 9e4303f3cb250adabbb542c6710040ee0c0014a1 Mon Sep 17 00:00:00 2001 From: Brianna Smart Date: Tue, 7 Nov 2023 10:04:06 -0800 Subject: [PATCH 212/588] Add test topic Add a test topic to send alerts to that does not receive simulated alerts. 
--- applications/alert-stream-broker/Chart.yaml | 2 +- applications/alert-stream-broker/README.md | 12 ++++++-- .../charts/alert-stream-broker/Chart.yaml | 2 +- .../charts/alert-stream-broker/README.md | 12 ++++++-- .../templates/kafka-topics.yaml | 13 +++++++++ .../charts/alert-stream-broker/values.yaml | 28 ++++++++++++++++++- .../values-usdfdev-alert-stream-broker.yaml | 4 +++ 7 files changed, 66 insertions(+), 7 deletions(-) create mode 100644 applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml diff --git a/applications/alert-stream-broker/Chart.yaml b/applications/alert-stream-broker/Chart.yaml index 8e29824c87..cdeb3ac3e6 100644 --- a/applications/alert-stream-broker/Chart.yaml +++ b/applications/alert-stream-broker/Chart.yaml @@ -7,7 +7,7 @@ sources: - https://github.com/lsst-dm/alert-stream-simulator dependencies: - name: alert-stream-broker - version: 2.5.1 + version: 2.5.2 # The schema registry is bundled together in the same application as the # Kafka broker because Strimzi Registry Operator expects everything (the diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index d56b4589f3..5bfa1d8968 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -50,6 +50,8 @@ Alert transmission to community brokers | alert-database.storage.gcp.project | string | `""` | Name of a GCP project that has a bucket for database storage | | alert-database.storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data | | alert-stream-broker.cluster.name | string | `"alert-broker"` | Name used for the Kafka broker, and used by Strimzi for many annotations. | +| alert-stream-broker.clusterName | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. | +| alert-stream-broker.clusterPort | int | `9092` | Port to connect to on the Strimzi Kafka cluster. 
It should be an internal TLS listener. | | alert-stream-broker.fullnameOverride | string | `""` | Override for the full name used for Kubernetes resources; by default one will be created based on the chart name and helm release name. | | alert-stream-broker.kafka.config | object | `{"log.retention.bytes":"42949672960","log.retention.hours":168,"offsets.retention.minutes":1440}` | Configuration overrides for the Kafka server. | | alert-stream-broker.kafka.config."log.retention.bytes" | string | `"42949672960"` | Maximum retained number of bytes for a broker's data. This is a string to avoid YAML type conversion issues for large numbers. | @@ -76,14 +78,20 @@ Alert transmission to community brokers | alert-stream-broker.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | alert-stream-broker.kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | alert-stream-broker.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| alert-stream-broker.maxBytesRetained | string | `"24000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | +| alert-stream-broker.maxMillisecondsRetained | string | `"604800000"` | Maximum amount of time to save simulated alerts in the replay topic, in milliseconds. Default is 7 days. | | alert-stream-broker.nameOverride | string | `""` | | +| alert-stream-broker.schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | alert-stream-broker.strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. 
| | alert-stream-broker.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | +| alert-stream-broker.testTopicName | string | `"alert-stream-test"` | Name of the topic which will be used to send test alerts. | +| alert-stream-broker.testTopicPartitions | int | `8` | | +| alert-stream-broker.testTopicReplicas | int | `2` | | | alert-stream-broker.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | alert-stream-broker.tls.subject.organization | string | `"Vera C. Rubin Observatory"` | Organization to use in the 'Subject' field of the broker's TLS certificate. | -| alert-stream-broker.users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | +| alert-stream-broker.users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated","alert-stream-test"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | | alert-stream-broker.users[0].groups | list | `["rubin-testing"]` | A list of string prefixes for groups that the user should get admin access to, allowing them to create, delete, describe, etc consumer groups. Note that these are prefix-matched, not just literal exact matches. 
| -| alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated"]` | A list of topics that the user should get read-only access to. | +| alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. | | alert-stream-broker.users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | | alert-stream-broker.vaultSecretsPath | string | `""` | Path to the secret resource in Vault | | alert-stream-broker.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml index 41df3cce85..ba9abab12b 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: alert-stream-broker -version: 2.5.1 +version: 2.5.2 description: Kafka broker cluster for distributing alerts maintainers: - name: bsmart diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md index 6fac6e2664..75f458e99e 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md @@ -7,6 +7,8 @@ Kafka broker cluster for distributing alerts | Key | Type | Default | Description | |-----|------|---------|-------------| | cluster.name | string | `"alert-broker"` | Name used for the Kafka broker, and used by Strimzi for many annotations. | +| clusterName | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. | +| clusterPort | int | `9092` | Port to connect to on the Strimzi Kafka cluster. It should be an internal TLS listener. 
| | fullnameOverride | string | `""` | Override for the full name used for Kubernetes resources; by default one will be created based on the chart name and helm release name. | | kafka.config | object | `{"log.retention.bytes":"42949672960","log.retention.hours":168,"offsets.retention.minutes":1440}` | Configuration overrides for the Kafka server. | | kafka.config."log.retention.bytes" | string | `"42949672960"` | Maximum retained number of bytes for a broker's data. This is a string to avoid YAML type conversion issues for large numbers. | @@ -33,14 +35,20 @@ Kafka broker cluster for distributing alerts | kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | | kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging | | kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | +| maxBytesRetained | string | `"24000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. | +| maxMillisecondsRetained | string | `"604800000"` | Maximum amount of time to save simulated alerts in the replay topic, in milliseconds. Default is 7 days. | | nameOverride | string | `""` | | +| schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | +| testTopicName | string | `"alert-stream-test"` | Name of the topic which will be used to send test alerts. 
| +| testTopicPartitions | int | `8` | | +| testTopicReplicas | int | `2` | | | tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | tls.subject.organization | string | `"Vera C. Rubin Observatory"` | Organization to use in the 'Subject' field of the broker's TLS certificate. | -| users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | +| users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated","alert-stream-test"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | | users[0].groups | list | `["rubin-testing"]` | A list of string prefixes for groups that the user should get admin access to, allowing them to create, delete, describe, etc consumer groups. Note that these are prefix-matched, not just literal exact matches. | -| users[0].readonlyTopics | list | `["alert-stream","alerts-simulated"]` | A list of topics that the user should get read-only access to. | +| users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. | | users[0].username | string | `"rubin-testing"` | The username for the user that should be created. 
| | vaultSecretsPath | string | `""` | Path to the secret resource in Vault | | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml new file mode 100644 index 0000000000..17049cb56d --- /dev/null +++ b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml @@ -0,0 +1,13 @@ +apiVersion: "kafka.strimzi.io/{{ .Values.strimziAPIVersion }}" +kind: KafkaTopic +metadata: + name: "{{ .Values.testTopicName }}" + labels: + strimzi.io/cluster: "{{ .Values.clusterName }}" +spec: + partitions: {{ .Values.testTopicPartitions }} + replicas: {{ .Values.testTopicReplicas }} + config: + cleanup.policy: "delete" + retention.ms: {{ .Values.maxMillisecondsRetained }} # 7 days + retention.bytes: {{ .Values.maxBytesRetained }} diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml index 9c48cd5ff6..d18993399b 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml @@ -114,7 +114,7 @@ users: - # -- The username for the user that should be created. username: rubin-testing # -- A list of topics that the user should get read-only access to. - readonlyTopics: ["alert-stream", "alerts-simulated"] + readonlyTopics: ["alert-stream", "alerts-simulated", "alert-stream-test"] # -- A list of string prefixes for groups that the user should get admin # access to, allowing them to create, delete, describe, etc consumer # groups. Note that these are prefix-matched, not just literal exact @@ -148,3 +148,29 @@ vaultSecretsPath: "" fullnameOverride: "" nameOverride: "" + +# -- Name of the topic which will be used to send test alerts. 
+testTopicName: alert-stream-test + +# -- Integer ID to use in the prefix of alert data packets. This should be a +# valid Confluent Schema Registry ID associated with the schema used. +schemaID: 1 + +# -- Name of a Strimzi Kafka cluster to connect to. +clusterName: alert-broker + +# -- Port to connect to on the Strimzi Kafka cluster. It should be an internal +# TLS listener. +clusterPort: 9092 + +# -- Maximum amount of time to save simulated alerts in the replay topic, in +# milliseconds. Default is 7 days. +maxMillisecondsRetained: "604800000" + +# -- Maximum number of bytes for the replay topic, per partition, per replica. +# Default is 100GB, but should be lower to not fill storage. +maxBytesRetained: "24000000000" + +testTopicPartitions: 8 + +testTopicReplicas: 2 diff --git a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml index 08c469616d..282680ec8d 100644 --- a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml +++ b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml @@ -110,6 +110,10 @@ alert-stream-broker: readonlyTopics: ["alerts-simulated"] groups: ["pittgoogle-idfint"] + testTopicName: "alert-stream-test" + testTopicPartitions: 20 + testTopicReplicas: 1 + alert-stream-schema-registry: hostname: "usdf-alert-schemas-dev.slac.stanford.edu" schemaTopic: "registry-schemas" From e7f85a88afbb89f84ee7f4625bbfd0245633fee0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 8 Nov 2023 01:54:22 +0000 Subject: [PATCH 213/588] chore(deps): update helm release redis to v1.0.10 --- applications/gafaelfawr/Chart.yaml | 2 +- applications/noteburst/Chart.yaml | 2 +- applications/portal/Chart.yaml | 2 +- applications/rubintv/Chart.yaml | 2 +- applications/times-square/Chart.yaml | 2 +- applications/vo-cutouts/Chart.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) 
diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index ded5a49d2a..6fc1197bea 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -9,7 +9,7 @@ appVersion: 9.5.1 dependencies: - name: redis - version: 1.0.9 + version: 1.0.10 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index fa6dcc2828..0ae38cb825 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -13,7 +13,7 @@ maintainers: dependencies: - name: redis - version: 1.0.9 + version: 1.0.10 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index 2505bb941e..5e6b5575ac 100644 --- a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -9,7 +9,7 @@ appVersion: "suit-2023.2.3" dependencies: - name: redis - version: 1.0.9 + version: 1.0.10 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/rubintv/Chart.yaml b/applications/rubintv/Chart.yaml index c147c27b91..52aeec7f6d 100644 --- a/applications/rubintv/Chart.yaml +++ b/applications/rubintv/Chart.yaml @@ -7,5 +7,5 @@ sources: appVersion: 0.1.0 dependencies: - name: redis - version: 1.0.9 + version: 1.0.10 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index e3ab8f8636..8b74abf857 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -12,7 +12,7 @@ appVersion: "0.9.2" dependencies: - name: redis - version: 1.0.9 + version: 1.0.10 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index 3822de5097..966bdb6814 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -8,7 
+8,7 @@ appVersion: 1.0.0 dependencies: - name: redis - version: 1.0.9 + version: 1.0.10 repository: https://lsst-sqre.github.io/charts/ annotations: From 06bf37fb72b9cc4607955383523315e157e243a4 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Wed, 8 Nov 2023 20:45:54 -0800 Subject: [PATCH 214/588] Reformat Prompt Processing pipelines config. The lack of indentation is a workaround for a Helm bug, but we can at least have each survey on a new line. --- .../values-usdfprod-prompt-processing.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index f08c90674b..e0ca04d61d 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -18,7 +18,13 @@ prompt-proto-service: (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml] - (survey="spec")=[] (survey="spec_with_rotation")=[] (survey="spec_bright")=[] (survey="spec_bright_with_rotation")=[] (survey="spec_pole")=[] (survey="spec_pole_with_rotation")=[] (survey="")=[] + (survey="spec")=[] + (survey="spec_with_rotation")=[] + (survey="spec_bright")=[] + (survey="spec_bright_with_rotation")=[] + (survey="spec_pole")=[] + (survey="spec_pole_with_rotation")=[] + (survey="")=[] calibRepo: /app/butler s3: From dc11fa68db5411689cb40e1dd9dbee6e745971e0 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Wed, 8 Nov 2023 20:51:37 -0800 Subject: [PATCH 215/588] Register spec-survey with Prompt Processing. 
--- .../values-usdfprod-prompt-processing.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index e0ca04d61d..0bc2f47266 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -19,6 +19,7 @@ prompt-proto-service: ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml] (survey="spec")=[] + (survey="spec-survey")=[] (survey="spec_with_rotation")=[] (survey="spec_bright")=[] (survey="spec_bright_with_rotation")=[] From 28209c9a1f6b36b3e8629e0825f422d22ad80dcf Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Wed, 8 Nov 2023 21:09:39 -0800 Subject: [PATCH 216/588] Revert "Register spec-survey with Prompt Processing." This reverts commit dc11fa68db5411689cb40e1dd9dbee6e745971e0. The new config is correct, but a bug in prompt_prototype prevents it from being parsed. The config will be redeployed once the bug is fixed. 
--- .../values-usdfprod-prompt-processing.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 0bc2f47266..e0ca04d61d 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -19,7 +19,6 @@ prompt-proto-service: ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml] (survey="spec")=[] - (survey="spec-survey")=[] (survey="spec_with_rotation")=[] (survey="spec_bright")=[] (survey="spec_bright_with_rotation")=[] From 2f31d38a5cc008006c3ed0a979f2b5544bcc40e9 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Thu, 9 Nov 2023 14:38:43 -0800 Subject: [PATCH 217/588] Update Prompt Processing version. The new version has better config parsing than the previous one. --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index e0ca04d61d..89fbe1018f 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -8,7 +8,7 @@ prompt-proto-service: repository: ghcr.io/lsst-dm/prompt-proto-service pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: w_2023_44 + tag: d_2023_11_06 instrument: pipelines: >- From acf5a40f0614640a0e858d4a9c9feee2f2a3e551 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Thu, 9 Nov 2023 12:07:07 -0800 Subject: [PATCH 218/588] Register spec-survey with Prompt Processing. 
--- .../values-usdfprod-prompt-processing.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 89fbe1018f..cdfc87f704 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -19,6 +19,7 @@ prompt-proto-service: ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml] (survey="spec")=[] + (survey="spec-survey")=[] (survey="spec_with_rotation")=[] (survey="spec_bright")=[] (survey="spec_bright_with_rotation")=[] From 3a1a74d6ee8fb09ecea6afa6fcbf323c4fa7fbaf Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Fri, 10 Nov 2023 11:16:35 -0800 Subject: [PATCH 219/588] Just after run of phalanx template generation for schedview-prenight --- applications/schedview_prenight/.helmignore | 23 +++++++ applications/schedview_prenight/Chart.yaml | 8 +++ applications/schedview_prenight/README.md | 29 +++++++++ .../schedview_prenight/templates/_helpers.tpl | 26 ++++++++ .../templates/deployment.yaml | 59 +++++++++++++++++ .../schedview_prenight/templates/hpa.yaml | 28 ++++++++ .../schedview_prenight/templates/ingress.yaml | 31 +++++++++ .../templates/networkpolicy.yaml | 21 ++++++ .../schedview_prenight/templates/service.yaml | 15 +++++ applications/schedview_prenight/values.yaml | 64 +++++++++++++++++++ .../applications/schedview_prenight/index.rst | 16 +++++ .../applications/schedview_prenight/values.md | 12 ++++ environments/README.md | 1 + .../schedview_prenight-application.yaml | 34 ++++++++++ environments/values.yaml | 3 + 15 files changed, 370 insertions(+) create mode 100644 applications/schedview_prenight/.helmignore create mode 100644 applications/schedview_prenight/Chart.yaml create mode 100644 
applications/schedview_prenight/README.md create mode 100644 applications/schedview_prenight/templates/_helpers.tpl create mode 100644 applications/schedview_prenight/templates/deployment.yaml create mode 100644 applications/schedview_prenight/templates/hpa.yaml create mode 100644 applications/schedview_prenight/templates/ingress.yaml create mode 100644 applications/schedview_prenight/templates/networkpolicy.yaml create mode 100644 applications/schedview_prenight/templates/service.yaml create mode 100644 applications/schedview_prenight/values.yaml create mode 100644 docs/applications/schedview_prenight/index.rst create mode 100644 docs/applications/schedview_prenight/values.md create mode 100644 environments/templates/schedview_prenight-application.yaml diff --git a/applications/schedview_prenight/.helmignore b/applications/schedview_prenight/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/schedview_prenight/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/schedview_prenight/Chart.yaml b/applications/schedview_prenight/Chart.yaml new file mode 100644 index 0000000000..3fb9aa95fb --- /dev/null +++ b/applications/schedview_prenight/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: Run the schedview pre-night briefing dashboard. 
+name: schedview_prenight +sources: +- https://github.com/lsst-sqre/schedview_prenight +type: application +version: 1.0.0 diff --git a/applications/schedview_prenight/README.md b/applications/schedview_prenight/README.md new file mode 100644 index 0000000000..279e496e15 --- /dev/null +++ b/applications/schedview_prenight/README.md @@ -0,0 +1,29 @@ +# schedview_prenight + +Run the schedview pre-night briefing dashboard. + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the schedview_prenight deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of schedview_prenight deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of schedview_prenight deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of schedview_prenight deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of schedview_prenight deployment pods | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the schedview_prenight image | +| image.repository | string | `"ghcr.io/lsst-sqre/schedview_prenight"` | Image to use in the schedview_prenight deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the schedview_prenight deployment pod | +| podAnnotations | object | `{}` | Annotations for the schedview_prenight deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the schedview_prenight deployment pod | +| tolerations | list | `[]` | Tolerations for the schedview_prenight deployment pod | diff --git a/applications/schedview_prenight/templates/_helpers.tpl b/applications/schedview_prenight/templates/_helpers.tpl new file mode 100644 index 0000000000..11118eb286 --- /dev/null +++ b/applications/schedview_prenight/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "schedview_prenight.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "schedview_prenight.labels" -}} +helm.sh/chart: {{ include "schedview_prenight.chart" . }} +{{ include "schedview_prenight.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "schedview_prenight.selectorLabels" -}} +app.kubernetes.io/name: "schedview_prenight" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/schedview_prenight/templates/deployment.yaml b/applications/schedview_prenight/templates/deployment.yaml new file mode 100644 index 0000000000..68b8d9cf79 --- /dev/null +++ b/applications/schedview_prenight/templates/deployment.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "schedview_prenight" + labels: + {{- include "schedview_prenight.labels" . 
| nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "schedview_prenight.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "schedview_prenight.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/schedview_prenight/templates/hpa.yaml b/applications/schedview_prenight/templates/hpa.yaml new file mode 100644 index 0000000000..a43342641c --- /dev/null +++ b/applications/schedview_prenight/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "schedview_prenight" + labels: + {{- include "schedview_prenight.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "schedview_prenight" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/schedview_prenight/templates/ingress.yaml b/applications/schedview_prenight/templates/ingress.yaml new file mode 100644 index 0000000000..9b532a6977 --- /dev/null +++ b/applications/schedview_prenight/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "schedview_prenight" + labels: + {{- include "schedview_prenight.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + loginRedirect: true +template: + metadata: + name: "schedview_prenight" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/schedview_prenight" + pathType: "Prefix" + backend: + service: + name: "schedview_prenight" + port: + number: 8080 diff --git a/applications/schedview_prenight/templates/networkpolicy.yaml b/applications/schedview_prenight/templates/networkpolicy.yaml new file mode 100644 index 0000000000..245ce24750 --- /dev/null +++ b/applications/schedview_prenight/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "schedview_prenight" +spec: + podSelector: + matchLabels: + {{- include "schedview_prenight.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/schedview_prenight/templates/service.yaml b/applications/schedview_prenight/templates/service.yaml new file mode 100644 index 0000000000..97dc5f967a --- /dev/null +++ b/applications/schedview_prenight/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "schedview_prenight" + labels: + {{- include "schedview_prenight.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "schedview_prenight.selectorLabels" . | nindent 4 }} diff --git a/applications/schedview_prenight/values.yaml b/applications/schedview_prenight/values.yaml new file mode 100644 index 0000000000..7e9c4696d3 --- /dev/null +++ b/applications/schedview_prenight/values.yaml @@ -0,0 +1,64 @@ +# Default values for schedview_prenight. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the schedview_prenight deployment + repository: "ghcr.io/lsst-sqre/schedview_prenight" + + # -- Pull policy for the schedview_prenight image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of schedview_prenight deployment + enabled: false + + # -- Minimum number of schedview_prenight deployment pods + minReplicas: 1 + + # -- Maximum number of schedview_prenight deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of schedview_prenight deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the schedview_prenight deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the schedview_prenight deployment pod +resources: {} + +# -- Node selection rules for the schedview_prenight deployment pod +nodeSelector: {} + +# -- Tolerations for the schedview_prenight deployment pod +tolerations: [] + +# -- Affinity rules for the schedview_prenight deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/docs/applications/schedview_prenight/index.rst b/docs/applications/schedview_prenight/index.rst new file mode 100644 index 0000000000..44ae59673d --- /dev/null +++ b/docs/applications/schedview_prenight/index.rst @@ -0,0 +1,16 @@ +.. 
px-app:: schedview_prenight + +#################################################################### +schedview_prenight — Run the schedview pre-night briefing dashboard. +#################################################################### + +.. jinja:: schedview_prenight + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/schedview_prenight/values.md b/docs/applications/schedview_prenight/values.md new file mode 100644 index 0000000000..89a1b38c82 --- /dev/null +++ b/docs/applications/schedview_prenight/values.md @@ -0,0 +1,12 @@ +```{px-app-values} schedview_prenight +``` + +# schedview_prenight Helm values reference + +Helm values reference table for the {px-app}`schedview_prenight` application. + +```{include} ../../../applications/schedview_prenight/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/environments/README.md b/environments/README.md index 216d0b7c8b..2bbf22de25 100644 --- a/environments/README.md +++ b/environments/README.md @@ -39,6 +39,7 @@ | applications.prompt-proto-service-lsstcomcam | bool | `false` | Enable the prompt-proto-service-lsstcomcam application | | applications.rubintv | bool | `false` | Enable the rubintv application | | applications.sasquatch | bool | `false` | Enable the sasquatch application | +| applications.schedview_prenight | bool | `false` | Enable the schedview_prenight application | | applications.semaphore | bool | `false` | Enable the semaphore application | | applications.sherlock | bool | `false` | Enable the sherlock application | | applications.siav2 | bool | `false` | Enable the siav2 application | diff --git a/environments/templates/schedview_prenight-application.yaml b/environments/templates/schedview_prenight-application.yaml new file mode 100644 index 0000000000..d66f384fb0 --- /dev/null +++ b/environments/templates/schedview_prenight-application.yaml @@ -0,0 
+1,34 @@ +{{- if (index .Values "applications" "schedview_prenight") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "schedview_prenight" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "schedview_prenight" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "schedview_prenight" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/schedview_prenight" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values.yaml b/environments/values.yaml index e399ca8e7a..37934bec22 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -125,6 +125,9 @@ applications: # -- Enable the sasquatch application sasquatch: false + # -- Enable the schedview_prenight application + schedview_prenight: false + # -- Enable the siav2 application siav2: false From dc8819c50de82ab6cd43ef9dc03cf66fd46e5da9 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Fri, 10 Nov 2023 11:23:08 -0800 Subject: [PATCH 220/588] Update prenight Chart according to "Write a Helm chart for an application" --- applications/schedview_prenight/Chart.yaml | 5 +++-- applications/schedview_prenight/README.md | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/applications/schedview_prenight/Chart.yaml b/applications/schedview_prenight/Chart.yaml index 3fb9aa95fb..337a9b0efe 100644 --- a/applications/schedview_prenight/Chart.yaml +++ b/applications/schedview_prenight/Chart.yaml @@ -1,8 +1,9 @@ apiVersion: v2 -appVersion: 0.1.0 +appVersion: 
tickets-PREOPS-4508 description: Run the schedview pre-night briefing dashboard. name: schedview_prenight sources: -- https://github.com/lsst-sqre/schedview_prenight +- https://github.com/lsst/schedview +home: https://schedview.lsst.io/ type: application version: 1.0.0 diff --git a/applications/schedview_prenight/README.md b/applications/schedview_prenight/README.md index 279e496e15..92407455be 100644 --- a/applications/schedview_prenight/README.md +++ b/applications/schedview_prenight/README.md @@ -2,9 +2,11 @@ Run the schedview pre-night briefing dashboard. +**Homepage:** + ## Source Code -* +* ## Values From 5335858884e96223d48a2b39ca1c1ceae9fb5d3a Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Fri, 10 Nov 2023 11:28:09 -0800 Subject: [PATCH 221/588] Set the container repo for schedview --- applications/schedview_prenight/README.md | 2 +- applications/schedview_prenight/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/schedview_prenight/README.md b/applications/schedview_prenight/README.md index 92407455be..41af94c9f1 100644 --- a/applications/schedview_prenight/README.md +++ b/applications/schedview_prenight/README.md @@ -21,7 +21,7 @@ Run the schedview pre-night briefing dashboard. | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the schedview_prenight image | -| image.repository | string | `"ghcr.io/lsst-sqre/schedview_prenight"` | Image to use in the schedview_prenight deployment | +| image.repository | string | `"ghcr.io/lsst/schedview"` | Image to use in the schedview_prenight deployment | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nodeSelector | object | `{}` | Node selection rules for the schedview_prenight deployment pod | diff --git a/applications/schedview_prenight/values.yaml b/applications/schedview_prenight/values.yaml index 7e9c4696d3..73b1b9660b 100644 --- a/applications/schedview_prenight/values.yaml +++ b/applications/schedview_prenight/values.yaml @@ -7,7 +7,7 @@ replicaCount: 1 image: # -- Image to use in the schedview_prenight deployment - repository: "ghcr.io/lsst-sqre/schedview_prenight" + repository: "ghcr.io/lsst/schedview" # -- Pull policy for the schedview_prenight image pullPolicy: "IfNotPresent" From c21d74ff39d23cd608ef37ccb8101ec026811b19 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Fri, 10 Nov 2023 11:33:31 -0800 Subject: [PATCH 222/588] Change gafaelfawr scope to exec:portal --- applications/schedview_prenight/templates/ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview_prenight/templates/ingress.yaml b/applications/schedview_prenight/templates/ingress.yaml index 9b532a6977..1ebdc02fa3 100644 --- a/applications/schedview_prenight/templates/ingress.yaml +++ b/applications/schedview_prenight/templates/ingress.yaml @@ -8,7 +8,7 @@ config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: all: - - "read:image" + - "exec:portal" loginRedirect: true template: metadata: From 7d6179a7f8088c63b60486ebb31b21f4d5fc2a08 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Fri, 10 Nov 2023 11:50:03 -0800 Subject: [PATCH 223/588] try to add minikube env --- applications/schedview_prenight/values-minikube.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 applications/schedview_prenight/values-minikube.yaml diff --git a/applications/schedview_prenight/values-minikube.yaml b/applications/schedview_prenight/values-minikube.yaml new file mode 100644 index 0000000000..e69de29bb2 From 
284dec57e9afb542405ea29768873791a09bc5db Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Fri, 10 Nov 2023 12:10:37 -0800 Subject: [PATCH 224/588] enable schedview_prenight in minikube --- environments/values-minikube.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index aea3579661..75a8d2df0f 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -15,3 +15,4 @@ applications: mobu: true postgres: true squareone: true + schedview_prenight: true From c45d35413951f91c87fa99144d09a832bd3380be Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 09:35:36 +0000 Subject: [PATCH 225/588] chore(deps): update helm release argo-cd to v5.51.1 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 9060dd4930..b6b4fad9a2 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.51.0 + version: 5.51.1 repository: https://argoproj.github.io/argo-helm From 543eacf53b084d860655412636c895dfdbe97f3a Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 12:31:09 +0000 Subject: [PATCH 226/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 104 +++++++++++++++++++++--------------------- requirements/main.txt | 6 +-- 2 files changed, 55 insertions(+), 55 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index a456e28090..39ca37fe41 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -370,34 +370,34 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ 
--hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.6.1 \ - --hash=sha256:19f905bcfd9e167159b3d63ecd8cb5e696151c3e59a1742e79bc3bcb540c42c7 \ - --hash=sha256:21a1ad938fee7d2d96ca666c77b7c494c3c5bd88dff792220e1afbebb2925b5e \ - --hash=sha256:40b1844d2e8b232ed92e50a4bd11c48d2daa351f9deee6c194b83bf03e418b0c \ - --hash=sha256:41697773aa0bf53ff917aa077e2cde7aa50254f28750f9b88884acea38a16169 \ - --hash=sha256:49ae115da099dcc0922a7a895c1eec82c1518109ea5c162ed50e3b3594c71208 \ - --hash=sha256:4c46b51de523817a0045b150ed11b56f9fff55f12b9edd0f3ed35b15a2809de0 \ - --hash=sha256:4cbe68ef919c28ea561165206a2dcb68591c50f3bcf777932323bc208d949cf1 \ - --hash=sha256:4d01c00d09a0be62a4ca3f933e315455bde83f37f892ba4b08ce92f3cf44bcc1 \ - --hash=sha256:59a0d7d24dfb26729e0a068639a6ce3500e31d6655df8557156c51c1cb874ce7 \ - --hash=sha256:68351911e85145f582b5aa6cd9ad666c8958bcae897a1bfda8f4940472463c45 \ - --hash=sha256:7274b0c57737bd3476d2229c6389b2ec9eefeb090bbaf77777e9d6b1b5a9d143 \ - --hash=sha256:81af8adaa5e3099469e7623436881eff6b3b06db5ef75e6f5b6d4871263547e5 \ - --hash=sha256:82e469518d3e9a321912955cc702d418773a2fd1e91c651280a1bda10622f02f \ - --hash=sha256:8b27958f8c76bed8edaa63da0739d76e4e9ad4ed325c814f9b3851425582a3cd \ - --hash=sha256:8c223fa57cb154c7eab5156856c231c3f5eace1e0bed9b32a24696b7ba3c3245 \ - --hash=sha256:8f57e6b6927a49550da3d122f0cb983d400f843a8a82e65b3b380d3d7259468f \ - --hash=sha256:925cd6a3b7b55dfba252b7c4561892311c5358c6b5a601847015a1ad4eb7d332 \ - --hash=sha256:a43ef1c8ddfdb9575691720b6352761f3f53d85f1b57d7745701041053deff30 \ - --hash=sha256:a8032e00ce71c3ceb93eeba63963b864bf635a18f6c0c12da6c13c450eedb183 \ - --hash=sha256:b96ae2c1279d1065413965c607712006205a9ac541895004a1e0d4f281f2ff9f \ - --hash=sha256:bb8ccb4724f7d8601938571bf3f24da0da791fe2db7be3d9e79849cb64e0ae85 \ - --hash=sha256:bbaf4662e498c8c2e352da5f5bca5ab29d378895fa2d980630656178bd607c46 \ - 
--hash=sha256:cfd13d47b29ed3bbaafaff7d8b21e90d827631afda134836962011acb5904b71 \ - --hash=sha256:d4473c22cc296425bbbce7e9429588e76e05bc7342da359d6520b6427bf76660 \ - --hash=sha256:d8fbb68711905f8912e5af474ca8b78d077447d8f3918997fecbf26943ff3cbb \ - --hash=sha256:e5012e5cc2ac628177eaac0e83d622b2dd499e28253d4107a08ecc59ede3fc2c \ - --hash=sha256:eb4f18589d196a4cbe5290b435d135dee96567e07c2b2d43b5c4621b6501531a +mypy==1.7.0 \ + --hash=sha256:0e81ffd120ee24959b449b647c4b2fbfcf8acf3465e082b8d58fd6c4c2b27e46 \ + --hash=sha256:185cff9b9a7fec1f9f7d8352dff8a4c713b2e3eea9c6c4b5ff7f0edf46b91e41 \ + --hash=sha256:1e280b5697202efa698372d2f39e9a6713a0395a756b1c6bd48995f8d72690dc \ + --hash=sha256:1fe46e96ae319df21359c8db77e1aecac8e5949da4773c0274c0ef3d8d1268a9 \ + --hash=sha256:2b53655a295c1ed1af9e96b462a736bf083adba7b314ae775563e3fb4e6795f5 \ + --hash=sha256:551d4a0cdcbd1d2cccdcc7cb516bb4ae888794929f5b040bb51aae1846062901 \ + --hash=sha256:55d28d7963bef00c330cb6461db80b0b72afe2f3c4e2963c99517cf06454e665 \ + --hash=sha256:5da84d7bf257fd8f66b4f759a904fd2c5a765f70d8b52dde62b521972a0a2357 \ + --hash=sha256:6cb8d5f6d0fcd9e708bb190b224089e45902cacef6f6915481806b0c77f7786d \ + --hash=sha256:7a7b1e399c47b18feb6f8ad4a3eef3813e28c1e871ea7d4ea5d444b2ac03c418 \ + --hash=sha256:870bd1ffc8a5862e593185a4c169804f2744112b4a7c55b93eb50f48e7a77010 \ + --hash=sha256:87c076c174e2c7ef8ab416c4e252d94c08cd4980a10967754f91571070bf5fbe \ + --hash=sha256:96650d9a4c651bc2a4991cf46f100973f656d69edc7faf91844e87fe627f7e96 \ + --hash=sha256:a3637c03f4025f6405737570d6cbfa4f1400eb3c649317634d273687a09ffc2f \ + --hash=sha256:a79cdc12a02eb526d808a32a934c6fe6df07b05f3573d210e41808020aed8b5d \ + --hash=sha256:b633f188fc5ae1b6edca39dae566974d7ef4e9aaaae00bc36efe1f855e5173ac \ + --hash=sha256:bf7a2f0a6907f231d5e41adba1a82d7d88cf1f61a70335889412dec99feeb0f8 \ + --hash=sha256:c1b06b4b109e342f7dccc9efda965fc3970a604db70f8560ddfdee7ef19afb05 \ + 
--hash=sha256:cddee95dea7990e2215576fae95f6b78a8c12f4c089d7e4367564704e99118d3 \ + --hash=sha256:d01921dbd691c4061a3e2ecdbfbfad029410c5c2b1ee88946bf45c62c6c91210 \ + --hash=sha256:d0fa29919d2e720c8dbaf07d5578f93d7b313c3e9954c8ec05b6d83da592e5d9 \ + --hash=sha256:d6ed9a3997b90c6f891138e3f83fb8f475c74db4ccaa942a1c7bf99e83a989a1 \ + --hash=sha256:d93e76c2256aa50d9c82a88e2f569232e9862c9982095f6d54e13509f01222fc \ + --hash=sha256:df67fbeb666ee8828f675fee724cc2cbd2e4828cc3df56703e02fe6a421b7401 \ + --hash=sha256:f29386804c3577c83d76520abf18cfcd7d68264c7e431c5907d250ab502658ee \ + --hash=sha256:f65f385a6f43211effe8c682e8ec3f55d79391f70a201575def73d08db68ead1 \ + --hash=sha256:fc9fe455ad58a20ec68599139ed1113b21f977b536a91b42bef3ffed5cce7391 # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -771,24 +771,24 @@ rpds-py==0.12.0 \ # via # jsonschema # referencing -ruff==0.1.4 \ - --hash=sha256:01206e361021426e3c1b7fba06ddcb20dbc5037d64f6841e5f2b21084dc51800 \ - --hash=sha256:1dfd6bf8f6ad0a4ac99333f437e0ec168989adc5d837ecd38ddb2cc4a2e3db8a \ - --hash=sha256:21520ecca4cc555162068d87c747b8f95e1e95f8ecfcbbe59e8dd00710586315 \ - --hash=sha256:58826efb8b3efbb59bb306f4b19640b7e366967a31c049d49311d9eb3a4c60cb \ - --hash=sha256:645591a613a42cb7e5c2b667cbefd3877b21e0252b59272ba7212c3d35a5819f \ - --hash=sha256:6bc02a480d4bfffd163a723698da15d1a9aec2fced4c06f2a753f87f4ce6969c \ - --hash=sha256:78e8db8ab6f100f02e28b3d713270c857d370b8d61871d5c7d1702ae411df683 \ - --hash=sha256:80fea754eaae06335784b8ea053d6eb8e9aac75359ebddd6fee0858e87c8d510 \ - --hash=sha256:864958706b669cce31d629902175138ad8a069d99ca53514611521f532d91495 \ - --hash=sha256:9862811b403063765b03e716dac0fda8fdbe78b675cd947ed5873506448acea4 \ - --hash=sha256:99908ca2b3b85bffe7e1414275d004917d1e0dfc99d497ccd2ecd19ad115fd0d \ - --hash=sha256:9fdd61883bb34317c788af87f4cd75dfee3a73f5ded714b77ba928e418d6e39e \ - 
--hash=sha256:a9a1301dc43cbf633fb603242bccd0aaa34834750a14a4c1817e2e5c8d60de17 \ - --hash=sha256:b4eaca8c9cc39aa7f0f0d7b8fe24ecb51232d1bb620fc4441a61161be4a17539 \ - --hash=sha256:d98ae9ebf56444e18a3e3652b3383204748f73e247dea6caaf8b52d37e6b32da \ - --hash=sha256:e8791482d508bd0b36c76481ad3117987301b86072158bdb69d796503e1c84a8 \ - --hash=sha256:fdfd453fc91d9d86d6aaa33b1bafa69d114cf7421057868f0b79104079d3e66e +ruff==0.1.5 \ + --hash=sha256:171276c1df6c07fa0597fb946139ced1c2978f4f0b8254f201281729981f3c17 \ + --hash=sha256:17ef33cd0bb7316ca65649fc748acc1406dfa4da96a3d0cde6d52f2e866c7b39 \ + --hash=sha256:32d47fc69261c21a4c48916f16ca272bf2f273eb635d91c65d5cd548bf1f3d96 \ + --hash=sha256:5cbec0ef2ae1748fb194f420fb03fb2c25c3258c86129af7172ff8f198f125ab \ + --hash=sha256:721f4b9d3b4161df8dc9f09aa8562e39d14e55a4dbaa451a8e55bdc9590e20f4 \ + --hash=sha256:82bfcb9927e88c1ed50f49ac6c9728dab3ea451212693fe40d08d314663e412f \ + --hash=sha256:9b97fd6da44d6cceb188147b68db69a5741fbc736465b5cea3928fdac0bc1aeb \ + --hash=sha256:a00a7ec893f665ed60008c70fe9eeb58d210e6b4d83ec6654a9904871f982a2a \ + --hash=sha256:a4894dddb476597a0ba4473d72a23151b8b3b0b5f958f2cf4d3f1c572cdb7af7 \ + --hash=sha256:a8c11206b47f283cbda399a654fd0178d7a389e631f19f51da15cbe631480c5b \ + --hash=sha256:aafb9d2b671ed934998e881e2c0f5845a4295e84e719359c71c39a5363cccc91 \ + --hash=sha256:b2c205827b3f8c13b4a432e9585750b93fd907986fe1aec62b2a02cf4401eee6 \ + --hash=sha256:bb408e3a2ad8f6881d0f2e7ad70cddb3ed9f200eb3517a91a245bbe27101d379 \ + --hash=sha256:c21fe20ee7d76206d290a76271c1af7a5096bc4c73ab9383ed2ad35f852a0087 \ + --hash=sha256:f20dc5e5905ddb407060ca27267c7174f532375c08076d1a953cf7bb016f5a24 \ + --hash=sha256:f80c73bba6bc69e4fdc73b3991db0b546ce641bdcd5b07210b8ad6f64c79f1ab \ + --hash=sha256:fa29e67b3284b9a79b1a85ee66e293a94ac6b7bb068b307a8a373c3d343aa8ec # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -837,9 +837,9 @@ sphinx==7.2.6 \ # 
sphinxcontrib-serializinghtml # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==1.24.1 \ - --hash=sha256:06683a2b76c3c7b1931b75e40e0211866fbb50ba4c4e802d0901d9b4e849add2 \ - --hash=sha256:4cc16c5545f2bf896ca52a854babefe3d8baeaaa033d13a7f179ac1d9feb02d5 +sphinx-autodoc-typehints==1.25.2 \ + --hash=sha256:3cabc2537e17989b2f92e64a399425c4c8bf561ed73f087bc7414a5003616a50 \ + --hash=sha256:5ed05017d23ad4b937eab3bee9fae9ab0dd63f0b42aa360031f1fad47e47f673 # via documenteer sphinx-automodapi==0.16.0 \ --hash=sha256:68fc47064804604b90aa27c047016e86aaf970981d90a0082d5b5dd2e9d38afd \ @@ -977,9 +977,9 @@ uc-micro-py==1.0.2 \ --hash=sha256:30ae2ac9c49f39ac6dce743bd187fcd2b574b16ca095fa74cd9396795c954c54 \ --hash=sha256:8c9110c309db9d9e87302e2f4ad2c3152770930d88ab385cd544e7a7e75f3de0 # via linkify-it-py -urllib3==2.0.7 \ - --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ - --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e +urllib3==2.1.0 \ + --hash=sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3 \ + --hash=sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54 # via # -c requirements/main.txt # documenteer diff --git a/requirements/main.txt b/requirements/main.txt index 05f2b1ca17..b01d48674c 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -567,7 +567,7 @@ uritemplate==4.1.1 \ --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e # via gidgethub -urllib3==2.0.7 \ - --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ - --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e +urllib3==2.1.0 \ + --hash=sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3 \ + --hash=sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54 # via requests From 
23f140b9c27e78991ab9005de3ea796a3e73c3c3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 12:31:23 +0000 Subject: [PATCH 227/588] chore(deps): update lsstsqre/tap-schema-mock docker tag to v2.1.4 --- charts/cadc-tap/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 28f09526f9..2b26702bb8 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -165,7 +165,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.1.3" + tag: "2.1.4" # -- Resource limits and requests for the TAP schema database pod resources: {} From d4b738b97544bdbde148e84f146a09004d472348 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 12:31:26 +0000 Subject: [PATCH 228/588] chore(deps): update helm release argo-workflows to v0.39.0 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index e7bc397581..a1805dc5a3 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.38.0 + version: 0.39.0 repository: https://argoproj.github.io/argo-helm From bcbab3fd7a154bfd1fb31bda9e6f425704a8763c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 13 Nov 2023 08:48:37 -0800 Subject: [PATCH 229/588] Ignore spurious mypy failure --- tests/support/helm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/support/helm.py b/tests/support/helm.py index a47c7e3e3b..5c38e7bc86 100644 --- a/tests/support/helm.py +++ b/tests/support/helm.py @@ -72,8 +72,9 @@ def capture( """ self.call_args_list.append([command, *args]) if 
self._callback: - callback = self._callback - result = callback(command, *args) + # https://github.com/python/mypy/issues/708 (which despite being + # closed is not fixed for protocols as of mypy 1.7.0) + result = self._callback(command, *args) # type: ignore[misc] if result.returncode != 0: exc = subprocess.CalledProcessError( returncode=result.returncode, From 43080e401b10a3f377ac9d120479b0e8faddc245 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 13 Nov 2023 08:50:47 -0800 Subject: [PATCH 230/588] Regenerate Helm docs --- charts/cadc-tap/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index 69220b5372..c787651f79 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -56,7 +56,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | -| tapSchema.image.tag | string | `"2.1.3"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"2.1.4"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | From a947fc03357ae6d193b49ade5dbcdf48ae27f966 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 13 Nov 2023 09:13:56 -0800 Subject: [PATCH 231/588] Fix broken links Change www.lsst.io links to lsst.io, since www.lsst.io is now a redirect. Update the URLs for Nublado. Remove the link for SIAv2, which points to a nonexistent repository. 
Update the link for Ruff documentation. --- applications/nublado/Chart.yaml | 5 ++--- applications/nublado/README.md | 5 ++--- applications/ook/Chart.yaml | 5 ++++- applications/ook/README.md | 2 +- applications/siav2/Chart.yaml | 2 -- applications/siav2/README.md | 4 ---- applications/squareone/values-idfdev.yaml | 2 +- applications/squareone/values-idfint.yaml | 2 +- applications/squareone/values.yaml | 2 +- docs/_rst_epilog.rst | 2 +- docs/applications/ook/index.rst | 2 +- docs/index.rst | 2 +- 12 files changed, 15 insertions(+), 20 deletions(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 72453e762f..4c83bb821e 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -3,9 +3,8 @@ name: nublado version: 1.0.0 description: JupyterHub and custom spawner for the Rubin Science Platform sources: - - https://github.com/lsst-sqre/jupyterlab-controller - - https://github.com/lsst-sqre/rsp-restspawner -home: https://github.com/lsst-sqre/jupyterlab-controller + - https://github.com/lsst-sqre/nublado +home: https://nublado.lsst.io/ appVersion: 0.9.0 dependencies: diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 8cd1df9b55..c6696bb816 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -2,12 +2,11 @@ JupyterHub and custom spawner for the Rubin Science Platform -**Homepage:** +**Homepage:** ## Source Code -* -* +* ## Values diff --git a/applications/ook/Chart.yaml b/applications/ook/Chart.yaml index 6ad1aeae30..e5de862ee2 100644 --- a/applications/ook/Chart.yaml +++ b/applications/ook/Chart.yaml @@ -2,7 +2,10 @@ apiVersion: v2 name: ook version: 1.0.0 appVersion: "0.9.0" -description: Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io. +description: > + Ook is the librarian service for Rubin Observatory. 
Ook indexes + documentation content into the Algolia search engine that powers the Rubin + Observatory documentation portal, lsst.io. type: application home: https://ook.lsst.io/ sources: diff --git a/applications/ook/README.md b/applications/ook/README.md index 6487a2508b..cc05df847e 100644 --- a/applications/ook/README.md +++ b/applications/ook/README.md @@ -1,6 +1,6 @@ # ook -Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io. +Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, lsst.io. **Homepage:** diff --git a/applications/siav2/Chart.yaml b/applications/siav2/Chart.yaml index fb6b8b378a..ea261435c0 100644 --- a/applications/siav2/Chart.yaml +++ b/applications/siav2/Chart.yaml @@ -2,7 +2,5 @@ apiVersion: v2 appVersion: 0.1.0 description: Simple Image Access v2 service name: siav2 -sources: -- https://github.com/lsst-sqre/siav2 type: application version: 1.0.0 diff --git a/applications/siav2/README.md b/applications/siav2/README.md index dd825755a2..65c8ba1381 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -2,10 +2,6 @@ Simple Image Access v2 service -## Source Code - -* - ## Values | Key | Type | Default | Description | diff --git a/applications/squareone/values-idfdev.yaml b/applications/squareone/values-idfdev.yaml index 30c341b27a..98798e453e 100644 --- a/applications/squareone/values-idfdev.yaml +++ b/applications/squareone/values-idfdev.yaml @@ -90,7 +90,7 @@ config: Want to dive deeper into the Rubin Observatory and Legacy Survey of Space and Time? 
[Search in our technical documentation - portal.](https://www.lsst.io) + portal.](https://lsst.io) diff --git a/applications/squareone/values-idfint.yaml b/applications/squareone/values-idfint.yaml index c0b24974fd..aa757ae6e8 100644 --- a/applications/squareone/values-idfint.yaml +++ b/applications/squareone/values-idfint.yaml @@ -154,7 +154,7 @@ config: Want to dive deeper into the Rubin Observatory and Legacy Survey of Space and Time? [Search in our technical documentation - portal.](https://www.lsst.io) + portal.](https://lsst.io) supportPageMdx: | diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml index e626eef66b..363a8b2b28 100644 --- a/applications/squareone/values.yaml +++ b/applications/squareone/values.yaml @@ -229,7 +229,7 @@ config: Want to dive deeper into the Rubin Observatory and Legacy Survey of Space and Time? [Search in our technical documentation - portal.](https://www.lsst.io) + portal.](https://lsst.io) diff --git a/docs/_rst_epilog.rst b/docs/_rst_epilog.rst index c795acdce4..05b41ccd02 100644 --- a/docs/_rst_epilog.rst +++ b/docs/_rst_epilog.rst @@ -34,7 +34,7 @@ .. _Pod: https://kubernetes.io/docs/concepts/workloads/pods/ .. _pre-commit: https://pre-commit.com .. _Roundtable: https://roundtable.lsst.io/ -.. _Ruff: https://beta.ruff.rs/docs/ +.. _Ruff: https://docs.astral.sh/ruff/ .. _Safir: https://safir.lsst.io/ .. _Secret: https://kubernetes.io/docs/concepts/configuration/secret/ .. _semantic versioning: https://semver.org/ diff --git a/docs/applications/ook/index.rst b/docs/applications/ook/index.rst index 9228055682..faab8901f1 100644 --- a/docs/applications/ook/index.rst +++ b/docs/applications/ook/index.rst @@ -5,7 +5,7 @@ ook — Documentation indexing ############################ Ook is the librarian service for Rubin Observatory. -Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, https://www.lsst.io. 
+Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, https://lsst.io. .. jinja:: ook :file: applications/_summary.rst.jinja diff --git a/docs/index.rst b/docs/index.rst index bcedc9a8cf..4b85393527 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,7 +6,7 @@ Phalanx [#name]_ is a GitOps repository for Rubin Observatory's Kubernetes envir Using Helm_ and `Argo CD`_, Phalanx defines the configurations of applications in each environment. This documentation is for Rubin team members that are developing applications and administering Kubernetes clusters. -Astronomers and other end-users can visit the `Rubin Documentation Portal `__ to learn how to use Rubin Observatory's software, services, and datasets. +Astronomers and other end-users can visit the `Rubin Documentation Portal `__ to learn how to use Rubin Observatory's software, services, and datasets. Phalanx is on GitHub at https://github.com/lsst-sqre/phalanx. From 4e9849ccad55c580c46f8ede3c9624b2993e2e77 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 13 Nov 2023 09:16:01 -0800 Subject: [PATCH 232/588] Add comment for repointing to USDF qserv Add a comment with the IP address for repointing TAP to USDF qserv on data-dev.lsst.cloud so that we can close #2122 and not keep an ancient pull request around. 
--- applications/tap/values-idfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/tap/values-idfdev.yaml b/applications/tap/values-idfdev.yaml index 2f972d8040..fa5f854eae 100644 --- a/applications/tap/values-idfdev.yaml +++ b/applications/tap/values-idfdev.yaml @@ -6,3 +6,4 @@ cadc-tap: config: qserv: host: "10.136.1.211:4040" + # Change to 134.79.23.209:4040 to point to USDF qserv From 797e1c92eae2ab8838655d77d9b045fa73ff20e6 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 13 Nov 2023 11:02:21 -0700 Subject: [PATCH 233/588] Update Helm docs --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/kafdrop/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index e83c175355..9fd5d64a97 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -118,7 +118,7 @@ Rubin Observatory's telemetry service. | kafdrop.host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). | | kafdrop.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | kafdrop.image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. | -| kafdrop.image.tag | string | `"4.0.0"` | Kafdrop image version. | +| kafdrop.image.tag | string | `"4.0.1"` | Kafdrop image version. | | kafdrop.ingress.annotations | object | `{}` | Ingress annotations. | | kafdrop.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | kafdrop.ingress.hostname | string | `""` | Ingress hostname. 
| diff --git a/applications/sasquatch/charts/kafdrop/README.md b/applications/sasquatch/charts/kafdrop/README.md index c29f57a87c..9ccdfd6655 100644 --- a/applications/sasquatch/charts/kafdrop/README.md +++ b/applications/sasquatch/charts/kafdrop/README.md @@ -16,7 +16,7 @@ A subchart to deploy the Kafdrop UI for Sasquatch. | host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. | -| image.tag | string | `"4.0.0"` | Kafdrop image version. | +| image.tag | string | `"4.0.1"` | Kafdrop image version. | | ingress.annotations | object | `{}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | From 0aa2cbe01f516eb263ff873dc6461f6b502f8506 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Nov 2023 09:35:32 +0000 Subject: [PATCH 234/588] chore(deps): update confluentinc/cp-kafka-rest docker tag to v7.5.2 --- applications/sasquatch/charts/rest-proxy/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml index 88cec0ae6d..b3ccbd2f22 100644 --- a/applications/sasquatch/charts/rest-proxy/values.yaml +++ b/applications/sasquatch/charts/rest-proxy/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafka REST proxy image tag. 
- tag: 7.5.1 + tag: 7.5.2 service: # -- Kafka REST proxy service port From fbc31c8cd076a9ce797e06d93a89d689d1f19459 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 13 Nov 2023 10:59:25 -0700 Subject: [PATCH 235/588] Update Helm docs --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/rest-proxy/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index e83c175355..ebfeba7076 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -206,7 +206,7 @@ Rubin Observatory's telemetry service. | rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| rest-proxy.image.tag | string | `"7.5.1"` | Kafka REST proxy image tag. | +| rest-proxy.image.tag | string | `"7.5.2"` | Kafka REST proxy image tag. | | rest-proxy.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | rest-proxy.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | rest-proxy.ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index d5c3a77706..8286c3294e 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch. | heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. 
| | image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| image.tag | string | `"7.5.1"` | Kafka REST proxy image tag. | +| image.tag | string | `"7.5.2"` | Kafka REST proxy image tag. | | ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | From eb2f100fff837c164ba57a31191fb74a750626fc Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Mon, 13 Nov 2023 12:03:10 -0800 Subject: [PATCH 236/588] add schedview_prenight to docs as an application --- docs/applications/index.rst | 1 + docs/applications/schedview_prenight/index.rst | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 8a851c3267..597a299795 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -57,6 +57,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde production-tools/index rubintv/index sasquatch/index + schedview_prenight/index strimzi/index strimzi-access-operator/index telegraf/index diff --git a/docs/applications/schedview_prenight/index.rst b/docs/applications/schedview_prenight/index.rst index 44ae59673d..305229c605 100644 --- a/docs/applications/schedview_prenight/index.rst +++ b/docs/applications/schedview_prenight/index.rst @@ -4,6 +4,11 @@ schedview_prenight — Run the schedview pre-night briefing dashboard. #################################################################### +schedview's pre-night dashboard is a web application for examination of +Rubin Observatory/LSST scheduler simulation data for one night. It is intended +to be used to understand what the scheduler is likely to do before a night +observing, both to check for problems and to set expectations. + .. 
jinja:: schedview_prenight :file: applications/_summary.rst.jinja From 632e74733725ceca9cab778cd2300ed6ee5f32c6 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Mon, 13 Nov 2023 15:50:19 -0700 Subject: [PATCH 237/588] Give David Irving permissions for ArgoCD --- applications/argocd/values-idfdev.yaml | 1 + applications/argocd/values-idfint.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/applications/argocd/values-idfdev.yaml b/applications/argocd/values-idfdev.yaml index 24ff51f640..02c9448719 100644 --- a/applications/argocd/values-idfdev.yaml +++ b/applications/argocd/values-idfdev.yaml @@ -35,4 +35,5 @@ argo-cd: g, roby@lsst.cloud, role:admin g, kkoehler@lsst.cloud, role:admin g, fritzm@lsst.cloud, role:admin + g, dirving@lsst.cloud, role:admin scopes: "[email]" diff --git a/applications/argocd/values-idfint.yaml b/applications/argocd/values-idfint.yaml index c2745b744d..88aa16f9b9 100644 --- a/applications/argocd/values-idfint.yaml +++ b/applications/argocd/values-idfint.yaml @@ -37,4 +37,5 @@ argo-cd: g, fritzm@lsst.cloud, role:admin g, drbsmart@lsst.cloud, role:admin g, ecbellm@lsst.cloud, role:admin + g, dirving@lsst.cloud, role:admin scopes: "[email]" From 98acfd22006d675365d00fe4b32b992f81bb8239 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Tue, 14 Nov 2023 12:13:34 -0800 Subject: [PATCH 238/588] Update documentation links for prompt-proto-service. 
--- applications/prompt-proto-service-hsc/Chart.yaml | 3 +++ applications/prompt-proto-service-latiss/Chart.yaml | 3 +++ applications/prompt-proto-service-lsstcam/Chart.yaml | 3 +++ applications/prompt-proto-service-lsstcomcam/Chart.yaml | 3 +++ charts/prompt-proto-service/Chart.yaml | 3 +++ 5 files changed, 15 insertions(+) diff --git a/applications/prompt-proto-service-hsc/Chart.yaml b/applications/prompt-proto-service-hsc/Chart.yaml index 7afc1a667c..0a215ca6dc 100644 --- a/applications/prompt-proto-service-hsc/Chart.yaml +++ b/applications/prompt-proto-service-hsc/Chart.yaml @@ -12,6 +12,9 @@ annotations: - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/applications/prompt-proto-service-latiss/Chart.yaml b/applications/prompt-proto-service-latiss/Chart.yaml index a265da32fe..9f05697357 100644 --- a/applications/prompt-proto-service-latiss/Chart.yaml +++ b/applications/prompt-proto-service-latiss/Chart.yaml @@ -12,6 +12,9 @@ annotations: - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/applications/prompt-proto-service-lsstcam/Chart.yaml b/applications/prompt-proto-service-lsstcam/Chart.yaml index f35a54ad71..b1a83cedac 100644 --- a/applications/prompt-proto-service-lsstcam/Chart.yaml +++ b/applications/prompt-proto-service-lsstcam/Chart.yaml @@ -12,6 +12,9 @@ annotations: - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: 
"https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/applications/prompt-proto-service-lsstcomcam/Chart.yaml b/applications/prompt-proto-service-lsstcomcam/Chart.yaml index 201c406019..75be08e60b 100644 --- a/applications/prompt-proto-service-lsstcomcam/Chart.yaml +++ b/applications/prompt-proto-service-lsstcomcam/Chart.yaml @@ -12,6 +12,9 @@ annotations: - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" dependencies: - name: prompt-proto-service version: 1.0.0 diff --git a/charts/prompt-proto-service/Chart.yaml b/charts/prompt-proto-service/Chart.yaml index 5234d5ddc3..da0be6208b 100644 --- a/charts/prompt-proto-service/Chart.yaml +++ b/charts/prompt-proto-service/Chart.yaml @@ -12,3 +12,6 @@ annotations: - id: "DMTN-219" title: "Proposal and Prototype for Prompt Processing" url: "https://dmtn-219.lsst.io/" + - id: "DMTN-260" + title: "Failure Modes and Error Handling for Prompt Processing" + url: "https://dmtn-260.lsst.io/" From 043c41e5b260bc024313290f9666689417512dbf Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Tue, 14 Nov 2023 14:51:16 -0800 Subject: [PATCH 239/588] Remove $RUBIN_INSTRUMENT from prompt-proto-service values. The $RUBIN_INSTRUMENT variable is defined in the Helm template (Kubernetes config). Its use in the pipelines config is a holdover from the pre-Helm setup, when it reduced the amount of code that needed to be edited to switch instruments. With the Phalanx setup, all it does is introduce a (conceptual) circular dependency from the values file back to the template. 
--- .../values-usdfdev-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index d462eb56fc..f3ece8f669 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -11,7 +11,7 @@ prompt-proto-service: tag: latest instrument: - pipelines: (survey="SURVEY")=[${PROMPT_PROTOTYPE_DIR}/pipelines/${RUBIN_INSTRUMENT}/ApPipe.yaml] + pipelines: (survey="SURVEY")=[${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/ApPipe.yaml] calibRepo: s3://rubin-pp-users/central_repo/ s3: From 4bfcbb4323a0f93a4992978207552b49ba2d2bf9 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Tue, 14 Nov 2023 12:24:25 -0800 Subject: [PATCH 240/588] Update references to prompt_prototype package. The package is now called prompt_processing. 
--- applications/prompt-proto-service-hsc/Chart.yaml | 4 ++-- applications/prompt-proto-service-hsc/README.md | 8 ++++---- .../values-usdfdev-prompt-processing.yaml | 2 +- applications/prompt-proto-service-hsc/values.yaml | 4 ++-- applications/prompt-proto-service-latiss/Chart.yaml | 4 ++-- applications/prompt-proto-service-latiss/README.md | 8 ++++---- .../values-usdfdev-prompt-processing.yaml | 2 +- .../values-usdfprod-prompt-processing.yaml | 12 ++++++------ applications/prompt-proto-service-latiss/values.yaml | 4 ++-- applications/prompt-proto-service-lsstcam/Chart.yaml | 4 ++-- applications/prompt-proto-service-lsstcam/README.md | 8 ++++---- .../prompt-proto-service-lsstcam/values.yaml | 4 ++-- .../prompt-proto-service-lsstcomcam/Chart.yaml | 4 ++-- .../prompt-proto-service-lsstcomcam/README.md | 8 ++++---- .../prompt-proto-service-lsstcomcam/values.yaml | 4 ++-- charts/prompt-proto-service/Chart.yaml | 4 ++-- charts/prompt-proto-service/README.md | 8 ++++---- charts/prompt-proto-service/values.yaml | 4 ++-- 18 files changed, 48 insertions(+), 48 deletions(-) diff --git a/applications/prompt-proto-service-hsc/Chart.yaml b/applications/prompt-proto-service-hsc/Chart.yaml index 0a215ca6dc..4b7b508ea2 100644 --- a/applications/prompt-proto-service-hsc/Chart.yaml +++ b/applications/prompt-proto-service-hsc/Chart.yaml @@ -4,9 +4,9 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles HSC images. 
-home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index fa19d57c47..a4560ec201 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -2,11 +2,11 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles HSC images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values @@ -26,14 +26,14 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | | prompt-proto-service.instrument.name | string | `"HSC"` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. 
Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. 
| Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index fb1b435a9d..bcf1f3dc21 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -11,7 +11,7 @@ prompt-proto-service: tag: latest instrument: - pipelines: (survey="SURVEY")=[${PROMPT_PROTOTYPE_DIR}/pipelines/HSC/ApPipe.yaml] + pipelines: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] calibRepo: s3://rubin:rubin-pp-users/central_repo/ s3: diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 87e306249d..ab8df6e3dd 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -23,7 +23,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: HSC # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. 
# @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -83,7 +83,7 @@ prompt-proto-service: centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). - # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/applications/prompt-proto-service-latiss/Chart.yaml b/applications/prompt-proto-service-latiss/Chart.yaml index 9f05697357..cb9215abe4 100644 --- a/applications/prompt-proto-service-latiss/Chart.yaml +++ b/applications/prompt-proto-service-latiss/Chart.yaml @@ -4,9 +4,9 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LATISS images. -home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index ca527039dd..b0e846a872 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -2,11 +2,11 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LATISS images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values @@ -26,14 +26,14 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | | prompt-proto-service.instrument.name | string | `"LATISS"` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `"latiss_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). 
| -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. 
| | prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index f3ece8f669..9378063026 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -11,7 +11,7 @@ prompt-proto-service: tag: latest instrument: - pipelines: (survey="SURVEY")=[${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/ApPipe.yaml] + pipelines: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml] calibRepo: s3://rubin-pp-users/central_repo/ s3: diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index cdfc87f704..04c9c211d4 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -12,12 +12,12 @@ prompt-proto-service: instrument: pipelines: >- - (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/ApPipe.yaml, - ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml, - ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml] - (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/ApPipe.yaml, - ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/SingleFrame.yaml, - ${PROMPT_PROTOTYPE_DIR}/pipelines/LATISS/Isr.yaml] + (survey="AUXTEL_PHOTO_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] + (survey="AUXTEL_DRP_IMAGING")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, + 
${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] (survey="spec")=[] (survey="spec-survey")=[] (survey="spec_with_rotation")=[] diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 62a2cec5d9..8199150e2a 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -24,7 +24,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: LATISS # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -83,7 +83,7 @@ prompt-proto-service: centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). - # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/applications/prompt-proto-service-lsstcam/Chart.yaml b/applications/prompt-proto-service-lsstcam/Chart.yaml index b1a83cedac..8e47a14f03 100644 --- a/applications/prompt-proto-service-lsstcam/Chart.yaml +++ b/applications/prompt-proto-service-lsstcam/Chart.yaml @@ -4,9 +4,9 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTCam images. 
-home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index 587a346d60..bfae874af4 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -2,11 +2,11 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTCam images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values @@ -26,14 +26,14 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | | prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. 
Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. 
| Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index 918c2acdb7..67982e1872 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -23,7 +23,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: "" # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -83,7 +83,7 @@ prompt-proto-service: centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). - # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. 
logLevel: "" knative: diff --git a/applications/prompt-proto-service-lsstcomcam/Chart.yaml b/applications/prompt-proto-service-lsstcomcam/Chart.yaml index 75be08e60b..ab5e410dad 100644 --- a/applications/prompt-proto-service-lsstcomcam/Chart.yaml +++ b/applications/prompt-proto-service-lsstcomcam/Chart.yaml @@ -4,9 +4,9 @@ version: 1.0.0 description: >- Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTComCam images. -home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 14ea0bb548..c9751001b7 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -2,11 +2,11 @@ Prompt Proto Service is an event driven service for processing camera images. This instance of the service handles LSSTComCam images. -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values @@ -25,14 +25,14 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. 
| | prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | -| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. | +| prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | prompt-proto-service.knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| prompt-proto-service.logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). 
| +| prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | | prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index cacc5b24ae..91825cac57 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -23,7 +23,7 @@ prompt-proto-service: # -- The "short" name of the instrument name: "" # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -83,7 +83,7 @@ prompt-proto-service: centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). 
- # @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. + # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: diff --git a/charts/prompt-proto-service/Chart.yaml b/charts/prompt-proto-service/Chart.yaml index da0be6208b..fe2ce5f751 100644 --- a/charts/prompt-proto-service/Chart.yaml +++ b/charts/prompt-proto-service/Chart.yaml @@ -4,9 +4,9 @@ version: 1.0.0 appVersion: "0.1.0" description: Event-driven processing of camera images type: application -home: https://github.com/lsst-dm/prompt_prototype/blob/main/doc/playbook.rst +home: https://github.com/lsst-dm/prompt_processing/blob/main/doc/playbook.rst sources: - - https://github.com/lsst-dm/prompt_prototype + - https://github.com/lsst-dm/prompt_processing annotations: phalanx.lsst.io/docs: | - id: "DMTN-219" diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index 8b00851b06..498914638e 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -2,11 +2,11 @@ Event-driven processing of camera images -**Homepage:** +**Homepage:** ## Source Code -* +* ## Values @@ -30,14 +30,14 @@ Event-driven processing of camera images | imagePullSecrets | list | `[]` | | | instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | | instrument.name | string | None, must be set | The "short" name of the instrument | -| instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. 
| +| instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | instrument.skymap | string | `""` | Skymap to use with the instrument | | knative.ephemeralStorageLimit | string | `"20Gi"` | The maximum storage space allowed for each container (mostly local Butler). | | knative.ephemeralStorageRequest | string | `"20Gi"` | The storage space reserved for each container (mostly local Butler). | | knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service (seconds). | | knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to the fanout service after initial submission (seconds). | | knative.timeout | int | `900` | Maximum time that a container can respond to a next_visit request (seconds). | -| logLevel | string | log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | +| logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | | | podAnnotations | object | See the `values.yaml` file. 
| Annotations for the prompt-proto-service pod | diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 808b1f4f85..39c6ab7b34 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -25,7 +25,7 @@ instrument: # @default -- None, must be set name: "" # -- Machine-readable string describing which pipeline(s) should be run for which visits. - # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_prototype/blob/main/python/activator/config.py) for examples. + # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. # @default -- None, must be set pipelines: "" # -- Skymap to use with the instrument @@ -85,7 +85,7 @@ registry: centralRepoFile: false # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). -# @default -- log prompt_prototype at DEBUG, other LSST code at INFO, and third-party code at WARNING. +# @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. logLevel: "" knative: From ccdda9f04e0ca42e2221f5279eeb319b7cf3cfe7 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Tue, 14 Nov 2023 12:30:52 -0800 Subject: [PATCH 241/588] Use prompt-service container in prompt-proto-service. The old prompt-proto-service containers are being phased out in favor of the shorter name. This commit drops the override for latiss-usdfprod entirely, as it's extremely unlikely that we'd want to use a nonstandard container in production (but this is kept as a useful hook for usdfdev). 
--- applications/prompt-proto-service-hsc/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 2 +- applications/prompt-proto-service-hsc/values.yaml | 2 +- applications/prompt-proto-service-latiss/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 2 +- .../values-usdfprod-prompt-processing.yaml | 1 - applications/prompt-proto-service-latiss/values.yaml | 2 +- applications/prompt-proto-service-lsstcam/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 2 +- applications/prompt-proto-service-lsstcam/values.yaml | 2 +- applications/prompt-proto-service-lsstcomcam/README.md | 2 +- .../values-usdfdev-prompt-processing.yaml | 2 +- applications/prompt-proto-service-lsstcomcam/values.yaml | 2 +- charts/prompt-proto-service/README.md | 2 +- charts/prompt-proto-service/values.yaml | 2 +- 15 files changed, 14 insertions(+), 15 deletions(-) diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index a4560ec201..e472f0abda 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -19,7 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | | prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. 
| | prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index bcf1f3dc21..99c7031c66 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -5,7 +5,7 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: latest diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index ab8df6e3dd..658e9070a1 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -12,7 +12,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index b0e846a872..4209990b4f 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -19,7 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | | prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | | prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index 9378063026..acd5dd126b 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -5,7 +5,7 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. 
tag: latest diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 04c9c211d4..a34411adac 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -5,7 +5,6 @@ prompt-proto-service: revision: "12" image: - repository: ghcr.io/lsst-dm/prompt-proto-service pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. tag: d_2023_11_06 diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 8199150e2a..bd1b7dfb7f 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -13,7 +13,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index bfae874af4..9d54c3e0e2 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -19,7 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | | prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | | prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | diff --git a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml index 228fe82b20..fa7596695c 100644 --- a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml @@ -5,7 +5,7 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. 
tag: latest diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index 67982e1872..ac5ddb3110 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -12,7 +12,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index c9751001b7..70f26b1e79 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -18,7 +18,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | | prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in the PP deployment | +| prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | | prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). 
| | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml index fd58db9178..077f6293dd 100644 --- a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml @@ -5,7 +5,7 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: latest diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index 91825cac57..de786d31fc 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -12,7 +12,7 @@ prompt-proto-service: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index 498914638e..134a32565e 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -22,7 +22,7 @@ Event-driven processing of camera images | containerConcurrency | int | `1` | | | fullnameOverride | string | `"prompt-proto-service"` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | -| image.repository | string | `"ghcr.io/lsst-dm/prompt-proto-service"` | Image to use in 
the PP deployment | +| image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | | imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 39c6ab7b34..3d795d7148 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -13,7 +13,7 @@ podAnnotations: image: # -- Image to use in the PP deployment - repository: ghcr.io/lsst-dm/prompt-proto-service + repository: ghcr.io/lsst-dm/prompt-service # -- Pull policy for the PP image # @default -- `IfNotPresent` in prod, `Always` in dev pullPolicy: IfNotPresent From 4ae475ebf4babee53a517e76b2e21070b267e1ce Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Tue, 14 Nov 2023 10:54:44 -0700 Subject: [PATCH 242/588] Add VaultSecret resource for butler --- applications/butler/templates/vault-secrets.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 applications/butler/templates/vault-secrets.yaml diff --git a/applications/butler/templates/vault-secrets.yaml b/applications/butler/templates/vault-secrets.yaml new file mode 100644 index 0000000000..38fd855560 --- /dev/null +++ b/applications/butler/templates/vault-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "butler.fullname" . }} + labels: + {{- include "butler.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/butler" + type: Opaque From f87684c83dc8f9e7c7a8c7d791db532610062b2c Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 14 Nov 2023 11:45:24 -0700 Subject: [PATCH 243/588] rename secrets.yml to secrets.yaml for butler --- applications/butler/{secrets.yml => secrets.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename applications/butler/{secrets.yml => secrets.yaml} (100%) diff --git a/applications/butler/secrets.yml b/applications/butler/secrets.yaml similarity index 100% rename from applications/butler/secrets.yml rename to applications/butler/secrets.yaml From e57043d3ceca0bf992c4c03109466984b2e0ae74 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Tue, 14 Nov 2023 13:30:56 -0700 Subject: [PATCH 244/588] Allow anonymous access to Butler config files --- applications/butler/README.md | 1 + .../butler/templates/ingress-anonymous.yaml | 44 +++++++++++++++++++ ...ngress.yaml => ingress-authenticated.yaml} | 2 +- applications/butler/values.yaml | 5 +++ 4 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 applications/butler/templates/ingress-anonymous.yaml rename applications/butler/templates/{ingress.yaml => ingress-authenticated.yaml} (93%) diff --git a/applications/butler/README.md b/applications/butler/README.md index bc0ed146c8..3043ea5172 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -16,6 +16,7 @@ Server for Butler data abstraction service | autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | | config.configUri | string | `""` | URI to the file specifying the DirectButler configuration to be used by the butler server | +| config.pathPrefix | string | `"/api/butler"` | The prefix of the path portion of the URL where the Butler service will be exposed. 
For example, if the service should be exposed at `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/butler/templates/ingress-anonymous.yaml b/applications/butler/templates/ingress-anonymous.yaml new file mode 100644 index 0000000000..d3b79b4ae5 --- /dev/null +++ b/applications/butler/templates/ingress-anonymous.yaml @@ -0,0 +1,44 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "butler-anonymous" + labels: + {{- include "butler.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + anonymous: true +template: + metadata: + name: "butler-anonymous" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + # For direct end-user use of the Butler client library, the + # Butler() convenience constructor must be able to load a + # configuration file via unauthenticated HTTP. This exists for + # compatibility with the way Butler instances were configured prior + # to the existence of the Butler server -- they are passed the URI + # for a repository root on the filesystem or HTTP, from which a + # configuration file is loaded. 
+ - path: "{{ .Values.config.pathPrefix }}/butler.yaml" + pathType: "Exact" + backend: + service: + name: "butler" + port: + number: 8080 + - path: "{{ .Values.config.pathPrefix }}/butler.json" + pathType: "Exact" + backend: + service: + name: "butler" + port: + number: 8080 + diff --git a/applications/butler/templates/ingress.yaml b/applications/butler/templates/ingress-authenticated.yaml similarity index 93% rename from applications/butler/templates/ingress.yaml rename to applications/butler/templates/ingress-authenticated.yaml index 430c5067ca..2868813397 100644 --- a/applications/butler/templates/ingress.yaml +++ b/applications/butler/templates/ingress-authenticated.yaml @@ -29,7 +29,7 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/api/butler" + - path: {{ .Values.config.pathPrefix | quote }} pathType: "Prefix" backend: service: diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index 3ee5d74d8b..00a45ff8b7 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -67,3 +67,8 @@ config: # -- URI to the file specifying the DirectButler configuration to be used # by the butler server configUri: "" + + # -- The prefix of the path portion of the URL where the Butler service will + # be exposed. 
For example, if the service should be exposed at + # `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` + pathPrefix: "/api/butler" From d530a67bdf58e730f44f84e608bb3a70989cca73 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 15 Nov 2023 14:58:02 -0800 Subject: [PATCH 245/588] change schedview_prenight to schedview-prenight --- .../.helmignore | 0 .../Chart.yaml | 2 +- .../README.md | 24 +++++++++---------- .../templates/_helpers.tpl | 12 +++++----- .../templates/deployment.yaml | 8 +++---- .../templates/hpa.yaml | 6 ++--- .../templates/ingress.yaml | 10 ++++---- .../templates/networkpolicy.yaml | 4 ++-- .../templates/service.yaml | 6 ++--- .../values-minikube.yaml | 0 .../values.yaml | 24 +++++++++---------- docs/applications/index.rst | 2 +- .../index.rst | 6 ++--- .../applications/schedview-prenight/values.md | 12 ++++++++++ .../applications/schedview_prenight/values.md | 12 ---------- environments/README.md | 2 +- ...ml => schedview-prenight-application.yaml} | 10 ++++---- environments/values-minikube.yaml | 2 +- environments/values.yaml | 4 ++-- 19 files changed, 73 insertions(+), 73 deletions(-) rename applications/{schedview_prenight => schedview-prenight}/.helmignore (100%) rename applications/{schedview_prenight => schedview-prenight}/Chart.yaml (89%) rename applications/{schedview_prenight => schedview-prenight}/README.md (73%) rename applications/{schedview_prenight => schedview-prenight}/templates/_helpers.tpl (60%) rename applications/{schedview_prenight => schedview-prenight}/templates/deployment.yaml (87%) rename applications/{schedview_prenight => schedview-prenight}/templates/hpa.yaml (86%) rename applications/{schedview_prenight => schedview-prenight}/templates/ingress.yaml (74%) rename applications/{schedview_prenight => schedview-prenight}/templates/networkpolicy.yaml (82%) rename applications/{schedview_prenight => schedview-prenight}/templates/service.yaml (54%) rename applications/{schedview_prenight => 
schedview-prenight}/values-minikube.yaml (100%) rename applications/{schedview_prenight => schedview-prenight}/values.yaml (59%) rename docs/applications/{schedview_prenight => schedview-prenight}/index.rst (81%) create mode 100644 docs/applications/schedview-prenight/values.md delete mode 100644 docs/applications/schedview_prenight/values.md rename environments/templates/{schedview_prenight-application.yaml => schedview-prenight-application.yaml} (79%) diff --git a/applications/schedview_prenight/.helmignore b/applications/schedview-prenight/.helmignore similarity index 100% rename from applications/schedview_prenight/.helmignore rename to applications/schedview-prenight/.helmignore diff --git a/applications/schedview_prenight/Chart.yaml b/applications/schedview-prenight/Chart.yaml similarity index 89% rename from applications/schedview_prenight/Chart.yaml rename to applications/schedview-prenight/Chart.yaml index 337a9b0efe..c3c37afbbc 100644 --- a/applications/schedview_prenight/Chart.yaml +++ b/applications/schedview-prenight/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 appVersion: tickets-PREOPS-4508 description: Run the schedview pre-night briefing dashboard. -name: schedview_prenight +name: schedview-prenight sources: - https://github.com/lsst/schedview home: https://schedview.lsst.io/ diff --git a/applications/schedview_prenight/README.md b/applications/schedview-prenight/README.md similarity index 73% rename from applications/schedview_prenight/README.md rename to applications/schedview-prenight/README.md index 41af94c9f1..fd4d975cea 100644 --- a/applications/schedview_prenight/README.md +++ b/applications/schedview-prenight/README.md @@ -1,4 +1,4 @@ -# schedview_prenight +# schedview-prenight Run the schedview pre-night briefing dashboard. @@ -12,20 +12,20 @@ Run the schedview pre-night briefing dashboard. 
| Key | Type | Default | Description | |-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the schedview_prenight deployment pod | -| autoscaling.enabled | bool | `false` | Enable autoscaling of schedview_prenight deployment | -| autoscaling.maxReplicas | int | `100` | Maximum number of schedview_prenight deployment pods | -| autoscaling.minReplicas | int | `1` | Minimum number of schedview_prenight deployment pods | -| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of schedview_prenight deployment pods | +| affinity | object | `{}` | Affinity rules for the schedview-prenight deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of schedview-prenight deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of schedview-prenight deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of schedview-prenight deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of schedview-prenight deployment pods | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the schedview_prenight image | -| image.repository | string | `"ghcr.io/lsst/schedview"` | Image to use in the schedview_prenight deployment | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the schedview-prenight image | +| image.repository | string | `"ghcr.io/lsst/schedview"` | Image to use in the schedview-prenight deployment | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | -| nodeSelector | object | `{}` | Node selection rules for the schedview_prenight deployment pod | -| podAnnotations | object | `{}` | Annotations for the schedview_prenight deployment pod | +| nodeSelector | object | `{}` | Node selection rules for the schedview-prenight deployment pod | +| podAnnotations | object | `{}` | Annotations for the schedview-prenight deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | -| resources | object | `{}` | Resource limits and requests for the schedview_prenight deployment pod | -| tolerations | list | `[]` | Tolerations for the schedview_prenight deployment pod | +| resources | object | `{}` | Resource limits and requests for the schedview-prenight deployment pod | +| tolerations | list | `[]` | Tolerations for the schedview-prenight deployment pod | diff --git a/applications/schedview_prenight/templates/_helpers.tpl b/applications/schedview-prenight/templates/_helpers.tpl similarity index 60% rename from applications/schedview_prenight/templates/_helpers.tpl rename to applications/schedview-prenight/templates/_helpers.tpl index 11118eb286..4a655207a6 100644 --- a/applications/schedview_prenight/templates/_helpers.tpl +++ b/applications/schedview-prenight/templates/_helpers.tpl @@ -1,16 +1,16 @@ {{/* Create chart name and version as used by the chart label. */}} -{{- define "schedview_prenight.chart" -}} +{{- define "schedview-prenight.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} -{{- define "schedview_prenight.labels" -}} -helm.sh/chart: {{ include "schedview_prenight.chart" . }} -{{ include "schedview_prenight.selectorLabels" . }} +{{- define "schedview-prenight.labels" -}} +helm.sh/chart: {{ include "schedview-prenight.chart" . }} +{{ include "schedview-prenight.selectorLabels" . 
}} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} @@ -20,7 +20,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }} {{/* Selector labels */}} -{{- define "schedview_prenight.selectorLabels" -}} -app.kubernetes.io/name: "schedview_prenight" +{{- define "schedview-prenight.selectorLabels" -}} +app.kubernetes.io/name: "schedview-prenight" app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} diff --git a/applications/schedview_prenight/templates/deployment.yaml b/applications/schedview-prenight/templates/deployment.yaml similarity index 87% rename from applications/schedview_prenight/templates/deployment.yaml rename to applications/schedview-prenight/templates/deployment.yaml index 68b8d9cf79..df01800e6b 100644 --- a/applications/schedview_prenight/templates/deployment.yaml +++ b/applications/schedview-prenight/templates/deployment.yaml @@ -1,16 +1,16 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: "schedview_prenight" + name: "schedview-prenight" labels: - {{- include "schedview_prenight.labels" . | nindent 4 }} + {{- include "schedview-prenight.labels" . | nindent 4 }} spec: {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicaCount }} {{- end }} selector: matchLabels: - {{- include "schedview_prenight.selectorLabels" . | nindent 6 }} + {{- include "schedview-prenight.selectorLabels" . | nindent 6 }} template: metadata: {{- with .Values.podAnnotations }} @@ -18,7 +18,7 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} labels: - {{- include "schedview_prenight.selectorLabels" . | nindent 8 }} + {{- include "schedview-prenight.selectorLabels" . 
| nindent 8 }} spec: automountServiceAccountToken: false containers: diff --git a/applications/schedview_prenight/templates/hpa.yaml b/applications/schedview-prenight/templates/hpa.yaml similarity index 86% rename from applications/schedview_prenight/templates/hpa.yaml rename to applications/schedview-prenight/templates/hpa.yaml index a43342641c..bce3338552 100644 --- a/applications/schedview_prenight/templates/hpa.yaml +++ b/applications/schedview-prenight/templates/hpa.yaml @@ -2,14 +2,14 @@ apiVersion: autoscaling/v2beta1 kind: HorizontalPodAutoscaler metadata: - name: "schedview_prenight" + name: "schedview-prenight" labels: - {{- include "schedview_prenight.labels" . | nindent 4 }} + {{- include "schedview-prenight.labels" . | nindent 4 }} spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: "schedview_prenight" + name: "schedview-prenight" minReplicas: {{ .Values.autoscaling.minReplicas }} maxReplicas: {{ .Values.autoscaling.maxReplicas }} metrics: diff --git a/applications/schedview_prenight/templates/ingress.yaml b/applications/schedview-prenight/templates/ingress.yaml similarity index 74% rename from applications/schedview_prenight/templates/ingress.yaml rename to applications/schedview-prenight/templates/ingress.yaml index 1ebdc02fa3..8565f31302 100644 --- a/applications/schedview_prenight/templates/ingress.yaml +++ b/applications/schedview-prenight/templates/ingress.yaml @@ -1,9 +1,9 @@ apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: - name: "schedview_prenight" + name: "schedview-prenight" labels: - {{- include "schedview_prenight.labels" . | nindent 4 }} + {{- include "schedview-prenight.labels" . | nindent 4 }} config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: @@ -12,7 +12,7 @@ config: loginRedirect: true template: metadata: - name: "schedview_prenight" + name: "schedview-prenight" {{- with .Values.ingress.annotations }} annotations: {{- toYaml . 
| nindent 6 }} @@ -22,10 +22,10 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/schedview_prenight" + - path: "/schedview-prenight" pathType: "Prefix" backend: service: - name: "schedview_prenight" + name: "schedview-prenight" port: number: 8080 diff --git a/applications/schedview_prenight/templates/networkpolicy.yaml b/applications/schedview-prenight/templates/networkpolicy.yaml similarity index 82% rename from applications/schedview_prenight/templates/networkpolicy.yaml rename to applications/schedview-prenight/templates/networkpolicy.yaml index 245ce24750..a576a4d494 100644 --- a/applications/schedview_prenight/templates/networkpolicy.yaml +++ b/applications/schedview-prenight/templates/networkpolicy.yaml @@ -1,11 +1,11 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: - name: "schedview_prenight" + name: "schedview-prenight" spec: podSelector: matchLabels: - {{- include "schedview_prenight.selectorLabels" . | nindent 6 }} + {{- include "schedview-prenight.selectorLabels" . | nindent 6 }} policyTypes: - Ingress ingress: diff --git a/applications/schedview_prenight/templates/service.yaml b/applications/schedview-prenight/templates/service.yaml similarity index 54% rename from applications/schedview_prenight/templates/service.yaml rename to applications/schedview-prenight/templates/service.yaml index 97dc5f967a..2372652f57 100644 --- a/applications/schedview_prenight/templates/service.yaml +++ b/applications/schedview-prenight/templates/service.yaml @@ -1,9 +1,9 @@ apiVersion: v1 kind: Service metadata: - name: "schedview_prenight" + name: "schedview-prenight" labels: - {{- include "schedview_prenight.labels" . | nindent 4 }} + {{- include "schedview-prenight.labels" . | nindent 4 }} spec: type: "ClusterIP" ports: @@ -12,4 +12,4 @@ spec: protocol: "TCP" name: "http" selector: - {{- include "schedview_prenight.selectorLabels" . 
| nindent 4 }} + {{- include "schedview-prenight.selectorLabels" . | nindent 4 }} diff --git a/applications/schedview_prenight/values-minikube.yaml b/applications/schedview-prenight/values-minikube.yaml similarity index 100% rename from applications/schedview_prenight/values-minikube.yaml rename to applications/schedview-prenight/values-minikube.yaml diff --git a/applications/schedview_prenight/values.yaml b/applications/schedview-prenight/values.yaml similarity index 59% rename from applications/schedview_prenight/values.yaml rename to applications/schedview-prenight/values.yaml index 73b1b9660b..63afd2ce47 100644 --- a/applications/schedview_prenight/values.yaml +++ b/applications/schedview-prenight/values.yaml @@ -1,4 +1,4 @@ -# Default values for schedview_prenight. +# Default values for schedview-prenight. # This is a YAML-formatted file. # Declare variables to be passed into your templates. @@ -6,10 +6,10 @@ replicaCount: 1 image: - # -- Image to use in the schedview_prenight deployment + # -- Image to use in the schedview-prenight deployment repository: "ghcr.io/lsst/schedview" - # -- Pull policy for the schedview_prenight image + # -- Pull policy for the schedview-prenight image pullPolicy: "IfNotPresent" # -- Overrides the image tag whose default is the chart appVersion. 
@@ -20,32 +20,32 @@ ingress: annotations: {} autoscaling: - # -- Enable autoscaling of schedview_prenight deployment + # -- Enable autoscaling of schedview-prenight deployment enabled: false - # -- Minimum number of schedview_prenight deployment pods + # -- Minimum number of schedview-prenight deployment pods minReplicas: 1 - # -- Maximum number of schedview_prenight deployment pods + # -- Maximum number of schedview-prenight deployment pods maxReplicas: 100 - # -- Target CPU utilization of schedview_prenight deployment pods + # -- Target CPU utilization of schedview-prenight deployment pods targetCPUUtilizationPercentage: 80 # targetMemoryUtilizationPercentage: 80 -# -- Annotations for the schedview_prenight deployment pod +# -- Annotations for the schedview-prenight deployment pod podAnnotations: {} -# -- Resource limits and requests for the schedview_prenight deployment pod +# -- Resource limits and requests for the schedview-prenight deployment pod resources: {} -# -- Node selection rules for the schedview_prenight deployment pod +# -- Node selection rules for the schedview-prenight deployment pod nodeSelector: {} -# -- Tolerations for the schedview_prenight deployment pod +# -- Tolerations for the schedview-prenight deployment pod tolerations: [] -# -- Affinity rules for the schedview_prenight deployment pod +# -- Affinity rules for the schedview-prenight deployment pod affinity: {} # The following will be set by parameters injected by Argo CD and should not diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 597a299795..5dd483002a 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -57,7 +57,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde production-tools/index rubintv/index sasquatch/index - schedview_prenight/index + schedview-prenight/index strimzi/index strimzi-access-operator/index telegraf/index diff --git a/docs/applications/schedview_prenight/index.rst 
b/docs/applications/schedview-prenight/index.rst similarity index 81% rename from docs/applications/schedview_prenight/index.rst rename to docs/applications/schedview-prenight/index.rst index 305229c605..d0e5496f5a 100644 --- a/docs/applications/schedview_prenight/index.rst +++ b/docs/applications/schedview-prenight/index.rst @@ -1,7 +1,7 @@ -.. px-app:: schedview_prenight +.. px-app:: schedview-prenight #################################################################### -schedview_prenight — Run the schedview pre-night briefing dashboard. +schedview-prenight — Run the schedview pre-night briefing dashboard. #################################################################### schedview's pre-night dashboard is a web application for examination of @@ -9,7 +9,7 @@ Rubin Observatory/LSST scheduler simulation data for one night. It is intended to be used to understand what the scheduler is likely to do before a night observing, both to check for problems and to set expectations. -.. jinja:: schedview_prenight +.. jinja:: schedview-prenight :file: applications/_summary.rst.jinja Guides diff --git a/docs/applications/schedview-prenight/values.md b/docs/applications/schedview-prenight/values.md new file mode 100644 index 0000000000..cc7da75492 --- /dev/null +++ b/docs/applications/schedview-prenight/values.md @@ -0,0 +1,12 @@ +```{px-app-values} schedview-prenight +``` + +# schedview-prenight Helm values reference + +Helm values reference table for the {px-app}`schedview-prenight` application. 
+ +```{include} ../../../applications/schedview-prenight/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/schedview_prenight/values.md b/docs/applications/schedview_prenight/values.md deleted file mode 100644 index 89a1b38c82..0000000000 --- a/docs/applications/schedview_prenight/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} schedview_prenight -``` - -# schedview_prenight Helm values reference - -Helm values reference table for the {px-app}`schedview_prenight` application. - -```{include} ../../../applications/schedview_prenight/README.md ---- -start-after: "## Values" ---- -``` \ No newline at end of file diff --git a/environments/README.md b/environments/README.md index 2bbf22de25..6c993d185d 100644 --- a/environments/README.md +++ b/environments/README.md @@ -39,7 +39,7 @@ | applications.prompt-proto-service-lsstcomcam | bool | `false` | Enable the prompt-proto-service-lsstcomcam application | | applications.rubintv | bool | `false` | Enable the rubintv application | | applications.sasquatch | bool | `false` | Enable the sasquatch application | -| applications.schedview_prenight | bool | `false` | Enable the schedview_prenight application | +| applications.schedview-prenight | bool | `false` | Enable the schedview-prenight application | | applications.semaphore | bool | `false` | Enable the semaphore application | | applications.sherlock | bool | `false` | Enable the sherlock application | | applications.siav2 | bool | `false` | Enable the siav2 application | diff --git a/environments/templates/schedview_prenight-application.yaml b/environments/templates/schedview-prenight-application.yaml similarity index 79% rename from environments/templates/schedview_prenight-application.yaml rename to environments/templates/schedview-prenight-application.yaml index d66f384fb0..1489318618 100644 --- a/environments/templates/schedview_prenight-application.yaml +++ 
b/environments/templates/schedview-prenight-application.yaml @@ -1,23 +1,23 @@ -{{- if (index .Values "applications" "schedview_prenight") -}} +{{- if (index .Values "applications" "schedview-prenight") -}} apiVersion: v1 kind: Namespace metadata: - name: "schedview_prenight" + name: "schedview-prenight" --- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "schedview_prenight" + name: "schedview-prenight" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "schedview_prenight" + namespace: "schedview-prenight" server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/schedview_prenight" + path: "applications/schedview-prenight" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} helm: diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index 75a8d2df0f..595ce8d38f 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -15,4 +15,4 @@ applications: mobu: true postgres: true squareone: true - schedview_prenight: true + schedview-prenight: true diff --git a/environments/values.yaml b/environments/values.yaml index 37934bec22..f319359483 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -125,8 +125,8 @@ applications: # -- Enable the sasquatch application sasquatch: false - # -- Enable the schedview_prenight application - schedview_prenight: false + # -- Enable the schedview-prenight application + schedview-prenight: false # -- Enable the siav2 application siav2: false From f3d9539deae9388bc606d037f26c74474fda622c Mon Sep 17 00:00:00 2001 From: Frossie Date: Wed, 15 Nov 2023 16:10:01 -0700 Subject: [PATCH 246/588] Add schedview-prenight app for @ehneilsen --- environments/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index ff328c582a..0c33540aa3 
100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -22,6 +22,7 @@ applications: postgres: true rubintv: true sasquatch: true + schedview-prenight: true semaphore: true siav2: true ssotap: true From aa68ca6fae11751e7e5f791f33a07683ebe62dc0 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 15 Nov 2023 15:18:39 -0800 Subject: [PATCH 247/588] add user to usdf-dev argocd rbac --- applications/argocd/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index 1827d88d0d..3eae1cf297 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -56,6 +56,7 @@ argo-cd: g, rra@slac.stanford.edu, role:admin g, fritzm@slac.stanford.edu, role:admin g, cslater@slac.stanford.edu, role:admin + g, neilsen@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From 7933d7ca0b62a8f356fe60ecedb3261a9c58b4c6 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 15 Nov 2023 15:30:29 -0800 Subject: [PATCH 248/588] add values for usdfdev env --- applications/schedview-prenight/values-usdfdev.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 applications/schedview-prenight/values-usdfdev.yaml diff --git a/applications/schedview-prenight/values-usdfdev.yaml b/applications/schedview-prenight/values-usdfdev.yaml new file mode 100644 index 0000000000..e69de29bb2 From 4b6f8439a9bee5d47f81ffd17cf2a89ea84e1686 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Thu, 16 Nov 2023 08:38:08 -0800 Subject: [PATCH 249/588] add tmp volume to schedview-prenight --- applications/schedview-prenight/templates/deployment.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/applications/schedview-prenight/templates/deployment.yaml b/applications/schedview-prenight/templates/deployment.yaml index df01800e6b..573e937939 100644 --- 
a/applications/schedview-prenight/templates/deployment.yaml +++ b/applications/schedview-prenight/templates/deployment.yaml @@ -41,6 +41,9 @@ spec: port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp securityContext: runAsNonRoot: true runAsUser: 1000 @@ -57,3 +60,6 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} + volumes: + - name: tmp + emptyDir: {} From 676392397964f5f9496447f16adb4e28314eadc1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 09:44:39 +0000 Subject: [PATCH 250/588] Update Helm release argo-cd to v5.51.3 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index b6b4fad9a2..72bb1839e8 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.51.1 + version: 5.51.3 repository: https://argoproj.github.io/argo-helm From 00fa967fed5530d25c550067c47b061ba1ee19bf Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 09:44:43 +0000 Subject: [PATCH 251/588] Update Helm release argo-workflows to v0.39.3 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index a1805dc5a3..8fea1ce9dc 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.39.0 + version: 0.39.3 repository: https://argoproj.github.io/argo-helm From a63aea84c3d4af3ec22008c9d234e70554255a75 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 20 Nov 2023 
09:33:39 -0800 Subject: [PATCH 252/588] Sort applications when deleting Vault secrets The output order from phalanx secrets sync --delete was unstable, which caused occasional test failures. Sort by application to avoid this. --- src/phalanx/services/secrets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index f11b99c5ed..02a29ee733 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -346,7 +346,7 @@ def _clean_vault_secrets( has_pull_secret Whether there should be a pull secret for this environment. """ - for application, values in vault_secrets.items(): + for application, values in sorted(vault_secrets.items()): if application not in resolved.applications: if application == "pull-secret" and has_pull_secret: continue From 46528348af5cae06a66a77e7ae08a18046446a2d Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 12:32:27 +0000 Subject: [PATCH 253/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 503 +++++++++++++++++++++--------------------- requirements/main.txt | 231 ++++++++++--------- 2 files changed, 366 insertions(+), 368 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 39ca37fe41..8c77162f55 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -32,9 +32,9 @@ beautifulsoup4==4.12.2 \ --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ --hash=sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a # via pydata-sphinx-theme -certifi==2023.7.22 \ - --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ - --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 +certifi==2023.11.17 \ + --hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ + 
--hash=sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474 # via # -c requirements/main.txt # requests @@ -246,9 +246,9 @@ graphviz==0.20.1 \ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8 # via diagrams -identify==2.5.31 \ - --hash=sha256:7736b3c7a28233637e3c36550646fc6389bedd74ae84cb788200cc8e2dd60b75 \ - --hash=sha256:90199cb9e7bd3c5407a9b7e81b4abec4bb9d249991c79439ec8af740afc6293d +identify==2.5.32 \ + --hash=sha256:0b7656ef6cba81664b783352c73f8c24b39cf82f926f78f4550eda928e5e0545 \ + --hash=sha256:5d9979348ec1a21c768ae07e0a652924538e8bce67313a73cb0f681cf08ba407 # via pre-commit idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ @@ -274,13 +274,13 @@ jinja2==3.1.2 \ # sphinx # sphinx-jinja # sphinxcontrib-redoc -jsonschema==4.19.2 \ - --hash=sha256:c9ff4d7447eed9592c23a12ccee508baf0dd0d59650615e847feb6cdca74f392 \ - --hash=sha256:eee9e502c788e89cb166d4d37f43084e3b64ab405c795c03d343a4dbc2c810fc +jsonschema==4.20.0 \ + --hash=sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa \ + --hash=sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3 # via sphinxcontrib-redoc -jsonschema-specifications==2023.7.1 \ - --hash=sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1 \ - --hash=sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb +jsonschema-specifications==2023.11.1 \ + --hash=sha256:c9b234904ffe02f079bf91b14d79987faa685fd4b39c377a0996954c0090b9ca \ + --hash=sha256:f596778ab612b3fd29f72ea0d990393d0540a5aab18bf0407a46632eab540779 # via jsonschema latexcodec==2.0.1 \ --hash=sha256:2aa2551c373261cefe2ad3a8953a6d6533e68238d180eb4bb91d7964adb3fe9a \ @@ -440,135 +440,134 @@ pybtex-docutils==1.0.3 \ --hash=sha256:3a7ebdf92b593e00e8c1c538aa9a20bca5d92d84231124715acc964d51d93c6b \ 
--hash=sha256:8fd290d2ae48e32fcb54d86b0efb8d573198653c7e2447d5bec5847095f430b9 # via sphinxcontrib-bibtex -pydantic==2.4.2 \ - --hash=sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7 \ - --hash=sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1 +pydantic==2.5.1 \ + --hash=sha256:0b8be5413c06aadfbe56f6dc1d45c9ed25fd43264414c571135c97dd77c2bedb \ + --hash=sha256:dc5244a8939e0d9a68f1f1b5f550b2e1c879912033b1becbedb315accc75441b # via # -c requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.10.1 \ - --hash=sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e \ - --hash=sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33 \ - --hash=sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7 \ - --hash=sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7 \ - --hash=sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea \ - --hash=sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4 \ - --hash=sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0 \ - --hash=sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7 \ - --hash=sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94 \ - --hash=sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff \ - --hash=sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82 \ - --hash=sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd \ - --hash=sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893 \ - --hash=sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e \ - --hash=sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d \ - --hash=sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901 \ - --hash=sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9 \ - 
--hash=sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c \ - --hash=sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7 \ - --hash=sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891 \ - --hash=sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f \ - --hash=sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a \ - --hash=sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9 \ - --hash=sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5 \ - --hash=sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e \ - --hash=sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a \ - --hash=sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c \ - --hash=sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f \ - --hash=sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514 \ - --hash=sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b \ - --hash=sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302 \ - --hash=sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096 \ - --hash=sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0 \ - --hash=sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27 \ - --hash=sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884 \ - --hash=sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a \ - --hash=sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357 \ - --hash=sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430 \ - --hash=sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221 \ - --hash=sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325 \ - --hash=sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4 \ - 
--hash=sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05 \ - --hash=sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55 \ - --hash=sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875 \ - --hash=sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970 \ - --hash=sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc \ - --hash=sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6 \ - --hash=sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f \ - --hash=sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b \ - --hash=sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d \ - --hash=sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15 \ - --hash=sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118 \ - --hash=sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee \ - --hash=sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e \ - --hash=sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6 \ - --hash=sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208 \ - --hash=sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede \ - --hash=sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3 \ - --hash=sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e \ - --hash=sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada \ - --hash=sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175 \ - --hash=sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a \ - --hash=sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c \ - --hash=sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f \ - --hash=sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58 \ - 
--hash=sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f \ - --hash=sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a \ - --hash=sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a \ - --hash=sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921 \ - --hash=sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e \ - --hash=sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904 \ - --hash=sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776 \ - --hash=sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52 \ - --hash=sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf \ - --hash=sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8 \ - --hash=sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f \ - --hash=sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b \ - --hash=sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63 \ - --hash=sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c \ - --hash=sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f \ - --hash=sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468 \ - --hash=sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e \ - --hash=sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab \ - --hash=sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2 \ - --hash=sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb \ - --hash=sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb \ - --hash=sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132 \ - --hash=sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b \ - --hash=sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607 \ - 
--hash=sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934 \ - --hash=sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698 \ - --hash=sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e \ - --hash=sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561 \ - --hash=sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de \ - --hash=sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b \ - --hash=sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a \ - --hash=sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595 \ - --hash=sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402 \ - --hash=sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881 \ - --hash=sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429 \ - --hash=sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5 \ - --hash=sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7 \ - --hash=sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c \ - --hash=sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531 \ - --hash=sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6 \ - --hash=sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521 +pydantic-core==2.14.3 \ + --hash=sha256:056ea7cc3c92a7d2a14b5bc9c9fa14efa794d9f05b9794206d089d06d3433dc7 \ + --hash=sha256:0653fb9fc2fa6787f2fa08631314ab7fc8070307bd344bf9471d1b7207c24623 \ + --hash=sha256:076edc972b68a66870cec41a4efdd72a6b655c4098a232314b02d2bfa3bfa157 \ + --hash=sha256:0a3e51c2be472b7867eb0c5d025b91400c2b73a0823b89d4303a9097e2ec6655 \ + --hash=sha256:0c7f8e8a7cf8e81ca7d44bea4f181783630959d41b4b51d2f74bc50f348a090f \ + --hash=sha256:10904368261e4509c091cbcc067e5a88b070ed9a10f7ad78f3029c175487490f \ + 
--hash=sha256:113752a55a8eaece2e4ac96bc8817f134c2c23477e477d085ba89e3aa0f4dc44 \ + --hash=sha256:12e05a76b223577a4696c76d7a6b36a0ccc491ffb3c6a8cf92d8001d93ddfd63 \ + --hash=sha256:136bc7247e97a921a020abbd6ef3169af97569869cd6eff41b6a15a73c44ea9b \ + --hash=sha256:1582f01eaf0537a696c846bea92082082b6bfc1103a88e777e983ea9fbdc2a0f \ + --hash=sha256:1767bd3f6370458e60c1d3d7b1d9c2751cc1ad743434e8ec84625a610c8b9195 \ + --hash=sha256:1e2979dc80246e18e348de51246d4c9b410186ffa3c50e77924bec436b1e36cb \ + --hash=sha256:1ea992659c03c3ea811d55fc0a997bec9dde863a617cc7b25cfde69ef32e55af \ + --hash=sha256:1f2d4516c32255782153e858f9a900ca6deadfb217fd3fb21bb2b60b4e04d04d \ + --hash=sha256:2494d20e4c22beac30150b4be3b8339bf2a02ab5580fa6553ca274bc08681a65 \ + --hash=sha256:260692420028319e201b8649b13ac0988974eeafaaef95d0dfbf7120c38dc000 \ + --hash=sha256:2646f8270f932d79ba61102a15ea19a50ae0d43b314e22b3f8f4b5fabbfa6e38 \ + --hash=sha256:27828f0227b54804aac6fb077b6bb48e640b5435fdd7fbf0c274093a7b78b69c \ + --hash=sha256:2bc736725f9bd18a60eec0ed6ef9b06b9785454c8d0105f2be16e4d6274e63d0 \ + --hash=sha256:2c08ac60c3caa31f825b5dbac47e4875bd4954d8f559650ad9e0b225eaf8ed0c \ + --hash=sha256:2c83892c7bf92b91d30faca53bb8ea21f9d7e39f0ae4008ef2c2f91116d0464a \ + --hash=sha256:354db020b1f8f11207b35360b92d95725621eb92656725c849a61e4b550f4acc \ + --hash=sha256:364dba61494e48f01ef50ae430e392f67ee1ee27e048daeda0e9d21c3ab2d609 \ + --hash=sha256:37dad73a2f82975ed563d6a277fd9b50e5d9c79910c4aec787e2d63547202315 \ + --hash=sha256:38113856c7fad8c19be7ddd57df0c3e77b1b2336459cb03ee3903ce9d5e236ce \ + --hash=sha256:38aed5a1bbc3025859f56d6a32f6e53ca173283cb95348e03480f333b1091e7d \ + --hash=sha256:3ad083df8fe342d4d8d00cc1d3c1a23f0dc84fce416eb301e69f1ddbbe124d3f \ + --hash=sha256:3c1bf1a7b05a65d3b37a9adea98e195e0081be6b17ca03a86f92aeb8b110f468 \ + --hash=sha256:3d1dde10bd9962b1434053239b1d5490fc31a2b02d8950a5f731bc584c7a5a0f \ + --hash=sha256:44aaf1a07ad0824e407dafc637a852e9a44d94664293bbe7d8ee549c356c8882 \ + 
--hash=sha256:44afa3c18d45053fe8d8228950ee4c8eaf3b5a7f3b64963fdeac19b8342c987f \ + --hash=sha256:4a70d23eedd88a6484aa79a732a90e36701048a1509078d1b59578ef0ea2cdf5 \ + --hash=sha256:4aa89919fbd8a553cd7d03bf23d5bc5deee622e1b5db572121287f0e64979476 \ + --hash=sha256:4cc6bb11f4e8e5ed91d78b9880774fbc0856cb226151b0a93b549c2b26a00c19 \ + --hash=sha256:536e1f58419e1ec35f6d1310c88496f0d60e4f182cacb773d38076f66a60b149 \ + --hash=sha256:5402ee0f61e7798ea93a01b0489520f2abfd9b57b76b82c93714c4318c66ca06 \ + --hash=sha256:56814b41486e2d712a8bc02a7b1f17b87fa30999d2323bbd13cf0e52296813a1 \ + --hash=sha256:5b73441a1159f1fb37353aaefb9e801ab35a07dd93cb8177504b25a317f4215a \ + --hash=sha256:61beaa79d392d44dc19d6f11ccd824d3cccb865c4372157c40b92533f8d76dd0 \ + --hash=sha256:6c2d118d1b6c9e2d577e215567eedbe11804c3aafa76d39ec1f8bc74e918fd07 \ + --hash=sha256:6e2f9d76c00e805d47f19c7a96a14e4135238a7551a18bfd89bb757993fd0933 \ + --hash=sha256:71ed769b58d44e0bc2701aa59eb199b6665c16e8a5b8b4a84db01f71580ec448 \ + --hash=sha256:7349f99f1ef8b940b309179733f2cad2e6037a29560f1b03fdc6aa6be0a8d03c \ + --hash=sha256:75f3f534f33651b73f4d3a16d0254de096f43737d51e981478d580f4b006b427 \ + --hash=sha256:76fc18653a5c95e5301a52d1b5afb27c9adc77175bf00f73e94f501caf0e05ad \ + --hash=sha256:7cb0c397f29688a5bd2c0dbd44451bc44ebb9b22babc90f97db5ec3e5bb69977 \ + --hash=sha256:7cc24728a1a9cef497697e53b3d085fb4d3bc0ef1ef4d9b424d9cf808f52c146 \ + --hash=sha256:7e63a56eb7fdee1587d62f753ccd6d5fa24fbeea57a40d9d8beaef679a24bdd6 \ + --hash=sha256:832d16f248ca0cc96929139734ec32d21c67669dcf8a9f3f733c85054429c012 \ + --hash=sha256:8488e973547e8fb1b4193fd9faf5236cf1b7cd5e9e6dc7ff6b4d9afdc4c720cb \ + --hash=sha256:849cff945284c577c5f621d2df76ca7b60f803cc8663ff01b778ad0af0e39bb9 \ + --hash=sha256:88ec906eb2d92420f5b074f59cf9e50b3bb44f3cb70e6512099fdd4d88c2f87c \ + --hash=sha256:8d3b9c91eeb372a64ec6686c1402afd40cc20f61a0866850f7d989b6bf39a41a \ + --hash=sha256:8f5624f0f67f2b9ecaa812e1dfd2e35b256487566585160c6c19268bf2ffeccc \ + 
--hash=sha256:905a12bf088d6fa20e094f9a477bf84bd823651d8b8384f59bcd50eaa92e6a52 \ + --hash=sha256:92486a04d54987054f8b4405a9af9d482e5100d6fe6374fc3303015983fc8bda \ + --hash=sha256:96eb10ef8920990e703da348bb25fedb8b8653b5966e4e078e5be382b430f9e0 \ + --hash=sha256:96fb679c7ca12a512d36d01c174a4fbfd912b5535cc722eb2c010c7b44eceb8e \ + --hash=sha256:98d8b3932f1a369364606417ded5412c4ffb15bedbcf797c31317e55bd5d920e \ + --hash=sha256:9dbab442a8d9ca918b4ed99db8d89d11b1f067a7dadb642476ad0889560dac79 \ + --hash=sha256:9ef3e2e407e4cad2df3c89488a761ed1f1c33f3b826a2ea9a411b0a7d1cccf1b \ + --hash=sha256:9ff737f24b34ed26de62d481ef522f233d3c5927279f6b7229de9b0deb3f76b5 \ + --hash=sha256:a1a39fecb5f0b19faee9a8a8176c805ed78ce45d760259a4ff3d21a7daa4dfc1 \ + --hash=sha256:a402ae1066be594701ac45661278dc4a466fb684258d1a2c434de54971b006ca \ + --hash=sha256:a5c51460ede609fbb4fa883a8fe16e749964ddb459966d0518991ec02eb8dfb9 \ + --hash=sha256:a8ca13480ce16daad0504be6ce893b0ee8ec34cd43b993b754198a89e2787f7e \ + --hash=sha256:ab4a2381005769a4af2ffddae74d769e8a4aae42e970596208ec6d615c6fb080 \ + --hash=sha256:aeafc7f5bbddc46213707266cadc94439bfa87ecf699444de8be044d6d6eb26f \ + --hash=sha256:aecd5ed096b0e5d93fb0367fd8f417cef38ea30b786f2501f6c34eabd9062c38 \ + --hash=sha256:af452e69446fadf247f18ac5d153b1f7e61ef708f23ce85d8c52833748c58075 \ + --hash=sha256:af46f0b7a1342b49f208fed31f5a83b8495bb14b652f621e0a6787d2f10f24ee \ + --hash=sha256:b02b5e1f54c3396c48b665050464803c23c685716eb5d82a1d81bf81b5230da4 \ + --hash=sha256:b28996872b48baf829ee75fa06998b607c66a4847ac838e6fd7473a6b2ab68e7 \ + --hash=sha256:b7692f539a26265cece1e27e366df5b976a6db6b1f825a9e0466395b314ee48b \ + --hash=sha256:ba44fad1d114539d6a1509966b20b74d2dec9a5b0ee12dd7fd0a1bb7b8785e5f \ + --hash=sha256:bf15145b1f8056d12c67255cd3ce5d317cd4450d5ee747760d8d088d85d12a2d \ + --hash=sha256:c3dc2920cc96f9aa40c6dc54256e436cc95c0a15562eb7bd579e1811593c377e \ + --hash=sha256:c54af5069da58ea643ad34ff32fd6bc4eebb8ae0fef9821cd8919063e0aeeaab \ + 
--hash=sha256:c5ea0153482e5b4d601c25465771c7267c99fddf5d3f3bdc238ef930e6d051cf \ + --hash=sha256:c9ffd823c46e05ef3eb28b821aa7bc501efa95ba8880b4a1380068e32c5bed47 \ + --hash=sha256:ca55c9671bb637ce13d18ef352fd32ae7aba21b4402f300a63f1fb1fd18e0364 \ + --hash=sha256:caa94726791e316f0f63049ee00dff3b34a629b0d099f3b594770f7d0d8f1f56 \ + --hash=sha256:cc956f78651778ec1ab105196e90e0e5f5275884793ab67c60938c75bcca3989 \ + --hash=sha256:ccbf355b7276593c68fa824030e68cb29f630c50e20cb11ebb0ee450ae6b3d08 \ + --hash=sha256:cf08b43d1d5d1678f295f0431a4a7e1707d4652576e1d0f8914b5e0213bfeee5 \ + --hash=sha256:d06c78074646111fb01836585f1198367b17d57c9f427e07aaa9ff499003e58d \ + --hash=sha256:d2b53e1f851a2b406bbb5ac58e16c4a5496038eddd856cc900278fa0da97f3fc \ + --hash=sha256:d41df8e10b094640a6b234851b624b76a41552f637b9fb34dc720b9fe4ef3be4 \ + --hash=sha256:d7abd17a838a52140e3aeca271054e321226f52df7e0a9f0da8f91ea123afe98 \ + --hash=sha256:de52ddfa6e10e892d00f747bf7135d7007302ad82e243cf16d89dd77b03b649d \ + --hash=sha256:df33902464410a1f1a0411a235f0a34e7e129f12cb6340daca0f9d1390f5fe10 \ + --hash=sha256:e16aaf788f1de5a85c8f8fcc9c1ca1dd7dd52b8ad30a7889ca31c7c7606615b8 \ + --hash=sha256:e3ad4968711fb379a67c8c755beb4dae8b721a83737737b7bcee27c05400b047 \ + --hash=sha256:e483b8b913fcd3b48badec54185c150cb7ab0e6487914b84dc7cde2365e0c892 \ + --hash=sha256:e71f666c3bf019f2490a47dddb44c3ccea2e69ac882f7495c68dc14d4065eac2 \ + --hash=sha256:ea1498ce4491236d1cffa0eee9ad0968b6ecb0c1cd711699c5677fc689905f00 \ + --hash=sha256:eaab9dc009e22726c62fe3b850b797e7f0e7ba76d245284d1064081f512c7226 \ + --hash=sha256:ec79dbe23702795944d2ae4c6925e35a075b88acd0d20acde7c77a817ebbce94 \ + --hash=sha256:f1b92e72babfd56585c75caf44f0b15258c58e6be23bc33f90885cebffde3400 \ + --hash=sha256:f1f46700402312bdc31912f6fc17f5ecaaaa3bafe5487c48f07c800052736289 \ + --hash=sha256:f518eac285c9632be337323eef9824a856f2680f943a9b68ac41d5f5bad7df7c \ + --hash=sha256:f86f20a9d5bee1a6ede0f2757b917bac6908cde0f5ad9fcb3606db1e2968bcf5 \ + 
--hash=sha256:f8fc652c354d3362e2932a79d5ac4bbd7170757a41a62c4fe0f057d29f10bebb \ + --hash=sha256:fe272a72c7ed29f84c42fedd2d06c2f9858dc0c00dae3b34ba15d6d8ae0fbaaf \ + --hash=sha256:fe863491664c6720d65ae438d4efaa5eca766565a53adb53bf14bc3246c72fe0 # via # -c requirements/main.txt # pydantic -pydantic-settings==2.0.3 \ - --hash=sha256:962dc3672495aad6ae96a4390fac7e593591e144625e5112d359f8f67fb75945 \ - --hash=sha256:ddd907b066622bd67603b75e2ff791875540dc485b7307c4fffc015719da8625 +pydantic-settings==2.1.0 \ + --hash=sha256:26b1492e0a24755626ac5e6d715e9077ab7ad4fb5f19a8b7ed7011d52f36141c \ + --hash=sha256:7621c0cb5d90d1140d2f0ef557bdf03573aac7035948109adf2574770b77605a # via autodoc-pydantic pydata-sphinx-theme==0.12.0 \ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ --hash=sha256:c17dbab67a3774f06f34f6378e896fcd0668cc8b5da1c1ba017e65cf1df0af58 # via documenteer -pygments==2.16.1 \ - --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \ - --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29 +pygments==2.17.1 \ + --hash=sha256:1b37f1b1e1bff2af52ecaf28cc601e2ef7077000b227a0675da25aef85784bc4 \ + --hash=sha256:e45a0e74bf9c530f564ca81b8952343be986a29f6afe7f5ad95c5f06b7bdf5e8 # via # pydata-sphinx-theme # rich @@ -651,9 +650,9 @@ pyyaml==6.0.1 \ # pre-commit # pybtex # sphinxcontrib-redoc -referencing==0.30.2 \ - --hash=sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf \ - --hash=sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0 +referencing==0.31.0 \ + --hash=sha256:381b11e53dd93babb55696c71cf42aef2d36b8a150c49bf0bc301e36d536c882 \ + --hash=sha256:cc28f2c88fbe7b961a7817a0abc034c09a1e36358f82fedb4ffdf29a25398863 # via # jsonschema # jsonschema-specifications @@ -664,131 +663,131 @@ requests==2.31.0 \ # -c requirements/main.txt # documenteer # sphinx -rich==13.6.0 \ - --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ 
- --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef +rich==13.7.0 \ + --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \ + --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235 # via pytest-pretty -rpds-py==0.12.0 \ - --hash=sha256:0525847f83f506aa1e28eb2057b696fe38217e12931c8b1b02198cfe6975e142 \ - --hash=sha256:05942656cb2cb4989cd50ced52df16be94d344eae5097e8583966a1d27da73a5 \ - --hash=sha256:0831d3ecdea22e4559cc1793f22e77067c9d8c451d55ae6a75bf1d116a8e7f42 \ - --hash=sha256:0853da3d5e9bc6a07b2486054a410b7b03f34046c123c6561b535bb48cc509e1 \ - --hash=sha256:08e6e7ff286254016b945e1ab632ee843e43d45e40683b66dd12b73791366dd1 \ - --hash=sha256:0a38612d07a36138507d69646c470aedbfe2b75b43a4643f7bd8e51e52779624 \ - --hash=sha256:0bedd91ae1dd142a4dc15970ed2c729ff6c73f33a40fa84ed0cdbf55de87c777 \ - --hash=sha256:0c5441b7626c29dbd54a3f6f3713ec8e956b009f419ffdaaa3c80eaf98ddb523 \ - --hash=sha256:0e9e976e0dbed4f51c56db10831c9623d0fd67aac02853fe5476262e5a22acb7 \ - --hash=sha256:0fadfdda275c838cba5102c7f90a20f2abd7727bf8f4a2b654a5b617529c5c18 \ - --hash=sha256:1096ca0bf2d3426cbe79d4ccc91dc5aaa73629b08ea2d8467375fad8447ce11a \ - --hash=sha256:171d9a159f1b2f42a42a64a985e4ba46fc7268c78299272ceba970743a67ee50 \ - --hash=sha256:188912b22b6c8225f4c4ffa020a2baa6ad8fabb3c141a12dbe6edbb34e7f1425 \ - --hash=sha256:1b4cf9ab9a0ae0cb122685209806d3f1dcb63b9fccdf1424fb42a129dc8c2faa \ - --hash=sha256:1e04581c6117ad9479b6cfae313e212fe0dfa226ac727755f0d539cd54792963 \ - --hash=sha256:1fa73ed22c40a1bec98d7c93b5659cd35abcfa5a0a95ce876b91adbda170537c \ - --hash=sha256:2124f9e645a94ab7c853bc0a3644e0ca8ffbe5bb2d72db49aef8f9ec1c285733 \ - --hash=sha256:240687b5be0f91fbde4936a329c9b7589d9259742766f74de575e1b2046575e4 \ - --hash=sha256:25740fb56e8bd37692ed380e15ec734be44d7c71974d8993f452b4527814601e \ - --hash=sha256:27ccc93c7457ef890b0dd31564d2a05e1aca330623c942b7e818e9e7c2669ee4 \ - 
--hash=sha256:281c8b219d4f4b3581b918b816764098d04964915b2f272d1476654143801aa2 \ - --hash=sha256:2d34a5450a402b00d20aeb7632489ffa2556ca7b26f4a63c35f6fccae1977427 \ - --hash=sha256:301bd744a1adaa2f6a5e06c98f1ac2b6f8dc31a5c23b838f862d65e32fca0d4b \ - --hash=sha256:30e5ce9f501fb1f970e4a59098028cf20676dee64fc496d55c33e04bbbee097d \ - --hash=sha256:33ab498f9ac30598b6406e2be1b45fd231195b83d948ebd4bd77f337cb6a2bff \ - --hash=sha256:35585a8cb5917161f42c2104567bb83a1d96194095fc54a543113ed5df9fa436 \ - --hash=sha256:389c0e38358fdc4e38e9995e7291269a3aead7acfcf8942010ee7bc5baee091c \ - --hash=sha256:3acadbab8b59f63b87b518e09c4c64b142e7286b9ca7a208107d6f9f4c393c5c \ - --hash=sha256:3b7a64d43e2a1fa2dd46b678e00cabd9a49ebb123b339ce799204c44a593ae1c \ - --hash=sha256:3c8c0226c71bd0ce9892eaf6afa77ae8f43a3d9313124a03df0b389c01f832de \ - --hash=sha256:429349a510da82c85431f0f3e66212d83efe9fd2850f50f339341b6532c62fe4 \ - --hash=sha256:466030a42724780794dea71eb32db83cc51214d66ab3fb3156edd88b9c8f0d78 \ - --hash=sha256:47aeceb4363851d17f63069318ba5721ae695d9da55d599b4d6fb31508595278 \ - --hash=sha256:48aa98987d54a46e13e6954880056c204700c65616af4395d1f0639eba11764b \ - --hash=sha256:4b2416ed743ec5debcf61e1242e012652a4348de14ecc7df3512da072b074440 \ - --hash=sha256:4d0a675a7acbbc16179188d8c6d0afb8628604fc1241faf41007255957335a0b \ - --hash=sha256:4eb74d44776b0fb0782560ea84d986dffec8ddd94947f383eba2284b0f32e35e \ - --hash=sha256:4f8a1d990dc198a6c68ec3d9a637ba1ce489b38cbfb65440a27901afbc5df575 \ - --hash=sha256:513ccbf7420c30e283c25c82d5a8f439d625a838d3ba69e79a110c260c46813f \ - --hash=sha256:5210a0018c7e09c75fa788648617ebba861ae242944111d3079034e14498223f \ - --hash=sha256:54cdfcda59251b9c2f87a05d038c2ae02121219a04d4a1e6fc345794295bdc07 \ - --hash=sha256:56dd500411d03c5e9927a1eb55621e906837a83b02350a9dc401247d0353717c \ - --hash=sha256:57ec6baec231bb19bb5fd5fc7bae21231860a1605174b11585660236627e390e \ - --hash=sha256:5f1519b080d8ce0a814f17ad9fb49fb3a1d4d7ce5891f5c85fc38631ca3a8dc4 \ - 
--hash=sha256:6174d6ad6b58a6bcf67afbbf1723420a53d06c4b89f4c50763d6fa0a6ac9afd2 \ - --hash=sha256:68172622a5a57deb079a2c78511c40f91193548e8ab342c31e8cb0764d362459 \ - --hash=sha256:6915fc9fa6b3ec3569566832e1bb03bd801c12cea030200e68663b9a87974e76 \ - --hash=sha256:6b75b912a0baa033350367a8a07a8b2d44fd5b90c890bfbd063a8a5f945f644b \ - --hash=sha256:6f5dcb658d597410bb7c967c1d24eaf9377b0d621358cbe9d2ff804e5dd12e81 \ - --hash=sha256:6f8d7fe73d1816eeb5378409adc658f9525ecbfaf9e1ede1e2d67a338b0c7348 \ - --hash=sha256:7036316cc26b93e401cedd781a579be606dad174829e6ad9e9c5a0da6e036f80 \ - --hash=sha256:7188ddc1a8887194f984fa4110d5a3d5b9b5cd35f6bafdff1b649049cbc0ce29 \ - --hash=sha256:761531076df51309075133a6bc1db02d98ec7f66e22b064b1d513bc909f29743 \ - --hash=sha256:7979d90ee2190d000129598c2b0c82f13053dba432b94e45e68253b09bb1f0f6 \ - --hash=sha256:8015835494b21aa7abd3b43fdea0614ee35ef6b03db7ecba9beb58eadf01c24f \ - --hash=sha256:81c4d1a3a564775c44732b94135d06e33417e829ff25226c164664f4a1046213 \ - --hash=sha256:81cf9d306c04df1b45971c13167dc3bad625808aa01281d55f3cf852dde0e206 \ - --hash=sha256:88857060b690a57d2ea8569bca58758143c8faa4639fb17d745ce60ff84c867e \ - --hash=sha256:8c567c664fc2f44130a20edac73e0a867f8e012bf7370276f15c6adc3586c37c \ - --hash=sha256:91bd2b7cf0f4d252eec8b7046fa6a43cee17e8acdfc00eaa8b3dbf2f9a59d061 \ - --hash=sha256:9620650c364c01ed5b497dcae7c3d4b948daeae6e1883ae185fef1c927b6b534 \ - --hash=sha256:9b007c2444705a2dc4a525964fd4dd28c3320b19b3410da6517cab28716f27d3 \ - --hash=sha256:9bf9acce44e967a5103fcd820fc7580c7b0ab8583eec4e2051aec560f7b31a63 \ - --hash=sha256:a239303acb0315091d54c7ff36712dba24554993b9a93941cf301391d8a997ee \ - --hash=sha256:a2baa6be130e8a00b6cbb9f18a33611ec150b4537f8563bddadb54c1b74b8193 \ - --hash=sha256:a54917b7e9cd3a67e429a630e237a90b096e0ba18897bfb99ee8bd1068a5fea0 \ - --hash=sha256:a689e1ded7137552bea36305a7a16ad2b40be511740b80748d3140614993db98 \ - --hash=sha256:a952ae3eb460c6712388ac2ec706d24b0e651b9396d90c9a9e0a69eb27737fdc \ - 
--hash=sha256:aa32205358a76bf578854bf31698a86dc8b2cb591fd1d79a833283f4a403f04b \ - --hash=sha256:b2287c09482949e0ca0c0eb68b2aca6cf57f8af8c6dfd29dcd3bc45f17b57978 \ - --hash=sha256:b6b0e17d39d21698185097652c611f9cf30f7c56ccec189789920e3e7f1cee56 \ - --hash=sha256:b710bf7e7ae61957d5c4026b486be593ed3ec3dca3e5be15e0f6d8cf5d0a4990 \ - --hash=sha256:b8e11715178f3608874508f08e990d3771e0b8c66c73eb4e183038d600a9b274 \ - --hash=sha256:b92aafcfab3d41580d54aca35a8057341f1cfc7c9af9e8bdfc652f83a20ced31 \ - --hash=sha256:bec29b801b4adbf388314c0d050e851d53762ab424af22657021ce4b6eb41543 \ - --hash=sha256:c694bee70ece3b232df4678448fdda245fd3b1bb4ba481fb6cd20e13bb784c46 \ - --hash=sha256:c6b52b7028b547866c2413f614ee306c2d4eafdd444b1ff656bf3295bf1484aa \ - --hash=sha256:cb41ad20064e18a900dd427d7cf41cfaec83bcd1184001f3d91a1f76b3fcea4e \ - --hash=sha256:cd316dbcc74c76266ba94eb021b0cc090b97cca122f50bd7a845f587ff4bf03f \ - --hash=sha256:ced40cdbb6dd47a032725a038896cceae9ce267d340f59508b23537f05455431 \ - --hash=sha256:d1c562a9bb72244fa767d1c1ab55ca1d92dd5f7c4d77878fee5483a22ffac808 \ - --hash=sha256:d389ff1e95b6e46ebedccf7fd1fadd10559add595ac6a7c2ea730268325f832c \ - --hash=sha256:d56b1cd606ba4cedd64bb43479d56580e147c6ef3f5d1c5e64203a1adab784a2 \ - --hash=sha256:d72a4315514e5a0b9837a086cb433b004eea630afb0cc129de76d77654a9606f \ - --hash=sha256:d9e7f29c00577aff6b318681e730a519b235af292732a149337f6aaa4d1c5e31 \ - --hash=sha256:dbc25baa6abb205766fb8606f8263b02c3503a55957fcb4576a6bb0a59d37d10 \ - --hash=sha256:e57919c32ee295a2fca458bb73e4b20b05c115627f96f95a10f9f5acbd61172d \ - --hash=sha256:e5bbe011a2cea9060fef1bb3d668a2fd8432b8888e6d92e74c9c794d3c101595 \ - --hash=sha256:e6aea5c0eb5b0faf52c7b5c4a47c8bb64437173be97227c819ffa31801fa4e34 \ - --hash=sha256:e888be685fa42d8b8a3d3911d5604d14db87538aa7d0b29b1a7ea80d354c732d \ - --hash=sha256:eebaf8c76c39604d52852366249ab807fe6f7a3ffb0dd5484b9944917244cdbe \ - --hash=sha256:efbe0b5e0fd078ed7b005faa0170da4f72666360f66f0bb2d7f73526ecfd99f9 \ - 
--hash=sha256:efddca2d02254a52078c35cadad34762adbae3ff01c6b0c7787b59d038b63e0d \ - --hash=sha256:f05450fa1cd7c525c0b9d1a7916e595d3041ac0afbed2ff6926e5afb6a781b7f \ - --hash=sha256:f12d69d568f5647ec503b64932874dade5a20255736c89936bf690951a5e79f5 \ - --hash=sha256:f45321224144c25a62052035ce96cbcf264667bcb0d81823b1bbc22c4addd194 \ - --hash=sha256:f62581d7e884dd01ee1707b7c21148f61f2febb7de092ae2f108743fcbef5985 \ - --hash=sha256:f8832a4f83d4782a8f5a7b831c47e8ffe164e43c2c148c8160ed9a6d630bc02a \ - --hash=sha256:fa35ad36440aaf1ac8332b4a4a433d4acd28f1613f0d480995f5cfd3580e90b7 +rpds-py==0.13.0 \ + --hash=sha256:0982b59d014efb84a57128e7e69399fb29ad8f2da5b0a5bcbfd12e211c00492e \ + --hash=sha256:13c8061115f1468de6ffdfb1d31b446e1bd814f1ff6e556862169aacb9fbbc5d \ + --hash=sha256:152570689a27ae0be1d5f50b21dad38d450b9227d0974f23bd400400ea087e88 \ + --hash=sha256:153b6d8cf7ae4b9ffd09de6abeda661e351e3e06eaafd18a8c104ea00099b131 \ + --hash=sha256:15a2d542de5cbfc6abddc4846d9412b59f8ee9c8dfa0b9c92a29321297c91745 \ + --hash=sha256:169063f346b8fd84f47d986c9c48e6094eb38b839c1287e7cb886b8a2b32195d \ + --hash=sha256:1758197cc8d7ff383c07405f188253535b4aa7fa745cbc54d221ae84b18e0702 \ + --hash=sha256:189aebd44a07fa7b7966cf78b85bde8335b0b6c3b1c4ef5589f8c03176830107 \ + --hash=sha256:1c9c9cb48ab77ebfa47db25b753f594d4f44959cfe43b713439ca6e3c9329671 \ + --hash=sha256:1e5becd0de924616ca9a12abeb6458568d1dc8fe5c670d5cdb738402a8a8429d \ + --hash=sha256:1e63b32b856c0f08a56b76967d61b6ad811d8d330a8aebb9d21afadd82a296f6 \ + --hash=sha256:1f22cab655b41033d430f20266bf563b35038a7f01c9a099b0ccfd30a7fb9247 \ + --hash=sha256:2063ab9cd1be7ef6b5ed0f408e2bdf32c060b6f40c097a468f32864731302636 \ + --hash=sha256:240279ca0b2afd6d4710afce1c94bf9e75fc161290bf62c0feba64d64780d80b \ + --hash=sha256:244be953f13f148b0071d67a610f89cd72eb5013a147e517d6ca3f3f3b7e0380 \ + --hash=sha256:25c9727da2dabc93664a18eda7a70feedf478f0c4c8294e4cdba7f60a479a246 \ + 
--hash=sha256:26660c74a20fe249fad75ca00bbfcf60e57c3fdbde92971c88a20e07fea1de64 \ + --hash=sha256:28324f2f0247d407daabf7ff357ad9f36126075c92a0cf5319396d96ff4e1248 \ + --hash=sha256:28bb22019f4a783ea06a6b81437d5996551869e8a722ee8720b744f7684d97f4 \ + --hash=sha256:2a29ec68fa9655ce9501bc6ae074b166e8b45c2dfcd2d71d90d1a61758ed8c73 \ + --hash=sha256:2e73511e88368f93c24efe7c9a20b319eaa828bc7431f8a17713efb9e31a39fa \ + --hash=sha256:2ed65ad3fc5065d13e31e90794e0b52e405b63ae4fab1080caeaadc10a3439c5 \ + --hash=sha256:35cc91cbb0b775705e0feb3362490b8418c408e9e3c3b9cb3b02f6e495f03ee7 \ + --hash=sha256:3a1a38512925829784b5dc38591c757b80cfce115c72c594dc59567dab62b9c4 \ + --hash=sha256:3c5b9ad4d3e05dfcf8629f0d534f92610e9805dbce2fcb9b3c801ddb886431d5 \ + --hash=sha256:4084ab6840bd4d79eff3b5f497add847a7db31ce5a0c2d440c90b2d2b7011857 \ + --hash=sha256:42d0ad129c102856a364ccc7d356faec017af86b3543a8539795f22b6cabad11 \ + --hash=sha256:46be9c0685cce2ea02151aa8308f2c1b78581be41a5dd239448a941a210ef5dd \ + --hash=sha256:4eb1faf8e2ee9a2de3cb3ae4c8c355914cdc85f2cd7f27edf76444c9550ce1e7 \ + --hash=sha256:50b6d80925dfeb573fc5e38582fb9517c6912dc462cc858a11c8177b0837127a \ + --hash=sha256:525d19ef0a999229ef0f0a7687ab2c9a00d1b6a47a005006f4d8c4b8975fdcec \ + --hash=sha256:533d728ea5ad5253af3395102723ca8a77b62de47b2295155650c9a88fcdeec8 \ + --hash=sha256:54b1d671a74395344239ee3adbcd8c496525f6a2b2e54c40fec69620a31a8dcb \ + --hash=sha256:54e513df45a8a9419e7952ffd26ac9a5b7b1df97fe72530421794b0de29f9d72 \ + --hash=sha256:5c2545bba02f68abdf398ef4990dc77592cc1e5d29438b35b3a3ca34d171fb4b \ + --hash=sha256:5c6824673f66c47f7ee759c21e973bfce3ceaf2c25cb940cb45b41105dc914e8 \ + --hash=sha256:6052bb47ea583646b8ff562acacb9a2ec5ec847267049cbae3919671929e94c6 \ + --hash=sha256:62772259b3381e2aabf274c74fd1e1ac03b0524de0a6593900684becfa8cfe4b \ + --hash=sha256:66eb5aa36e857f768c598d2082fafb733eaf53e06e1169c6b4de65636e04ffd0 \ + --hash=sha256:6ad465e5a70580ca9c1944f43a9a71bca3a7b74554347fc96ca0479eca8981f9 \ + 
--hash=sha256:70cfe098d915f566eeebcb683f49f9404d2f948432891b6e075354336eda9dfb \ + --hash=sha256:715df74cbcef4387d623c917f295352127f4b3e0388038d68fa577b4e4c6e540 \ + --hash=sha256:7472bd60a8293217444bdc6a46e516feb8d168da44d5f3fccea0336e88e3b79a \ + --hash=sha256:762013dd59df12380c5444f61ccbf9ae1297027cabbd7aa25891f724ebf8c8f7 \ + --hash=sha256:766b573a964389ef0d91a26bb31e1b59dbc5d06eff7707f3dfcec23d93080ba3 \ + --hash=sha256:7e5fbe9800f09c56967fda88c4d9272955e781699a66102bd098f22511a3f260 \ + --hash=sha256:8220321f2dccd9d66f72639185247cb7bbdd90753bf0b6bfca0fa31dba8af23c \ + --hash=sha256:84f7f3f18d29a1c645729634003d21d84028bd9c2fd78eba9d028998f46fa5aa \ + --hash=sha256:87f591ff8cc834fa01ca5899ab5edcd7ee590492a9cdcf43424ac142e731ce3e \ + --hash=sha256:8a33d2b6340261191bb59adb5a453fa6c7d99de85552bd4e8196411f0509c9bf \ + --hash=sha256:8b9c1dd90461940315981499df62a627571c4f0992e8bafc5396d33916224cac \ + --hash=sha256:8c4e84016ba225e09df20fed8befe8c68d14fbeff6078f4a0ff907ae2095e17e \ + --hash=sha256:8dd69e01b29ff45a0062cad5c480d8aa9301c3ef09da471f86337a78eb2d3405 \ + --hash=sha256:91ca9aaee7ccdfa66d800b5c4ec634fefca947721bab52d6ad2f6350969a3771 \ + --hash=sha256:9435bf4832555c4f769c6be9401664357be33d5f5d8dc58f5c20fb8d21e2c45d \ + --hash=sha256:95375c44ffb9ea2bc25d67fb66e726ea266ff1572df50b9556fe28a5f3519cd7 \ + --hash=sha256:95c11647fac2a3515ea2614a79e14b7c75025724ad54c91c7db4a6ea5c25ef19 \ + --hash=sha256:9645f7fe10a68b2396d238250b4b264c2632d2eb6ce2cb90aa0fe08adee194be \ + --hash=sha256:977c6123c359dcc70ce3161b781ab70b0d342de2666944b776617e01a0a7822a \ + --hash=sha256:97c1be5a018cdad54fa7e5f7d36b9ab45ef941a1d185987f18bdab0a42344012 \ + --hash=sha256:981e46e1e5064f95460381bff4353783b4b5ce351c930e5b507ebe0278c61dac \ + --hash=sha256:9c4c4b4ff3de834ec5c1c690e5a18233ca78547d003eb83664668ccf09ef1398 \ + --hash=sha256:9f50ca0460f1f7a89ab9b8355d83ac993d5998ad4218e76654ecf8afe648d8aa \ + --hash=sha256:a2383f400691fd7bd63347d4d75eb2fd525de9d901799a33a4e896c9885609f8 \ + 
--hash=sha256:a25f514a53927b6b4bd04a9a6a13b55209df54f548660eeed673336c0c946d14 \ + --hash=sha256:a61a152d61e3ae26e0bbba7b2f568f6f25ca0abdeb6553eca7e7c45b59d9b1a9 \ + --hash=sha256:a78861123b002725633871a2096c3a4313224aab3d11b953dced87cfba702418 \ + --hash=sha256:afcec1f5b09d0db70aeb2d90528a9164acb61841a3124e28f6ac0137f4c36cb4 \ + --hash=sha256:afde37e3763c602d0385bce5c12f262e7b1dd2a0f323e239fa9d7b2d4d5d8509 \ + --hash=sha256:b431c2c0ff1ea56048a2b066d99d0c2d151ae7625b20be159b7e699f3e80390b \ + --hash=sha256:b4de9d20fe68c16b4d97f551a09920745add0c86430262230528b83c2ed2fe90 \ + --hash=sha256:b70a54fb628c1d6400e351674a31ba63d2912b8c5b707f99b408674a5d8b69ab \ + --hash=sha256:b9a0507342c37132813449393e6e6f351bbff376031cfff1ee6e616402ac7908 \ + --hash=sha256:bad6758df5f1042b35683bd1811d5432ac1b17700a5a2a51fdc293f7df5f7827 \ + --hash=sha256:c07cb9bcccd08f9bc2fd05bf586479df4272ea5a6a70fbcb59b018ed48a5a84d \ + --hash=sha256:c10326e30c97a95b7e1d75e5200ef0b9827aa0f861e331e43b15dfdfd63e669b \ + --hash=sha256:c1a920fa679ec2758411d66bf68840b0a21317b9954ab0e973742d723bb67709 \ + --hash=sha256:c1e37dfffe8959a492b7b331995f291847a41a035b4aad82d6060f38e8378a2b \ + --hash=sha256:c472409037e05ed87b99430f97a6b82130328bb977502813547e8ee6a3392502 \ + --hash=sha256:c8a9cec0f49df9bac252d92f138c0d7708d98828e21fd57db78087d8f50b5656 \ + --hash=sha256:c99f9dda2c959f7bb69a7125e192c74fcafb7a534a95ccf49313ae3a04807804 \ + --hash=sha256:c9f4c2b7d989426e9fe9b720211172cf10eb5f7aa16c63de2e5dc61457abcf35 \ + --hash=sha256:cdded3cf9e36840b09ccef714d5fa74a03f4eb6cf81e694226ed9cb5e6f90de0 \ + --hash=sha256:d5bf560634ea6e9a59ceb2181a6cd6195a03f48cef9a400eb15e197e18f14548 \ + --hash=sha256:d70a93a40e55da117c511ddc514642bc7d59a95a99137168a5f3f2f876b47962 \ + --hash=sha256:da2852201e8e00c86be82c43d6893e6c380ef648ae53f337ffd1eaa35e3dfb8a \ + --hash=sha256:e1f40faf406c52c7ae7d208b9140377c06397248978ccb03fbfbb30a0571e359 \ + --hash=sha256:e33b17915c8e4fb2ea8b91bb4c46cba92242c63dd38b87e869ead5ba217e2970 \ + 
--hash=sha256:e499bf2200eb74774a6f85a7465e3bc5273fa8ef0055590d97a88c1e7ea02eea \ + --hash=sha256:e6c6fed07d13b9e0fb689356c40c81f1aa92e3c9d91d8fd5816a0348ccd999f7 \ + --hash=sha256:e8f1d466a9747213d3cf7e1afec849cc51edb70d5b4ae9a82eca0f172bfbb6d0 \ + --hash=sha256:eef7ee7c70f8b8698be468d54f9f5e01804f3a1dd5657e8a96363dbd52b9b5ec \ + --hash=sha256:efdd02971a02f98492a72b25484f1f6125fb9f2166e48cc4c9bfa563349c851b \ + --hash=sha256:f6c225011467021879c0482316e42d8a28852fc29f0c15d2a435ff457cadccd4 \ + --hash=sha256:f714dd5b705f1c394d1b361d96486c4981055c434a7eafb1a3147ac75e34a3de \ + --hash=sha256:f7c7ddc8d1a64623068da5a15e28001fbd0f0aff754aae7a75a4be5042191638 \ + --hash=sha256:f9339d1404b87e6d8cb35e485945753be57a99ab9bb389f42629215b2f6bda0f \ + --hash=sha256:fdaef49055cc0c701fb17b9b34a38ef375e5cdb230b3722d4a12baf9b7cbc6d3 \ + --hash=sha256:fea99967d4a978ce95dd52310bcb4a943b77c61725393bca631b0908047d6e2f # via # jsonschema # referencing -ruff==0.1.5 \ - --hash=sha256:171276c1df6c07fa0597fb946139ced1c2978f4f0b8254f201281729981f3c17 \ - --hash=sha256:17ef33cd0bb7316ca65649fc748acc1406dfa4da96a3d0cde6d52f2e866c7b39 \ - --hash=sha256:32d47fc69261c21a4c48916f16ca272bf2f273eb635d91c65d5cd548bf1f3d96 \ - --hash=sha256:5cbec0ef2ae1748fb194f420fb03fb2c25c3258c86129af7172ff8f198f125ab \ - --hash=sha256:721f4b9d3b4161df8dc9f09aa8562e39d14e55a4dbaa451a8e55bdc9590e20f4 \ - --hash=sha256:82bfcb9927e88c1ed50f49ac6c9728dab3ea451212693fe40d08d314663e412f \ - --hash=sha256:9b97fd6da44d6cceb188147b68db69a5741fbc736465b5cea3928fdac0bc1aeb \ - --hash=sha256:a00a7ec893f665ed60008c70fe9eeb58d210e6b4d83ec6654a9904871f982a2a \ - --hash=sha256:a4894dddb476597a0ba4473d72a23151b8b3b0b5f958f2cf4d3f1c572cdb7af7 \ - --hash=sha256:a8c11206b47f283cbda399a654fd0178d7a389e631f19f51da15cbe631480c5b \ - --hash=sha256:aafb9d2b671ed934998e881e2c0f5845a4295e84e719359c71c39a5363cccc91 \ - --hash=sha256:b2c205827b3f8c13b4a432e9585750b93fd907986fe1aec62b2a02cf4401eee6 \ - 
--hash=sha256:bb408e3a2ad8f6881d0f2e7ad70cddb3ed9f200eb3517a91a245bbe27101d379 \ - --hash=sha256:c21fe20ee7d76206d290a76271c1af7a5096bc4c73ab9383ed2ad35f852a0087 \ - --hash=sha256:f20dc5e5905ddb407060ca27267c7174f532375c08076d1a953cf7bb016f5a24 \ - --hash=sha256:f80c73bba6bc69e4fdc73b3991db0b546ce641bdcd5b07210b8ad6f64c79f1ab \ - --hash=sha256:fa29e67b3284b9a79b1a85ee66e293a94ac6b7bb068b307a8a373c3d343aa8ec +ruff==0.1.6 \ + --hash=sha256:03910e81df0d8db0e30050725a5802441c2022ea3ae4fe0609b76081731accbc \ + --hash=sha256:05991ee20d4ac4bb78385360c684e4b417edd971030ab12a4fbd075ff535050e \ + --hash=sha256:137852105586dcbf80c1717facb6781555c4e99f520c9c827bd414fac67ddfb6 \ + --hash=sha256:1610e14750826dfc207ccbcdd7331b6bd285607d4181df9c1c6ae26646d6848a \ + --hash=sha256:1b09f29b16c6ead5ea6b097ef2764b42372aebe363722f1605ecbcd2b9207184 \ + --hash=sha256:1cf5f701062e294f2167e66d11b092bba7af6a057668ed618a9253e1e90cfd76 \ + --hash=sha256:3a0cd909d25f227ac5c36d4e7e681577275fb74ba3b11d288aff7ec47e3ae745 \ + --hash=sha256:4558b3e178145491e9bc3b2ee3c4b42f19d19384eaa5c59d10acf6e8f8b57e33 \ + --hash=sha256:491262006e92f825b145cd1e52948073c56560243b55fb3b4ecb142f6f0e9543 \ + --hash=sha256:5c549ed437680b6105a1299d2cd30e4964211606eeb48a0ff7a93ef70b902248 \ + --hash=sha256:683aa5bdda5a48cb8266fcde8eea2a6af4e5700a392c56ea5fb5f0d4bfdc0240 \ + --hash=sha256:87455a0c1f739b3c069e2f4c43b66479a54dea0276dd5d4d67b091265f6fd1dc \ + --hash=sha256:88b8cdf6abf98130991cbc9f6438f35f6e8d41a02622cc5ee130a02a0ed28703 \ + --hash=sha256:bd98138a98d48a1c36c394fd6b84cd943ac92a08278aa8ac8c0fdefcf7138f35 \ + --hash=sha256:e8fd1c62a47aa88a02707b5dd20c5ff20d035d634aa74826b42a1da77861b5ff \ + --hash=sha256:ea284789861b8b5ca9d5443591a92a397ac183d4351882ab52f6296b4fdd5462 \ + --hash=sha256:fd89b45d374935829134a082617954120d7a1470a9f0ec0e7f3ead983edc48cc # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ diff --git a/requirements/main.txt 
b/requirements/main.txt index b01d48674c..5e659214e1 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -38,9 +38,9 @@ bcrypt==4.0.1 \ --hash=sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e \ --hash=sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3 # via -r requirements/main.in -certifi==2023.7.22 \ - --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ - --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 +certifi==2023.11.17 \ + --hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ + --hash=sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474 # via # httpcore # httpx @@ -340,120 +340,119 @@ pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pydantic==2.4.2 \ - --hash=sha256:94f336138093a5d7f426aac732dcfe7ab4eb4da243c88f891d65deb4a2556ee7 \ - --hash=sha256:bc3ddf669d234f4220e6e1c4d96b061abe0998185a8d7855c0126782b7abc8c1 +pydantic==2.5.1 \ + --hash=sha256:0b8be5413c06aadfbe56f6dc1d45c9ed25fd43264414c571135c97dd77c2bedb \ + --hash=sha256:dc5244a8939e0d9a68f1f1b5f550b2e1c879912033b1becbedb315accc75441b # via # -r requirements/main.in # fastapi # safir -pydantic-core==2.10.1 \ - --hash=sha256:042462d8d6ba707fd3ce9649e7bf268633a41018d6a998fb5fbacb7e928a183e \ - --hash=sha256:0523aeb76e03f753b58be33b26540880bac5aa54422e4462404c432230543f33 \ - --hash=sha256:05560ab976012bf40f25d5225a58bfa649bb897b87192a36c6fef1ab132540d7 \ - --hash=sha256:0675ba5d22de54d07bccde38997e780044dcfa9a71aac9fd7d4d7a1d2e3e65f7 \ - --hash=sha256:073d4a470b195d2b2245d0343569aac7e979d3a0dcce6c7d2af6d8a920ad0bea \ - --hash=sha256:07ec6d7d929ae9c68f716195ce15e745b3e8fa122fc67698ac6498d802ed0fa4 \ - --hash=sha256:0880e239827b4b5b3e2ce05e6b766a7414e5f5aedc4523be6b68cfbc7f61c5d0 \ - 
--hash=sha256:0c27f38dc4fbf07b358b2bc90edf35e82d1703e22ff2efa4af4ad5de1b3833e7 \ - --hash=sha256:0d8a8adef23d86d8eceed3e32e9cca8879c7481c183f84ed1a8edc7df073af94 \ - --hash=sha256:0e2a35baa428181cb2270a15864ec6286822d3576f2ed0f4cd7f0c1708472aff \ - --hash=sha256:0f8682dbdd2f67f8e1edddcbffcc29f60a6182b4901c367fc8c1c40d30bb0a82 \ - --hash=sha256:0fa467fd300a6f046bdb248d40cd015b21b7576c168a6bb20aa22e595c8ffcdd \ - --hash=sha256:128552af70a64660f21cb0eb4876cbdadf1a1f9d5de820fed6421fa8de07c893 \ - --hash=sha256:1396e81b83516b9d5c9e26a924fa69164156c148c717131f54f586485ac3c15e \ - --hash=sha256:149b8a07712f45b332faee1a2258d8ef1fb4a36f88c0c17cb687f205c5dc6e7d \ - --hash=sha256:14ac492c686defc8e6133e3a2d9eaf5261b3df26b8ae97450c1647286750b901 \ - --hash=sha256:14cfbb00959259e15d684505263d5a21732b31248a5dd4941f73a3be233865b9 \ - --hash=sha256:14e09ff0b8fe6e46b93d36a878f6e4a3a98ba5303c76bb8e716f4878a3bee92c \ - --hash=sha256:154ea7c52e32dce13065dbb20a4a6f0cc012b4f667ac90d648d36b12007fa9f7 \ - --hash=sha256:15d6bca84ffc966cc9976b09a18cf9543ed4d4ecbd97e7086f9ce9327ea48891 \ - --hash=sha256:1d40f55222b233e98e3921df7811c27567f0e1a4411b93d4c5c0f4ce131bc42f \ - --hash=sha256:25bd966103890ccfa028841a8f30cebcf5875eeac8c4bde4fe221364c92f0c9a \ - --hash=sha256:2cf5bb4dd67f20f3bbc1209ef572a259027c49e5ff694fa56bed62959b41e1f9 \ - --hash=sha256:2e0e2959ef5d5b8dc9ef21e1a305a21a36e254e6a34432d00c72a92fdc5ecda5 \ - --hash=sha256:320f14bd4542a04ab23747ff2c8a778bde727158b606e2661349557f0770711e \ - --hash=sha256:3625578b6010c65964d177626fde80cf60d7f2e297d56b925cb5cdeda6e9925a \ - --hash=sha256:39215d809470f4c8d1881758575b2abfb80174a9e8daf8f33b1d4379357e417c \ - --hash=sha256:3f0ac9fb8608dbc6eaf17956bf623c9119b4db7dbb511650910a82e261e6600f \ - --hash=sha256:417243bf599ba1f1fef2bb8c543ceb918676954734e2dcb82bf162ae9d7bd514 \ - --hash=sha256:420a692b547736a8d8703c39ea935ab5d8f0d2573f8f123b0a294e49a73f214b \ - --hash=sha256:443fed67d33aa85357464f297e3d26e570267d1af6fef1c21ca50921d2976302 \ - 
--hash=sha256:48525933fea744a3e7464c19bfede85df4aba79ce90c60b94d8b6e1eddd67096 \ - --hash=sha256:485a91abe3a07c3a8d1e082ba29254eea3e2bb13cbbd4351ea4e5a21912cc9b0 \ - --hash=sha256:4a5be350f922430997f240d25f8219f93b0c81e15f7b30b868b2fddfc2d05f27 \ - --hash=sha256:4d966c47f9dd73c2d32a809d2be529112d509321c5310ebf54076812e6ecd884 \ - --hash=sha256:524ff0ca3baea164d6d93a32c58ac79eca9f6cf713586fdc0adb66a8cdeab96a \ - --hash=sha256:53df009d1e1ba40f696f8995683e067e3967101d4bb4ea6f667931b7d4a01357 \ - --hash=sha256:5994985da903d0b8a08e4935c46ed8daf5be1cf217489e673910951dc533d430 \ - --hash=sha256:5cabb9710f09d5d2e9e2748c3e3e20d991a4c5f96ed8f1132518f54ab2967221 \ - --hash=sha256:5fdb39f67c779b183b0c853cd6b45f7db84b84e0571b3ef1c89cdb1dfc367325 \ - --hash=sha256:600d04a7b342363058b9190d4e929a8e2e715c5682a70cc37d5ded1e0dd370b4 \ - --hash=sha256:631cb7415225954fdcc2a024119101946793e5923f6c4d73a5914d27eb3d3a05 \ - --hash=sha256:63974d168b6233b4ed6a0046296803cb13c56637a7b8106564ab575926572a55 \ - --hash=sha256:64322bfa13e44c6c30c518729ef08fda6026b96d5c0be724b3c4ae4da939f875 \ - --hash=sha256:655f8f4c8d6a5963c9a0687793da37b9b681d9ad06f29438a3b2326d4e6b7970 \ - --hash=sha256:6835451b57c1b467b95ffb03a38bb75b52fb4dc2762bb1d9dbed8de31ea7d0fc \ - --hash=sha256:6db2eb9654a85ada248afa5a6db5ff1cf0f7b16043a6b070adc4a5be68c716d6 \ - --hash=sha256:7c4d1894fe112b0864c1fa75dffa045720a194b227bed12f4be7f6045b25209f \ - --hash=sha256:7eb037106f5c6b3b0b864ad226b0b7ab58157124161d48e4b30c4a43fef8bc4b \ - --hash=sha256:8282bab177a9a3081fd3d0a0175a07a1e2bfb7fcbbd949519ea0980f8a07144d \ - --hash=sha256:82f55187a5bebae7d81d35b1e9aaea5e169d44819789837cdd4720d768c55d15 \ - --hash=sha256:8572cadbf4cfa95fb4187775b5ade2eaa93511f07947b38f4cd67cf10783b118 \ - --hash=sha256:8cdbbd92154db2fec4ec973d45c565e767ddc20aa6dbaf50142676484cbff8ee \ - --hash=sha256:8f6e6aed5818c264412ac0598b581a002a9f050cb2637a84979859e70197aa9e \ - --hash=sha256:92f675fefa977625105708492850bcbc1182bfc3e997f8eecb866d1927c98ae6 \ - 
--hash=sha256:962ed72424bf1f72334e2f1e61b68f16c0e596f024ca7ac5daf229f7c26e4208 \ - --hash=sha256:9badf8d45171d92387410b04639d73811b785b5161ecadabf056ea14d62d4ede \ - --hash=sha256:9c120c9ce3b163b985a3b966bb701114beb1da4b0468b9b236fc754783d85aa3 \ - --hash=sha256:9f6f3e2598604956480f6c8aa24a3384dbf6509fe995d97f6ca6103bb8c2534e \ - --hash=sha256:a1254357f7e4c82e77c348dabf2d55f1d14d19d91ff025004775e70a6ef40ada \ - --hash=sha256:a1392e0638af203cee360495fd2cfdd6054711f2db5175b6e9c3c461b76f5175 \ - --hash=sha256:a1c311fd06ab3b10805abb72109f01a134019739bd3286b8ae1bc2fc4e50c07a \ - --hash=sha256:a5cb87bdc2e5f620693148b5f8f842d293cae46c5f15a1b1bf7ceeed324a740c \ - --hash=sha256:a7a7902bf75779bc12ccfc508bfb7a4c47063f748ea3de87135d433a4cca7a2f \ - --hash=sha256:aad7bd686363d1ce4ee930ad39f14e1673248373f4a9d74d2b9554f06199fb58 \ - --hash=sha256:aafdb89fdeb5fe165043896817eccd6434aee124d5ee9b354f92cd574ba5e78f \ - --hash=sha256:ae8a8843b11dc0b03b57b52793e391f0122e740de3df1474814c700d2622950a \ - --hash=sha256:b00bc4619f60c853556b35f83731bd817f989cba3e97dc792bb8c97941b8053a \ - --hash=sha256:b1f22a9ab44de5f082216270552aa54259db20189e68fc12484873d926426921 \ - --hash=sha256:b3c01c2fb081fced3bbb3da78510693dc7121bb893a1f0f5f4b48013201f362e \ - --hash=sha256:b3dcd587b69bbf54fc04ca157c2323b8911033e827fffaecf0cafa5a892a0904 \ - --hash=sha256:b4a6db486ac8e99ae696e09efc8b2b9fea67b63c8f88ba7a1a16c24a057a0776 \ - --hash=sha256:bec7dd208a4182e99c5b6c501ce0b1f49de2802448d4056091f8e630b28e9a52 \ - --hash=sha256:c0877239307b7e69d025b73774e88e86ce82f6ba6adf98f41069d5b0b78bd1bf \ - --hash=sha256:caa48fc31fc7243e50188197b5f0c4228956f97b954f76da157aae7f67269ae8 \ - --hash=sha256:cfe1090245c078720d250d19cb05d67e21a9cd7c257698ef139bc41cf6c27b4f \ - --hash=sha256:d43002441932f9a9ea5d6f9efaa2e21458221a3a4b417a14027a1d530201ef1b \ - --hash=sha256:d64728ee14e667ba27c66314b7d880b8eeb050e58ffc5fec3b7a109f8cddbd63 \ - --hash=sha256:d6495008733c7521a89422d7a68efa0a0122c99a5861f06020ef5b1f51f9ba7c \ - 
--hash=sha256:d8f1ebca515a03e5654f88411420fea6380fc841d1bea08effb28184e3d4899f \ - --hash=sha256:d99277877daf2efe074eae6338453a4ed54a2d93fb4678ddfe1209a0c93a2468 \ - --hash=sha256:da01bec0a26befab4898ed83b362993c844b9a607a86add78604186297eb047e \ - --hash=sha256:db9a28c063c7c00844ae42a80203eb6d2d6bbb97070cfa00194dff40e6f545ab \ - --hash=sha256:dda81e5ec82485155a19d9624cfcca9be88a405e2857354e5b089c2a982144b2 \ - --hash=sha256:e357571bb0efd65fd55f18db0a2fb0ed89d0bb1d41d906b138f088933ae618bb \ - --hash=sha256:e544246b859f17373bed915182ab841b80849ed9cf23f1f07b73b7c58baee5fb \ - --hash=sha256:e562617a45b5a9da5be4abe72b971d4f00bf8555eb29bb91ec2ef2be348cd132 \ - --hash=sha256:e570ffeb2170e116a5b17e83f19911020ac79d19c96f320cbfa1fa96b470185b \ - --hash=sha256:e6f31a17acede6a8cd1ae2d123ce04d8cca74056c9d456075f4f6f85de055607 \ - --hash=sha256:e9121b4009339b0f751955baf4543a0bfd6bc3f8188f8056b1a25a2d45099934 \ - --hash=sha256:ebedb45b9feb7258fac0a268a3f6bec0a2ea4d9558f3d6f813f02ff3a6dc6698 \ - --hash=sha256:ecaac27da855b8d73f92123e5f03612b04c5632fd0a476e469dfc47cd37d6b2e \ - --hash=sha256:ecdbde46235f3d560b18be0cb706c8e8ad1b965e5c13bbba7450c86064e96561 \ - --hash=sha256:ed550ed05540c03f0e69e6d74ad58d026de61b9eaebebbaaf8873e585cbb18de \ - --hash=sha256:eeb3d3d6b399ffe55f9a04e09e635554012f1980696d6b0aca3e6cf42a17a03b \ - --hash=sha256:ef337945bbd76cce390d1b2496ccf9f90b1c1242a3a7bc242ca4a9fc5993427a \ - --hash=sha256:f1365e032a477c1430cfe0cf2856679529a2331426f8081172c4a74186f1d595 \ - --hash=sha256:f23b55eb5464468f9e0e9a9935ce3ed2a870608d5f534025cd5536bca25b1402 \ - --hash=sha256:f2e9072d71c1f6cfc79a36d4484c82823c560e6f5599c43c1ca6b5cdbd54f881 \ - --hash=sha256:f323306d0556351735b54acbf82904fe30a27b6a7147153cbe6e19aaaa2aa429 \ - --hash=sha256:f36a3489d9e28fe4b67be9992a23029c3cec0babc3bd9afb39f49844a8c721c5 \ - --hash=sha256:f64f82cc3443149292b32387086d02a6c7fb39b8781563e0ca7b8d7d9cf72bd7 \ - --hash=sha256:f6defd966ca3b187ec6c366604e9296f585021d922e666b99c47e78738b5666c \ - 
--hash=sha256:f7c2b8eb9fc872e68b46eeaf835e86bccc3a58ba57d0eedc109cbb14177be531 \ - --hash=sha256:fa7db7558607afeccb33c0e4bf1c9a9a835e26599e76af6fe2fcea45904083a6 \ - --hash=sha256:fcb83175cc4936a5425dde3356f079ae03c0802bbdf8ff82c035f8a54b333521 +pydantic-core==2.14.3 \ + --hash=sha256:056ea7cc3c92a7d2a14b5bc9c9fa14efa794d9f05b9794206d089d06d3433dc7 \ + --hash=sha256:0653fb9fc2fa6787f2fa08631314ab7fc8070307bd344bf9471d1b7207c24623 \ + --hash=sha256:076edc972b68a66870cec41a4efdd72a6b655c4098a232314b02d2bfa3bfa157 \ + --hash=sha256:0a3e51c2be472b7867eb0c5d025b91400c2b73a0823b89d4303a9097e2ec6655 \ + --hash=sha256:0c7f8e8a7cf8e81ca7d44bea4f181783630959d41b4b51d2f74bc50f348a090f \ + --hash=sha256:10904368261e4509c091cbcc067e5a88b070ed9a10f7ad78f3029c175487490f \ + --hash=sha256:113752a55a8eaece2e4ac96bc8817f134c2c23477e477d085ba89e3aa0f4dc44 \ + --hash=sha256:12e05a76b223577a4696c76d7a6b36a0ccc491ffb3c6a8cf92d8001d93ddfd63 \ + --hash=sha256:136bc7247e97a921a020abbd6ef3169af97569869cd6eff41b6a15a73c44ea9b \ + --hash=sha256:1582f01eaf0537a696c846bea92082082b6bfc1103a88e777e983ea9fbdc2a0f \ + --hash=sha256:1767bd3f6370458e60c1d3d7b1d9c2751cc1ad743434e8ec84625a610c8b9195 \ + --hash=sha256:1e2979dc80246e18e348de51246d4c9b410186ffa3c50e77924bec436b1e36cb \ + --hash=sha256:1ea992659c03c3ea811d55fc0a997bec9dde863a617cc7b25cfde69ef32e55af \ + --hash=sha256:1f2d4516c32255782153e858f9a900ca6deadfb217fd3fb21bb2b60b4e04d04d \ + --hash=sha256:2494d20e4c22beac30150b4be3b8339bf2a02ab5580fa6553ca274bc08681a65 \ + --hash=sha256:260692420028319e201b8649b13ac0988974eeafaaef95d0dfbf7120c38dc000 \ + --hash=sha256:2646f8270f932d79ba61102a15ea19a50ae0d43b314e22b3f8f4b5fabbfa6e38 \ + --hash=sha256:27828f0227b54804aac6fb077b6bb48e640b5435fdd7fbf0c274093a7b78b69c \ + --hash=sha256:2bc736725f9bd18a60eec0ed6ef9b06b9785454c8d0105f2be16e4d6274e63d0 \ + --hash=sha256:2c08ac60c3caa31f825b5dbac47e4875bd4954d8f559650ad9e0b225eaf8ed0c \ + 
--hash=sha256:2c83892c7bf92b91d30faca53bb8ea21f9d7e39f0ae4008ef2c2f91116d0464a \ + --hash=sha256:354db020b1f8f11207b35360b92d95725621eb92656725c849a61e4b550f4acc \ + --hash=sha256:364dba61494e48f01ef50ae430e392f67ee1ee27e048daeda0e9d21c3ab2d609 \ + --hash=sha256:37dad73a2f82975ed563d6a277fd9b50e5d9c79910c4aec787e2d63547202315 \ + --hash=sha256:38113856c7fad8c19be7ddd57df0c3e77b1b2336459cb03ee3903ce9d5e236ce \ + --hash=sha256:38aed5a1bbc3025859f56d6a32f6e53ca173283cb95348e03480f333b1091e7d \ + --hash=sha256:3ad083df8fe342d4d8d00cc1d3c1a23f0dc84fce416eb301e69f1ddbbe124d3f \ + --hash=sha256:3c1bf1a7b05a65d3b37a9adea98e195e0081be6b17ca03a86f92aeb8b110f468 \ + --hash=sha256:3d1dde10bd9962b1434053239b1d5490fc31a2b02d8950a5f731bc584c7a5a0f \ + --hash=sha256:44aaf1a07ad0824e407dafc637a852e9a44d94664293bbe7d8ee549c356c8882 \ + --hash=sha256:44afa3c18d45053fe8d8228950ee4c8eaf3b5a7f3b64963fdeac19b8342c987f \ + --hash=sha256:4a70d23eedd88a6484aa79a732a90e36701048a1509078d1b59578ef0ea2cdf5 \ + --hash=sha256:4aa89919fbd8a553cd7d03bf23d5bc5deee622e1b5db572121287f0e64979476 \ + --hash=sha256:4cc6bb11f4e8e5ed91d78b9880774fbc0856cb226151b0a93b549c2b26a00c19 \ + --hash=sha256:536e1f58419e1ec35f6d1310c88496f0d60e4f182cacb773d38076f66a60b149 \ + --hash=sha256:5402ee0f61e7798ea93a01b0489520f2abfd9b57b76b82c93714c4318c66ca06 \ + --hash=sha256:56814b41486e2d712a8bc02a7b1f17b87fa30999d2323bbd13cf0e52296813a1 \ + --hash=sha256:5b73441a1159f1fb37353aaefb9e801ab35a07dd93cb8177504b25a317f4215a \ + --hash=sha256:61beaa79d392d44dc19d6f11ccd824d3cccb865c4372157c40b92533f8d76dd0 \ + --hash=sha256:6c2d118d1b6c9e2d577e215567eedbe11804c3aafa76d39ec1f8bc74e918fd07 \ + --hash=sha256:6e2f9d76c00e805d47f19c7a96a14e4135238a7551a18bfd89bb757993fd0933 \ + --hash=sha256:71ed769b58d44e0bc2701aa59eb199b6665c16e8a5b8b4a84db01f71580ec448 \ + --hash=sha256:7349f99f1ef8b940b309179733f2cad2e6037a29560f1b03fdc6aa6be0a8d03c \ + --hash=sha256:75f3f534f33651b73f4d3a16d0254de096f43737d51e981478d580f4b006b427 \ + 
--hash=sha256:76fc18653a5c95e5301a52d1b5afb27c9adc77175bf00f73e94f501caf0e05ad \ + --hash=sha256:7cb0c397f29688a5bd2c0dbd44451bc44ebb9b22babc90f97db5ec3e5bb69977 \ + --hash=sha256:7cc24728a1a9cef497697e53b3d085fb4d3bc0ef1ef4d9b424d9cf808f52c146 \ + --hash=sha256:7e63a56eb7fdee1587d62f753ccd6d5fa24fbeea57a40d9d8beaef679a24bdd6 \ + --hash=sha256:832d16f248ca0cc96929139734ec32d21c67669dcf8a9f3f733c85054429c012 \ + --hash=sha256:8488e973547e8fb1b4193fd9faf5236cf1b7cd5e9e6dc7ff6b4d9afdc4c720cb \ + --hash=sha256:849cff945284c577c5f621d2df76ca7b60f803cc8663ff01b778ad0af0e39bb9 \ + --hash=sha256:88ec906eb2d92420f5b074f59cf9e50b3bb44f3cb70e6512099fdd4d88c2f87c \ + --hash=sha256:8d3b9c91eeb372a64ec6686c1402afd40cc20f61a0866850f7d989b6bf39a41a \ + --hash=sha256:8f5624f0f67f2b9ecaa812e1dfd2e35b256487566585160c6c19268bf2ffeccc \ + --hash=sha256:905a12bf088d6fa20e094f9a477bf84bd823651d8b8384f59bcd50eaa92e6a52 \ + --hash=sha256:92486a04d54987054f8b4405a9af9d482e5100d6fe6374fc3303015983fc8bda \ + --hash=sha256:96eb10ef8920990e703da348bb25fedb8b8653b5966e4e078e5be382b430f9e0 \ + --hash=sha256:96fb679c7ca12a512d36d01c174a4fbfd912b5535cc722eb2c010c7b44eceb8e \ + --hash=sha256:98d8b3932f1a369364606417ded5412c4ffb15bedbcf797c31317e55bd5d920e \ + --hash=sha256:9dbab442a8d9ca918b4ed99db8d89d11b1f067a7dadb642476ad0889560dac79 \ + --hash=sha256:9ef3e2e407e4cad2df3c89488a761ed1f1c33f3b826a2ea9a411b0a7d1cccf1b \ + --hash=sha256:9ff737f24b34ed26de62d481ef522f233d3c5927279f6b7229de9b0deb3f76b5 \ + --hash=sha256:a1a39fecb5f0b19faee9a8a8176c805ed78ce45d760259a4ff3d21a7daa4dfc1 \ + --hash=sha256:a402ae1066be594701ac45661278dc4a466fb684258d1a2c434de54971b006ca \ + --hash=sha256:a5c51460ede609fbb4fa883a8fe16e749964ddb459966d0518991ec02eb8dfb9 \ + --hash=sha256:a8ca13480ce16daad0504be6ce893b0ee8ec34cd43b993b754198a89e2787f7e \ + --hash=sha256:ab4a2381005769a4af2ffddae74d769e8a4aae42e970596208ec6d615c6fb080 \ + --hash=sha256:aeafc7f5bbddc46213707266cadc94439bfa87ecf699444de8be044d6d6eb26f \ + 
--hash=sha256:aecd5ed096b0e5d93fb0367fd8f417cef38ea30b786f2501f6c34eabd9062c38 \ + --hash=sha256:af452e69446fadf247f18ac5d153b1f7e61ef708f23ce85d8c52833748c58075 \ + --hash=sha256:af46f0b7a1342b49f208fed31f5a83b8495bb14b652f621e0a6787d2f10f24ee \ + --hash=sha256:b02b5e1f54c3396c48b665050464803c23c685716eb5d82a1d81bf81b5230da4 \ + --hash=sha256:b28996872b48baf829ee75fa06998b607c66a4847ac838e6fd7473a6b2ab68e7 \ + --hash=sha256:b7692f539a26265cece1e27e366df5b976a6db6b1f825a9e0466395b314ee48b \ + --hash=sha256:ba44fad1d114539d6a1509966b20b74d2dec9a5b0ee12dd7fd0a1bb7b8785e5f \ + --hash=sha256:bf15145b1f8056d12c67255cd3ce5d317cd4450d5ee747760d8d088d85d12a2d \ + --hash=sha256:c3dc2920cc96f9aa40c6dc54256e436cc95c0a15562eb7bd579e1811593c377e \ + --hash=sha256:c54af5069da58ea643ad34ff32fd6bc4eebb8ae0fef9821cd8919063e0aeeaab \ + --hash=sha256:c5ea0153482e5b4d601c25465771c7267c99fddf5d3f3bdc238ef930e6d051cf \ + --hash=sha256:c9ffd823c46e05ef3eb28b821aa7bc501efa95ba8880b4a1380068e32c5bed47 \ + --hash=sha256:ca55c9671bb637ce13d18ef352fd32ae7aba21b4402f300a63f1fb1fd18e0364 \ + --hash=sha256:caa94726791e316f0f63049ee00dff3b34a629b0d099f3b594770f7d0d8f1f56 \ + --hash=sha256:cc956f78651778ec1ab105196e90e0e5f5275884793ab67c60938c75bcca3989 \ + --hash=sha256:ccbf355b7276593c68fa824030e68cb29f630c50e20cb11ebb0ee450ae6b3d08 \ + --hash=sha256:cf08b43d1d5d1678f295f0431a4a7e1707d4652576e1d0f8914b5e0213bfeee5 \ + --hash=sha256:d06c78074646111fb01836585f1198367b17d57c9f427e07aaa9ff499003e58d \ + --hash=sha256:d2b53e1f851a2b406bbb5ac58e16c4a5496038eddd856cc900278fa0da97f3fc \ + --hash=sha256:d41df8e10b094640a6b234851b624b76a41552f637b9fb34dc720b9fe4ef3be4 \ + --hash=sha256:d7abd17a838a52140e3aeca271054e321226f52df7e0a9f0da8f91ea123afe98 \ + --hash=sha256:de52ddfa6e10e892d00f747bf7135d7007302ad82e243cf16d89dd77b03b649d \ + --hash=sha256:df33902464410a1f1a0411a235f0a34e7e129f12cb6340daca0f9d1390f5fe10 \ + --hash=sha256:e16aaf788f1de5a85c8f8fcc9c1ca1dd7dd52b8ad30a7889ca31c7c7606615b8 \ + 
--hash=sha256:e3ad4968711fb379a67c8c755beb4dae8b721a83737737b7bcee27c05400b047 \ + --hash=sha256:e483b8b913fcd3b48badec54185c150cb7ab0e6487914b84dc7cde2365e0c892 \ + --hash=sha256:e71f666c3bf019f2490a47dddb44c3ccea2e69ac882f7495c68dc14d4065eac2 \ + --hash=sha256:ea1498ce4491236d1cffa0eee9ad0968b6ecb0c1cd711699c5677fc689905f00 \ + --hash=sha256:eaab9dc009e22726c62fe3b850b797e7f0e7ba76d245284d1064081f512c7226 \ + --hash=sha256:ec79dbe23702795944d2ae4c6925e35a075b88acd0d20acde7c77a817ebbce94 \ + --hash=sha256:f1b92e72babfd56585c75caf44f0b15258c58e6be23bc33f90885cebffde3400 \ + --hash=sha256:f1f46700402312bdc31912f6fc17f5ecaaaa3bafe5487c48f07c800052736289 \ + --hash=sha256:f518eac285c9632be337323eef9824a856f2680f943a9b68ac41d5f5bad7df7c \ + --hash=sha256:f86f20a9d5bee1a6ede0f2757b917bac6908cde0f5ad9fcb3606db1e2968bcf5 \ + --hash=sha256:f8fc652c354d3362e2932a79d5ac4bbd7170757a41a62c4fe0f057d29f10bebb \ + --hash=sha256:fe272a72c7ed29f84c42fedd2d06c2f9858dc0c00dae3b34ba15d6d8ae0fbaaf \ + --hash=sha256:fe863491664c6720d65ae438d4efaa5eca766565a53adb53bf14bc3246c72fe0 # via pydantic pyjwt[crypto]==2.8.0 \ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ @@ -527,9 +526,9 @@ rfc3986[idna2008]==1.5.0 \ # via # httpx # rfc3986 -safir==5.0.0a2 \ - --hash=sha256:a13ac781a345d67ae43fd8a0a2434904e5dfca9f9321c15547e4d18b50144fe4 \ - --hash=sha256:c8ab7f043e0e65ccda4fef2a15697802224b2c42876991c1a12d0b41115d0bc5 +safir==5.0.0a4 \ + --hash=sha256:8cd7c8212f777af6afef0023614279cfd1695b35932b551278fdad7d8bc202a9 \ + --hash=sha256:eadaffb3b32129c2db562fc0823d5b1237beaf037e2cdd946fda309c3a9f50b1 # via -r requirements/main.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ From c328e08d8fc595b9f88cdd471643df9843274486 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 17:51:40 +0000 Subject: [PATCH 254/588] Update Helm release telegraf to v1.8.38 
--- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index d8dbea8f95..5f28ebd25f 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.37 + version: 1.8.38 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 0c5a1197dd0d8962e638d6177c290d34f8152ab2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 17:51:44 +0000 Subject: [PATCH 255/588] Update Helm release telegraf-ds to v1.1.20 --- applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 3012197051..a82062eb47 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.19 + version: 1.1.20 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 5afccb726a05a1ffb1fc33c33edae45a54992d85 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 17:51:48 +0000 Subject: [PATCH 256/588] Update gcr.io/cloudsql-docker/gce-proxy Docker tag to v1.33.14 --- applications/gafaelfawr/values.yaml | 2 +- applications/nublado/values.yaml | 2 +- applications/sqlproxy-cross-project/values.yaml | 2 +- applications/times-square/values.yaml | 2 +- applications/vo-cutouts/values.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 1acaf0696a..07d0b88c11 100644 --- a/applications/gafaelfawr/values.yaml +++ 
b/applications/gafaelfawr/values.yaml @@ -310,7 +310,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.13" + tag: "1.33.14" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index a74e9796aa..bbeab11844 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -469,7 +469,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.13" + tag: "1.33.14" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index d410ae7bc4..f23980b684 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.33.13" + tag: "1.33.14" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 694c677029..3e91e29499 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -126,7 +126,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.13" + tag: "1.33.14" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index 68daf673e2..658b5f08b8 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.13" + tag: "1.33.14" # -- Pull policy for Cloud SQL Auth Proxy images 
pullPolicy: "IfNotPresent" From 8a742fd4c9e444666b001111c87bd45ac517215d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 17:51:52 +0000 Subject: [PATCH 257/588] Update postgres Docker tag to v16.1 --- applications/siav2/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index a6a3a7c58b..094730a1a7 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -72,7 +72,7 @@ uws: pullPolicy: "IfNotPresent" # -- Tag of UWS database image to use - tag: "16.0" + tag: "16.1" # -- Resource limits and requests for the UWS database pod resources: From 5018e3c181d221fc89f734385a361d1cd325a84b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 20 Nov 2023 09:56:26 -0800 Subject: [PATCH 258/588] Update Helm docs --- applications/gafaelfawr/README.md | 2 +- applications/nublado/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 602e3e7995..b88a433aa2 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,7 +17,7 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. 
| | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/nublado/README.md b/applications/nublado/README.md index c6696bb816..9763338186 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -16,7 +16,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a separate service, because shoehorning it into Zero to Jupyterhub's extraContainers looks messy, and it's not necessary that it be very performant. 
| | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index f25abc80ce..04ba518112 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.33.13"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.33.14"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/times-square/README.md b/applications/times-square/README.md index b03ab53b4c..9b8c7af307 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index 7542c448f0..8926e9ef7a 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.13"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and 
ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | From 94ca9e73c7c4bb1d7386b495d5cc18cf279ca7e0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 20 Nov 2023 09:57:41 -0800 Subject: [PATCH 259/588] Update Helm docs --- applications/siav2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/siav2/README.md b/applications/siav2/README.md index 65c8ba1381..7869c7d7d6 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -28,7 +28,7 @@ Simple Image Access v2 service | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | | uws.image.repository | string | `"library/postgres"` | UWS database image to use | -| uws.image.tag | string | `"16.0"` | Tag of UWS database image to use | +| uws.image.tag | string | `"16.1"` | Tag of UWS database image to use | | uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{"limits":{"cpu":2,"memory":"4Gi"},"requests":{"cpu":0.25,"memory":"1Gi"}}` | Resource limits and requests for the UWS database pod | From 31a06fbd78a6af2ef09228d56751750047d2d4ce Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 13 Nov 2023 12:18:22 -0700 Subject: [PATCH 260/588] Reconfigure giftless for TF-based deployment; split ro/rw --- .../gafaelfawr/values-roundtable-prod.yaml | 7 +++++++ applications/giftless/README.md | 8 +++++--- applications/giftless/secrets.yaml | 9 +++++++-- applications/giftless/templates/configmap.yaml | 8 ++++---- applications/giftless/templates/deployment.yaml | 4 ++-- .../giftless/templates/serviceaccount.yaml | 17 +++++++++++++++++ .../giftless/values-roundtable-dev.yaml | 6 ++++-- .../giftless/values-roundtable-prod.yaml | 11 +++++++++++ 
applications/giftless/values.yaml | 12 +++++++++--- environments/values-roundtable-prod.yaml | 1 + 10 files changed, 67 insertions(+), 16 deletions(-) create mode 100644 applications/giftless/templates/serviceaccount.yaml create mode 100644 applications/giftless/values-roundtable-prod.yaml diff --git a/applications/gafaelfawr/values-roundtable-prod.yaml b/applications/gafaelfawr/values-roundtable-prod.yaml index 82c14bd15d..22ea82132a 100644 --- a/applications/gafaelfawr/values-roundtable-prod.yaml +++ b/applications/gafaelfawr/values-roundtable-prod.yaml @@ -23,6 +23,13 @@ config: - github: organization: "lsst-sqre" team: "square" + "write:git-lfs": + - github: + organization: "lsst" + team: "data-management" + - github: + organization: "lsst" + team: "simulations" initialAdmins: - "afausti" diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 4979510629..04b608d874 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -11,9 +11,11 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the giftless frontend pod | -| config | object | `{"bucketName":"","projectName":""}` | Configuration for giftless server | -| config.bucketName | string | Must be overridden in environment-specific values file | Bucket name for GCS LFS Object bucket | -| config.projectName | string | Must be overridden in environment-specific values file | Project name for GCS LFS Object bucket | +| config | object | `{"bucketName":"","serviceAccountReadonly":"","serviceAccountReadwrite":"","storageProjectName":""}` | Configuration for giftless server | +| config.bucketName | string | Must be overridden in environment-specific values file | Bucket name for GCS LFS Object Storage bucket | +| config.serviceAccountReadonly | string | Must be overridden in environment-specific values file | Read-only 
service account name for GCS LFS Object Storage bucket | +| config.serviceAccountReadwrite | string | Must be overridden in environment-specific values file | Read-write service account name for GCS LFS Object Storage bucket | +| config.storageProjectName | string | Must be overridden in environment-specific values file | Project name for GCS LFS Object Storage bucket | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the giftless image | diff --git a/applications/giftless/secrets.yaml b/applications/giftless/secrets.yaml index 52e5b2dbb8..a3c00f01d0 100644 --- a/applications/giftless/secrets.yaml +++ b/applications/giftless/secrets.yaml @@ -1,5 +1,10 @@ -"giftless-gcp-key.json": +"giftless-gcp-key-ro.json": description: >- - The GCP service account JSON file for the giftless + The GCP service account JSON file for the giftless Git LFS service (RO) + onepassword: + encoded: true +"giftless-gcp-key-rw.json": + description: >- + The GCP service account JSON file for the giftless Git LFS service (RO) onepassword: encoded: true diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index 7cb4901730..c9100e0cca 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -14,8 +14,8 @@ data: options: storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" storage_options: - account_key_file: "/etc/secret/giftless-gcp-key.json" - project_name: {{ .Values.config.projectName | quote }} + account_key_file: "/etc/secret/giftless-gcp-key-ro.json" + project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} --- apiVersion: v1 @@ -34,6 +34,6 @@ data: options: storage_class: 
"giftless.storage.google_cloud:GoogleCloudStorage" storage_options: - account_key_file: "/etc/secret/giftless-gcp-key.json" - project_name: {{ .Values.config.projectName | quote }} + account_key_file: "/etc/secret/giftless-gcp-key-rw.json" + project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index ab17ea9b2c..9465ac25a8 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -30,7 +30,6 @@ spec: labels: {{- include "giftless.selectorLabels" . | nindent 8 }} spec: - automountServiceAccountToken: false containers: - name: {{ .Chart.Name }} command: @@ -70,6 +69,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true + serviceAccountName: "git-lfs-ro" volumeMounts: - name: "tmp" mountPath: "/tmp" @@ -126,7 +126,6 @@ spec: labels: {{- include "giftless-rw.selectorLabels" . | nindent 8 }} spec: - automountServiceAccountToken: false containers: - name: {{ .Chart.Name }} command: @@ -166,6 +165,7 @@ spec: drop: - ALL readOnlyRootFilesystem: true + serviceAccountName: "git-lfs-rw" volumeMounts: - name: "tmp" mountPath: "/tmp" diff --git a/applications/giftless/templates/serviceaccount.yaml b/applications/giftless/templates/serviceaccount.yaml new file mode 100644 index 0000000000..d7fcd57bb1 --- /dev/null +++ b/applications/giftless/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "git-lfs-ro" + labels: + {{- include "giftless.labels" . | nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: {{ required "config.serviceAccountReadonly must be set to a valid Google service account" .Values.config.serviceAccountReadonly | quote }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "git-lfs-rw" + labels: + {{- include "giftless-rw.labels" . 
| nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: {{ required "config.serviceAccountReadwrite must be set to a valid Google service account" .Values.config.serviceAccountReadwrite | quote }} diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 5bbf6b5651..a7df2f9f2f 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -5,5 +5,7 @@ ingress: readonly: "git-lfs-dev.lsst.cloud" readwrite: "git-lfs-dev-rw.lsst.cloud" config: - projectName: "plasma-geode-127520" - bucketName: "rubin-gitlfs-experimental" + storageProjectName: "data-curation-prod-fbdb" + bucketName: "rubin-us-central1-git-lfs-dev" + serviceAccountReadonly: "git_lfs_rw@roundtable-dev-abe2.iam.gserviceaccount.com" + serviceAccountReadwrite: "git_lfs_rw@roundtable-dev-abe2.iam.gserviceaccount.com" diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml new file mode 100644 index 0000000000..ca09c12be3 --- /dev/null +++ b/applications/giftless/values-roundtable-prod.yaml @@ -0,0 +1,11 @@ +server: + debug: true +ingress: + hostname: + readonly: "git-lfs.lsst.cloud" + readwrite: "git-lfs-rw.lsst.cloud" +config: + storageProjectName: "data-curation-prod-fbdb" + bucketName: "rubin-us-central1-git-lfs" + serviceAccountReadonly: "git_lfs_rw@roundtable-prod-f6fd.iam.gserviceaccount.com" + serviceAccountReadwrite: "git_lfs_rw@roundtable-prod-f6fd.iam.gserviceaccount.com" diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 0de26a224b..e930cf1b82 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -55,12 +55,18 @@ server: # -- Configuration for giftless server config: - # -- Project name for GCS LFS Object bucket + # -- Project name for GCS LFS Object Storage bucket # @default -- Must be overridden in environment-specific values file - 
projectName: "" - # -- Bucket name for GCS LFS Object bucket + storageProjectName: "" + # -- Bucket name for GCS LFS Object Storage bucket # @default -- Must be overridden in environment-specific values file bucketName: "" + # -- Read-only service account name for GCS LFS Object Storage bucket + # @default -- Must be overridden in environment-specific values file + serviceAccountReadonly: "" + # -- Read-write service account name for GCS LFS Object Storage bucket + # @default -- Must be overridden in environment-specific values file + serviceAccountReadwrite: "" global: # -- Base path for Vault secrets diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index 926669adad..e0745ae962 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -6,6 +6,7 @@ onepassword: vaultPathPrefix: secret/phalanx/roundtable-prod applications: + giftless: true kubernetes-replicator: true onepassword-connect: true ook: true From 409a6dd4a768a8a5827ebb21d4799fa861ae4e90 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 14 Nov 2023 15:26:37 -0700 Subject: [PATCH 261/588] use custom build with workload management --- applications/giftless/templates/configmap.yaml | 2 -- applications/giftless/templates/vault-secrets.yaml | 9 --------- applications/giftless/values-roundtable-dev.yaml | 4 ++++ applications/giftless/values-roundtable-prod.yaml | 4 ++++ 4 files changed, 8 insertions(+), 11 deletions(-) delete mode 100644 applications/giftless/templates/vault-secrets.yaml diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index c9100e0cca..6f0444d0da 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -14,7 +14,6 @@ data: options: storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" storage_options: - account_key_file: "/etc/secret/giftless-gcp-key-ro.json" project_name: {{ 
.Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} --- @@ -34,6 +33,5 @@ data: options: storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" storage_options: - account_key_file: "/etc/secret/giftless-gcp-key-rw.json" project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} diff --git a/applications/giftless/templates/vault-secrets.yaml b/applications/giftless/templates/vault-secrets.yaml deleted file mode 100644 index 0466225d3c..0000000000 --- a/applications/giftless/templates/vault-secrets.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: {{ include "giftless.fullname" . }} - labels: - {{- include "giftless.labels" . | nindent 4 }} -spec: - path: "{{ .Values.global.vaultSecretsPath }}/giftless" - type: "Opaque" diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index a7df2f9f2f..5a9e6db950 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,3 +1,7 @@ +image: + pullPolicy: "Always" + repository: "ghcr.io/lsst-sqre/giftless" + tag: "ajt-dev" server: debug: true ingress: diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index ca09c12be3..47a3622a47 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -1,3 +1,7 @@ +image: + pullPolicy: "Always" + repository: "ghcr.io/lsst-sqre/giftless" + tag: "ajt-dev" server: debug: true ingress: From 1d8ec150828b9fa7d7c52014313df168a2f89776 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 14 Nov 2023 16:36:32 -0700 Subject: [PATCH 262/588] remove secrets.yaml from giftless --- applications/giftless/secrets.yaml | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100644 
applications/giftless/secrets.yaml diff --git a/applications/giftless/secrets.yaml b/applications/giftless/secrets.yaml deleted file mode 100644 index a3c00f01d0..0000000000 --- a/applications/giftless/secrets.yaml +++ /dev/null @@ -1,10 +0,0 @@ -"giftless-gcp-key-ro.json": - description: >- - The GCP service account JSON file for the giftless Git LFS service (RO) - onepassword: - encoded: true -"giftless-gcp-key-rw.json": - description: >- - The GCP service account JSON file for the giftless Git LFS service (RO) - onepassword: - encoded: true From fa0a031cf5fa5032fad39ccb690f14dc5f527c5c Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 14 Nov 2023 16:44:02 -0700 Subject: [PATCH 263/588] remove giftless secret config --- .../giftless/templates/deployment.yaml | 25 ++++++------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index 9465ac25a8..4ada0d71b3 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -1,12 +1,13 @@ # Note that this creates two nearly-identical deployments, one named # "giftless" and one named "giftless-rw". The only real difference -# between them is that their configuration configmaps and secrets are -# different: one has the configuration for read-only access to the Git -# LFS server, and other has configuration for read-write access. It is -# possible that we might in future want to further split the -# configuration in order to allow, for instance, different numbers of -# processes and threads for the read-write and the read-only servers, on -# the grounds that our Git LFS usage is read-mostly. +# between them is that their configuration configmaps and +# serviceaccounts are different: one has the configuration for read-only +# access to the Git LFS server, and other has configuration for +# read-write access. 
It is possible that we might in future want to +# further split the configuration in order to allow, for instance, +# different numbers of processes and threads for the read-write and the +# read-only servers, on the grounds that our Git LFS usage is +# read-mostly. apiVersion: apps/v1 kind: Deployment metadata: @@ -75,8 +76,6 @@ spec: mountPath: "/tmp" - name: "giftless-config" mountPath: "/etc/giftless" - - name: "giftless-secret" - mountPath: "/etc/secret" securityContext: runAsNonRoot: true runAsUser: 1000 @@ -87,9 +86,6 @@ spec: - name: "giftless-config" configMap: name: {{ include "giftless.fullname" . | quote }} - - name: "giftless-secret" - secret: - secretName: {{ include "giftless.fullname" . | quote }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -171,8 +167,6 @@ spec: mountPath: "/tmp" - name: "giftless-config" mountPath: "/etc/giftless" - - name: "giftless-secret" - mountPath: "/etc/secret" securityContext: runAsNonRoot: true runAsUser: 1000 @@ -183,9 +177,6 @@ spec: - name: "giftless-config" configMap: name: {{ template "giftless.fullname" . }}-rw - - name: "giftless-secret" - secret: - secretName: {{ include "giftless.fullname" . | quote }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} From 08040faa6beffe6d490f849eca1e3786e01fa1d2 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 14 Nov 2023 16:50:36 -0700 Subject: [PATCH 264/588] Use docker hub--repo visibility issues --- applications/giftless/values-roundtable-dev.yaml | 2 +- applications/giftless/values-roundtable-prod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 5a9e6db950..c225f3a5c9 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,6 +1,6 @@ image: pullPolicy: "Always" - repository: "ghcr.io/lsst-sqre/giftless" + repository: "docker.io/lsstsqre/giftless" tag: "ajt-dev" server: debug: true diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index 47a3622a47..2af58877a5 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -1,6 +1,6 @@ image: pullPolicy: "Always" - repository: "ghcr.io/lsst-sqre/giftless" + repository: "docker.io/lsstsqre/giftless" tag: "ajt-dev" server: debug: true From 10587d4af39957b9aa9f6e245f85f59542e175a7 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 14 Nov 2023 16:54:26 -0700 Subject: [PATCH 265/588] service account goes higher up --- applications/giftless/templates/deployment.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index 4ada0d71b3..bbf169bf03 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -31,6 +31,7 @@ spec: labels: {{- include "giftless.selectorLabels" . 
| nindent 8 }} spec: + serviceAccountName: "git-lfs-ro" containers: - name: {{ .Chart.Name }} command: @@ -70,7 +71,6 @@ spec: drop: - ALL readOnlyRootFilesystem: true - serviceAccountName: "git-lfs-ro" volumeMounts: - name: "tmp" mountPath: "/tmp" @@ -122,6 +122,7 @@ spec: labels: {{- include "giftless-rw.selectorLabels" . | nindent 8 }} spec: + serviceAccountName: "git-lfs-rw" containers: - name: {{ .Chart.Name }} command: @@ -161,7 +162,6 @@ spec: drop: - ALL readOnlyRootFilesystem: true - serviceAccountName: "git-lfs-rw" volumeMounts: - name: "tmp" mountPath: "/tmp" From 63dbce039b451bef8c52264d5e80616df66bab5a Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 15 Nov 2023 10:32:25 -0700 Subject: [PATCH 266/588] Correct SA email addresses --- applications/giftless/values-roundtable-dev.yaml | 4 ++-- applications/giftless/values-roundtable-prod.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index c225f3a5c9..11f6e3ed2d 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -11,5 +11,5 @@ ingress: config: storageProjectName: "data-curation-prod-fbdb" bucketName: "rubin-us-central1-git-lfs-dev" - serviceAccountReadonly: "git_lfs_rw@roundtable-dev-abe2.iam.gserviceaccount.com" - serviceAccountReadwrite: "git_lfs_rw@roundtable-dev-abe2.iam.gserviceaccount.com" + serviceAccountReadonly: "git-lfs-rw@roundtable-dev-abe2.iam.gserviceaccount.com" + serviceAccountReadwrite: "git-lfs-rw@roundtable-dev-abe2.iam.gserviceaccount.com" diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index 2af58877a5..6778a39348 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -11,5 +11,5 @@ ingress: config: storageProjectName: "data-curation-prod-fbdb" bucketName: 
"rubin-us-central1-git-lfs" - serviceAccountReadonly: "git_lfs_rw@roundtable-prod-f6fd.iam.gserviceaccount.com" - serviceAccountReadwrite: "git_lfs_rw@roundtable-prod-f6fd.iam.gserviceaccount.com" + serviceAccountReadonly: "git-lfs-rw@roundtable-prod-f6fd.iam.gserviceaccount.com" + serviceAccountReadwrite: "git-lfs-rw@roundtable-prod-f6fd.iam.gserviceaccount.com" From baba8da771dd610d369319aa24936e93ca41addd Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 16 Nov 2023 10:50:59 -0700 Subject: [PATCH 267/588] Use different test image --- applications/giftless/values-roundtable-dev.yaml | 2 +- applications/giftless/values-roundtable-prod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 11f6e3ed2d..7765226c00 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "ajt-dev" + tag: "ajt-test-b" server: debug: true ingress: diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index 6778a39348..1f97004374 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "ajt-dev" + tag: "ajt-test-b" server: debug: true ingress: From 8dccb654f257d34a008f8cef7429dd60e7a0fcc7 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 16 Nov 2023 15:33:02 -0700 Subject: [PATCH 268/588] Bind correct account read-only --- applications/giftless/values-roundtable-dev.yaml | 2 +- applications/giftless/values-roundtable-prod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 
7765226c00..bdb555deb9 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -11,5 +11,5 @@ ingress: config: storageProjectName: "data-curation-prod-fbdb" bucketName: "rubin-us-central1-git-lfs-dev" - serviceAccountReadonly: "git-lfs-rw@roundtable-dev-abe2.iam.gserviceaccount.com" + serviceAccountReadonly: "git-lfs-ro@roundtable-dev-abe2.iam.gserviceaccount.com" serviceAccountReadwrite: "git-lfs-rw@roundtable-dev-abe2.iam.gserviceaccount.com" diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index 1f97004374..1f119bb36b 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -11,5 +11,5 @@ ingress: config: storageProjectName: "data-curation-prod-fbdb" bucketName: "rubin-us-central1-git-lfs" - serviceAccountReadonly: "git-lfs-rw@roundtable-prod-f6fd.iam.gserviceaccount.com" + serviceAccountReadonly: "git-lfs-ro@roundtable-prod-f6fd.iam.gserviceaccount.com" serviceAccountReadwrite: "git-lfs-rw@roundtable-prod-f6fd.iam.gserviceaccount.com" From e31f70435591edd8985eac78f58ae39b7330726e Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 16 Nov 2023 19:19:46 -0700 Subject: [PATCH 269/588] Add service account email to config --- applications/giftless/templates/configmap.yaml | 2 ++ applications/giftless/templates/deployment.yaml | 4 ++-- applications/giftless/values-roundtable-dev.yaml | 2 +- applications/giftless/values-roundtable-prod.yaml | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index 6f0444d0da..3a17f419f8 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -16,6 +16,7 @@ data: storage_options: project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ 
.Values.config.bucketName | quote }} + serviceaccount_email: {{ .Values.config.serviceAccountReadonly }} --- apiVersion: v1 kind: ConfigMap @@ -35,3 +36,4 @@ data: storage_options: project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} + serviceaccount_email: {{ .Values.config.serviceAccountReadwrite }} diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index bbf169bf03..529241f611 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -1,5 +1,5 @@ # Note that this creates two nearly-identical deployments, one named -# "giftless" and one named "giftless-rw". The only real difference +# "giftless-ro" and one named "giftless-rw". The only real difference # between them is that their configuration configmaps and # serviceaccounts are different: one has the configuration for read-only # access to the Git LFS server, and other has configuration for @@ -11,7 +11,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . 
| nindent 4 }} spec: diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index bdb555deb9..5397bebba3 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "ajt-test-b" + tag: "ajt-dev" server: debug: true ingress: diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index 1f119bb36b..79012d5065 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "ajt-test-b" + tag: "ajt-dev" server: debug: true ingress: From efbee4f921b46c8dd7adfbecab7c671db9842f7a Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 17 Nov 2023 12:59:18 -0700 Subject: [PATCH 270/588] rationalize object naming --- applications/giftless/templates/configmap.yaml | 2 +- applications/giftless/templates/deployment.yaml | 2 +- applications/giftless/templates/ingress.yaml | 6 +++--- applications/giftless/templates/service.yaml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index 3a17f419f8..0894aac00b 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ include "giftless.fullname" . }} + name: {{ template "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . 
| nindent 4 }} data: diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index 529241f611..93f9a86bfd 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -85,7 +85,7 @@ spec: emptyDir: {} - name: "giftless-config" configMap: - name: {{ include "giftless.fullname" . | quote }} + name: {{ template "giftless.fullname" . }}-ro {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index 499575871c..cd903e262e 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -1,7 +1,7 @@ apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . | nindent 4 }} config: @@ -15,7 +15,7 @@ template: {{- with .Values.ingress.annotations }} {{- toYaml . | nindent 6 }} {{- end }} - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro spec: tls: - hosts: @@ -29,7 +29,7 @@ template: pathType: "Prefix" backend: service: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro port: number: 5000 --- diff --git a/applications/giftless/templates/service.yaml b/applications/giftless/templates/service.yaml index 1ce6a9be64..31dd6357d5 100644 --- a/applications/giftless/templates/service.yaml +++ b/applications/giftless/templates/service.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ include "giftless.fullname" . }} + name: {{ include "giftless.fullname" . }}-ro labels: {{- include "giftless.labels" . 
| nindent 4 }} spec: From c06a43ed6e1382c915bee18da2aea7e7e36b44d3 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 17 Nov 2023 14:55:50 -0700 Subject: [PATCH 271/588] add temporary noverify_upload to storage --- applications/giftless/templates/configmap.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index 0894aac00b..c0d486d9b3 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -37,3 +37,5 @@ data: project_name: {{ .Values.config.storageProjectName | quote }} bucket_name: {{ .Values.config.bucketName | quote }} serviceaccount_email: {{ .Values.config.serviceAccountReadwrite }} + noverify_upload: true + From b5186d356c5257b36935a90647ade4cbb18633da Mon Sep 17 00:00:00 2001 From: adam Date: Sat, 18 Nov 2023 11:12:17 -0700 Subject: [PATCH 272/588] Add anonymous ingress for verification --- applications/giftless/templates/ingress.yaml | 47 ++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index cd903e262e..f06085fdbc 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -33,6 +33,53 @@ template: port: number: 5000 --- +# +# We need this one because the default Giftless transfer implementation +# generates a Bearer token for verification...but since we're going +# through Gafaelfawr, that gets replaced with the Gafaelfawr token. +# Then verification fails but the upload succeeds. +# +# This just means Gafaelfawr lets any verification request through. +# That does mean that absolutely anyone can verify stored objects. +# Since we already provide exactly that service anonymously on the +# readonly endpoint, I don't think this changes anything. 
+# +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: {{ include "giftless.fullname" . }}-rw-anon-verify + labels: + {{- include "giftless.labels" . | nindent 4 }} +config: + baseUrl: "https://{{ .Values.ingress.hostname.readwrite }}" + scopes: + anonymous: true +template: + metadata: + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + nginx.ingress.kubernetes.io/use-regex: "true" + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 6 }} + {{- end }} + name: {{ include "giftless.fullname" . }}-rw-anon-verify + spec: + tls: + - hosts: + - {{ .Values.ingress.hostname.readwrite | quote }} + secretName: tls + rules: + - host: {{ .Values.ingress.hostname.readwrite | quote }} + http: + paths: + - path: "/.*/objects/storage/verify$" + pathType: "ImplementationSpecific" + backend: + service: + name: {{ include "giftless.fullname" . }}-rw + port: + number: 5000 +--- apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: From fd08ba28e518a010c3ab6bc0eefd4a0d4e81ccc5 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 21 Nov 2023 13:41:56 -0700 Subject: [PATCH 273/588] Reduce kafka retention to 48h - Also review log.retention.bytes parameter. log.retention.bytes specifies how much disk space Kafka will ensure is available. The recommendation is to set it to 70% of the data partition size. --- applications/sasquatch/README.md | 6 +++--- applications/sasquatch/charts/strimzi-kafka/README.md | 6 +++--- applications/sasquatch/charts/strimzi-kafka/values.yaml | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index f766ac9fbe..229c5d7421 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -292,10 +292,10 @@ Rubin Observatory's telemetry service. 
| strimzi-kafka.connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | strimzi-kafka.kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | -| strimzi-kafka.kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. | -| strimzi-kafka.kafka.config."log.retention.hours" | int | `72` | Number of days for a topic's data to be retained. | +| strimzi-kafka.kafka.config."log.retention.bytes" | string | `"350000000000"` | How much disk space Kafka will ensure is available, set to 70% of the data partition size | +| strimzi-kafka.kafka.config."log.retention.hours" | int | `48` | Number of days for a topic's data to be retained. | | strimzi-kafka.kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka. | -| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `4320` | Number of minutes for a consumer group's offsets to be retained. | +| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be retained. | | strimzi-kafka.kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. | | strimzi-kafka.kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. | | strimzi-kafka.kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. 
| diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 73fe1951e8..93c4b9855f 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -11,10 +11,10 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | -| kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. | -| kafka.config."log.retention.hours" | int | `72` | Number of days for a topic's data to be retained. | +| kafka.config."log.retention.bytes" | string | `"350000000000"` | How much disk space Kafka will ensure is available, set to 70% of the data partition size | +| kafka.config."log.retention.hours" | int | `48` | Number of days for a topic's data to be retained. | | kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka. | -| kafka.config."offsets.retention.minutes" | int | `4320` | Number of minutes for a consumer group's offsets to be retained. | +| kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be retained. | | kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. 
| | kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. | | kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 82eeae277e..e819687976 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -15,11 +15,11 @@ kafka: storageClassName: "" config: # -- Number of minutes for a consumer group's offsets to be retained. - offsets.retention.minutes: 4320 + offsets.retention.minutes: 2880 # -- Number of days for a topic's data to be retained. - log.retention.hours: 72 - # -- Maximum retained number of bytes for a topic's data. - log.retention.bytes: "429496729600" + log.retention.hours: 48 + # -- How much disk space Kafka will ensure is available, set to 70% of the data partition size + log.retention.bytes: "350000000000" # -- The largest record batch size allowed by Kafka. message.max.bytes: 10485760 # -- The number of bytes of messages to attempt to fetch for each partition. From b7cb1a7c2c7209c72a941b8293da66ac239a0358 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 21 Nov 2023 15:56:34 -0700 Subject: [PATCH 274/588] Add 60s query timeout to InfluxDB - Set this limit by default to prevent long queries to use memory excessively and eventually restart the InfluxDB pod. --- applications/sasquatch/README.md | 6 +++--- applications/sasquatch/values.yaml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 229c5d7421..04d600fe20 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -22,7 +22,7 @@ Rubin Observatory's telemetry service. 
| chronograf.resources.limits.memory | string | `"64Gi"` | | | chronograf.resources.requests.cpu | int | `1` | | | chronograf.resources.requests.memory | string | `"4Gi"` | | -| influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | +| influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb-staging.enabled | bool | `false` | Enable InfluxDB staging deployment. | | influxdb-staging.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | | influxdb-staging.ingress | object | disabled | InfluxDB ingress configuration. | @@ -34,7 +34,7 @@ Rubin Observatory's telemetry service. | influxdb-staging.resources.requests.cpu | int | `8` | | | influxdb-staging.resources.requests.memory | string | `"96Gi"` | | | influxdb-staging.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. 
| -| influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | +| influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb.enabled | bool | `true` | Enable InfluxDB. | | influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | | influxdb.ingress | object | disabled | InfluxDB ingress configuration. | @@ -85,7 +85,7 @@ Rubin Observatory's telemetry service. | kapacitor.resources.requests.cpu | int | `1` | | | kapacitor.resources.requests.memory | string | `"1Gi"` | | | rest-proxy | object | `{"enabled":false}` | Override rest-proxy configuration. | -| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. 
See https://docs.influxdata.com/influxdb/v1.8/administration/config | +| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | source-influxdb.enabled | bool | `false` | Enable InfluxDB staging deployment. | | source-influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | | source-influxdb.ingress | object | disabled | InfluxDB ingress configuration. | diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index a6c1fe3da8..4e2436b3b5 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -53,7 +53,7 @@ influxdb: coordinator: write-timeout: "1h" max-concurrent-queries: 0 - query-timeout: "0s" + query-timeout: "60s" log-queries-after: "15s" continuous_queries: enabled: false @@ -116,7 +116,7 @@ influxdb-staging: coordinator: write-timeout: "1h" max-concurrent-queries: 0 - query-timeout: "0s" + query-timeout: "60s" log-queries-after: "15s" continuous_queries: enabled: false @@ -179,7 +179,7 @@ source-influxdb: coordinator: write-timeout: "1h" max-concurrent-queries: 0 - query-timeout: "0s" + query-timeout: "60s" log-queries-after: "15s" continuous_queries: enabled: false From 2f455e08be54269d3f8730547524c19e225a9061 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 21 Nov 2023 16:36:07 -0700 Subject: [PATCH 275/588] Increase n of connector tasks for M1M3 and MTMount - Increase the number of connector tasks to improve latency issues as reported by Petr when TMA/M1M3 tests restarted at the Summit. 
--- applications/sasquatch/values-summit.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index a82397b46d..447a9f4583 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -58,6 +58,7 @@ kafka-connect-manager: enabled: true repairerConnector: false topicsRegex: ".*MTMount" + tasksMax: "8" comcam: enabled: true repairerConnector: false @@ -74,6 +75,7 @@ kafka-connect-manager: enabled: true repairerConnector: false topicsRegex: ".*MTM1M3" + tasksMax: "8" m2: enabled: true repairerConnector: false From d776529c4dfaad95b4362f8cc7ea3c28a80df9b3 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 22 Nov 2023 08:34:13 -0700 Subject: [PATCH 276/588] Remove the 1s refresh option for Chronograf dashboards - The 1s refresh option in Chronograf only works with dashboards that take less than 1s to update, most dashboards take longer, and this option is causing some confusion. It also seems unnecessary as human reactions to displays take longer than that. Users agree that a 5s refresh is enough, and the decision is to remove the 1s refresh option. --- applications/sasquatch/README.md | 2 +- applications/sasquatch/values.yaml | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 04d600fe20..cc8f0f7165 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -13,7 +13,7 @@ Rubin Observatory's telemetry service. | bucketmapper.image.repository | string | `"ghcr.io/lsst-sqre/rubin-influx-tools"` | repository for rubin-influx-tools | | bucketmapper.image.tag | string | `"0.2.0"` | tag for rubin-influx-tools | | chronograf.enabled | bool | `true` | Enable Chronograf. | -| chronograf.env | object | `{"BASE_PATH":"/chronograf","CUSTOM_AUTO_REFRESH":"1s=1000","HOST_PAGE_DISABLED":true}` | Chronograf environment variables. 
| +| chronograf.env | object | `{"BASE_PATH":"/chronograf","HOST_PAGE_DISABLED":true}` | Chronograf environment variables. | | chronograf.envFromSecret | string | `"sasquatch"` | Chronograf secrets, expected keys generic_client_id, generic_client_secret and token_secret. | | chronograf.image | object | `{"repository":"quay.io/influxdb/chronograf","tag":"1.10.2"}` | Chronograf image tag. | | chronograf.ingress | object | disabled | Chronograf ingress configuration. | diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 4e2436b3b5..64bf6c24e1 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -288,7 +288,6 @@ chronograf: env: HOST_PAGE_DISABLED: true BASE_PATH: /chronograf - CUSTOM_AUTO_REFRESH: "1s=1000" # -- Chronograf secrets, expected keys generic_client_id, generic_client_secret and token_secret. envFromSecret: "sasquatch" resources: From f5e34666e1ede45e3b1979c62e279d4b87676f02 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 22 Nov 2023 08:34:03 -0800 Subject: [PATCH 277/588] Disable nublado2 at the summit We've been running Nublado v3 by default for a while. --- environments/values-summit.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index f82d65ace3..ecce933133 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -3,12 +3,9 @@ fqdn: summit-lsp.lsst.codes vaultPathPrefix: secret/k8s_operator/summit-lsp.lsst.codes applications: - cachemachine: true exposurelog: true - moneypenny: true narrativelog: true nublado: true - nublado2: true portal: true postgres: true sasquatch: true From 5e5b8e8824f29d1e7cfb7a1e7a019130ea82c694 Mon Sep 17 00:00:00 2001 From: Rob Bovill Date: Wed, 22 Nov 2023 14:36:58 -0700 Subject: [PATCH 278/588] BTS: Update nublado to Cycle 34. 
--- applications/nublado/values-base.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 80e404f2fc..046db05ee7 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -8,8 +8,8 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 33 - recommended_tag: "recommended_c0033" + cycle: 34 + recommended_tag: "recommended_c0034" lab: application: "nublado-users" pullSecret: "pull-secret" From 9a3e69944b83e496fb5753a89260cfc6e7c1e7c7 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Wed, 22 Nov 2023 16:04:43 -0800 Subject: [PATCH 279/588] Update prompt processing to use d_2023_11_22. --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index a34411adac..55edaf9936 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -7,7 +7,7 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. 
- tag: d_2023_11_06 + tag: d_2023_11_22 instrument: pipelines: >- From 9c63c32e85cbf9d7cb3d8ed737811247512c88dd Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 10:24:14 +0000 Subject: [PATCH 280/588] Update Helm release argo-cd to v5.51.4 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 72bb1839e8..a0ac414350 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.51.3 + version: 5.51.4 repository: https://argoproj.github.io/argo-helm From ec3ec2889c3a71f75e55a5265eba570434210672 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 10:24:18 +0000 Subject: [PATCH 281/588] Update Helm release kubernetes-replicator to v2.9.2 --- applications/kubernetes-replicator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/kubernetes-replicator/Chart.yaml b/applications/kubernetes-replicator/Chart.yaml index dbb914dee2..0fa53d5822 100644 --- a/applications/kubernetes-replicator/Chart.yaml +++ b/applications/kubernetes-replicator/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/mittwald/kubernetes-replicator dependencies: - name: kubernetes-replicator - version: 2.9.1 + version: 2.9.2 repository: https://helm.mittwald.de From b4289abd99f8d1ddc46514697461ce3ec5674fa4 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 27 Nov 2023 12:31:56 +0000 Subject: [PATCH 282/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 510 +++++++++++++++++++++--------------------- requirements/main.txt | 230 +++++++++---------- 2 files changed, 370 
insertions(+), 370 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 8c77162f55..3b23dcb1b6 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -250,9 +250,9 @@ identify==2.5.32 \ --hash=sha256:0b7656ef6cba81664b783352c73f8c24b39cf82f926f78f4550eda928e5e0545 \ --hash=sha256:5d9979348ec1a21c768ae07e0a652924538e8bce67313a73cb0f681cf08ba407 # via pre-commit -idna==3.4 \ - --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ - --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 +idna==3.6 \ + --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ + --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f # via # -c requirements/main.txt # requests @@ -370,34 +370,34 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.7.0 \ - --hash=sha256:0e81ffd120ee24959b449b647c4b2fbfcf8acf3465e082b8d58fd6c4c2b27e46 \ - --hash=sha256:185cff9b9a7fec1f9f7d8352dff8a4c713b2e3eea9c6c4b5ff7f0edf46b91e41 \ - --hash=sha256:1e280b5697202efa698372d2f39e9a6713a0395a756b1c6bd48995f8d72690dc \ - --hash=sha256:1fe46e96ae319df21359c8db77e1aecac8e5949da4773c0274c0ef3d8d1268a9 \ - --hash=sha256:2b53655a295c1ed1af9e96b462a736bf083adba7b314ae775563e3fb4e6795f5 \ - --hash=sha256:551d4a0cdcbd1d2cccdcc7cb516bb4ae888794929f5b040bb51aae1846062901 \ - --hash=sha256:55d28d7963bef00c330cb6461db80b0b72afe2f3c4e2963c99517cf06454e665 \ - --hash=sha256:5da84d7bf257fd8f66b4f759a904fd2c5a765f70d8b52dde62b521972a0a2357 \ - --hash=sha256:6cb8d5f6d0fcd9e708bb190b224089e45902cacef6f6915481806b0c77f7786d \ - --hash=sha256:7a7b1e399c47b18feb6f8ad4a3eef3813e28c1e871ea7d4ea5d444b2ac03c418 \ - --hash=sha256:870bd1ffc8a5862e593185a4c169804f2744112b4a7c55b93eb50f48e7a77010 \ - 
--hash=sha256:87c076c174e2c7ef8ab416c4e252d94c08cd4980a10967754f91571070bf5fbe \ - --hash=sha256:96650d9a4c651bc2a4991cf46f100973f656d69edc7faf91844e87fe627f7e96 \ - --hash=sha256:a3637c03f4025f6405737570d6cbfa4f1400eb3c649317634d273687a09ffc2f \ - --hash=sha256:a79cdc12a02eb526d808a32a934c6fe6df07b05f3573d210e41808020aed8b5d \ - --hash=sha256:b633f188fc5ae1b6edca39dae566974d7ef4e9aaaae00bc36efe1f855e5173ac \ - --hash=sha256:bf7a2f0a6907f231d5e41adba1a82d7d88cf1f61a70335889412dec99feeb0f8 \ - --hash=sha256:c1b06b4b109e342f7dccc9efda965fc3970a604db70f8560ddfdee7ef19afb05 \ - --hash=sha256:cddee95dea7990e2215576fae95f6b78a8c12f4c089d7e4367564704e99118d3 \ - --hash=sha256:d01921dbd691c4061a3e2ecdbfbfad029410c5c2b1ee88946bf45c62c6c91210 \ - --hash=sha256:d0fa29919d2e720c8dbaf07d5578f93d7b313c3e9954c8ec05b6d83da592e5d9 \ - --hash=sha256:d6ed9a3997b90c6f891138e3f83fb8f475c74db4ccaa942a1c7bf99e83a989a1 \ - --hash=sha256:d93e76c2256aa50d9c82a88e2f569232e9862c9982095f6d54e13509f01222fc \ - --hash=sha256:df67fbeb666ee8828f675fee724cc2cbd2e4828cc3df56703e02fe6a421b7401 \ - --hash=sha256:f29386804c3577c83d76520abf18cfcd7d68264c7e431c5907d250ab502658ee \ - --hash=sha256:f65f385a6f43211effe8c682e8ec3f55d79391f70a201575def73d08db68ead1 \ - --hash=sha256:fc9fe455ad58a20ec68599139ed1113b21f977b536a91b42bef3ffed5cce7391 +mypy==1.7.1 \ + --hash=sha256:12cce78e329838d70a204293e7b29af9faa3ab14899aec397798a4b41be7f340 \ + --hash=sha256:1484b8fa2c10adf4474f016e09d7a159602f3239075c7bf9f1627f5acf40ad49 \ + --hash=sha256:204e0d6de5fd2317394a4eff62065614c4892d5a4d1a7ee55b765d7a3d9e3f82 \ + --hash=sha256:2643d145af5292ee956aa0a83c2ce1038a3bdb26e033dadeb2f7066fb0c9abce \ + --hash=sha256:2c6e4464ed5f01dc44dc9821caf67b60a4e5c3b04278286a85c067010653a0eb \ + --hash=sha256:2f7f6985d05a4e3ce8255396df363046c28bea790e40617654e91ed580ca7c51 \ + --hash=sha256:31902408f4bf54108bbfb2e35369877c01c95adc6192958684473658c322c8a5 \ + 
--hash=sha256:40716d1f821b89838589e5b3106ebbc23636ffdef5abc31f7cd0266db936067e \ + --hash=sha256:4b901927f16224d0d143b925ce9a4e6b3a758010673eeded9b748f250cf4e8f7 \ + --hash=sha256:4fc3d14ee80cd22367caaaf6e014494415bf440980a3045bf5045b525680ac33 \ + --hash=sha256:5cf3f0c5ac72139797953bd50bc6c95ac13075e62dbfcc923571180bebb662e9 \ + --hash=sha256:6dbdec441c60699288adf051f51a5d512b0d818526d1dcfff5a41f8cd8b4aaf1 \ + --hash=sha256:72cf32ce7dd3562373f78bd751f73c96cfb441de147cc2448a92c1a308bd0ca6 \ + --hash=sha256:75aa828610b67462ffe3057d4d8a4112105ed211596b750b53cbfe182f44777a \ + --hash=sha256:75c4d2a6effd015786c87774e04331b6da863fc3fc4e8adfc3b40aa55ab516fe \ + --hash=sha256:78e25b2fd6cbb55ddfb8058417df193f0129cad5f4ee75d1502248e588d9e0d7 \ + --hash=sha256:84860e06ba363d9c0eeabd45ac0fde4b903ad7aa4f93cd8b648385a888e23200 \ + --hash=sha256:8c5091ebd294f7628eb25ea554852a52058ac81472c921150e3a61cdd68f75a7 \ + --hash=sha256:944bdc21ebd620eafefc090cdf83158393ec2b1391578359776c00de00e8907a \ + --hash=sha256:9c7ac372232c928fff0645d85f273a726970c014749b924ce5710d7d89763a28 \ + --hash=sha256:d9b338c19fa2412f76e17525c1b4f2c687a55b156320acb588df79f2e6fa9fea \ + --hash=sha256:ee5d62d28b854eb61889cde4e1dbc10fbaa5560cb39780c3995f6737f7e82120 \ + --hash=sha256:f2c2521a8e4d6d769e3234350ba7b65ff5d527137cdcde13ff4d99114b0c8e7d \ + --hash=sha256:f6efc9bd72258f89a3816e3a98c09d36f079c223aa345c659622f056b760ab42 \ + --hash=sha256:f7c5d642db47376a0cc130f0de6d055056e010debdaf0707cd2b0fc7e7ef30ea \ + --hash=sha256:fcb6d9afb1b6208b4c712af0dafdc650f518836065df0d4fb1d800f5d6773db2 \ + --hash=sha256:fcd2572dd4519e8a6642b733cd3a8cfc1ef94bafd0c1ceed9c94fe736cb65b6a # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -418,9 +418,9 @@ packaging==23.2 \ # pydata-sphinx-theme # pytest # sphinx -platformdirs==3.11.0 \ - --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ - 
--hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e +platformdirs==4.0.0 \ + --hash=sha256:118c954d7e949b35437270383a3f2531e99dd93cf7ce4dc8340d3356d30f173b \ + --hash=sha256:cb633b2bcf10c51af60beb0ab06d2f1d69064b43abf4c185ca6b28865f3f9731 # via virtualenv pluggy==1.3.0 \ --hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \ @@ -440,120 +440,120 @@ pybtex-docutils==1.0.3 \ --hash=sha256:3a7ebdf92b593e00e8c1c538aa9a20bca5d92d84231124715acc964d51d93c6b \ --hash=sha256:8fd290d2ae48e32fcb54d86b0efb8d573198653c7e2447d5bec5847095f430b9 # via sphinxcontrib-bibtex -pydantic==2.5.1 \ - --hash=sha256:0b8be5413c06aadfbe56f6dc1d45c9ed25fd43264414c571135c97dd77c2bedb \ - --hash=sha256:dc5244a8939e0d9a68f1f1b5f550b2e1c879912033b1becbedb315accc75441b +pydantic==2.5.2 \ + --hash=sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0 \ + --hash=sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd # via # -c requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.14.3 \ - --hash=sha256:056ea7cc3c92a7d2a14b5bc9c9fa14efa794d9f05b9794206d089d06d3433dc7 \ - --hash=sha256:0653fb9fc2fa6787f2fa08631314ab7fc8070307bd344bf9471d1b7207c24623 \ - --hash=sha256:076edc972b68a66870cec41a4efdd72a6b655c4098a232314b02d2bfa3bfa157 \ - --hash=sha256:0a3e51c2be472b7867eb0c5d025b91400c2b73a0823b89d4303a9097e2ec6655 \ - --hash=sha256:0c7f8e8a7cf8e81ca7d44bea4f181783630959d41b4b51d2f74bc50f348a090f \ - --hash=sha256:10904368261e4509c091cbcc067e5a88b070ed9a10f7ad78f3029c175487490f \ - --hash=sha256:113752a55a8eaece2e4ac96bc8817f134c2c23477e477d085ba89e3aa0f4dc44 \ - --hash=sha256:12e05a76b223577a4696c76d7a6b36a0ccc491ffb3c6a8cf92d8001d93ddfd63 \ - --hash=sha256:136bc7247e97a921a020abbd6ef3169af97569869cd6eff41b6a15a73c44ea9b \ - --hash=sha256:1582f01eaf0537a696c846bea92082082b6bfc1103a88e777e983ea9fbdc2a0f \ - 
--hash=sha256:1767bd3f6370458e60c1d3d7b1d9c2751cc1ad743434e8ec84625a610c8b9195 \ - --hash=sha256:1e2979dc80246e18e348de51246d4c9b410186ffa3c50e77924bec436b1e36cb \ - --hash=sha256:1ea992659c03c3ea811d55fc0a997bec9dde863a617cc7b25cfde69ef32e55af \ - --hash=sha256:1f2d4516c32255782153e858f9a900ca6deadfb217fd3fb21bb2b60b4e04d04d \ - --hash=sha256:2494d20e4c22beac30150b4be3b8339bf2a02ab5580fa6553ca274bc08681a65 \ - --hash=sha256:260692420028319e201b8649b13ac0988974eeafaaef95d0dfbf7120c38dc000 \ - --hash=sha256:2646f8270f932d79ba61102a15ea19a50ae0d43b314e22b3f8f4b5fabbfa6e38 \ - --hash=sha256:27828f0227b54804aac6fb077b6bb48e640b5435fdd7fbf0c274093a7b78b69c \ - --hash=sha256:2bc736725f9bd18a60eec0ed6ef9b06b9785454c8d0105f2be16e4d6274e63d0 \ - --hash=sha256:2c08ac60c3caa31f825b5dbac47e4875bd4954d8f559650ad9e0b225eaf8ed0c \ - --hash=sha256:2c83892c7bf92b91d30faca53bb8ea21f9d7e39f0ae4008ef2c2f91116d0464a \ - --hash=sha256:354db020b1f8f11207b35360b92d95725621eb92656725c849a61e4b550f4acc \ - --hash=sha256:364dba61494e48f01ef50ae430e392f67ee1ee27e048daeda0e9d21c3ab2d609 \ - --hash=sha256:37dad73a2f82975ed563d6a277fd9b50e5d9c79910c4aec787e2d63547202315 \ - --hash=sha256:38113856c7fad8c19be7ddd57df0c3e77b1b2336459cb03ee3903ce9d5e236ce \ - --hash=sha256:38aed5a1bbc3025859f56d6a32f6e53ca173283cb95348e03480f333b1091e7d \ - --hash=sha256:3ad083df8fe342d4d8d00cc1d3c1a23f0dc84fce416eb301e69f1ddbbe124d3f \ - --hash=sha256:3c1bf1a7b05a65d3b37a9adea98e195e0081be6b17ca03a86f92aeb8b110f468 \ - --hash=sha256:3d1dde10bd9962b1434053239b1d5490fc31a2b02d8950a5f731bc584c7a5a0f \ - --hash=sha256:44aaf1a07ad0824e407dafc637a852e9a44d94664293bbe7d8ee549c356c8882 \ - --hash=sha256:44afa3c18d45053fe8d8228950ee4c8eaf3b5a7f3b64963fdeac19b8342c987f \ - --hash=sha256:4a70d23eedd88a6484aa79a732a90e36701048a1509078d1b59578ef0ea2cdf5 \ - --hash=sha256:4aa89919fbd8a553cd7d03bf23d5bc5deee622e1b5db572121287f0e64979476 \ - --hash=sha256:4cc6bb11f4e8e5ed91d78b9880774fbc0856cb226151b0a93b549c2b26a00c19 \ - 
--hash=sha256:536e1f58419e1ec35f6d1310c88496f0d60e4f182cacb773d38076f66a60b149 \ - --hash=sha256:5402ee0f61e7798ea93a01b0489520f2abfd9b57b76b82c93714c4318c66ca06 \ - --hash=sha256:56814b41486e2d712a8bc02a7b1f17b87fa30999d2323bbd13cf0e52296813a1 \ - --hash=sha256:5b73441a1159f1fb37353aaefb9e801ab35a07dd93cb8177504b25a317f4215a \ - --hash=sha256:61beaa79d392d44dc19d6f11ccd824d3cccb865c4372157c40b92533f8d76dd0 \ - --hash=sha256:6c2d118d1b6c9e2d577e215567eedbe11804c3aafa76d39ec1f8bc74e918fd07 \ - --hash=sha256:6e2f9d76c00e805d47f19c7a96a14e4135238a7551a18bfd89bb757993fd0933 \ - --hash=sha256:71ed769b58d44e0bc2701aa59eb199b6665c16e8a5b8b4a84db01f71580ec448 \ - --hash=sha256:7349f99f1ef8b940b309179733f2cad2e6037a29560f1b03fdc6aa6be0a8d03c \ - --hash=sha256:75f3f534f33651b73f4d3a16d0254de096f43737d51e981478d580f4b006b427 \ - --hash=sha256:76fc18653a5c95e5301a52d1b5afb27c9adc77175bf00f73e94f501caf0e05ad \ - --hash=sha256:7cb0c397f29688a5bd2c0dbd44451bc44ebb9b22babc90f97db5ec3e5bb69977 \ - --hash=sha256:7cc24728a1a9cef497697e53b3d085fb4d3bc0ef1ef4d9b424d9cf808f52c146 \ - --hash=sha256:7e63a56eb7fdee1587d62f753ccd6d5fa24fbeea57a40d9d8beaef679a24bdd6 \ - --hash=sha256:832d16f248ca0cc96929139734ec32d21c67669dcf8a9f3f733c85054429c012 \ - --hash=sha256:8488e973547e8fb1b4193fd9faf5236cf1b7cd5e9e6dc7ff6b4d9afdc4c720cb \ - --hash=sha256:849cff945284c577c5f621d2df76ca7b60f803cc8663ff01b778ad0af0e39bb9 \ - --hash=sha256:88ec906eb2d92420f5b074f59cf9e50b3bb44f3cb70e6512099fdd4d88c2f87c \ - --hash=sha256:8d3b9c91eeb372a64ec6686c1402afd40cc20f61a0866850f7d989b6bf39a41a \ - --hash=sha256:8f5624f0f67f2b9ecaa812e1dfd2e35b256487566585160c6c19268bf2ffeccc \ - --hash=sha256:905a12bf088d6fa20e094f9a477bf84bd823651d8b8384f59bcd50eaa92e6a52 \ - --hash=sha256:92486a04d54987054f8b4405a9af9d482e5100d6fe6374fc3303015983fc8bda \ - --hash=sha256:96eb10ef8920990e703da348bb25fedb8b8653b5966e4e078e5be382b430f9e0 \ - --hash=sha256:96fb679c7ca12a512d36d01c174a4fbfd912b5535cc722eb2c010c7b44eceb8e \ - 
--hash=sha256:98d8b3932f1a369364606417ded5412c4ffb15bedbcf797c31317e55bd5d920e \ - --hash=sha256:9dbab442a8d9ca918b4ed99db8d89d11b1f067a7dadb642476ad0889560dac79 \ - --hash=sha256:9ef3e2e407e4cad2df3c89488a761ed1f1c33f3b826a2ea9a411b0a7d1cccf1b \ - --hash=sha256:9ff737f24b34ed26de62d481ef522f233d3c5927279f6b7229de9b0deb3f76b5 \ - --hash=sha256:a1a39fecb5f0b19faee9a8a8176c805ed78ce45d760259a4ff3d21a7daa4dfc1 \ - --hash=sha256:a402ae1066be594701ac45661278dc4a466fb684258d1a2c434de54971b006ca \ - --hash=sha256:a5c51460ede609fbb4fa883a8fe16e749964ddb459966d0518991ec02eb8dfb9 \ - --hash=sha256:a8ca13480ce16daad0504be6ce893b0ee8ec34cd43b993b754198a89e2787f7e \ - --hash=sha256:ab4a2381005769a4af2ffddae74d769e8a4aae42e970596208ec6d615c6fb080 \ - --hash=sha256:aeafc7f5bbddc46213707266cadc94439bfa87ecf699444de8be044d6d6eb26f \ - --hash=sha256:aecd5ed096b0e5d93fb0367fd8f417cef38ea30b786f2501f6c34eabd9062c38 \ - --hash=sha256:af452e69446fadf247f18ac5d153b1f7e61ef708f23ce85d8c52833748c58075 \ - --hash=sha256:af46f0b7a1342b49f208fed31f5a83b8495bb14b652f621e0a6787d2f10f24ee \ - --hash=sha256:b02b5e1f54c3396c48b665050464803c23c685716eb5d82a1d81bf81b5230da4 \ - --hash=sha256:b28996872b48baf829ee75fa06998b607c66a4847ac838e6fd7473a6b2ab68e7 \ - --hash=sha256:b7692f539a26265cece1e27e366df5b976a6db6b1f825a9e0466395b314ee48b \ - --hash=sha256:ba44fad1d114539d6a1509966b20b74d2dec9a5b0ee12dd7fd0a1bb7b8785e5f \ - --hash=sha256:bf15145b1f8056d12c67255cd3ce5d317cd4450d5ee747760d8d088d85d12a2d \ - --hash=sha256:c3dc2920cc96f9aa40c6dc54256e436cc95c0a15562eb7bd579e1811593c377e \ - --hash=sha256:c54af5069da58ea643ad34ff32fd6bc4eebb8ae0fef9821cd8919063e0aeeaab \ - --hash=sha256:c5ea0153482e5b4d601c25465771c7267c99fddf5d3f3bdc238ef930e6d051cf \ - --hash=sha256:c9ffd823c46e05ef3eb28b821aa7bc501efa95ba8880b4a1380068e32c5bed47 \ - --hash=sha256:ca55c9671bb637ce13d18ef352fd32ae7aba21b4402f300a63f1fb1fd18e0364 \ - --hash=sha256:caa94726791e316f0f63049ee00dff3b34a629b0d099f3b594770f7d0d8f1f56 \ - 
--hash=sha256:cc956f78651778ec1ab105196e90e0e5f5275884793ab67c60938c75bcca3989 \ - --hash=sha256:ccbf355b7276593c68fa824030e68cb29f630c50e20cb11ebb0ee450ae6b3d08 \ - --hash=sha256:cf08b43d1d5d1678f295f0431a4a7e1707d4652576e1d0f8914b5e0213bfeee5 \ - --hash=sha256:d06c78074646111fb01836585f1198367b17d57c9f427e07aaa9ff499003e58d \ - --hash=sha256:d2b53e1f851a2b406bbb5ac58e16c4a5496038eddd856cc900278fa0da97f3fc \ - --hash=sha256:d41df8e10b094640a6b234851b624b76a41552f637b9fb34dc720b9fe4ef3be4 \ - --hash=sha256:d7abd17a838a52140e3aeca271054e321226f52df7e0a9f0da8f91ea123afe98 \ - --hash=sha256:de52ddfa6e10e892d00f747bf7135d7007302ad82e243cf16d89dd77b03b649d \ - --hash=sha256:df33902464410a1f1a0411a235f0a34e7e129f12cb6340daca0f9d1390f5fe10 \ - --hash=sha256:e16aaf788f1de5a85c8f8fcc9c1ca1dd7dd52b8ad30a7889ca31c7c7606615b8 \ - --hash=sha256:e3ad4968711fb379a67c8c755beb4dae8b721a83737737b7bcee27c05400b047 \ - --hash=sha256:e483b8b913fcd3b48badec54185c150cb7ab0e6487914b84dc7cde2365e0c892 \ - --hash=sha256:e71f666c3bf019f2490a47dddb44c3ccea2e69ac882f7495c68dc14d4065eac2 \ - --hash=sha256:ea1498ce4491236d1cffa0eee9ad0968b6ecb0c1cd711699c5677fc689905f00 \ - --hash=sha256:eaab9dc009e22726c62fe3b850b797e7f0e7ba76d245284d1064081f512c7226 \ - --hash=sha256:ec79dbe23702795944d2ae4c6925e35a075b88acd0d20acde7c77a817ebbce94 \ - --hash=sha256:f1b92e72babfd56585c75caf44f0b15258c58e6be23bc33f90885cebffde3400 \ - --hash=sha256:f1f46700402312bdc31912f6fc17f5ecaaaa3bafe5487c48f07c800052736289 \ - --hash=sha256:f518eac285c9632be337323eef9824a856f2680f943a9b68ac41d5f5bad7df7c \ - --hash=sha256:f86f20a9d5bee1a6ede0f2757b917bac6908cde0f5ad9fcb3606db1e2968bcf5 \ - --hash=sha256:f8fc652c354d3362e2932a79d5ac4bbd7170757a41a62c4fe0f057d29f10bebb \ - --hash=sha256:fe272a72c7ed29f84c42fedd2d06c2f9858dc0c00dae3b34ba15d6d8ae0fbaaf \ - --hash=sha256:fe863491664c6720d65ae438d4efaa5eca766565a53adb53bf14bc3246c72fe0 +pydantic-core==2.14.5 \ + 
--hash=sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b \ + --hash=sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b \ + --hash=sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d \ + --hash=sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8 \ + --hash=sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124 \ + --hash=sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189 \ + --hash=sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c \ + --hash=sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d \ + --hash=sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f \ + --hash=sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520 \ + --hash=sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4 \ + --hash=sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6 \ + --hash=sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955 \ + --hash=sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3 \ + --hash=sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b \ + --hash=sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a \ + --hash=sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68 \ + --hash=sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3 \ + --hash=sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd \ + --hash=sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de \ + --hash=sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b \ + --hash=sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634 \ + --hash=sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7 \ + --hash=sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459 \ + 
--hash=sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7 \ + --hash=sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3 \ + --hash=sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331 \ + --hash=sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf \ + --hash=sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d \ + --hash=sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36 \ + --hash=sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59 \ + --hash=sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937 \ + --hash=sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc \ + --hash=sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093 \ + --hash=sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753 \ + --hash=sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706 \ + --hash=sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca \ + --hash=sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260 \ + --hash=sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997 \ + --hash=sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588 \ + --hash=sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71 \ + --hash=sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb \ + --hash=sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e \ + --hash=sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69 \ + --hash=sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5 \ + --hash=sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07 \ + --hash=sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1 \ + --hash=sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0 \ + 
--hash=sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd \ + --hash=sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8 \ + --hash=sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944 \ + --hash=sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26 \ + --hash=sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda \ + --hash=sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4 \ + --hash=sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9 \ + --hash=sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00 \ + --hash=sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe \ + --hash=sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6 \ + --hash=sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada \ + --hash=sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4 \ + --hash=sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7 \ + --hash=sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325 \ + --hash=sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4 \ + --hash=sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b \ + --hash=sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88 \ + --hash=sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04 \ + --hash=sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863 \ + --hash=sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0 \ + --hash=sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911 \ + --hash=sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b \ + --hash=sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e \ + --hash=sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144 \ + 
--hash=sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5 \ + --hash=sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720 \ + --hash=sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab \ + --hash=sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d \ + --hash=sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789 \ + --hash=sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec \ + --hash=sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2 \ + --hash=sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db \ + --hash=sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f \ + --hash=sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef \ + --hash=sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3 \ + --hash=sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209 \ + --hash=sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc \ + --hash=sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651 \ + --hash=sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8 \ + --hash=sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e \ + --hash=sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66 \ + --hash=sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7 \ + --hash=sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550 \ + --hash=sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd \ + --hash=sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405 \ + --hash=sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27 \ + --hash=sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093 \ + --hash=sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077 \ + 
--hash=sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113 \ + --hash=sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3 \ + --hash=sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6 \ + --hash=sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf \ + --hash=sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed \ + --hash=sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88 \ + --hash=sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe \ + --hash=sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18 \ + --hash=sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867 # via # -c requirements/main.txt # pydantic @@ -565,9 +565,9 @@ pydata-sphinx-theme==0.12.0 \ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ --hash=sha256:c17dbab67a3774f06f34f6378e896fcd0668cc8b5da1c1ba017e65cf1df0af58 # via documenteer -pygments==2.17.1 \ - --hash=sha256:1b37f1b1e1bff2af52ecaf28cc601e2ef7077000b227a0675da25aef85784bc4 \ - --hash=sha256:e45a0e74bf9c530f564ca81b8952343be986a29f6afe7f5ad95c5f06b7bdf5e8 +pygments==2.17.2 \ + --hash=sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c \ + --hash=sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367 # via # pydata-sphinx-theme # rich @@ -667,106 +667,106 @@ rich==13.7.0 \ --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \ --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235 # via pytest-pretty -rpds-py==0.13.0 \ - --hash=sha256:0982b59d014efb84a57128e7e69399fb29ad8f2da5b0a5bcbfd12e211c00492e \ - --hash=sha256:13c8061115f1468de6ffdfb1d31b446e1bd814f1ff6e556862169aacb9fbbc5d \ - --hash=sha256:152570689a27ae0be1d5f50b21dad38d450b9227d0974f23bd400400ea087e88 \ - --hash=sha256:153b6d8cf7ae4b9ffd09de6abeda661e351e3e06eaafd18a8c104ea00099b131 \ - 
--hash=sha256:15a2d542de5cbfc6abddc4846d9412b59f8ee9c8dfa0b9c92a29321297c91745 \ - --hash=sha256:169063f346b8fd84f47d986c9c48e6094eb38b839c1287e7cb886b8a2b32195d \ - --hash=sha256:1758197cc8d7ff383c07405f188253535b4aa7fa745cbc54d221ae84b18e0702 \ - --hash=sha256:189aebd44a07fa7b7966cf78b85bde8335b0b6c3b1c4ef5589f8c03176830107 \ - --hash=sha256:1c9c9cb48ab77ebfa47db25b753f594d4f44959cfe43b713439ca6e3c9329671 \ - --hash=sha256:1e5becd0de924616ca9a12abeb6458568d1dc8fe5c670d5cdb738402a8a8429d \ - --hash=sha256:1e63b32b856c0f08a56b76967d61b6ad811d8d330a8aebb9d21afadd82a296f6 \ - --hash=sha256:1f22cab655b41033d430f20266bf563b35038a7f01c9a099b0ccfd30a7fb9247 \ - --hash=sha256:2063ab9cd1be7ef6b5ed0f408e2bdf32c060b6f40c097a468f32864731302636 \ - --hash=sha256:240279ca0b2afd6d4710afce1c94bf9e75fc161290bf62c0feba64d64780d80b \ - --hash=sha256:244be953f13f148b0071d67a610f89cd72eb5013a147e517d6ca3f3f3b7e0380 \ - --hash=sha256:25c9727da2dabc93664a18eda7a70feedf478f0c4c8294e4cdba7f60a479a246 \ - --hash=sha256:26660c74a20fe249fad75ca00bbfcf60e57c3fdbde92971c88a20e07fea1de64 \ - --hash=sha256:28324f2f0247d407daabf7ff357ad9f36126075c92a0cf5319396d96ff4e1248 \ - --hash=sha256:28bb22019f4a783ea06a6b81437d5996551869e8a722ee8720b744f7684d97f4 \ - --hash=sha256:2a29ec68fa9655ce9501bc6ae074b166e8b45c2dfcd2d71d90d1a61758ed8c73 \ - --hash=sha256:2e73511e88368f93c24efe7c9a20b319eaa828bc7431f8a17713efb9e31a39fa \ - --hash=sha256:2ed65ad3fc5065d13e31e90794e0b52e405b63ae4fab1080caeaadc10a3439c5 \ - --hash=sha256:35cc91cbb0b775705e0feb3362490b8418c408e9e3c3b9cb3b02f6e495f03ee7 \ - --hash=sha256:3a1a38512925829784b5dc38591c757b80cfce115c72c594dc59567dab62b9c4 \ - --hash=sha256:3c5b9ad4d3e05dfcf8629f0d534f92610e9805dbce2fcb9b3c801ddb886431d5 \ - --hash=sha256:4084ab6840bd4d79eff3b5f497add847a7db31ce5a0c2d440c90b2d2b7011857 \ - --hash=sha256:42d0ad129c102856a364ccc7d356faec017af86b3543a8539795f22b6cabad11 \ - --hash=sha256:46be9c0685cce2ea02151aa8308f2c1b78581be41a5dd239448a941a210ef5dd \ - 
--hash=sha256:4eb1faf8e2ee9a2de3cb3ae4c8c355914cdc85f2cd7f27edf76444c9550ce1e7 \ - --hash=sha256:50b6d80925dfeb573fc5e38582fb9517c6912dc462cc858a11c8177b0837127a \ - --hash=sha256:525d19ef0a999229ef0f0a7687ab2c9a00d1b6a47a005006f4d8c4b8975fdcec \ - --hash=sha256:533d728ea5ad5253af3395102723ca8a77b62de47b2295155650c9a88fcdeec8 \ - --hash=sha256:54b1d671a74395344239ee3adbcd8c496525f6a2b2e54c40fec69620a31a8dcb \ - --hash=sha256:54e513df45a8a9419e7952ffd26ac9a5b7b1df97fe72530421794b0de29f9d72 \ - --hash=sha256:5c2545bba02f68abdf398ef4990dc77592cc1e5d29438b35b3a3ca34d171fb4b \ - --hash=sha256:5c6824673f66c47f7ee759c21e973bfce3ceaf2c25cb940cb45b41105dc914e8 \ - --hash=sha256:6052bb47ea583646b8ff562acacb9a2ec5ec847267049cbae3919671929e94c6 \ - --hash=sha256:62772259b3381e2aabf274c74fd1e1ac03b0524de0a6593900684becfa8cfe4b \ - --hash=sha256:66eb5aa36e857f768c598d2082fafb733eaf53e06e1169c6b4de65636e04ffd0 \ - --hash=sha256:6ad465e5a70580ca9c1944f43a9a71bca3a7b74554347fc96ca0479eca8981f9 \ - --hash=sha256:70cfe098d915f566eeebcb683f49f9404d2f948432891b6e075354336eda9dfb \ - --hash=sha256:715df74cbcef4387d623c917f295352127f4b3e0388038d68fa577b4e4c6e540 \ - --hash=sha256:7472bd60a8293217444bdc6a46e516feb8d168da44d5f3fccea0336e88e3b79a \ - --hash=sha256:762013dd59df12380c5444f61ccbf9ae1297027cabbd7aa25891f724ebf8c8f7 \ - --hash=sha256:766b573a964389ef0d91a26bb31e1b59dbc5d06eff7707f3dfcec23d93080ba3 \ - --hash=sha256:7e5fbe9800f09c56967fda88c4d9272955e781699a66102bd098f22511a3f260 \ - --hash=sha256:8220321f2dccd9d66f72639185247cb7bbdd90753bf0b6bfca0fa31dba8af23c \ - --hash=sha256:84f7f3f18d29a1c645729634003d21d84028bd9c2fd78eba9d028998f46fa5aa \ - --hash=sha256:87f591ff8cc834fa01ca5899ab5edcd7ee590492a9cdcf43424ac142e731ce3e \ - --hash=sha256:8a33d2b6340261191bb59adb5a453fa6c7d99de85552bd4e8196411f0509c9bf \ - --hash=sha256:8b9c1dd90461940315981499df62a627571c4f0992e8bafc5396d33916224cac \ - --hash=sha256:8c4e84016ba225e09df20fed8befe8c68d14fbeff6078f4a0ff907ae2095e17e \ - 
--hash=sha256:8dd69e01b29ff45a0062cad5c480d8aa9301c3ef09da471f86337a78eb2d3405 \ - --hash=sha256:91ca9aaee7ccdfa66d800b5c4ec634fefca947721bab52d6ad2f6350969a3771 \ - --hash=sha256:9435bf4832555c4f769c6be9401664357be33d5f5d8dc58f5c20fb8d21e2c45d \ - --hash=sha256:95375c44ffb9ea2bc25d67fb66e726ea266ff1572df50b9556fe28a5f3519cd7 \ - --hash=sha256:95c11647fac2a3515ea2614a79e14b7c75025724ad54c91c7db4a6ea5c25ef19 \ - --hash=sha256:9645f7fe10a68b2396d238250b4b264c2632d2eb6ce2cb90aa0fe08adee194be \ - --hash=sha256:977c6123c359dcc70ce3161b781ab70b0d342de2666944b776617e01a0a7822a \ - --hash=sha256:97c1be5a018cdad54fa7e5f7d36b9ab45ef941a1d185987f18bdab0a42344012 \ - --hash=sha256:981e46e1e5064f95460381bff4353783b4b5ce351c930e5b507ebe0278c61dac \ - --hash=sha256:9c4c4b4ff3de834ec5c1c690e5a18233ca78547d003eb83664668ccf09ef1398 \ - --hash=sha256:9f50ca0460f1f7a89ab9b8355d83ac993d5998ad4218e76654ecf8afe648d8aa \ - --hash=sha256:a2383f400691fd7bd63347d4d75eb2fd525de9d901799a33a4e896c9885609f8 \ - --hash=sha256:a25f514a53927b6b4bd04a9a6a13b55209df54f548660eeed673336c0c946d14 \ - --hash=sha256:a61a152d61e3ae26e0bbba7b2f568f6f25ca0abdeb6553eca7e7c45b59d9b1a9 \ - --hash=sha256:a78861123b002725633871a2096c3a4313224aab3d11b953dced87cfba702418 \ - --hash=sha256:afcec1f5b09d0db70aeb2d90528a9164acb61841a3124e28f6ac0137f4c36cb4 \ - --hash=sha256:afde37e3763c602d0385bce5c12f262e7b1dd2a0f323e239fa9d7b2d4d5d8509 \ - --hash=sha256:b431c2c0ff1ea56048a2b066d99d0c2d151ae7625b20be159b7e699f3e80390b \ - --hash=sha256:b4de9d20fe68c16b4d97f551a09920745add0c86430262230528b83c2ed2fe90 \ - --hash=sha256:b70a54fb628c1d6400e351674a31ba63d2912b8c5b707f99b408674a5d8b69ab \ - --hash=sha256:b9a0507342c37132813449393e6e6f351bbff376031cfff1ee6e616402ac7908 \ - --hash=sha256:bad6758df5f1042b35683bd1811d5432ac1b17700a5a2a51fdc293f7df5f7827 \ - --hash=sha256:c07cb9bcccd08f9bc2fd05bf586479df4272ea5a6a70fbcb59b018ed48a5a84d \ - --hash=sha256:c10326e30c97a95b7e1d75e5200ef0b9827aa0f861e331e43b15dfdfd63e669b \ - 
--hash=sha256:c1a920fa679ec2758411d66bf68840b0a21317b9954ab0e973742d723bb67709 \ - --hash=sha256:c1e37dfffe8959a492b7b331995f291847a41a035b4aad82d6060f38e8378a2b \ - --hash=sha256:c472409037e05ed87b99430f97a6b82130328bb977502813547e8ee6a3392502 \ - --hash=sha256:c8a9cec0f49df9bac252d92f138c0d7708d98828e21fd57db78087d8f50b5656 \ - --hash=sha256:c99f9dda2c959f7bb69a7125e192c74fcafb7a534a95ccf49313ae3a04807804 \ - --hash=sha256:c9f4c2b7d989426e9fe9b720211172cf10eb5f7aa16c63de2e5dc61457abcf35 \ - --hash=sha256:cdded3cf9e36840b09ccef714d5fa74a03f4eb6cf81e694226ed9cb5e6f90de0 \ - --hash=sha256:d5bf560634ea6e9a59ceb2181a6cd6195a03f48cef9a400eb15e197e18f14548 \ - --hash=sha256:d70a93a40e55da117c511ddc514642bc7d59a95a99137168a5f3f2f876b47962 \ - --hash=sha256:da2852201e8e00c86be82c43d6893e6c380ef648ae53f337ffd1eaa35e3dfb8a \ - --hash=sha256:e1f40faf406c52c7ae7d208b9140377c06397248978ccb03fbfbb30a0571e359 \ - --hash=sha256:e33b17915c8e4fb2ea8b91bb4c46cba92242c63dd38b87e869ead5ba217e2970 \ - --hash=sha256:e499bf2200eb74774a6f85a7465e3bc5273fa8ef0055590d97a88c1e7ea02eea \ - --hash=sha256:e6c6fed07d13b9e0fb689356c40c81f1aa92e3c9d91d8fd5816a0348ccd999f7 \ - --hash=sha256:e8f1d466a9747213d3cf7e1afec849cc51edb70d5b4ae9a82eca0f172bfbb6d0 \ - --hash=sha256:eef7ee7c70f8b8698be468d54f9f5e01804f3a1dd5657e8a96363dbd52b9b5ec \ - --hash=sha256:efdd02971a02f98492a72b25484f1f6125fb9f2166e48cc4c9bfa563349c851b \ - --hash=sha256:f6c225011467021879c0482316e42d8a28852fc29f0c15d2a435ff457cadccd4 \ - --hash=sha256:f714dd5b705f1c394d1b361d96486c4981055c434a7eafb1a3147ac75e34a3de \ - --hash=sha256:f7c7ddc8d1a64623068da5a15e28001fbd0f0aff754aae7a75a4be5042191638 \ - --hash=sha256:f9339d1404b87e6d8cb35e485945753be57a99ab9bb389f42629215b2f6bda0f \ - --hash=sha256:fdaef49055cc0c701fb17b9b34a38ef375e5cdb230b3722d4a12baf9b7cbc6d3 \ - --hash=sha256:fea99967d4a978ce95dd52310bcb4a943b77c61725393bca631b0908047d6e2f +rpds-py==0.13.1 \ + 
--hash=sha256:0290712eb5603a725769b5d857f7cf15cf6ca93dda3128065bbafe6fdb709beb \ + --hash=sha256:032c242a595629aacace44128f9795110513ad27217b091e834edec2fb09e800 \ + --hash=sha256:08832078767545c5ee12561ce980714e1e4c6619b5b1e9a10248de60cddfa1fd \ + --hash=sha256:08b335fb0c45f0a9e2478a9ece6a1bfb00b6f4c4780f9be3cf36479c5d8dd374 \ + --hash=sha256:0b70c1f800059c92479dc94dda41288fd6607f741f9b1b8f89a21a86428f6383 \ + --hash=sha256:0d9f8930092558fd15c9e07198625efb698f7cc00b3dc311c83eeec2540226a8 \ + --hash=sha256:181ee352691c4434eb1c01802e9daa5edcc1007ff15023a320e2693fed6a661b \ + --hash=sha256:19f5aa7f5078d35ed8e344bcba40f35bc95f9176dddb33fc4f2084e04289fa63 \ + --hash=sha256:1a3b2583c86bbfbf417304eeb13400ce7f8725376dc7d3efbf35dc5d7052ad48 \ + --hash=sha256:1c9a1dc5e898ce30e2f9c0aa57181cddd4532b22b7780549441d6429d22d3b58 \ + --hash=sha256:1f36a1e80ef4ed1996445698fd91e0d3e54738bf597c9995118b92da537d7a28 \ + --hash=sha256:20147996376be452cd82cd6c17701daba69a849dc143270fa10fe067bb34562a \ + --hash=sha256:249c8e0055ca597707d71c5ad85fd2a1c8fdb99386a8c6c257e1b47b67a9bec1 \ + --hash=sha256:2647192facf63be9ed2d7a49ceb07efe01dc6cfb083bd2cc53c418437400cb99 \ + --hash=sha256:264f3a5906c62b9df3a00ad35f6da1987d321a053895bd85f9d5c708de5c0fbf \ + --hash=sha256:2abd669a39be69cdfe145927c7eb53a875b157740bf1e2d49e9619fc6f43362e \ + --hash=sha256:2b2415d5a7b7ee96aa3a54d4775c1fec140476a17ee12353806297e900eaeddc \ + --hash=sha256:2c173f529666bab8e3f948b74c6d91afa22ea147e6ebae49a48229d9020a47c4 \ + --hash=sha256:2da81c1492291c1a90987d76a47c7b2d310661bf7c93a9de0511e27b796a8b46 \ + --hash=sha256:2eca04a365be380ca1f8fa48b334462e19e3382c0bb7386444d8ca43aa01c481 \ + --hash=sha256:37b08df45f02ff1866043b95096cbe91ac99de05936dd09d6611987a82a3306a \ + --hash=sha256:37f79f4f1f06cc96151f4a187528c3fd4a7e1065538a4af9eb68c642365957f7 \ + --hash=sha256:3dd5fb7737224e1497c886fb3ca681c15d9c00c76171f53b3c3cc8d16ccfa7fb \ + --hash=sha256:3e3ac5b602fea378243f993d8b707189f9061e55ebb4e56cb9fdef8166060f28 \ + 
--hash=sha256:3f55ae773abd96b1de25fc5c3fb356f491bd19116f8f854ba705beffc1ddc3c5 \ + --hash=sha256:4011d5c854aa804c833331d38a2b6f6f2fe58a90c9f615afdb7aa7cf9d31f721 \ + --hash=sha256:4145172ab59b6c27695db6d78d040795f635cba732cead19c78cede74800949a \ + --hash=sha256:42b9535aa22ab023704cfc6533e968f7e420affe802d85e956d8a7b4c0b0b5ea \ + --hash=sha256:46a07a258bda12270de02b34c4884f200f864bba3dcd6e3a37fef36a168b859d \ + --hash=sha256:4f13d3f6585bd07657a603780e99beda96a36c86acaba841f131e81393958336 \ + --hash=sha256:528e2afaa56d815d2601b857644aeb395afe7e59212ab0659906dc29ae68d9a6 \ + --hash=sha256:545e94c84575057d3d5c62634611858dac859702b1519b6ffc58eca7fb1adfcf \ + --hash=sha256:577d40a72550eac1386b77b43836151cb61ff6700adacda2ad4d883ca5a0b6f2 \ + --hash=sha256:5967fa631d0ed9f8511dede08bc943a9727c949d05d1efac4ac82b2938024fb7 \ + --hash=sha256:5b769396eb358d6b55dbf78f3f7ca631ca1b2fe02136faad5af74f0111b4b6b7 \ + --hash=sha256:63c9e2794329ef070844ff9bfc012004aeddc0468dc26970953709723f76c8a5 \ + --hash=sha256:6574f619e8734140d96c59bfa8a6a6e7a3336820ccd1bfd95ffa610673b650a2 \ + --hash=sha256:6bfe72b249264cc1ff2f3629be240d7d2fdc778d9d298087cdec8524c91cd11f \ + --hash=sha256:736817dbbbd030a69a1faf5413a319976c9c8ba8cdcfa98c022d3b6b2e01eca6 \ + --hash=sha256:74a2044b870df7c9360bb3ce7e12f9ddf8e72e49cd3a353a1528cbf166ad2383 \ + --hash=sha256:74be3b215a5695690a0f1a9f68b1d1c93f8caad52e23242fcb8ba56aaf060281 \ + --hash=sha256:76a8374b294e4ccb39ccaf11d39a0537ed107534139c00b4393ca3b542cc66e5 \ + --hash=sha256:7ba239bb37663b2b4cd08e703e79e13321512dccd8e5f0e9451d9e53a6b8509a \ + --hash=sha256:7c40851b659d958c5245c1236e34f0d065cc53dca8d978b49a032c8e0adfda6e \ + --hash=sha256:7cf241dbb50ea71c2e628ab2a32b5bfcd36e199152fc44e5c1edb0b773f1583e \ + --hash=sha256:7cfae77da92a20f56cf89739a557b76e5c6edc094f6ad5c090b9e15fbbfcd1a4 \ + --hash=sha256:7d152ec7bb431040af2500e01436c9aa0d993f243346f0594a15755016bf0be1 \ + --hash=sha256:80080972e1d000ad0341c7cc58b6855c80bd887675f92871221451d13a975072 \ + 
--hash=sha256:82dbcd6463e580bcfb7561cece35046aaabeac5a9ddb775020160b14e6c58a5d \ + --hash=sha256:8308a8d49d1354278d5c068c888a58d7158a419b2e4d87c7839ed3641498790c \ + --hash=sha256:839676475ac2ccd1532d36af3d10d290a2ca149b702ed464131e450a767550df \ + --hash=sha256:83feb0f682d75a09ddc11aa37ba5c07dd9b824b22915207f6176ea458474ff75 \ + --hash=sha256:88956c993a20201744282362e3fd30962a9d86dc4f1dcf2bdb31fab27821b61f \ + --hash=sha256:8a6ad8429340e0a4de89353447c6441329def3632e7b2293a7d6e873217d3c2b \ + --hash=sha256:8ba9fbc5d6e36bfeb5292530321cc56c4ef3f98048647fabd8f57543c34174ec \ + --hash=sha256:8c1f6c8df23be165eb0cb78f305483d00c6827a191e3a38394c658d5b9c80bbd \ + --hash=sha256:91276caef95556faeb4b8f09fe4439670d3d6206fee78d47ddb6e6de837f0b4d \ + --hash=sha256:960e7e460fda2d0af18c75585bbe0c99f90b8f09963844618a621b804f8c3abe \ + --hash=sha256:9656a09653b18b80764647d585750df2dff8928e03a706763ab40ec8c4872acc \ + --hash=sha256:9cd935c0220d012a27c20135c140f9cdcbc6249d5954345c81bfb714071b985c \ + --hash=sha256:a2b3c79586636f1fa69a7bd59c87c15fca80c0d34b5c003d57f2f326e5276575 \ + --hash=sha256:a4b9d3f5c48bbe8d9e3758e498b3c34863f2c9b1ac57a4e6310183740e59c980 \ + --hash=sha256:a8c2bf286e5d755a075e5e97ba56b3de08cccdad6b323ab0b21cc98875176b03 \ + --hash=sha256:a90031658805c63fe488f8e9e7a88b260ea121ba3ee9cdabcece9c9ddb50da39 \ + --hash=sha256:ad666a904212aa9a6c77da7dce9d5170008cda76b7776e6731928b3f8a0d40fa \ + --hash=sha256:af2d1648eb625a460eee07d3e1ea3a4a6e84a1fb3a107f6a8e95ac19f7dcce67 \ + --hash=sha256:b3d4b390ee70ca9263b331ccfaf9819ee20e90dfd0201a295e23eb64a005dbef \ + --hash=sha256:ba4432301ad7eeb1b00848cf46fae0e5fecfd18a8cb5fdcf856c67985f79ecc7 \ + --hash=sha256:bc3179e0815827cf963e634095ae5715ee73a5af61defbc8d6ca79f1bdae1d1d \ + --hash=sha256:c5fd099acaee2325f01281a130a39da08d885e4dedf01b84bf156ec2737d78fe \ + --hash=sha256:c797ea56f36c6f248656f0223b11307fdf4a1886f3555eba371f34152b07677f \ + --hash=sha256:cd4ea56c9542ad0091dfdef3e8572ae7a746e1e91eb56c9e08b8d0808b40f1d1 \ + 
--hash=sha256:cdd6f8738e1f1d9df5b1603bb03cb30e442710e5672262b95d0f9fcb4edb0dab \ + --hash=sha256:d0580faeb9def6d0beb7aa666294d5604e569c4e24111ada423cf9936768d95c \ + --hash=sha256:d11afdc5992bbd7af60ed5eb519873690d921425299f51d80aa3099ed49f2bcc \ + --hash=sha256:d1d388d2f5f5a6065cf83c54dd12112b7389095669ff395e632003ae8999c6b8 \ + --hash=sha256:d20da6b4c7aa9ee75ad0730beaba15d65157f5beeaca54a038bb968f92bf3ce3 \ + --hash=sha256:d22e0660de24bd8e9ac82f4230a22a5fe4e397265709289d61d5fb333839ba50 \ + --hash=sha256:d22f2cb82e0b40e427a74a93c9a4231335bbc548aed79955dde0b64ea7f88146 \ + --hash=sha256:d4fa1eeb9bea6d9b64ac91ec51ee94cc4fc744955df5be393e1c923c920db2b0 \ + --hash=sha256:d9793d46d3e6522ae58e9321032827c9c0df1e56cbe5d3de965facb311aed6aa \ + --hash=sha256:dab979662da1c9fbb464e310c0b06cb5f1d174d09a462553af78f0bfb3e01920 \ + --hash=sha256:db8d0f0ad92f74feb61c4e4a71f1d573ef37c22ef4dc19cab93e501bfdad8cbd \ + --hash=sha256:df2af1180b8eeececf4f819d22cc0668bfadadfd038b19a90bd2fb2ee419ec6f \ + --hash=sha256:dfb5d2ab183c0efe5e7b8917e4eaa2e837aacafad8a69b89aa6bc81550eed857 \ + --hash=sha256:e04f8c76b8d5c70695b4e8f1d0b391d8ef91df00ef488c6c1ffb910176459bc6 \ + --hash=sha256:e4a45ba34f904062c63049a760790c6a2fa7a4cc4bd160d8af243b12371aaa05 \ + --hash=sha256:e9be1f7c5f9673616f875299339984da9447a40e3aea927750c843d6e5e2e029 \ + --hash=sha256:edc91c50e17f5cd945d821f0f1af830522dba0c10267c3aab186dc3dbaab8def \ + --hash=sha256:ee70ee5f4144a45a9e6169000b5b525d82673d5dab9f7587eccc92794814e7ac \ + --hash=sha256:f1059ca9a51c936c9a8d46fbc2c9a6b4c15ab3f13a97f1ad32f024b39666ba85 \ + --hash=sha256:f47eef55297799956464efc00c74ae55c48a7b68236856d56183fe1ddf866205 \ + --hash=sha256:f4ae6f423cb7d1c6256b7482025ace2825728f53b7ac58bcd574de6ee9d242c2 \ + --hash=sha256:f4b15a163448ec79241fb2f1bc5a8ae1a4a304f7a48d948d208a2935b26bf8a5 \ + --hash=sha256:f55601fb58f92e4f4f1d05d80c24cb77505dc42103ddfd63ddfdc51d3da46fa2 \ + --hash=sha256:fa84bbe22ffa108f91631935c28a623001e335d66e393438258501e618fb0dde \ + 
--hash=sha256:faa12a9f34671a30ea6bb027f04ec4e1fb8fa3fb3ed030893e729d4d0f3a9791 \ + --hash=sha256:fcfd5f91b882eedf8d9601bd21261d6ce0e61a8c66a7152d1f5df08d3f643ab1 \ + --hash=sha256:fe30ef31172bdcf946502a945faad110e8fff88c32c4bec9a593df0280e64d8a # via # jsonschema # referencing @@ -844,9 +844,9 @@ sphinx-automodapi==0.16.0 \ --hash=sha256:68fc47064804604b90aa27c047016e86aaf970981d90a0082d5b5dd2e9d38afd \ --hash=sha256:6c673ef93066408e5ad3e2fa3533044d432a47fe6a826212b9ebf5f52a872554 # via documenteer -sphinx-click==5.0.1 \ - --hash=sha256:31836ca22f746d3c26cbfdfe0c58edf0bca5783731a0b2e25bb6d59800bb75a1 \ - --hash=sha256:fcc7df15e56e3ff17ebf446cdd316c2eb79580b37c49579fba11e5468802ef25 +sphinx-click==5.1.0 \ + --hash=sha256:6812c2db62d3fae71a4addbe5a8a0a16c97eb491f3cd63fe34b4ed7e07236f33 \ + --hash=sha256:ae97557a4e9ec646045089326c3b90e026c58a45e083b8f35f17d5d6558d08a0 # via -r requirements/dev.in sphinx-copybutton==0.5.2 \ --hash=sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd \ @@ -983,13 +983,13 @@ urllib3==2.1.0 \ # -c requirements/main.txt # documenteer # requests -virtualenv==20.24.6 \ - --hash=sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af \ - --hash=sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381 +virtualenv==20.24.7 \ + --hash=sha256:69050ffb42419c91f6c1284a7b24e0475d793447e35929b488bf6a0aade39353 \ + --hash=sha256:a18b3fd0314ca59a2e9f4b556819ed07183b3e9a3702ecfe213f593d44f7b3fd # via pre-commit # The following packages are considered to be unsafe in a requirements file: -setuptools==68.2.2 \ - --hash=sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87 \ - --hash=sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a +setuptools==69.0.2 \ + --hash=sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2 \ + --hash=sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6 # via nodeenv diff --git a/requirements/main.txt 
b/requirements/main.txt index 5e659214e1..fdd183be12 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -259,9 +259,9 @@ hvac==2.0.0 \ --hash=sha256:3b14d0979b98ea993eca73b7dac7161b5547ede369a9b28f4fa40f18e74ec3f3 \ --hash=sha256:6a51cb9a0d22fe13e824cb0b0a1ce2eeacb9ce6af68b7d1b6689e25ec1becaf5 # via -r requirements/main.in -idna==3.4 \ - --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ - --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 +idna==3.6 \ + --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ + --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f # via # anyio # requests @@ -340,119 +340,119 @@ pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pydantic==2.5.1 \ - --hash=sha256:0b8be5413c06aadfbe56f6dc1d45c9ed25fd43264414c571135c97dd77c2bedb \ - --hash=sha256:dc5244a8939e0d9a68f1f1b5f550b2e1c879912033b1becbedb315accc75441b +pydantic==2.5.2 \ + --hash=sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0 \ + --hash=sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd # via # -r requirements/main.in # fastapi # safir -pydantic-core==2.14.3 \ - --hash=sha256:056ea7cc3c92a7d2a14b5bc9c9fa14efa794d9f05b9794206d089d06d3433dc7 \ - --hash=sha256:0653fb9fc2fa6787f2fa08631314ab7fc8070307bd344bf9471d1b7207c24623 \ - --hash=sha256:076edc972b68a66870cec41a4efdd72a6b655c4098a232314b02d2bfa3bfa157 \ - --hash=sha256:0a3e51c2be472b7867eb0c5d025b91400c2b73a0823b89d4303a9097e2ec6655 \ - --hash=sha256:0c7f8e8a7cf8e81ca7d44bea4f181783630959d41b4b51d2f74bc50f348a090f \ - --hash=sha256:10904368261e4509c091cbcc067e5a88b070ed9a10f7ad78f3029c175487490f \ - --hash=sha256:113752a55a8eaece2e4ac96bc8817f134c2c23477e477d085ba89e3aa0f4dc44 \ - 
--hash=sha256:12e05a76b223577a4696c76d7a6b36a0ccc491ffb3c6a8cf92d8001d93ddfd63 \ - --hash=sha256:136bc7247e97a921a020abbd6ef3169af97569869cd6eff41b6a15a73c44ea9b \ - --hash=sha256:1582f01eaf0537a696c846bea92082082b6bfc1103a88e777e983ea9fbdc2a0f \ - --hash=sha256:1767bd3f6370458e60c1d3d7b1d9c2751cc1ad743434e8ec84625a610c8b9195 \ - --hash=sha256:1e2979dc80246e18e348de51246d4c9b410186ffa3c50e77924bec436b1e36cb \ - --hash=sha256:1ea992659c03c3ea811d55fc0a997bec9dde863a617cc7b25cfde69ef32e55af \ - --hash=sha256:1f2d4516c32255782153e858f9a900ca6deadfb217fd3fb21bb2b60b4e04d04d \ - --hash=sha256:2494d20e4c22beac30150b4be3b8339bf2a02ab5580fa6553ca274bc08681a65 \ - --hash=sha256:260692420028319e201b8649b13ac0988974eeafaaef95d0dfbf7120c38dc000 \ - --hash=sha256:2646f8270f932d79ba61102a15ea19a50ae0d43b314e22b3f8f4b5fabbfa6e38 \ - --hash=sha256:27828f0227b54804aac6fb077b6bb48e640b5435fdd7fbf0c274093a7b78b69c \ - --hash=sha256:2bc736725f9bd18a60eec0ed6ef9b06b9785454c8d0105f2be16e4d6274e63d0 \ - --hash=sha256:2c08ac60c3caa31f825b5dbac47e4875bd4954d8f559650ad9e0b225eaf8ed0c \ - --hash=sha256:2c83892c7bf92b91d30faca53bb8ea21f9d7e39f0ae4008ef2c2f91116d0464a \ - --hash=sha256:354db020b1f8f11207b35360b92d95725621eb92656725c849a61e4b550f4acc \ - --hash=sha256:364dba61494e48f01ef50ae430e392f67ee1ee27e048daeda0e9d21c3ab2d609 \ - --hash=sha256:37dad73a2f82975ed563d6a277fd9b50e5d9c79910c4aec787e2d63547202315 \ - --hash=sha256:38113856c7fad8c19be7ddd57df0c3e77b1b2336459cb03ee3903ce9d5e236ce \ - --hash=sha256:38aed5a1bbc3025859f56d6a32f6e53ca173283cb95348e03480f333b1091e7d \ - --hash=sha256:3ad083df8fe342d4d8d00cc1d3c1a23f0dc84fce416eb301e69f1ddbbe124d3f \ - --hash=sha256:3c1bf1a7b05a65d3b37a9adea98e195e0081be6b17ca03a86f92aeb8b110f468 \ - --hash=sha256:3d1dde10bd9962b1434053239b1d5490fc31a2b02d8950a5f731bc584c7a5a0f \ - --hash=sha256:44aaf1a07ad0824e407dafc637a852e9a44d94664293bbe7d8ee549c356c8882 \ - --hash=sha256:44afa3c18d45053fe8d8228950ee4c8eaf3b5a7f3b64963fdeac19b8342c987f \ - 
--hash=sha256:4a70d23eedd88a6484aa79a732a90e36701048a1509078d1b59578ef0ea2cdf5 \ - --hash=sha256:4aa89919fbd8a553cd7d03bf23d5bc5deee622e1b5db572121287f0e64979476 \ - --hash=sha256:4cc6bb11f4e8e5ed91d78b9880774fbc0856cb226151b0a93b549c2b26a00c19 \ - --hash=sha256:536e1f58419e1ec35f6d1310c88496f0d60e4f182cacb773d38076f66a60b149 \ - --hash=sha256:5402ee0f61e7798ea93a01b0489520f2abfd9b57b76b82c93714c4318c66ca06 \ - --hash=sha256:56814b41486e2d712a8bc02a7b1f17b87fa30999d2323bbd13cf0e52296813a1 \ - --hash=sha256:5b73441a1159f1fb37353aaefb9e801ab35a07dd93cb8177504b25a317f4215a \ - --hash=sha256:61beaa79d392d44dc19d6f11ccd824d3cccb865c4372157c40b92533f8d76dd0 \ - --hash=sha256:6c2d118d1b6c9e2d577e215567eedbe11804c3aafa76d39ec1f8bc74e918fd07 \ - --hash=sha256:6e2f9d76c00e805d47f19c7a96a14e4135238a7551a18bfd89bb757993fd0933 \ - --hash=sha256:71ed769b58d44e0bc2701aa59eb199b6665c16e8a5b8b4a84db01f71580ec448 \ - --hash=sha256:7349f99f1ef8b940b309179733f2cad2e6037a29560f1b03fdc6aa6be0a8d03c \ - --hash=sha256:75f3f534f33651b73f4d3a16d0254de096f43737d51e981478d580f4b006b427 \ - --hash=sha256:76fc18653a5c95e5301a52d1b5afb27c9adc77175bf00f73e94f501caf0e05ad \ - --hash=sha256:7cb0c397f29688a5bd2c0dbd44451bc44ebb9b22babc90f97db5ec3e5bb69977 \ - --hash=sha256:7cc24728a1a9cef497697e53b3d085fb4d3bc0ef1ef4d9b424d9cf808f52c146 \ - --hash=sha256:7e63a56eb7fdee1587d62f753ccd6d5fa24fbeea57a40d9d8beaef679a24bdd6 \ - --hash=sha256:832d16f248ca0cc96929139734ec32d21c67669dcf8a9f3f733c85054429c012 \ - --hash=sha256:8488e973547e8fb1b4193fd9faf5236cf1b7cd5e9e6dc7ff6b4d9afdc4c720cb \ - --hash=sha256:849cff945284c577c5f621d2df76ca7b60f803cc8663ff01b778ad0af0e39bb9 \ - --hash=sha256:88ec906eb2d92420f5b074f59cf9e50b3bb44f3cb70e6512099fdd4d88c2f87c \ - --hash=sha256:8d3b9c91eeb372a64ec6686c1402afd40cc20f61a0866850f7d989b6bf39a41a \ - --hash=sha256:8f5624f0f67f2b9ecaa812e1dfd2e35b256487566585160c6c19268bf2ffeccc \ - --hash=sha256:905a12bf088d6fa20e094f9a477bf84bd823651d8b8384f59bcd50eaa92e6a52 \ - 
--hash=sha256:92486a04d54987054f8b4405a9af9d482e5100d6fe6374fc3303015983fc8bda \ - --hash=sha256:96eb10ef8920990e703da348bb25fedb8b8653b5966e4e078e5be382b430f9e0 \ - --hash=sha256:96fb679c7ca12a512d36d01c174a4fbfd912b5535cc722eb2c010c7b44eceb8e \ - --hash=sha256:98d8b3932f1a369364606417ded5412c4ffb15bedbcf797c31317e55bd5d920e \ - --hash=sha256:9dbab442a8d9ca918b4ed99db8d89d11b1f067a7dadb642476ad0889560dac79 \ - --hash=sha256:9ef3e2e407e4cad2df3c89488a761ed1f1c33f3b826a2ea9a411b0a7d1cccf1b \ - --hash=sha256:9ff737f24b34ed26de62d481ef522f233d3c5927279f6b7229de9b0deb3f76b5 \ - --hash=sha256:a1a39fecb5f0b19faee9a8a8176c805ed78ce45d760259a4ff3d21a7daa4dfc1 \ - --hash=sha256:a402ae1066be594701ac45661278dc4a466fb684258d1a2c434de54971b006ca \ - --hash=sha256:a5c51460ede609fbb4fa883a8fe16e749964ddb459966d0518991ec02eb8dfb9 \ - --hash=sha256:a8ca13480ce16daad0504be6ce893b0ee8ec34cd43b993b754198a89e2787f7e \ - --hash=sha256:ab4a2381005769a4af2ffddae74d769e8a4aae42e970596208ec6d615c6fb080 \ - --hash=sha256:aeafc7f5bbddc46213707266cadc94439bfa87ecf699444de8be044d6d6eb26f \ - --hash=sha256:aecd5ed096b0e5d93fb0367fd8f417cef38ea30b786f2501f6c34eabd9062c38 \ - --hash=sha256:af452e69446fadf247f18ac5d153b1f7e61ef708f23ce85d8c52833748c58075 \ - --hash=sha256:af46f0b7a1342b49f208fed31f5a83b8495bb14b652f621e0a6787d2f10f24ee \ - --hash=sha256:b02b5e1f54c3396c48b665050464803c23c685716eb5d82a1d81bf81b5230da4 \ - --hash=sha256:b28996872b48baf829ee75fa06998b607c66a4847ac838e6fd7473a6b2ab68e7 \ - --hash=sha256:b7692f539a26265cece1e27e366df5b976a6db6b1f825a9e0466395b314ee48b \ - --hash=sha256:ba44fad1d114539d6a1509966b20b74d2dec9a5b0ee12dd7fd0a1bb7b8785e5f \ - --hash=sha256:bf15145b1f8056d12c67255cd3ce5d317cd4450d5ee747760d8d088d85d12a2d \ - --hash=sha256:c3dc2920cc96f9aa40c6dc54256e436cc95c0a15562eb7bd579e1811593c377e \ - --hash=sha256:c54af5069da58ea643ad34ff32fd6bc4eebb8ae0fef9821cd8919063e0aeeaab \ - --hash=sha256:c5ea0153482e5b4d601c25465771c7267c99fddf5d3f3bdc238ef930e6d051cf \ - 
--hash=sha256:c9ffd823c46e05ef3eb28b821aa7bc501efa95ba8880b4a1380068e32c5bed47 \ - --hash=sha256:ca55c9671bb637ce13d18ef352fd32ae7aba21b4402f300a63f1fb1fd18e0364 \ - --hash=sha256:caa94726791e316f0f63049ee00dff3b34a629b0d099f3b594770f7d0d8f1f56 \ - --hash=sha256:cc956f78651778ec1ab105196e90e0e5f5275884793ab67c60938c75bcca3989 \ - --hash=sha256:ccbf355b7276593c68fa824030e68cb29f630c50e20cb11ebb0ee450ae6b3d08 \ - --hash=sha256:cf08b43d1d5d1678f295f0431a4a7e1707d4652576e1d0f8914b5e0213bfeee5 \ - --hash=sha256:d06c78074646111fb01836585f1198367b17d57c9f427e07aaa9ff499003e58d \ - --hash=sha256:d2b53e1f851a2b406bbb5ac58e16c4a5496038eddd856cc900278fa0da97f3fc \ - --hash=sha256:d41df8e10b094640a6b234851b624b76a41552f637b9fb34dc720b9fe4ef3be4 \ - --hash=sha256:d7abd17a838a52140e3aeca271054e321226f52df7e0a9f0da8f91ea123afe98 \ - --hash=sha256:de52ddfa6e10e892d00f747bf7135d7007302ad82e243cf16d89dd77b03b649d \ - --hash=sha256:df33902464410a1f1a0411a235f0a34e7e129f12cb6340daca0f9d1390f5fe10 \ - --hash=sha256:e16aaf788f1de5a85c8f8fcc9c1ca1dd7dd52b8ad30a7889ca31c7c7606615b8 \ - --hash=sha256:e3ad4968711fb379a67c8c755beb4dae8b721a83737737b7bcee27c05400b047 \ - --hash=sha256:e483b8b913fcd3b48badec54185c150cb7ab0e6487914b84dc7cde2365e0c892 \ - --hash=sha256:e71f666c3bf019f2490a47dddb44c3ccea2e69ac882f7495c68dc14d4065eac2 \ - --hash=sha256:ea1498ce4491236d1cffa0eee9ad0968b6ecb0c1cd711699c5677fc689905f00 \ - --hash=sha256:eaab9dc009e22726c62fe3b850b797e7f0e7ba76d245284d1064081f512c7226 \ - --hash=sha256:ec79dbe23702795944d2ae4c6925e35a075b88acd0d20acde7c77a817ebbce94 \ - --hash=sha256:f1b92e72babfd56585c75caf44f0b15258c58e6be23bc33f90885cebffde3400 \ - --hash=sha256:f1f46700402312bdc31912f6fc17f5ecaaaa3bafe5487c48f07c800052736289 \ - --hash=sha256:f518eac285c9632be337323eef9824a856f2680f943a9b68ac41d5f5bad7df7c \ - --hash=sha256:f86f20a9d5bee1a6ede0f2757b917bac6908cde0f5ad9fcb3606db1e2968bcf5 \ - --hash=sha256:f8fc652c354d3362e2932a79d5ac4bbd7170757a41a62c4fe0f057d29f10bebb \ - 
--hash=sha256:fe272a72c7ed29f84c42fedd2d06c2f9858dc0c00dae3b34ba15d6d8ae0fbaaf \ - --hash=sha256:fe863491664c6720d65ae438d4efaa5eca766565a53adb53bf14bc3246c72fe0 +pydantic-core==2.14.5 \ + --hash=sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b \ + --hash=sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b \ + --hash=sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d \ + --hash=sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8 \ + --hash=sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124 \ + --hash=sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189 \ + --hash=sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c \ + --hash=sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d \ + --hash=sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f \ + --hash=sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520 \ + --hash=sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4 \ + --hash=sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6 \ + --hash=sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955 \ + --hash=sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3 \ + --hash=sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b \ + --hash=sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a \ + --hash=sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68 \ + --hash=sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3 \ + --hash=sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd \ + --hash=sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de \ + --hash=sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b \ + 
--hash=sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634 \ + --hash=sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7 \ + --hash=sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459 \ + --hash=sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7 \ + --hash=sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3 \ + --hash=sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331 \ + --hash=sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf \ + --hash=sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d \ + --hash=sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36 \ + --hash=sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59 \ + --hash=sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937 \ + --hash=sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc \ + --hash=sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093 \ + --hash=sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753 \ + --hash=sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706 \ + --hash=sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca \ + --hash=sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260 \ + --hash=sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997 \ + --hash=sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588 \ + --hash=sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71 \ + --hash=sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb \ + --hash=sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e \ + --hash=sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69 \ + --hash=sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5 \ + 
--hash=sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07 \ + --hash=sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1 \ + --hash=sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0 \ + --hash=sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd \ + --hash=sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8 \ + --hash=sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944 \ + --hash=sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26 \ + --hash=sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda \ + --hash=sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4 \ + --hash=sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9 \ + --hash=sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00 \ + --hash=sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe \ + --hash=sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6 \ + --hash=sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada \ + --hash=sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4 \ + --hash=sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7 \ + --hash=sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325 \ + --hash=sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4 \ + --hash=sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b \ + --hash=sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88 \ + --hash=sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04 \ + --hash=sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863 \ + --hash=sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0 \ + --hash=sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911 \ + 
--hash=sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b \ + --hash=sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e \ + --hash=sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144 \ + --hash=sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5 \ + --hash=sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720 \ + --hash=sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab \ + --hash=sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d \ + --hash=sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789 \ + --hash=sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec \ + --hash=sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2 \ + --hash=sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db \ + --hash=sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f \ + --hash=sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef \ + --hash=sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3 \ + --hash=sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209 \ + --hash=sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc \ + --hash=sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651 \ + --hash=sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8 \ + --hash=sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e \ + --hash=sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66 \ + --hash=sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7 \ + --hash=sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550 \ + --hash=sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd \ + --hash=sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405 \ + 
--hash=sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27 \ + --hash=sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093 \ + --hash=sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077 \ + --hash=sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113 \ + --hash=sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3 \ + --hash=sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6 \ + --hash=sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf \ + --hash=sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed \ + --hash=sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88 \ + --hash=sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe \ + --hash=sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18 \ + --hash=sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867 # via pydantic pyjwt[crypto]==2.8.0 \ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ @@ -526,9 +526,9 @@ rfc3986[idna2008]==1.5.0 \ # via # httpx # rfc3986 -safir==5.0.0a4 \ - --hash=sha256:8cd7c8212f777af6afef0023614279cfd1695b35932b551278fdad7d8bc202a9 \ - --hash=sha256:eadaffb3b32129c2db562fc0823d5b1237beaf037e2cdd946fda309c3a9f50b1 +safir==5.0.0a5 \ + --hash=sha256:6a38dbdcfc63ea0261d25cefde0defc9f445a7da2a2612cd864d22bf1f292180 \ + --hash=sha256:73348465c732fb89ddbd3b73cb8dcaa1294611c49d5db225e1d2a8205558f29b # via -r requirements/main.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ From a714f592a6f8e1a6ec717a5bfe309b7aa69376aa Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 27 Nov 2023 13:06:35 -0700 Subject: [PATCH 283/588] Increase query timeout to 300s - USDF EFD default proxy timeout is 60s. Some queries are taking longer to execute in both Chronograf and the EFD client. 
- Increase both proxy timeout and the InfluxDB coordinator query timeout to 300s --- applications/sasquatch/values-usdfprod.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 9c425c7908..16a7da7592 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -19,18 +19,30 @@ influxdb: ingress: enabled: true hostname: usdf-rsp.slac.stanford.edu + annotations: + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" persistence: enabled: true size: 15Ti + config: + coordinator: + query-timeout: "300s" source-influxdb: enabled: true ingress: enabled: true hostname: usdf-rsp.slac.stanford.edu + annotations: + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" persistence: enabled: true size: 15Ti + config: + coordinator: + query-timeout: "300s" kafka-connect-manager: influxdbSink: From 808216b93a837d6e81f41cbff73fcc4944eaeea0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 27 Nov 2023 13:44:56 -0800 Subject: [PATCH 284/588] Switch lsst.io back to www.lsst.io www.lsst.io is the canonical URL, so use that everywhere. It's no longer a redirect. --- applications/squareone/values-idfdev.yaml | 2 +- applications/squareone/values-idfint.yaml | 2 +- applications/squareone/values.yaml | 2 +- docs/applications/ook/index.rst | 2 +- docs/index.rst | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/squareone/values-idfdev.yaml b/applications/squareone/values-idfdev.yaml index 98798e453e..30c341b27a 100644 --- a/applications/squareone/values-idfdev.yaml +++ b/applications/squareone/values-idfdev.yaml @@ -90,7 +90,7 @@ config: Want to dive deeper into the Rubin Observatory and Legacy Survey of Space and Time? 
[Search in our technical documentation - portal.](https://lsst.io) + portal.](https://www.lsst.io) diff --git a/applications/squareone/values-idfint.yaml b/applications/squareone/values-idfint.yaml index aa757ae6e8..c0b24974fd 100644 --- a/applications/squareone/values-idfint.yaml +++ b/applications/squareone/values-idfint.yaml @@ -154,7 +154,7 @@ config: Want to dive deeper into the Rubin Observatory and Legacy Survey of Space and Time? [Search in our technical documentation - portal.](https://lsst.io) + portal.](https://www.lsst.io) supportPageMdx: | diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml index 363a8b2b28..e626eef66b 100644 --- a/applications/squareone/values.yaml +++ b/applications/squareone/values.yaml @@ -229,7 +229,7 @@ config: Want to dive deeper into the Rubin Observatory and Legacy Survey of Space and Time? [Search in our technical documentation - portal.](https://lsst.io) + portal.](https://www.lsst.io) diff --git a/docs/applications/ook/index.rst b/docs/applications/ook/index.rst index faab8901f1..9228055682 100644 --- a/docs/applications/ook/index.rst +++ b/docs/applications/ook/index.rst @@ -5,7 +5,7 @@ ook — Documentation indexing ############################ Ook is the librarian service for Rubin Observatory. -Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, https://lsst.io. +Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, https://www.lsst.io. .. jinja:: ook :file: applications/_summary.rst.jinja diff --git a/docs/index.rst b/docs/index.rst index 4b85393527..bcedc9a8cf 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,7 +6,7 @@ Phalanx [#name]_ is a GitOps repository for Rubin Observatory's Kubernetes envir Using Helm_ and `Argo CD`_, Phalanx defines the configurations of applications in each environment. 
This documentation is for Rubin team members that are developing applications and administering Kubernetes clusters. -Astronomers and other end-users can visit the `Rubin Documentation Portal `__ to learn how to use Rubin Observatory's software, services, and datasets. +Astronomers and other end-users can visit the `Rubin Documentation Portal `__ to learn how to use Rubin Observatory's software, services, and datasets. Phalanx is on GitHub at https://github.com/lsst-sqre/phalanx. From fddef43c08e818ac04a43e06bb1417bb7869fc0c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 18 Sep 2023 16:57:42 -0700 Subject: [PATCH 285/588] Use Keycloak on base-lsp.lsst.codes Point Gafaelfawr at local Keycloak on base-lsp.lsst.codes instead of using GitHub. Get user metadata from the IPA LDAP server. Switch to the new home directory source. Add values.yaml documentation for the new groupSearchByDn setting. --- applications/gafaelfawr/README.md | 1 + .../gafaelfawr/templates/configmap.yaml | 1 + applications/gafaelfawr/values-base.yaml | 101 +++++------------- applications/gafaelfawr/values.yaml | 6 ++ applications/nublado/values-base.yaml | 8 +- 5 files changed, 39 insertions(+), 78 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index b88a433aa2..ed3427506c 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -49,6 +49,7 @@ Authentication and identity system | config.ldap.groupBaseDn | string | None, must be set | Base DN for the LDAP search to find a user's groups | | config.ldap.groupMemberAttr | string | `"member"` | Member attribute of the object class. Values must match the username returned in the token from the OpenID Connect authentication server. | | config.ldap.groupObjectClass | string | `"posixGroup"` | Object class containing group information | +| config.ldap.groupSearchByDn | bool | `false` | Whether to search for group membership by user DN rather than bare usernames. 
Most LDAP servers use full DNs for group membership, so normally this should be set to true, but it requires `userBaseDn` also be set. | | config.ldap.kerberosConfig | string | Use anonymous binds | Enable GSSAPI (Kerberos) binds to LDAP using this `krb5.conf` file. If set, `ldap-keytab` must be set in the Gafaelfawr Vault secret. Set either this or `userDn`, not both. | | config.ldap.nameAttr | string | `"displayName"` | Attribute containing the user's full name | | config.ldap.uidAttr | string | Get UID from upstream authentication provider | Attribute containing the user's UID number (set to `uidNumber` for most LDAP servers) | diff --git a/applications/gafaelfawr/templates/configmap.yaml b/applications/gafaelfawr/templates/configmap.yaml index 93c2f61976..d5511ecc23 100644 --- a/applications/gafaelfawr/templates/configmap.yaml +++ b/applications/gafaelfawr/templates/configmap.yaml @@ -129,6 +129,7 @@ {{- end }} groupObjectClass: {{ .Values.config.ldap.groupObjectClass | quote }} groupMemberAttr: {{ .Values.config.ldap.groupMemberAttr | quote }} + groupSearchByDn: {{ .Values.config.ldap.groupSearchByDn }} {{- if .Values.config.ldap.userBaseDn }} userBaseDn: {{ .Values.config.ldap.userBaseDn | quote }} userSearchAttr: {{ .Values.config.ldap.userSearchAttr | quote }} diff --git a/applications/gafaelfawr/values-base.yaml b/applications/gafaelfawr/values-base.yaml index ceef9a995f..1d7f4775be 100644 --- a/applications/gafaelfawr/values-base.yaml +++ b/applications/gafaelfawr/values-base.yaml @@ -3,11 +3,28 @@ redis: storageClass: "rook-ceph-block" config: + logLevel: "DEBUG" slackAlerts: true databaseUrl: "postgresql://gafaelfawr@postgresdb01.ls.lsst.org/gafaelfawr" - github: - clientId: "ec88b9b897f302b620d1" + oidc: + clientId: "rsp-bts" + audience: "rsp-bts" + loginUrl: "https://keycloak.ls.lsst.org/realms/master/protocol/openid-connect/auth" + tokenUrl: "https://keycloak.ls.lsst.org/realms/master/protocol/openid-connect/token" + issuer: 
"https://keycloak.ls.lsst.org/realms/master" + scopes: + - "openid" + usernameClaim: "preferred_username" + + ldap: + url: "ldap://ipa1.ls.lsst.org" + userDn: "uid=svc_rsp,cn=users,cn=accounts,dc=lsst,dc=cloud" + userBaseDn: "cn=users,cn=accounts,dc=lsst,dc=cloud" + uidAttr: "uidNumber" + gidAttr: "gidNumber" + groupBaseDn: "cn=groups,cn=accounts,dc=lsst,dc=cloud" + groupSearchByDn: true # Support OpenID Connect clients like Chronograf. oidcServer: @@ -16,85 +33,21 @@ config: # Allow access by GitHub team. groupMapping: "admin:provision": - - github: - organization: "lsst-sqre" - team: "square" + - "sqre" "exec:admin": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-ts" - team: "integration-testing-team" + - "sqre" "exec:internal-tools": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "exec:notebook": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "exec:portal": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "read:image": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "read:tap": - - github: - organization: "lsst-sqre" - team: "square" - - github: - organization: "lsst-sqre" - team: "friends" - - github: - 
organization: "lsst-ts" - team: "base-access" - - github: - organization: "rubin-summit" - team: "rsp-access" + - "rsp-bts" "write:sasquatch": - - github: - organization: "lsst-sqre" - team: "square" + - "sqre" initialAdmins: - "afausti" diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 07d0b88c11..fba8421537 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -199,6 +199,12 @@ config: # returned in the token from the OpenID Connect authentication server. groupMemberAttr: "member" + # -- Whether to search for group membership by user DN rather than bare + # usernames. Most LDAP servers use full DNs for group membership, so + # normally this should be set to true, but it requires `userBaseDn` also + # be set. + groupSearchByDn: false + # -- Base DN for the LDAP search to find a user's entry # @default -- Get user metadata from the upstream authentication provider userBaseDn: "" diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 046db05ee7..05ab4aaba7 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -30,16 +30,16 @@ controller: - containerPath: "/home" mode: "rw" source: - serverPath: "/jhome" - server: "nfs-jhome.ls.lsst.org" + serverPath: "/rsphome" + server: "nfs-rsphome.ls.lsst.org" type: "nfs" volumes: - containerPath: "/home" mode: "rw" source: type: "nfs" - serverPath: "/jhome" - server: "nfs-jhome.ls.lsst.org" + serverPath: "/rsphome" + server: "nfs-rsphome.ls.lsst.org" - containerPath: "/project" mode: "rw" source: From e94d975e5b1456d057607fd6a157095e2fe00207 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 28 Nov 2023 13:49:21 -0800 Subject: [PATCH 286/588] Fix LDAP hostname for base Use ipa.lsst.org (a dynamic DNS entry), not ipa1.ls.lsst.org (a single server that's currently down). 
--- applications/gafaelfawr/values-base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/values-base.yaml b/applications/gafaelfawr/values-base.yaml index 1d7f4775be..b0b61d05b5 100644 --- a/applications/gafaelfawr/values-base.yaml +++ b/applications/gafaelfawr/values-base.yaml @@ -18,7 +18,7 @@ config: usernameClaim: "preferred_username" ldap: - url: "ldap://ipa1.ls.lsst.org" + url: "ldap://ipa.lsst.org" userDn: "uid=svc_rsp,cn=users,cn=accounts,dc=lsst,dc=cloud" userBaseDn: "cn=users,cn=accounts,dc=lsst,dc=cloud" uidAttr: "uidNumber" From 5aed02d6330bbaed7fb2b931b2662a30e75449ec Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 28 Nov 2023 15:15:43 -0700 Subject: [PATCH 287/588] [DM-41941] Use official siav2 image --- applications/siav2/README.md | 4 ++-- applications/siav2/values.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/siav2/README.md b/applications/siav2/README.md index 7869c7d7d6..0046ff2b84 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -15,8 +15,8 @@ Simple Image Access v2 service | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the siav2 image | -| image.repository | string | `"docker.io/cbanek/siav2"` | Image to use in the siav2 deployment | -| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| image.repository | string | `"ghcr.io/lsst-sqre/dal-siav2"` | Image to use in the siav2 deployment | +| image.tag | string | `"0.0.3"` | Overrides the image tag whose default is the chart appVersion. 
| | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nodeSelector | object | `{}` | Node selection rules for the siav2 deployment pod | | obsCoreTable | string | `"ivoa.ObsCore"` | ObsCore table on the TAP service to query | diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index 094730a1a7..f511e9ef2f 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -7,13 +7,13 @@ replicaCount: 1 image: # -- Image to use in the siav2 deployment - repository: "docker.io/cbanek/siav2" + repository: "ghcr.io/lsst-sqre/dal-siav2" # -- Pull policy for the siav2 image pullPolicy: "IfNotPresent" # -- Overrides the image tag whose default is the chart appVersion. - tag: "" + tag: "0.0.3" ingress: # -- Additional annotations for the ingress rule From ebe374660f15e72d85910c60c590912e820704a3 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 28 Nov 2023 17:09:42 -0800 Subject: [PATCH 288/588] Add k8s-manke exec:admin on base exec:admin is used to control Argo Workflows. For now, grant access to everyone with Kubernetes access to the cluster until we have a proper group for this purpose. 
--- applications/gafaelfawr/values-base.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/gafaelfawr/values-base.yaml b/applications/gafaelfawr/values-base.yaml index b0b61d05b5..68f389572f 100644 --- a/applications/gafaelfawr/values-base.yaml +++ b/applications/gafaelfawr/values-base.yaml @@ -35,6 +35,7 @@ config: "admin:provision": - "sqre" "exec:admin": + - "k8s-manke" - "sqre" "exec:internal-tools": - "rsp-bts" From 2a643ce2d9361691afec9c293115b6fc4c343d03 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Tue, 28 Nov 2023 08:54:55 -0800 Subject: [PATCH 289/588] Template for schedview-snapshot --- applications/schedview-snapshot/.helmignore | 23 +++++++ applications/schedview-snapshot/Chart.yaml | 8 +++ .../schedview-snapshot/templates/_helpers.tpl | 26 ++++++++ .../templates/deployment.yaml | 59 +++++++++++++++++ .../schedview-snapshot/templates/hpa.yaml | 28 ++++++++ .../schedview-snapshot/templates/ingress.yaml | 31 +++++++++ .../templates/networkpolicy.yaml | 21 ++++++ .../schedview-snapshot/templates/service.yaml | 15 +++++ applications/schedview-snapshot/values.yaml | 64 +++++++++++++++++++ .../applications/schedview-snapshot/index.rst | 16 +++++ .../applications/schedview-snapshot/values.md | 12 ++++ .../schedview-snapshot-application.yaml | 34 ++++++++++ environments/values.yaml | 3 + 13 files changed, 340 insertions(+) create mode 100644 applications/schedview-snapshot/.helmignore create mode 100644 applications/schedview-snapshot/Chart.yaml create mode 100644 applications/schedview-snapshot/templates/_helpers.tpl create mode 100644 applications/schedview-snapshot/templates/deployment.yaml create mode 100644 applications/schedview-snapshot/templates/hpa.yaml create mode 100644 applications/schedview-snapshot/templates/ingress.yaml create mode 100644 applications/schedview-snapshot/templates/networkpolicy.yaml create mode 100644 applications/schedview-snapshot/templates/service.yaml create mode 100644 
applications/schedview-snapshot/values.yaml create mode 100644 docs/applications/schedview-snapshot/index.rst create mode 100644 docs/applications/schedview-snapshot/values.md create mode 100644 environments/templates/schedview-snapshot-application.yaml diff --git a/applications/schedview-snapshot/.helmignore b/applications/schedview-snapshot/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/schedview-snapshot/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml new file mode 100644 index 0000000000..f275de4c00 --- /dev/null +++ b/applications/schedview-snapshot/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: 0.1.0 +description: Dashboard for examination of scheduler snapshots. +name: schedview-snapshot +sources: +- https://github.com/lsst-sqre/schedview-snapshot +type: application +version: 1.0.0 diff --git a/applications/schedview-snapshot/templates/_helpers.tpl b/applications/schedview-snapshot/templates/_helpers.tpl new file mode 100644 index 0000000000..e489b7f79b --- /dev/null +++ b/applications/schedview-snapshot/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "schedview-snapshot.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "schedview-snapshot.labels" -}} +helm.sh/chart: {{ include "schedview-snapshot.chart" . }} +{{ include "schedview-snapshot.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "schedview-snapshot.selectorLabels" -}} +app.kubernetes.io/name: "schedview-snapshot" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml new file mode 100644 index 0000000000..545763140a --- /dev/null +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "schedview-snapshot.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "schedview-snapshot.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/schedview-snapshot/templates/hpa.yaml b/applications/schedview-snapshot/templates/hpa.yaml new file mode 100644 index 0000000000..ec4a91c543 --- /dev/null +++ b/applications/schedview-snapshot/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "schedview-snapshot" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/schedview-snapshot/templates/ingress.yaml b/applications/schedview-snapshot/templates/ingress.yaml new file mode 100644 index 0000000000..915dd71730 --- /dev/null +++ b/applications/schedview-snapshot/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + loginRedirect: true +template: + metadata: + name: "schedview-snapshot" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/schedview-snapshot" + pathType: "Prefix" + backend: + service: + name: "schedview-snapshot" + port: + number: 8080 diff --git a/applications/schedview-snapshot/templates/networkpolicy.yaml b/applications/schedview-snapshot/templates/networkpolicy.yaml new file mode 100644 index 0000000000..20ad995598 --- /dev/null +++ b/applications/schedview-snapshot/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "schedview-snapshot" +spec: + podSelector: + matchLabels: + {{- include "schedview-snapshot.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/schedview-snapshot/templates/service.yaml b/applications/schedview-snapshot/templates/service.yaml new file mode 100644 index 0000000000..015e50343c --- /dev/null +++ b/applications/schedview-snapshot/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "schedview-snapshot" + labels: + {{- include "schedview-snapshot.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "schedview-snapshot.selectorLabels" . | nindent 4 }} diff --git a/applications/schedview-snapshot/values.yaml b/applications/schedview-snapshot/values.yaml new file mode 100644 index 0000000000..66ad86ebc4 --- /dev/null +++ b/applications/schedview-snapshot/values.yaml @@ -0,0 +1,64 @@ +# Default values for schedview-snapshot. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the schedview-snapshot deployment + repository: "ghcr.io/lsst-sqre/schedview-snapshot" + + # -- Pull policy for the schedview-snapshot image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. + tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + +autoscaling: + # -- Enable autoscaling of schedview-snapshot deployment + enabled: false + + # -- Minimum number of schedview-snapshot deployment pods + minReplicas: 1 + + # -- Maximum number of schedview-snapshot deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of schedview-snapshot deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the schedview-snapshot deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the schedview-snapshot deployment pod +resources: {} + +# -- Node selection rules for the schedview-snapshot deployment pod +nodeSelector: {} + +# -- Tolerations for the schedview-snapshot deployment pod +tolerations: [] + +# -- Affinity rules for the schedview-snapshot deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/docs/applications/schedview-snapshot/index.rst b/docs/applications/schedview-snapshot/index.rst new file mode 100644 index 0000000000..8084232007 --- /dev/null +++ b/docs/applications/schedview-snapshot/index.rst @@ -0,0 +1,16 @@ +.. 
px-app:: schedview-snapshot + +###################################################################### +schedview-snapshot — Dashboard for examination of scheduler snapshots. +###################################################################### + +.. jinja:: schedview-snapshot + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/schedview-snapshot/values.md b/docs/applications/schedview-snapshot/values.md new file mode 100644 index 0000000000..ebf761876b --- /dev/null +++ b/docs/applications/schedview-snapshot/values.md @@ -0,0 +1,12 @@ +```{px-app-values} schedview-snapshot +``` + +# schedview-snapshot Helm values reference + +Helm values reference table for the {px-app}`schedview-snapshot` application. + +```{include} ../../../applications/schedview-snapshot/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/environments/templates/schedview-snapshot-application.yaml b/environments/templates/schedview-snapshot-application.yaml new file mode 100644 index 0000000000..5f8764e9c4 --- /dev/null +++ b/environments/templates/schedview-snapshot-application.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "schedview-snapshot") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "schedview-snapshot" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "schedview-snapshot" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "schedview-snapshot" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/schedview-snapshot" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: 
"global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values.yaml b/environments/values.yaml index e3cdae5244..97b5c66376 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -131,6 +131,9 @@ applications: # -- Enable the schedview-prenight application schedview-prenight: false + # -- Enable the schedview-snapshot application + schedview-snapshot: false + # -- Enable the siav2 application siav2: false From 933448da17ccc56ae6d89c77b9a6c248ce703123 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Tue, 28 Nov 2023 08:56:56 -0800 Subject: [PATCH 290/588] customize schedview-snapshot config --- applications/schedview-snapshot/Chart.yaml | 7 ++++--- .../schedview-snapshot/templates/deployment.yaml | 16 +++++++++++++++- .../schedview-snapshot/templates/ingress.yaml | 2 +- .../schedview-snapshot/values-usdfdev.yaml | 0 applications/schedview-snapshot/values.yaml | 4 ++-- docs/applications/schedview-snapshot/index.rst | 4 ++++ environments/values-usdfdev.yaml | 1 + 7 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 applications/schedview-snapshot/values-usdfdev.yaml diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml index f275de4c00..696e41ec6b 100644 --- a/applications/schedview-snapshot/Chart.yaml +++ b/applications/schedview-snapshot/Chart.yaml @@ -1,8 +1,9 @@ apiVersion: v2 -appVersion: 0.1.0 -description: Dashboard for examination of scheduler snapshots. 
+appVersion: u-neilsen-preops-4603 +description: Dashboard for examination of scheduler snapshots name: schedview-snapshot sources: -- https://github.com/lsst-sqre/schedview-snapshot +- https://github.com/lsst/schedview +home: https://schedview.lsst.io/ type: application version: 1.0.0 diff --git a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml index 545763140a..434d2b7db7 100644 --- a/applications/schedview-snapshot/templates/deployment.yaml +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -31,16 +31,22 @@ spec: readOnlyRootFilesystem: true image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BOKEH_ALLOW_WS_ORIGIN + value: {{ .Values.global.host }} ports: - name: "http" containerPort: 8080 protocol: "TCP" readinessProbe: httpGet: - path: "/" + path: "/schedview-snapshot" port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: tmp + mountPath: /tmp securityContext: runAsNonRoot: true runAsUser: 1000 @@ -57,3 +63,11 @@ spec: tolerations: {{- toYaml . 
| nindent 8 }} {{- end }} + volumes: + - name: tmp + emptyDir: {} + command: + - /bin/bash + - -c + - micromamba run scheduler_dashboard --data_dir /home/mambauser/schedview/test_data + diff --git a/applications/schedview-snapshot/templates/ingress.yaml b/applications/schedview-snapshot/templates/ingress.yaml index 915dd71730..b6cb8ae716 100644 --- a/applications/schedview-snapshot/templates/ingress.yaml +++ b/applications/schedview-snapshot/templates/ingress.yaml @@ -8,7 +8,7 @@ config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: all: - - "read:image" + - "exec:portal" loginRedirect: true template: metadata: diff --git a/applications/schedview-snapshot/values-usdfdev.yaml b/applications/schedview-snapshot/values-usdfdev.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/schedview-snapshot/values.yaml b/applications/schedview-snapshot/values.yaml index 66ad86ebc4..1cf7fabb8e 100644 --- a/applications/schedview-snapshot/values.yaml +++ b/applications/schedview-snapshot/values.yaml @@ -7,10 +7,10 @@ replicaCount: 1 image: # -- Image to use in the schedview-snapshot deployment - repository: "ghcr.io/lsst-sqre/schedview-snapshot" + repository: "ghcr.io/lsst/schedview" # -- Pull policy for the schedview-snapshot image - pullPolicy: "IfNotPresent" + pullPolicy: "Always" # -- Overrides the image tag whose default is the chart appVersion. tag: "" diff --git a/docs/applications/schedview-snapshot/index.rst b/docs/applications/schedview-snapshot/index.rst index 8084232007..79ed7a6bc1 100644 --- a/docs/applications/schedview-snapshot/index.rst +++ b/docs/applications/schedview-snapshot/index.rst @@ -4,6 +4,10 @@ schedview-snapshot — Dashboard for examination of scheduler snapshots. ###################################################################### +schedview's pre-night dashboard is a web application for examination of +Rubin Observatory/LSST scheduler snapshots, as stored (for example) during +observing. + .. 
jinja:: schedview-snapshot :file: applications/_summary.rst.jinja diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 0c33540aa3..0670dd6e84 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -23,6 +23,7 @@ applications: rubintv: true sasquatch: true schedview-prenight: true + schedview-snapshot: true semaphore: true siav2: true ssotap: true From c94cc040fbf45e767624eb5649773efb01ef7569 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Tue, 28 Nov 2023 10:20:55 -0800 Subject: [PATCH 291/588] pre-commit fiddles --- applications/schedview-snapshot/README.md | 31 +++++++++++++++++++++++ environments/README.md | 1 + 2 files changed, 32 insertions(+) create mode 100644 applications/schedview-snapshot/README.md diff --git a/applications/schedview-snapshot/README.md b/applications/schedview-snapshot/README.md new file mode 100644 index 0000000000..7cee94076a --- /dev/null +++ b/applications/schedview-snapshot/README.md @@ -0,0 +1,31 @@ +# schedview-snapshot + +Dashboard for examination of scheduler snapshots + +**Homepage:** + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the schedview-snapshot deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of schedview-snapshot deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of schedview-snapshot deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of schedview-snapshot deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of schedview-snapshot deployment pods | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"Always"` | 
Pull policy for the schedview-snapshot image | +| image.repository | string | `"ghcr.io/lsst/schedview"` | Image to use in the schedview-snapshot deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nodeSelector | object | `{}` | Node selection rules for the schedview-snapshot deployment pod | +| podAnnotations | object | `{}` | Annotations for the schedview-snapshot deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the schedview-snapshot deployment pod | +| tolerations | list | `[]` | Tolerations for the schedview-snapshot deployment pod | diff --git a/environments/README.md b/environments/README.md index 752e8f500e..889fee61d9 100644 --- a/environments/README.md +++ b/environments/README.md @@ -41,6 +41,7 @@ | applications.rubintv | bool | `false` | Enable the rubintv application | | applications.sasquatch | bool | `false` | Enable the sasquatch application | | applications.schedview-prenight | bool | `false` | Enable the schedview-prenight application | +| applications.schedview-snapshot | bool | `false` | Enable the schedview-snapshot application | | applications.semaphore | bool | `false` | Enable the semaphore application | | applications.sherlock | bool | `false` | Enable the sherlock application | | applications.siav2 | bool | `false` | Enable the siav2 application | From ec313c7def20d3557ade305619532c0a6a7444e4 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Tue, 28 Nov 2023 10:32:04 -0800 Subject: [PATCH 292/588] add app to index.rst --- docs/applications/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 30071b779d..05dbd070ef 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -59,6 +59,7 @@ To learn how to 
develop applications for Phalanx, see the :doc:`/developers/inde rubintv/index sasquatch/index schedview-prenight/index + schedview-snapshot/index strimzi/index strimzi-access-operator/index telegraf/index From 274e8564ae32110c35e93eaca52c17cf88d7f7d5 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 08:22:31 -0800 Subject: [PATCH 293/588] update container version --- applications/schedview-snapshot/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml index 696e41ec6b..ae32e4b045 100644 --- a/applications/schedview-snapshot/Chart.yaml +++ b/applications/schedview-snapshot/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: u-neilsen-preops-4603 +appVersion: tickets-preops-4603 description: Dashboard for examination of scheduler snapshots name: schedview-snapshot sources: From 2fd8c38da625999eee8482cd4d1694afe24a0d61 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 08:31:44 -0800 Subject: [PATCH 294/588] change schedview container branch to PREOPS-4606 --- applications/schedview-snapshot/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml index ae32e4b045..42905d3b7f 100644 --- a/applications/schedview-snapshot/Chart.yaml +++ b/applications/schedview-snapshot/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: tickets-preops-4603 +appVersion: tickets-preops-4606 description: Dashboard for examination of scheduler snapshots name: schedview-snapshot sources: From 12ae0719103bd32e7fa4846c9fc3139541b559b7 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 08:40:41 -0800 Subject: [PATCH 295/588] move "command" under "containers" --- .../schedview-snapshot/templates/deployment.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git 
a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml index 434d2b7db7..20ee5c4ba8 100644 --- a/applications/schedview-snapshot/templates/deployment.yaml +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -47,6 +47,10 @@ spec: volumeMounts: - name: tmp mountPath: /tmp + command: + - /bin/bash + - -c + - micromamba run scheduler_dashboard --data_dir /home/mambauser/schedview/test_data securityContext: runAsNonRoot: true runAsUser: 1000 @@ -66,8 +70,5 @@ spec: volumes: - name: tmp emptyDir: {} - command: - - /bin/bash - - -c - - micromamba run scheduler_dashboard --data_dir /home/mambauser/schedview/test_data + From 57b0a3425015842489cff509c3c679a0531aae1e Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 08:56:25 -0800 Subject: [PATCH 296/588] add cache dir needed by mamba --- applications/schedview-snapshot/templates/deployment.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml index 20ee5c4ba8..f6eff6ab4a 100644 --- a/applications/schedview-snapshot/templates/deployment.yaml +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -47,6 +47,8 @@ spec: volumeMounts: - name: tmp mountPath: /tmp + - name: slashdatcache + mountPath: /.cache command: - /bin/bash - -c @@ -70,5 +72,5 @@ spec: volumes: - name: tmp emptyDir: {} - - + - name: slashdatcache + emptyDir: {} From d9ca5a0d7a54efdc131a085bfa7aa21c3c9f8d08 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 09:07:26 -0800 Subject: [PATCH 297/588] change container version for schedview-snapshot --- applications/schedview-snapshot/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml index 42905d3b7f..ae32e4b045 100644 --- 
a/applications/schedview-snapshot/Chart.yaml +++ b/applications/schedview-snapshot/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: tickets-preops-4606 +appVersion: tickets-preops-4603 description: Dashboard for examination of scheduler snapshots name: schedview-snapshot sources: From c38548935ec8dc27759abd479bad3950558d128c Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 10:02:59 -0800 Subject: [PATCH 298/588] pre-commit fixes --- applications/schedview-snapshot/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml index f6eff6ab4a..4c71d3da8f 100644 --- a/applications/schedview-snapshot/templates/deployment.yaml +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -48,7 +48,7 @@ spec: - name: tmp mountPath: /tmp - name: slashdatcache - mountPath: /.cache + mountPath: /.cache command: - /bin/bash - -c From c1b7fad9d3ede13e864f0581cced8ee6ae0fc7c6 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 10:55:19 -0800 Subject: [PATCH 299/588] move branch tag to values yaml --- applications/schedview-prenight/Chart.yaml | 2 +- applications/schedview-prenight/values-usdfdev.yaml | 3 +++ applications/schedview-snapshot/Chart.yaml | 2 +- applications/schedview-snapshot/values-usdfdev.yaml | 3 +++ 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/applications/schedview-prenight/Chart.yaml b/applications/schedview-prenight/Chart.yaml index c3c37afbbc..c55978fef1 100644 --- a/applications/schedview-prenight/Chart.yaml +++ b/applications/schedview-prenight/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: tickets-PREOPS-4508 +appVersion: v0.10.0 description: Run the schedview pre-night briefing dashboard. 
name: schedview-prenight sources: diff --git a/applications/schedview-prenight/values-usdfdev.yaml b/applications/schedview-prenight/values-usdfdev.yaml index e69de29bb2..947a58d46f 100644 --- a/applications/schedview-prenight/values-usdfdev.yaml +++ b/applications/schedview-prenight/values-usdfdev.yaml @@ -0,0 +1,3 @@ +image: + # -- Overrides the image tag whose default is the chart appVersion. + tag: "tickets-preops-4603" \ No newline at end of file diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml index ae32e4b045..b3c7307acf 100644 --- a/applications/schedview-snapshot/Chart.yaml +++ b/applications/schedview-snapshot/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: tickets-preops-4603 +appVersion: v0.10.0 description: Dashboard for examination of scheduler snapshots name: schedview-snapshot sources: diff --git a/applications/schedview-snapshot/values-usdfdev.yaml b/applications/schedview-snapshot/values-usdfdev.yaml index e69de29bb2..947a58d46f 100644 --- a/applications/schedview-snapshot/values-usdfdev.yaml +++ b/applications/schedview-snapshot/values-usdfdev.yaml @@ -0,0 +1,3 @@ +image: + # -- Overrides the image tag whose default is the chart appVersion. 
+ tag: "tickets-preops-4603" \ No newline at end of file From 30adddbed66d2acec2610e378dc1ba2a77ff216f Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 11:02:11 -0800 Subject: [PATCH 300/588] cleanup for yamllint --- applications/schedview-prenight/values-usdfdev.yaml | 2 +- applications/schedview-snapshot/values-usdfdev.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/schedview-prenight/values-usdfdev.yaml b/applications/schedview-prenight/values-usdfdev.yaml index 947a58d46f..b030121701 100644 --- a/applications/schedview-prenight/values-usdfdev.yaml +++ b/applications/schedview-prenight/values-usdfdev.yaml @@ -1,3 +1,3 @@ image: # -- Overrides the image tag whose default is the chart appVersion. - tag: "tickets-preops-4603" \ No newline at end of file + tag: "tickets-preops-4603" diff --git a/applications/schedview-snapshot/values-usdfdev.yaml b/applications/schedview-snapshot/values-usdfdev.yaml index 947a58d46f..b030121701 100644 --- a/applications/schedview-snapshot/values-usdfdev.yaml +++ b/applications/schedview-snapshot/values-usdfdev.yaml @@ -1,3 +1,3 @@ image: # -- Overrides the image tag whose default is the chart appVersion. 
- tag: "tickets-preops-4603" \ No newline at end of file + tag: "tickets-preops-4603" From 9ea9abcb23df4a435c3f6a3f96d1c9a08e95dc6c Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 11:24:15 -0800 Subject: [PATCH 301/588] fix prenight readiness probe url --- applications/schedview-prenight/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-prenight/templates/deployment.yaml b/applications/schedview-prenight/templates/deployment.yaml index 573e937939..eafc4a616f 100644 --- a/applications/schedview-prenight/templates/deployment.yaml +++ b/applications/schedview-prenight/templates/deployment.yaml @@ -37,7 +37,7 @@ spec: protocol: "TCP" readinessProbe: httpGet: - path: "/" + path: "/schedview-prenight" port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} From 21a2a95719e0d9a11eda14666d354a6dc770a5c2 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 11:46:51 -0800 Subject: [PATCH 302/588] set pull policy to always for schedview-prenight --- applications/schedview-prenight/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-prenight/values.yaml b/applications/schedview-prenight/values.yaml index 63afd2ce47..b49993951d 100644 --- a/applications/schedview-prenight/values.yaml +++ b/applications/schedview-prenight/values.yaml @@ -10,7 +10,7 @@ image: repository: "ghcr.io/lsst/schedview" # -- Pull policy for the schedview-prenight image - pullPolicy: "IfNotPresent" + pullPolicy: "Always" # -- Overrides the image tag whose default is the chart appVersion. 
tag: "" From 70f042911c97192a47d6c7f8f177b354c6b40f0e Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 11:47:15 -0800 Subject: [PATCH 303/588] set BOKEH_ALLOW_WS_ORIGIN for schedview prenight --- applications/schedview-prenight/templates/deployment.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/schedview-prenight/templates/deployment.yaml b/applications/schedview-prenight/templates/deployment.yaml index eafc4a616f..3bd8261d49 100644 --- a/applications/schedview-prenight/templates/deployment.yaml +++ b/applications/schedview-prenight/templates/deployment.yaml @@ -31,6 +31,9 @@ spec: readOnlyRootFilesystem: true image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: BOKEH_ALLOW_WS_ORIGIN + value: {{ .Values.global.host }} ports: - name: "http" containerPort: 8080 From b093612eae87c8d2a1dc086bb2453a6abe62c65c Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 11:49:24 -0800 Subject: [PATCH 304/588] fix lint --- applications/schedview-prenight/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-prenight/README.md b/applications/schedview-prenight/README.md index fd4d975cea..d32b3999f1 100644 --- a/applications/schedview-prenight/README.md +++ b/applications/schedview-prenight/README.md @@ -20,7 +20,7 @@ Run the schedview pre-night briefing dashboard. 
| global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the schedview-prenight image | +| image.pullPolicy | string | `"Always"` | Pull policy for the schedview-prenight image | | image.repository | string | `"ghcr.io/lsst/schedview"` | Image to use in the schedview-prenight deployment | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | From 33ba232d56a2a803d4ea7c5b09f2045701da4294 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 29 Nov 2023 12:25:18 -0800 Subject: [PATCH 305/588] remove schedview-prenight from minikube --- environments/values-minikube.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index 595ce8d38f..aea3579661 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -15,4 +15,3 @@ applications: mobu: true postgres: true squareone: true - schedview-prenight: true From 3d2b4419d16cff4e7bdc0a7045e304b8f9a01592 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 29 Nov 2023 15:53:51 -0700 Subject: [PATCH 306/588] Update secrets and their path --- applications/nublado/values-base.yaml | 7 ++++++- applications/nublado/values-summit.yaml | 7 ++++++- applications/nublado/values-tucson-teststand.yaml | 7 ++++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 05ab4aaba7..193b3d88c3 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -20,7 +20,7 @@ controller: LSST_DDS_INTERFACE: "net1" LSST_DDS_PARTITION_PREFIX: "base" LSST_SITE: "base" - 
PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - name: "initdir" @@ -33,6 +33,11 @@ controller: serverPath: "/rsphome" server: "nfs-rsphome.ls.lsst.org" type: "nfs" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" volumes: - containerPath: "/home" mode: "rw" diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 6c225d716c..a5dc72b4b2 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -19,7 +19,7 @@ controller: LSST_DDS_INTERFACE: "net1" LSST_DDS_PARTITION_PREFIX: "summit" LSST_SITE: "summit" - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - name: "initdir" @@ -32,6 +32,11 @@ controller: serverPath: "/jhome" server: "nfs1.cp.lsst.org" type: "nfs" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" volumes: - containerPath: "/home" mode: "rw" diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index df1bafd98a..4cde29af6e 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -19,7 +19,7 @@ controller: LSST_DDS_INTERFACE: net1 LSST_DDS_PARTITION_PREFIX: tucson LSST_SITE: tucson - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - name: "initdir" @@ -32,6 +32,11 @@ controller: serverPath: "/jhome" 
server: "nfs-jhome.tu.lsst.org" type: "nfs" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" volumes: - containerPath: "/home" mode: "rw" From 244a78c08057aafc658c562262c6a87b3d0dd70f Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 29 Nov 2023 16:14:17 -0700 Subject: [PATCH 307/588] remove unused aws credentials --- applications/nublado/values-base.yaml | 2 -- applications/nublado/values-summit.yaml | 2 -- applications/nublado/values-tucson-teststand.yaml | 2 -- 3 files changed, 6 deletions(-) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 193b3d88c3..d026a96805 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -34,8 +34,6 @@ controller: server: "nfs-rsphome.ls.lsst.org" type: "nfs" secrets: - - secretName: "nublado-lab-secret" - secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index a5dc72b4b2..1bfd84b15b 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -33,8 +33,6 @@ controller: server: "nfs1.cp.lsst.org" type: "nfs" secrets: - - secretName: "nublado-lab-secret" - secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 4cde29af6e..5392070d26 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -33,8 +33,6 @@ controller: server: "nfs-jhome.tu.lsst.org" type: "nfs" secrets: - - secretName: "nublado-lab-secret" - secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: 
"postgres-credentials.txt" volumes: From 72aa59bd515604c5b2f25e6433f53fff037093e1 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Thu, 30 Nov 2023 13:59:59 -0300 Subject: [PATCH 308/588] Update image repository to point to new ghcr `lsst-ts` space --- applications/rubintv/README.md | 4 ++-- applications/rubintv/values.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/rubintv/README.md b/applications/rubintv/README.md index 16cae3fda9..ce3a6ff28d 100644 --- a/applications/rubintv/README.md +++ b/applications/rubintv/README.md @@ -12,9 +12,9 @@ Real-time display front end |-----|------|---------|-------------| | frontend.affinity | object | `{}` | Affinity rules for the rubintv frontend pod | | frontend.debug | bool | `false` | If set to true, enable more verbose logging. | -| frontend.image | object | `{"pullPolicy":"IfNotPresent","repository":"ghcr.io/lsst-sqre/rubintv","tag":""}` | Settings for rubintv OCI image | +| frontend.image | object | `{"pullPolicy":"IfNotPresent","repository":"ghcr.io/lsst-ts/rubintv","tag":""}` | Settings for rubintv OCI image | | frontend.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the rubintv image | -| frontend.image.repository | string | `"ghcr.io/lsst-sqre/rubintv"` | rubintv frontend image to use | +| frontend.image.repository | string | `"ghcr.io/lsst-ts/rubintv"` | rubintv frontend image to use | | frontend.image.tag | string | The appVersion of the chart | Tag of rubintv image to use | | frontend.nodeSelector | object | `{}` | Node selector rules for the rubintv frontend pod | | frontend.pathPrefix | string | `"/rubintv"` | Prefix for rubintv's frontend API routes. 
| diff --git a/applications/rubintv/values.yaml b/applications/rubintv/values.yaml index 4fc58e1983..8225d1a40a 100644 --- a/applications/rubintv/values.yaml +++ b/applications/rubintv/values.yaml @@ -25,7 +25,7 @@ frontend: # -- Settings for rubintv OCI image image: # -- rubintv frontend image to use - repository: "ghcr.io/lsst-sqre/rubintv" + repository: "ghcr.io/lsst-ts/rubintv" # -- Pull policy for the rubintv image pullPolicy: "IfNotPresent" From 4cf671b3d1c9a5cab5015eb69f958d5c70ecda10 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 30 Nov 2023 13:32:10 -0700 Subject: [PATCH 309/588] Make ro/rw independently scalable, move to better image --- applications/giftless/README.md | 10 ++++++++-- .../giftless/templates/deployment.yaml | 12 +++++------ .../giftless/values-roundtable-dev.yaml | 2 +- .../giftless/values-roundtable-prod.yaml | 4 +++- applications/giftless/values.yaml | 20 +++++++++++++++---- 5 files changed, 34 insertions(+), 14 deletions(-) diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 04b608d874..c228c423dc 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -30,6 +30,12 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | podAnnotations | object | `{}` | Annotations for the giftless frontend pod | | resources | object | `{}` | Resource limits and requests for the giftless frontend pod | | server.debug | bool | `false` | Turn on debugging mode | -| server.processes | int | `2` | Number of processes for server | -| server.threads | int | `2` | Number of threads per process | +| server.readonly | object | `{"processes":2,"replicas":1,"threads":2}` | Values for readonly server | +| server.readonly.processes | int | `2` | Number of processes for readonly server | +| server.readonly.replicas | int | `1` | Number of replicas for readonly server | +| server.readonly.threads | int | `2` | Number of threads per readonly process | +| server.readwrite | object | 
`{"processes":2,"replicas":1,"threads":2}` | Values for readwrite server | +| server.readwrite.processes | int | `2` | Number of processes for readwrite server | +| server.readwrite.replicas | int | `1` | Number of replicas for readwrite server | +| server.readwrite.threads | int | `2` | Number of threads per readwrite process | | tolerations | list | `[]` | Tolerations for the giftless frontend pod | diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index 93f9a86bfd..96ab1977fc 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -15,7 +15,7 @@ metadata: labels: {{- include "giftless.labels" . | nindent 4 }} spec: - replicas: 1 + replicas: {{ .Values.server.readonly.replicas }} selector: matchLabels: {{- include "giftless.selectorLabels" . | nindent 6 }} @@ -42,9 +42,9 @@ spec: - "-T" - "--die-on-term" - "--threads" - - "{{- .Values.server.threads }}" + - "{{- .Values.server.readonly.threads }}" - "-p" - - "{{- .Values.server.processes }}" + - "{{- .Values.server.readonly.processes }}" - "--manage-script-name" - "--callable" - "app" @@ -106,7 +106,7 @@ metadata: labels: {{- include "giftless-rw.labels" . | nindent 4 }} spec: - replicas: 1 + replicas: {{ .Values.server.readwrite.replicas }} selector: matchLabels: {{- include "giftless-rw.selectorLabels" . 
| nindent 6 }} @@ -133,9 +133,9 @@ spec: - "-T" - "--die-on-term" - "--threads" - - "{{- .Values.server.threads }}" + - "{{- .Values.server.readwrite.threads }}" - "-p" - - "{{- .Values.server.processes }}" + - "{{- .Values.server.readwrite.processes }}" - "--manage-script-name" - "--callable" - "app" diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 5397bebba3..7bd41a2b77 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "ajt-dev" + tag: "upstream-master" server: debug: true ingress: diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index 79012d5065..d33d08f5ea 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -1,9 +1,11 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "ajt-dev" + tag: "upstream-master" server: debug: true + readonly: + replicas: 3 ingress: hostname: readonly: "git-lfs.lsst.cloud" diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index e930cf1b82..c1edd2c65b 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -48,10 +48,22 @@ ingress: server: # -- Turn on debugging mode debug: false - # -- Number of processes for server - processes: 2 - # -- Number of threads per process - threads: 2 + # -- Values for readonly server + readonly: + # -- Number of replicas for readonly server + replicas: 1 + # -- Number of processes for readonly server + processes: 2 + # -- Number of threads per readonly process + threads: 2 + # -- Values for readwrite server + readwrite: + # -- Number of replicas for readwrite server + replicas: 1 + # -- Number of processes for readwrite server + processes: 2 + # -- Number 
of threads per readwrite process + threads: 2 # -- Configuration for giftless server config: From 3a7a2d6c8fadf405a5450a37031f34f028cc3c74 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 30 Nov 2023 16:11:20 -0500 Subject: [PATCH 310/588] Build docs on argocd environment values changes The argocd environment values.yaml files are a component of the documentation inputs for environments. --- .github/workflows/docs.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index e524d4d984..d2a398ca3c 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -39,6 +39,7 @@ jobs: - "docs/**" - "applications/*/Chart.yaml" - "applications/*/values.yaml" + - "applications/argocd/values-*.yaml" - "applications/gafaelfawr/values-*.yaml" - "environments/values-*.yaml" - "src/phalanx/**" From 298ba36244993dc352d89ba53e9ea7dc43ba8e5b Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 30 Nov 2023 17:35:15 -0500 Subject: [PATCH 311/588] Adjust conditions on uploading docs Now on pull requests, only upload the docs if content in the docs/ directory itself changes. This saves us CI time for changes that incidentally change the docs; we still CI that the docs will build and the docs themselves will update on the final merge to main. In re-working the conditional, I've also found that we don't have a good overal strategy for tags; we'd need to generally build the docs for any tag. I've dropped the "tags" from the "on" clause. But if we decide to start tagging Phalanx we'd want to revisit that. 
--- .github/workflows/docs.yaml | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index d2a398ca3c..e62632e811 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -14,8 +14,6 @@ name: Docs - "renovate/**" - "tickets/**" - "u/**" - tags: - - "*" workflow_dispatch: {} jobs: @@ -43,6 +41,8 @@ jobs: - "applications/gafaelfawr/values-*.yaml" - "environments/values-*.yaml" - "src/phalanx/**" + docsSpecific: + - "docs/**" - name: Install graphviz if: steps.filter.outputs.docs == 'true' @@ -55,10 +55,10 @@ jobs: python-version: "3.11" tox-envs: docs - # Only attempt documentation uploads for tagged releases and pull - # requests from ticket branches in the same repository. This avoids - # version clutter in the docs and failures when a PR doesn't have access - # to secrets. + # Upload docs: + # - on pushes to main if *any* documentation content might have changed + # - on workflow dispatches if any documentation content might have changed + # - on pushes to tickets/ branches if docs/ directory content changed - name: Upload to LSST the Docs uses: lsst-sqre/ltd-upload@v1 with: @@ -67,7 +67,6 @@ jobs: username: ${{ secrets.LTD_USERNAME }} password: ${{ secrets.LTD_PASSWORD }} if: >- - steps.filter.outputs.docs == 'true' - && github.event_name != 'merge_group' - && (github.event_name != 'pull_request' - || startsWith(github.head_ref, 'tickets/')) + (github.event_name == 'push' && github.head_ref == 'main' && steps.filter.outputs.docs == 'true') + || (github.event_name == 'workflow_dispatch' && steps.filter.outputs.docs == 'true') + || (github.event_name == 'pull_request' && startsWith(github.head_ref, 'tickets/') && steps.filter.outputs.docSpecific == 'true') From 8c4c7ffdcaff39e9fa9d12b4ea01019331699bab Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 30 Nov 2023 15:27:04 -0800 Subject: [PATCH 312/588] Also upgrade pip on make init Using an old 
version of pip produces a warning, and there doesn't seem to be a reason not to always upgrade it on make init. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 5020c0ccf4..dd81ada03d 100644 --- a/Makefile +++ b/Makefile @@ -15,11 +15,11 @@ clean: .PHONY: init init: + pip install --upgrade pip pre-commit tox + pre-commit install pip install --editable . pip install --upgrade -r requirements/main.txt -r requirements/dev.txt rm -rf .tox - pip install --upgrade pre-commit tox - pre-commit install # This is defined as a Makefile target instead of only a tox command because # if the command fails we want to cat output.txt, which contains the From c011637e05435fd945af786ae62f0ddc87378471 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 30 Nov 2023 15:27:54 -0800 Subject: [PATCH 313/588] Add secrets configuration for linters The linters application was missing a secret configuration. Add one. --- applications/linters/secrets.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 applications/linters/secrets.yaml diff --git a/applications/linters/secrets.yaml b/applications/linters/secrets.yaml new file mode 100644 index 0000000000..cf3da81fc7 --- /dev/null +++ b/applications/linters/secrets.yaml @@ -0,0 +1,12 @@ +aws: + description: >- + Shell commands to set the environment variables required for + authentication to AWS. + onepassword: + encoded: true +slack: + description: >- + Shell commands to set the environment variable pointing to a Slack + incoming webhook for reporting status. 
+ onepassword: + encoded: true From b09bd9a6679b11d96277f4f65e3af9fd556b43db Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:28:14 -0800 Subject: [PATCH 314/588] pin new recommended --- applications/nublado/values-usdfdev.yaml | 2 +- applications/nublado/values-usdfint.yaml | 1 + applications/nublado/values-usdfprod.yaml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index ea70b8ec7d..9c2cfd95c6 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -12,7 +12,7 @@ controller: registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" recommendedTag: "recommended" - pin: ["w_2023_37"] + pin: ["w_2023_47"] numReleases: 1 numWeeklies: 2 numDailies: 3 diff --git a/applications/nublado/values-usdfint.yaml b/applications/nublado/values-usdfint.yaml index 66c9726d34..9c2cfd95c6 100644 --- a/applications/nublado/values-usdfint.yaml +++ b/applications/nublado/values-usdfint.yaml @@ -12,6 +12,7 @@ controller: registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" recommendedTag: "recommended" + pin: ["w_2023_47"] numReleases: 1 numWeeklies: 2 numDailies: 3 diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index e5e31c34e3..b0836ffa7b 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -12,7 +12,7 @@ controller: registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" recommendedTag: "recommended" - pin: ["w_2023_37"] + pin: ["w_2023_47"] numReleases: 1 numWeeklies: 2 numDailies: 3 From d51dfc93eb9771d8d656227bf8a1f36b2e0250f0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 30 Nov 2023 17:09:24 -0800 Subject: [PATCH 315/588] Update Python dependencies Picks up a security fix for the 
cryptography library. --- requirements/dev.txt | 212 +++++++++++++++++++++--------------------- requirements/main.txt | 88 +++++++++--------- 2 files changed, 148 insertions(+), 152 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 3b23dcb1b6..be9fd57322 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -278,9 +278,9 @@ jsonschema==4.20.0 \ --hash=sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa \ --hash=sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3 # via sphinxcontrib-redoc -jsonschema-specifications==2023.11.1 \ - --hash=sha256:c9b234904ffe02f079bf91b14d79987faa685fd4b39c377a0996954c0090b9ca \ - --hash=sha256:f596778ab612b3fd29f72ea0d990393d0540a5aab18bf0407a46632eab540779 +jsonschema-specifications==2023.11.2 \ + --hash=sha256:9472fc4fea474cd74bea4a2b190daeccb5a9e4db2ea80efcf7a1b582fc9a81b8 \ + --hash=sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93 # via jsonschema latexcodec==2.0.1 \ --hash=sha256:2aa2551c373261cefe2ad3a8953a6d6533e68238d180eb4bb91d7964adb3fe9a \ @@ -650,9 +650,9 @@ pyyaml==6.0.1 \ # pre-commit # pybtex # sphinxcontrib-redoc -referencing==0.31.0 \ - --hash=sha256:381b11e53dd93babb55696c71cf42aef2d36b8a150c49bf0bc301e36d536c882 \ - --hash=sha256:cc28f2c88fbe7b961a7817a0abc034c09a1e36358f82fedb4ffdf29a25398863 +referencing==0.31.1 \ + --hash=sha256:81a1471c68c9d5e3831c30ad1dd9815c45b558e596653db751a2bfdd17b3b9ec \ + --hash=sha256:c19c4d006f1757e3dd75c4f784d38f8698d87b649c54f9ace14e5e8c9667c01d # via # jsonschema # jsonschema-specifications @@ -667,106 +667,106 @@ rich==13.7.0 \ --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \ --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235 # via pytest-pretty -rpds-py==0.13.1 \ - --hash=sha256:0290712eb5603a725769b5d857f7cf15cf6ca93dda3128065bbafe6fdb709beb \ - --hash=sha256:032c242a595629aacace44128f9795110513ad27217b091e834edec2fb09e800 \ 
- --hash=sha256:08832078767545c5ee12561ce980714e1e4c6619b5b1e9a10248de60cddfa1fd \ - --hash=sha256:08b335fb0c45f0a9e2478a9ece6a1bfb00b6f4c4780f9be3cf36479c5d8dd374 \ - --hash=sha256:0b70c1f800059c92479dc94dda41288fd6607f741f9b1b8f89a21a86428f6383 \ - --hash=sha256:0d9f8930092558fd15c9e07198625efb698f7cc00b3dc311c83eeec2540226a8 \ - --hash=sha256:181ee352691c4434eb1c01802e9daa5edcc1007ff15023a320e2693fed6a661b \ - --hash=sha256:19f5aa7f5078d35ed8e344bcba40f35bc95f9176dddb33fc4f2084e04289fa63 \ - --hash=sha256:1a3b2583c86bbfbf417304eeb13400ce7f8725376dc7d3efbf35dc5d7052ad48 \ - --hash=sha256:1c9a1dc5e898ce30e2f9c0aa57181cddd4532b22b7780549441d6429d22d3b58 \ - --hash=sha256:1f36a1e80ef4ed1996445698fd91e0d3e54738bf597c9995118b92da537d7a28 \ - --hash=sha256:20147996376be452cd82cd6c17701daba69a849dc143270fa10fe067bb34562a \ - --hash=sha256:249c8e0055ca597707d71c5ad85fd2a1c8fdb99386a8c6c257e1b47b67a9bec1 \ - --hash=sha256:2647192facf63be9ed2d7a49ceb07efe01dc6cfb083bd2cc53c418437400cb99 \ - --hash=sha256:264f3a5906c62b9df3a00ad35f6da1987d321a053895bd85f9d5c708de5c0fbf \ - --hash=sha256:2abd669a39be69cdfe145927c7eb53a875b157740bf1e2d49e9619fc6f43362e \ - --hash=sha256:2b2415d5a7b7ee96aa3a54d4775c1fec140476a17ee12353806297e900eaeddc \ - --hash=sha256:2c173f529666bab8e3f948b74c6d91afa22ea147e6ebae49a48229d9020a47c4 \ - --hash=sha256:2da81c1492291c1a90987d76a47c7b2d310661bf7c93a9de0511e27b796a8b46 \ - --hash=sha256:2eca04a365be380ca1f8fa48b334462e19e3382c0bb7386444d8ca43aa01c481 \ - --hash=sha256:37b08df45f02ff1866043b95096cbe91ac99de05936dd09d6611987a82a3306a \ - --hash=sha256:37f79f4f1f06cc96151f4a187528c3fd4a7e1065538a4af9eb68c642365957f7 \ - --hash=sha256:3dd5fb7737224e1497c886fb3ca681c15d9c00c76171f53b3c3cc8d16ccfa7fb \ - --hash=sha256:3e3ac5b602fea378243f993d8b707189f9061e55ebb4e56cb9fdef8166060f28 \ - --hash=sha256:3f55ae773abd96b1de25fc5c3fb356f491bd19116f8f854ba705beffc1ddc3c5 \ - --hash=sha256:4011d5c854aa804c833331d38a2b6f6f2fe58a90c9f615afdb7aa7cf9d31f721 \ - 
--hash=sha256:4145172ab59b6c27695db6d78d040795f635cba732cead19c78cede74800949a \ - --hash=sha256:42b9535aa22ab023704cfc6533e968f7e420affe802d85e956d8a7b4c0b0b5ea \ - --hash=sha256:46a07a258bda12270de02b34c4884f200f864bba3dcd6e3a37fef36a168b859d \ - --hash=sha256:4f13d3f6585bd07657a603780e99beda96a36c86acaba841f131e81393958336 \ - --hash=sha256:528e2afaa56d815d2601b857644aeb395afe7e59212ab0659906dc29ae68d9a6 \ - --hash=sha256:545e94c84575057d3d5c62634611858dac859702b1519b6ffc58eca7fb1adfcf \ - --hash=sha256:577d40a72550eac1386b77b43836151cb61ff6700adacda2ad4d883ca5a0b6f2 \ - --hash=sha256:5967fa631d0ed9f8511dede08bc943a9727c949d05d1efac4ac82b2938024fb7 \ - --hash=sha256:5b769396eb358d6b55dbf78f3f7ca631ca1b2fe02136faad5af74f0111b4b6b7 \ - --hash=sha256:63c9e2794329ef070844ff9bfc012004aeddc0468dc26970953709723f76c8a5 \ - --hash=sha256:6574f619e8734140d96c59bfa8a6a6e7a3336820ccd1bfd95ffa610673b650a2 \ - --hash=sha256:6bfe72b249264cc1ff2f3629be240d7d2fdc778d9d298087cdec8524c91cd11f \ - --hash=sha256:736817dbbbd030a69a1faf5413a319976c9c8ba8cdcfa98c022d3b6b2e01eca6 \ - --hash=sha256:74a2044b870df7c9360bb3ce7e12f9ddf8e72e49cd3a353a1528cbf166ad2383 \ - --hash=sha256:74be3b215a5695690a0f1a9f68b1d1c93f8caad52e23242fcb8ba56aaf060281 \ - --hash=sha256:76a8374b294e4ccb39ccaf11d39a0537ed107534139c00b4393ca3b542cc66e5 \ - --hash=sha256:7ba239bb37663b2b4cd08e703e79e13321512dccd8e5f0e9451d9e53a6b8509a \ - --hash=sha256:7c40851b659d958c5245c1236e34f0d065cc53dca8d978b49a032c8e0adfda6e \ - --hash=sha256:7cf241dbb50ea71c2e628ab2a32b5bfcd36e199152fc44e5c1edb0b773f1583e \ - --hash=sha256:7cfae77da92a20f56cf89739a557b76e5c6edc094f6ad5c090b9e15fbbfcd1a4 \ - --hash=sha256:7d152ec7bb431040af2500e01436c9aa0d993f243346f0594a15755016bf0be1 \ - --hash=sha256:80080972e1d000ad0341c7cc58b6855c80bd887675f92871221451d13a975072 \ - --hash=sha256:82dbcd6463e580bcfb7561cece35046aaabeac5a9ddb775020160b14e6c58a5d \ - --hash=sha256:8308a8d49d1354278d5c068c888a58d7158a419b2e4d87c7839ed3641498790c \ - 
--hash=sha256:839676475ac2ccd1532d36af3d10d290a2ca149b702ed464131e450a767550df \ - --hash=sha256:83feb0f682d75a09ddc11aa37ba5c07dd9b824b22915207f6176ea458474ff75 \ - --hash=sha256:88956c993a20201744282362e3fd30962a9d86dc4f1dcf2bdb31fab27821b61f \ - --hash=sha256:8a6ad8429340e0a4de89353447c6441329def3632e7b2293a7d6e873217d3c2b \ - --hash=sha256:8ba9fbc5d6e36bfeb5292530321cc56c4ef3f98048647fabd8f57543c34174ec \ - --hash=sha256:8c1f6c8df23be165eb0cb78f305483d00c6827a191e3a38394c658d5b9c80bbd \ - --hash=sha256:91276caef95556faeb4b8f09fe4439670d3d6206fee78d47ddb6e6de837f0b4d \ - --hash=sha256:960e7e460fda2d0af18c75585bbe0c99f90b8f09963844618a621b804f8c3abe \ - --hash=sha256:9656a09653b18b80764647d585750df2dff8928e03a706763ab40ec8c4872acc \ - --hash=sha256:9cd935c0220d012a27c20135c140f9cdcbc6249d5954345c81bfb714071b985c \ - --hash=sha256:a2b3c79586636f1fa69a7bd59c87c15fca80c0d34b5c003d57f2f326e5276575 \ - --hash=sha256:a4b9d3f5c48bbe8d9e3758e498b3c34863f2c9b1ac57a4e6310183740e59c980 \ - --hash=sha256:a8c2bf286e5d755a075e5e97ba56b3de08cccdad6b323ab0b21cc98875176b03 \ - --hash=sha256:a90031658805c63fe488f8e9e7a88b260ea121ba3ee9cdabcece9c9ddb50da39 \ - --hash=sha256:ad666a904212aa9a6c77da7dce9d5170008cda76b7776e6731928b3f8a0d40fa \ - --hash=sha256:af2d1648eb625a460eee07d3e1ea3a4a6e84a1fb3a107f6a8e95ac19f7dcce67 \ - --hash=sha256:b3d4b390ee70ca9263b331ccfaf9819ee20e90dfd0201a295e23eb64a005dbef \ - --hash=sha256:ba4432301ad7eeb1b00848cf46fae0e5fecfd18a8cb5fdcf856c67985f79ecc7 \ - --hash=sha256:bc3179e0815827cf963e634095ae5715ee73a5af61defbc8d6ca79f1bdae1d1d \ - --hash=sha256:c5fd099acaee2325f01281a130a39da08d885e4dedf01b84bf156ec2737d78fe \ - --hash=sha256:c797ea56f36c6f248656f0223b11307fdf4a1886f3555eba371f34152b07677f \ - --hash=sha256:cd4ea56c9542ad0091dfdef3e8572ae7a746e1e91eb56c9e08b8d0808b40f1d1 \ - --hash=sha256:cdd6f8738e1f1d9df5b1603bb03cb30e442710e5672262b95d0f9fcb4edb0dab \ - --hash=sha256:d0580faeb9def6d0beb7aa666294d5604e569c4e24111ada423cf9936768d95c \ - 
--hash=sha256:d11afdc5992bbd7af60ed5eb519873690d921425299f51d80aa3099ed49f2bcc \ - --hash=sha256:d1d388d2f5f5a6065cf83c54dd12112b7389095669ff395e632003ae8999c6b8 \ - --hash=sha256:d20da6b4c7aa9ee75ad0730beaba15d65157f5beeaca54a038bb968f92bf3ce3 \ - --hash=sha256:d22e0660de24bd8e9ac82f4230a22a5fe4e397265709289d61d5fb333839ba50 \ - --hash=sha256:d22f2cb82e0b40e427a74a93c9a4231335bbc548aed79955dde0b64ea7f88146 \ - --hash=sha256:d4fa1eeb9bea6d9b64ac91ec51ee94cc4fc744955df5be393e1c923c920db2b0 \ - --hash=sha256:d9793d46d3e6522ae58e9321032827c9c0df1e56cbe5d3de965facb311aed6aa \ - --hash=sha256:dab979662da1c9fbb464e310c0b06cb5f1d174d09a462553af78f0bfb3e01920 \ - --hash=sha256:db8d0f0ad92f74feb61c4e4a71f1d573ef37c22ef4dc19cab93e501bfdad8cbd \ - --hash=sha256:df2af1180b8eeececf4f819d22cc0668bfadadfd038b19a90bd2fb2ee419ec6f \ - --hash=sha256:dfb5d2ab183c0efe5e7b8917e4eaa2e837aacafad8a69b89aa6bc81550eed857 \ - --hash=sha256:e04f8c76b8d5c70695b4e8f1d0b391d8ef91df00ef488c6c1ffb910176459bc6 \ - --hash=sha256:e4a45ba34f904062c63049a760790c6a2fa7a4cc4bd160d8af243b12371aaa05 \ - --hash=sha256:e9be1f7c5f9673616f875299339984da9447a40e3aea927750c843d6e5e2e029 \ - --hash=sha256:edc91c50e17f5cd945d821f0f1af830522dba0c10267c3aab186dc3dbaab8def \ - --hash=sha256:ee70ee5f4144a45a9e6169000b5b525d82673d5dab9f7587eccc92794814e7ac \ - --hash=sha256:f1059ca9a51c936c9a8d46fbc2c9a6b4c15ab3f13a97f1ad32f024b39666ba85 \ - --hash=sha256:f47eef55297799956464efc00c74ae55c48a7b68236856d56183fe1ddf866205 \ - --hash=sha256:f4ae6f423cb7d1c6256b7482025ace2825728f53b7ac58bcd574de6ee9d242c2 \ - --hash=sha256:f4b15a163448ec79241fb2f1bc5a8ae1a4a304f7a48d948d208a2935b26bf8a5 \ - --hash=sha256:f55601fb58f92e4f4f1d05d80c24cb77505dc42103ddfd63ddfdc51d3da46fa2 \ - --hash=sha256:fa84bbe22ffa108f91631935c28a623001e335d66e393438258501e618fb0dde \ - --hash=sha256:faa12a9f34671a30ea6bb027f04ec4e1fb8fa3fb3ed030893e729d4d0f3a9791 \ - --hash=sha256:fcfd5f91b882eedf8d9601bd21261d6ce0e61a8c66a7152d1f5df08d3f643ab1 \ - 
--hash=sha256:fe30ef31172bdcf946502a945faad110e8fff88c32c4bec9a593df0280e64d8a +rpds-py==0.13.2 \ + --hash=sha256:06d218e4464d31301e943b65b2c6919318ea6f69703a351961e1baaf60347276 \ + --hash=sha256:12ecf89bd54734c3c2c79898ae2021dca42750c7bcfb67f8fb3315453738ac8f \ + --hash=sha256:15253fff410873ebf3cfba1cc686a37711efcd9b8cb30ea21bb14a973e393f60 \ + --hash=sha256:188435794405c7f0573311747c85a96b63c954a5f2111b1df8018979eca0f2f0 \ + --hash=sha256:1ceebd0ae4f3e9b2b6b553b51971921853ae4eebf3f54086be0565d59291e53d \ + --hash=sha256:244e173bb6d8f3b2f0c4d7370a1aa341f35da3e57ffd1798e5b2917b91731fd3 \ + --hash=sha256:25b28b3d33ec0a78e944aaaed7e5e2a94ac811bcd68b557ca48a0c30f87497d2 \ + --hash=sha256:25ea41635d22b2eb6326f58e608550e55d01df51b8a580ea7e75396bafbb28e9 \ + --hash=sha256:29d311e44dd16d2434d5506d57ef4d7036544fc3c25c14b6992ef41f541b10fb \ + --hash=sha256:2a1472956c5bcc49fb0252b965239bffe801acc9394f8b7c1014ae9258e4572b \ + --hash=sha256:2a7bef6977043673750a88da064fd513f89505111014b4e00fbdd13329cd4e9a \ + --hash=sha256:2ac26f50736324beb0282c819668328d53fc38543fa61eeea2c32ea8ea6eab8d \ + --hash=sha256:2e72f750048b32d39e87fc85c225c50b2a6715034848dbb196bf3348aa761fa1 \ + --hash=sha256:31e220a040b89a01505128c2f8a59ee74732f666439a03e65ccbf3824cdddae7 \ + --hash=sha256:35f53c76a712e323c779ca39b9a81b13f219a8e3bc15f106ed1e1462d56fcfe9 \ + --hash=sha256:38d4f822ee2f338febcc85aaa2547eb5ba31ba6ff68d10b8ec988929d23bb6b4 \ + --hash=sha256:38f9bf2ad754b4a45b8210a6c732fe876b8a14e14d5992a8c4b7c1ef78740f53 \ + --hash=sha256:3a44c8440183b43167fd1a0819e8356692bf5db1ad14ce140dbd40a1485f2dea \ + --hash=sha256:3ab96754d23372009638a402a1ed12a27711598dd49d8316a22597141962fe66 \ + --hash=sha256:3c55d7f2d817183d43220738270efd3ce4e7a7b7cbdaefa6d551ed3d6ed89190 \ + --hash=sha256:46e1ed994a0920f350a4547a38471217eb86f57377e9314fbaaa329b71b7dfe3 \ + --hash=sha256:4a5375c5fff13f209527cd886dc75394f040c7d1ecad0a2cb0627f13ebe78a12 \ + 
--hash=sha256:4c2d26aa03d877c9730bf005621c92da263523a1e99247590abbbe252ccb7824 \ + --hash=sha256:4c4e314d36d4f31236a545696a480aa04ea170a0b021e9a59ab1ed94d4c3ef27 \ + --hash=sha256:4d0c10d803549427f427085ed7aebc39832f6e818a011dcd8785e9c6a1ba9b3e \ + --hash=sha256:4dcc5ee1d0275cb78d443fdebd0241e58772a354a6d518b1d7af1580bbd2c4e8 \ + --hash=sha256:51967a67ea0d7b9b5cd86036878e2d82c0b6183616961c26d825b8c994d4f2c8 \ + --hash=sha256:530190eb0cd778363bbb7596612ded0bb9fef662daa98e9d92a0419ab27ae914 \ + --hash=sha256:5379e49d7e80dca9811b36894493d1c1ecb4c57de05c36f5d0dd09982af20211 \ + --hash=sha256:5493569f861fb7b05af6d048d00d773c6162415ae521b7010197c98810a14cab \ + --hash=sha256:5a4c1058cdae6237d97af272b326e5f78ee7ee3bbffa6b24b09db4d828810468 \ + --hash=sha256:5d75d6d220d55cdced2f32cc22f599475dbe881229aeddba6c79c2e9df35a2b3 \ + --hash=sha256:5d97e9ae94fb96df1ee3cb09ca376c34e8a122f36927230f4c8a97f469994bff \ + --hash=sha256:5feae2f9aa7270e2c071f488fab256d768e88e01b958f123a690f1cc3061a09c \ + --hash=sha256:603d5868f7419081d616dab7ac3cfa285296735e7350f7b1e4f548f6f953ee7d \ + --hash=sha256:61d42d2b08430854485135504f672c14d4fc644dd243a9c17e7c4e0faf5ed07e \ + --hash=sha256:61dbc1e01dc0c5875da2f7ae36d6e918dc1b8d2ce04e871793976594aad8a57a \ + --hash=sha256:65cfed9c807c27dee76407e8bb29e6f4e391e436774bcc769a037ff25ad8646e \ + --hash=sha256:67a429520e97621a763cf9b3ba27574779c4e96e49a27ff8a1aa99ee70beb28a \ + --hash=sha256:6aadae3042f8e6db3376d9e91f194c606c9a45273c170621d46128f35aef7cd0 \ + --hash=sha256:6ba8858933f0c1a979781272a5f65646fca8c18c93c99c6ddb5513ad96fa54b1 \ + --hash=sha256:6bc568b05e02cd612be53900c88aaa55012e744930ba2eeb56279db4c6676eb3 \ + --hash=sha256:729408136ef8d45a28ee9a7411917c9e3459cf266c7e23c2f7d4bb8ef9e0da42 \ + --hash=sha256:751758d9dd04d548ec679224cc00e3591f5ebf1ff159ed0d4aba6a0746352452 \ + --hash=sha256:76d59d4d451ba77f08cb4cd9268dec07be5bc65f73666302dbb5061989b17198 \ + --hash=sha256:79bf58c08f0756adba691d480b5a20e4ad23f33e1ae121584cf3a21717c36dfa \ + 
--hash=sha256:7de12b69d95072394998c622cfd7e8cea8f560db5fca6a62a148f902a1029f8b \ + --hash=sha256:7f55cd9cf1564b7b03f238e4c017ca4794c05b01a783e9291065cb2858d86ce4 \ + --hash=sha256:80e5acb81cb49fd9f2d5c08f8b74ffff14ee73b10ca88297ab4619e946bcb1e1 \ + --hash=sha256:87a90f5545fd61f6964e65eebde4dc3fa8660bb7d87adb01d4cf17e0a2b484ad \ + --hash=sha256:881df98f0a8404d32b6de0fd33e91c1b90ed1516a80d4d6dc69d414b8850474c \ + --hash=sha256:8a776a29b77fe0cc28fedfd87277b0d0f7aa930174b7e504d764e0b43a05f381 \ + --hash=sha256:8c2a61c0e4811012b0ba9f6cdcb4437865df5d29eab5d6018ba13cee1c3064a0 \ + --hash=sha256:8fa6bd071ec6d90f6e7baa66ae25820d57a8ab1b0a3c6d3edf1834d4b26fafa2 \ + --hash=sha256:96f2975fb14f39c5fe75203f33dd3010fe37d1c4e33177feef1107b5ced750e3 \ + --hash=sha256:96fb0899bb2ab353f42e5374c8f0789f54e0a94ef2f02b9ac7149c56622eaf31 \ + --hash=sha256:97163a1ab265a1073a6372eca9f4eeb9f8c6327457a0b22ddfc4a17dcd613e74 \ + --hash=sha256:9c95a1a290f9acf7a8f2ebbdd183e99215d491beea52d61aa2a7a7d2c618ddc6 \ + --hash=sha256:9d94d78418203904730585efa71002286ac4c8ac0689d0eb61e3c465f9e608ff \ + --hash=sha256:a6ba2cb7d676e9415b9e9ac7e2aae401dc1b1e666943d1f7bc66223d3d73467b \ + --hash=sha256:aa0379c1935c44053c98826bc99ac95f3a5355675a297ac9ce0dfad0ce2d50ca \ + --hash=sha256:ac96d67b37f28e4b6ecf507c3405f52a40658c0a806dffde624a8fcb0314d5fd \ + --hash=sha256:ade2ccb937060c299ab0dfb2dea3d2ddf7e098ed63ee3d651ebfc2c8d1e8632a \ + --hash=sha256:aefbdc934115d2f9278f153952003ac52cd2650e7313750390b334518c589568 \ + --hash=sha256:b07501b720cf060c5856f7b5626e75b8e353b5f98b9b354a21eb4bfa47e421b1 \ + --hash=sha256:b5267feb19070bef34b8dea27e2b504ebd9d31748e3ecacb3a4101da6fcb255c \ + --hash=sha256:b5f6328e8e2ae8238fc767703ab7b95785521c42bb2b8790984e3477d7fa71ad \ + --hash=sha256:b8996ffb60c69f677245f5abdbcc623e9442bcc91ed81b6cd6187129ad1fa3e7 \ + --hash=sha256:b981a370f8f41c4024c170b42fbe9e691ae2dbc19d1d99151a69e2c84a0d194d \ + --hash=sha256:b9d121be0217787a7d59a5c6195b0842d3f701007333426e5154bf72346aa658 \ + 
--hash=sha256:bcef4f2d3dc603150421de85c916da19471f24d838c3c62a4f04c1eb511642c1 \ + --hash=sha256:bed0252c85e21cf73d2d033643c945b460d6a02fc4a7d644e3b2d6f5f2956c64 \ + --hash=sha256:bfdfbe6a36bc3059fff845d64c42f2644cf875c65f5005db54f90cdfdf1df815 \ + --hash=sha256:c0095b8aa3e432e32d372e9a7737e65b58d5ed23b9620fea7cb81f17672f1fa1 \ + --hash=sha256:c1f41d32a2ddc5a94df4b829b395916a4b7f103350fa76ba6de625fcb9e773ac \ + --hash=sha256:c45008ca79bad237cbc03c72bc5205e8c6f66403773929b1b50f7d84ef9e4d07 \ + --hash=sha256:c82bbf7e03748417c3a88c1b0b291288ce3e4887a795a3addaa7a1cfd9e7153e \ + --hash=sha256:c918621ee0a3d1fe61c313f2489464f2ae3d13633e60f520a8002a5e910982ee \ + --hash=sha256:d204957169f0b3511fb95395a9da7d4490fb361763a9f8b32b345a7fe119cb45 \ + --hash=sha256:d329896c40d9e1e5c7715c98529e4a188a1f2df51212fd65102b32465612b5dc \ + --hash=sha256:d3a61e928feddc458a55110f42f626a2a20bea942ccedb6fb4cee70b4830ed41 \ + --hash=sha256:d48db29bd47814671afdd76c7652aefacc25cf96aad6daefa82d738ee87461e2 \ + --hash=sha256:d5593855b5b2b73dd8413c3fdfa5d95b99d657658f947ba2c4318591e745d083 \ + --hash=sha256:d79c159adea0f1f4617f54aa156568ac69968f9ef4d1e5fefffc0a180830308e \ + --hash=sha256:db09b98c7540df69d4b47218da3fbd7cb466db0fb932e971c321f1c76f155266 \ + --hash=sha256:ddf23960cb42b69bce13045d5bc66f18c7d53774c66c13f24cf1b9c144ba3141 \ + --hash=sha256:e06cfea0ece444571d24c18ed465bc93afb8c8d8d74422eb7026662f3d3f779b \ + --hash=sha256:e7c564c58cf8f248fe859a4f0fe501b050663f3d7fbc342172f259124fb59933 \ + --hash=sha256:e86593bf8637659e6a6ed58854b6c87ec4e9e45ee8a4adfd936831cef55c2d21 \ + --hash=sha256:eaffbd8814bb1b5dc3ea156a4c5928081ba50419f9175f4fc95269e040eff8f0 \ + --hash=sha256:ee353bb51f648924926ed05e0122b6a0b1ae709396a80eb583449d5d477fcdf7 \ + --hash=sha256:ee6faebb265e28920a6f23a7d4c362414b3f4bb30607141d718b991669e49ddc \ + --hash=sha256:efe093acc43e869348f6f2224df7f452eab63a2c60a6c6cd6b50fd35c4e075ba \ + --hash=sha256:f03a1b3a4c03e3e0161642ac5367f08479ab29972ea0ffcd4fa18f729cd2be0a \ + 
--hash=sha256:f0d320e70b6b2300ff6029e234e79fe44e9dbbfc7b98597ba28e054bd6606a57 \ + --hash=sha256:f252dfb4852a527987a9156cbcae3022a30f86c9d26f4f17b8c967d7580d65d2 \ + --hash=sha256:f5f4424cb87a20b016bfdc157ff48757b89d2cc426256961643d443c6c277007 \ + --hash=sha256:f8eae66a1304de7368932b42d801c67969fd090ddb1a7a24f27b435ed4bed68f \ + --hash=sha256:fdb82eb60d31b0c033a8e8ee9f3fc7dfbaa042211131c29da29aea8531b4f18f # via # jsonschema # referencing diff --git a/requirements/main.txt b/requirements/main.txt index fdd183be12..cc2a1a8bb8 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -15,28 +15,24 @@ anyio==3.7.1 \ # fastapi # httpcore # starlette -bcrypt==4.0.1 \ - --hash=sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535 \ - --hash=sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0 \ - --hash=sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410 \ - --hash=sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd \ - --hash=sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665 \ - --hash=sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab \ - --hash=sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71 \ - --hash=sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215 \ - --hash=sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b \ - --hash=sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda \ - --hash=sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9 \ - --hash=sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a \ - --hash=sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344 \ - --hash=sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f \ - --hash=sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d \ - 
--hash=sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c \ - --hash=sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c \ - --hash=sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2 \ - --hash=sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d \ - --hash=sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e \ - --hash=sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3 +bcrypt==4.1.1 \ + --hash=sha256:12611c4b0a8b1c461646228344784a1089bc0c49975680a2f54f516e71e9b79e \ + --hash=sha256:12f40f78dcba4aa7d1354d35acf45fae9488862a4fb695c7eeda5ace6aae273f \ + --hash=sha256:14d41933510717f98aac63378b7956bbe548986e435df173c841d7f2bd0b2de7 \ + --hash=sha256:196008d91201bbb1aa4e666fee5e610face25d532e433a560cabb33bfdff958b \ + --hash=sha256:24c2ebd287b5b11016f31d506ca1052d068c3f9dc817160628504690376ff050 \ + --hash=sha256:2ade10e8613a3b8446214846d3ddbd56cfe9205a7d64742f0b75458c868f7492 \ + --hash=sha256:2e197534c884336f9020c1f3a8efbaab0aa96fc798068cb2da9c671818b7fbb0 \ + --hash=sha256:3d6c4e0d6963c52f8142cdea428e875042e7ce8c84812d8e5507bd1e42534e07 \ + --hash=sha256:476aa8e8aca554260159d4c7a97d6be529c8e177dbc1d443cb6b471e24e82c74 \ + --hash=sha256:755b9d27abcab678e0b8fb4d0abdebeea1f68dd1183b3f518bad8d31fa77d8be \ + --hash=sha256:a7a7b8a87e51e5e8ca85b9fdaf3a5dc7aaf123365a09be7a27883d54b9a0c403 \ + --hash=sha256:bab33473f973e8058d1b2df8d6e095d237c49fbf7a02b527541a86a5d1dc4444 \ + --hash=sha256:c6450538a0fc32fb7ce4c6d511448c54c4ff7640b2ed81badf9898dcb9e5b737 \ + --hash=sha256:d573885b637815a7f3a3cd5f87724d7d0822da64b0ab0aa7f7c78bae534e86dc \ + --hash=sha256:df37f5418d4f1cdcff845f60e747a015389fa4e63703c918330865e06ad80007 \ + --hash=sha256:f33b385c3e80b5a26b3a5e148e6165f873c1c202423570fdf45fe34e00e5f3e5 \ + --hash=sha256:fb931cd004a7ad36a89789caf18a54c20287ec1cd62161265344b9c4554fdb2e # via -r requirements/main.in certifi==2023.11.17 \ 
--hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ @@ -197,30 +193,30 @@ click==8.1.7 \ # via # -r requirements/main.in # safir -cryptography==41.0.5 \ - --hash=sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf \ - --hash=sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84 \ - --hash=sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e \ - --hash=sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8 \ - --hash=sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7 \ - --hash=sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1 \ - --hash=sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88 \ - --hash=sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86 \ - --hash=sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179 \ - --hash=sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81 \ - --hash=sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20 \ - --hash=sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548 \ - --hash=sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d \ - --hash=sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d \ - --hash=sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5 \ - --hash=sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1 \ - --hash=sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147 \ - --hash=sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936 \ - --hash=sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797 \ - --hash=sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696 \ - --hash=sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72 \ - 
--hash=sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da \ - --hash=sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723 +cryptography==41.0.7 \ + --hash=sha256:079b85658ea2f59c4f43b70f8119a52414cdb7be34da5d019a77bf96d473b960 \ + --hash=sha256:09616eeaef406f99046553b8a40fbf8b1e70795a91885ba4c96a70793de5504a \ + --hash=sha256:13f93ce9bea8016c253b34afc6bd6a75993e5c40672ed5405a9c832f0d4a00bc \ + --hash=sha256:37a138589b12069efb424220bf78eac59ca68b95696fc622b6ccc1c0a197204a \ + --hash=sha256:3c78451b78313fa81607fa1b3f1ae0a5ddd8014c38a02d9db0616133987b9cdf \ + --hash=sha256:43f2552a2378b44869fe8827aa19e69512e3245a219104438692385b0ee119d1 \ + --hash=sha256:48a0476626da912a44cc078f9893f292f0b3e4c739caf289268168d8f4702a39 \ + --hash=sha256:49f0805fc0b2ac8d4882dd52f4a3b935b210935d500b6b805f321addc8177406 \ + --hash=sha256:5429ec739a29df2e29e15d082f1d9ad683701f0ec7709ca479b3ff2708dae65a \ + --hash=sha256:5a1b41bc97f1ad230a41657d9155113c7521953869ae57ac39ac7f1bb471469a \ + --hash=sha256:68a2dec79deebc5d26d617bfdf6e8aab065a4f34934b22d3b5010df3ba36612c \ + --hash=sha256:7a698cb1dac82c35fcf8fe3417a3aaba97de16a01ac914b89a0889d364d2f6be \ + --hash=sha256:841df4caa01008bad253bce2a6f7b47f86dc9f08df4b433c404def869f590a15 \ + --hash=sha256:90452ba79b8788fa380dfb587cca692976ef4e757b194b093d845e8d99f612f2 \ + --hash=sha256:928258ba5d6f8ae644e764d0f996d61a8777559f72dfeb2eea7e2fe0ad6e782d \ + --hash=sha256:af03b32695b24d85a75d40e1ba39ffe7db7ffcb099fe507b39fd41a565f1b157 \ + --hash=sha256:b640981bf64a3e978a56167594a0e97db71c89a479da8e175d8bb5be5178c003 \ + --hash=sha256:c5ca78485a255e03c32b513f8c2bc39fedb7f5c5f8535545bdc223a03b24f248 \ + --hash=sha256:c7f3201ec47d5207841402594f1d7950879ef890c0c495052fa62f58283fde1a \ + --hash=sha256:d5ec85080cce7b0513cfd233914eb8b7bbd0633f1d1703aa28d1dd5a72f678ec \ + --hash=sha256:d6c391c021ab1f7a82da5d8d0b3cee2f4b2c455ec86c8aebbc84837a631ff309 \ + 
--hash=sha256:e3114da6d7f95d2dee7d3f4eec16dacff819740bbab931aff8648cb13c5ff5e7 \ + --hash=sha256:f983596065a18a2183e7f79ab3fd4c475205b839e02cbc0efbbf9666c4b3083d # via # -r requirements/main.in # pyjwt From c8107d03d809fff76768a9dccf096084f4246cc3 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 30 Nov 2023 17:16:22 -0800 Subject: [PATCH 316/588] Disable nublado2 on IDF int We no longer have any internal users of nublado2, so stop running it on IDF int to check for regressions. We aren't yet deleting it from Phalanx pending feedback from the international data centers, but it's unlikely that we need to continue to test it. --- applications/cachemachine/values-idfint.yaml | 37 --- applications/mobu/values-idfint.yaml | 17 -- applications/moneypenny/values-idfint.yaml | 15 -- applications/nublado2/values-idfint.yaml | 246 ------------------- environments/values-idfint.yaml | 3 - 5 files changed, 318 deletions(-) delete mode 100644 applications/cachemachine/values-idfint.yaml delete mode 100644 applications/moneypenny/values-idfint.yaml delete mode 100644 applications/nublado2/values-idfint.yaml diff --git a/applications/cachemachine/values-idfint.yaml b/applications/cachemachine/values-idfint.yaml deleted file mode 100644 index 0e80940198..0000000000 --- a/applications/cachemachine/values-idfint.yaml +++ /dev/null @@ -1,37 +0,0 @@ -image: - tag: "1.2.3" - -serviceAccount: - annotations: { - iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-int-dc5d.iam.gserviceaccount.com - } - -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoGar", - "registry_url": "us-central1-docker.pkg.dev", - "gar_repository": "sciplat", - "gar_image": "sciplat-lab", - "project_id": "rubin-shared-services-71ec", - "location": "us-central1", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - }, - { - "type": "SimpleRepoMan", - "images": [ - { - "image_url": 
"us-central1-docker.pkg.dev/rubin-shared-services-71ec/sciplat/sciplat-lab:w_2023_07", - "name": "Weekly 2023_07" - } - ] - } - ] - } diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 6f5f7d0beb..03bf1450a6 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -1,22 +1,5 @@ config: autostart: - - name: "nublado2" - count: 1 - users: - - username: "bot-mobu-nublado2" - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - repo_url: "https://github.com/lsst-sqre/system-test.git" - repo_branch: "prod" - max_executions: 1 - url_prefix: "/n2" - restart: true - name: "recommended" count: 1 users: diff --git a/applications/moneypenny/values-idfint.yaml b/applications/moneypenny/values-idfint.yaml deleted file mode 100644 index bf3fa84444..0000000000 --- a/applications/moneypenny/values-idfint.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: 10.22.240.130 - path: /share1/home diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml deleted file mode 100644 index 13466360bf..0000000000 --- a/applications/nublado2/values-idfint.yaml +++ /dev/null @@ -1,246 +0,0 @@ -jupyterhub: - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["data-int.lsst.cloud"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://data-int.lsst.cloud/login" -config: - base_url: "https://data-int.lsst.cloud" - butler_secret_path: "secret/phalanx/idfint/butler-secret" - pull_secret_path: 
"secret/phalanx/idfint/pull-secret" - cachemachine_image_policy: "desired" - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - S3_ENDPOINT_URL: "https://storage.googleapis.com" - GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/butler-secret/butler-gcs-idf-creds.json" - DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - PANDA_AUTH: oidc - PANDA_VERIFY_HOST: "off" - PANDA_AUTH_VO: Rubin - PANDA_URL_SSL: https://pandaserver-doma.cern.ch:25443/server/panda - PANDA_URL: http://pandaserver-doma.cern.ch:25080/server/panda - IDDS_CONFIG: /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template - PANDA_CONFIG_ROOT: "~" - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - sizes: - - name: Small - cpu: 1 - ram: 4096M - - name: Medium - cpu: 2 - ram: 8192M - - name: Large - cpu: 4 - ram: 16384M - - name: Huge - cpu: 8 - ram: 32768M - volumes: - - name: home - nfs: - path: /share1/home - server: 10.22.240.130 - - name: project - nfs: - path: /share1/project - server: 10.22.240.130 - - name: scratch - nfs: - path: /share1/scratch - server: 10.22.240.130 - volume_mounts: - - name: home - mountPath: /home - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - # Workaround to impose resource quotas at IDF - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - 
bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - lsst_lcl:x:1000:{{ user }} - tss:x:59: - cgred:x:997: - screen:x:84: - jovyan:x:768:{{ user }}{% for g in groups %} - {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. 
- - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds.cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: 
Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - - apiVersion: v1 - kind: ResourceQuota - metadata: - name: user-quota - namespace: "{{ user_namespace }}" - spec: - hard: - limits.cpu: 9 - limits.memory: 27Gi diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index c9194241af..6f0df726ce 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -8,14 +8,11 @@ vaultPathPrefix: secret/phalanx/idfint applications: alert-stream-broker: true - cachemachine: true datalinker: true hips: true linters: true mobu: true - moneypenny: true nublado: true - nublado2: true plot-navigator: true portal: true postgres: true From 6590376e9704fbd5e8a38f62f76d442d50074e5b Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Fri, 1 Dec 2023 10:48:36 -0300 Subject: [PATCH 317/588] Update rubintv repository --- applications/rubintv/Chart.yaml | 2 +- applications/rubintv/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/rubintv/Chart.yaml b/applications/rubintv/Chart.yaml index 52aeec7f6d..d9580e1818 100644 --- a/applications/rubintv/Chart.yaml +++ b/applications/rubintv/Chart.yaml @@ -3,7 +3,7 @@ name: rubintv version: 1.0.0 description: Real-time display front end sources: - - https://github.com/lsst-sqre/rubintv + - https://github.com/lsst-ts/rubintv appVersion: 0.1.0 dependencies: - name: redis diff --git a/applications/rubintv/README.md b/applications/rubintv/README.md index ce3a6ff28d..19fd8fa5c1 100644 --- a/applications/rubintv/README.md +++ b/applications/rubintv/README.md @@ -4,7 +4,7 @@ Real-time display front end ## Source Code -* +* ## Values From 6c041662a281d88544724480869440051a7c9284 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Fri, 1 Dec 2023 08:47:14 -0800 
Subject: [PATCH 318/588] add users to dev argocd rbac --- applications/argocd/values-usdfdev.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index 3eae1cf297..561b45be9b 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -57,6 +57,8 @@ argo-cd: g, fritzm@slac.stanford.edu, role:admin g, cslater@slac.stanford.edu, role:admin g, neilsen@slac.stanford.edu, role:admin + g, saranda@slac.stanford.edu, role:admin + g, ktl@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From d98b0e96581670f6188c5a9bc7c6cf75edc67827 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 08:02:12 +0000 Subject: [PATCH 319/588] Update Helm release argo-cd to v5.51.6 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index a0ac414350..574f794f5d 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.51.4 + version: 5.51.6 repository: https://argoproj.github.io/argo-helm From 018abdafe1a8ebccc7be874348b13c7d63c09f91 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 08:02:16 +0000 Subject: [PATCH 320/588] Update Helm release argo-workflows to v0.39.5 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 8fea1ce9dc..9f205f22ff 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - 
version: 0.39.3 + version: 0.39.5 repository: https://argoproj.github.io/argo-helm From c156f21cc4324a40b28452530eaa0f2eb6dbaff8 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 09:50:38 +0000 Subject: [PATCH 321/588] Update Helm release ingress-nginx to v4.8.4 --- applications/ingress-nginx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index ef223689ba..101c089ecc 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.8.3 + version: 4.8.4 repository: https://kubernetes.github.io/ingress-nginx From a9bc7035ff40ecf7c4f81b1e747dd4fec27e3e80 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 12:31:10 +0000 Subject: [PATCH 322/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index be9fd57322..d690bd30e1 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -207,9 +207,9 @@ distlib==0.3.7 \ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -documenteer[guide]==1.0.0a13 \ - --hash=sha256:3d2acd02110751166ea7e8dd7cebed723074afe4346079a816a3f9cd6297d24e \ - --hash=sha256:4dbac173d529d23127138fd45a187a426746ec5f11f94f0b8a02fe088a266381 +documenteer[guide]==1.0.0a15 \ + --hash=sha256:9590ba12c6aca7f76faef5605070059113b1d0a801875f42e69444848c3746ec \ + --hash=sha256:caa258ec5f5f68dca976e56098fa0d8a15974566ecca1df6962419ea27063c27 # via # -r requirements/dev.in 
# documenteer @@ -983,9 +983,9 @@ urllib3==2.1.0 \ # -c requirements/main.txt # documenteer # requests -virtualenv==20.24.7 \ - --hash=sha256:69050ffb42419c91f6c1284a7b24e0475d793447e35929b488bf6a0aade39353 \ - --hash=sha256:a18b3fd0314ca59a2e9f4b556819ed07183b3e9a3702ecfe213f593d44f7b3fd +virtualenv==20.25.0 \ + --hash=sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3 \ + --hash=sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b # via pre-commit # The following packages are considered to be unsafe in a requirements file: From de8261d8597364af8cd32bedd8301837f64de6ea Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 1 Dec 2023 13:54:34 -0700 Subject: [PATCH 323/588] Document nodeStatusMaxImages for nublado prepuller --- docs/admin/index.rst | 1 + docs/admin/infrastructure/nublado/index.rst | 56 +++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 docs/admin/infrastructure/nublado/index.rst diff --git a/docs/admin/index.rst b/docs/admin/index.rst index 96326b4fc9..a6f0621388 100644 --- a/docs/admin/index.rst +++ b/docs/admin/index.rst @@ -37,6 +37,7 @@ Administrators operate infrastructure, manage secrets, and are involved in the d :maxdepth: 2 infrastructure/filestore/index + infrastructure/nublado/index .. toctree:: :caption: Reference diff --git a/docs/admin/infrastructure/nublado/index.rst b/docs/admin/infrastructure/nublado/index.rst new file mode 100644 index 0000000000..dd7b28d738 --- /dev/null +++ b/docs/admin/infrastructure/nublado/index.rst @@ -0,0 +1,56 @@ +####### +Nublado +####### + +Although Nublado is itself a well-behaved Kubernetes application, in +many environments it requires particular attention to the Kubernetes +configuration settings in order to display the menu of available images +correctly and to keep the prepuller from running continuously. 
+ +Prepulled Images +================ + +The fundamental problem is that the Kubernetes setting +``nodeStatusMaxImages`` is set to 50 by default. The only way to +retrieve a list of which images are present on a node is to query the +node, and look through its ``status.images`` information. + +In general, our implementation relies on the supposition that prepulled +images are the freshest, and that, eventually, people will stop using +old images, and when the disk pressure garbage collection threshold is +exceeded, the images that have not been used in the longest time will be +purged. When the ephemeral storage is sized such that there is not room +for very many ``sciplat-lab`` images (but enough space to hold at least +the full set that should be prepulled), and when ``sciplat-lab`` is the +most common image found on nodes, this generally just works with no +further attention: the menu stays populated with the current images, and +since they are prepulled, they spawn quickly when selected. Disk +pressure cleans up outdated images, and everything works as it should. + +However, if the node has a large amount of ephemeral storage, and/or +there is much non-Lab application use on the node, this can cause a +problem for the prepuller: it is entirely possible for images that are +indeed present on the node to not be in the first fifty images in the +image list, and therefore not to be found when the prepuller determines +which images need prepulling. + +This has two consequences: first, the prepuller will be constantly +scheduling images as it prepulls the ones it wants, because even though +the image is already resident on the node, the prepuller doesn't know +that. Second, these images, because the prepuller thinks they are not +resident on all nodes, will not be visible in the JupyterHub spawner +menu, although they will be available from the dropdown list. + +Fortunately there is a simple fix, provided one has access to kubelet +configuration. 
The configuration is described in +https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/ +and includes the ``nodeStatusMaxImages`` setting. The default value of +50 should either be increased to something large enough that it's +implausible that that many images would fit into ephemeral storage, or +set to ``-1`` to remove the cap entirely. While disabling the cap could +in theory make node status extremely large, in practice, with the size +of nodes we've been running RSP instances on, we have had at most +hundreds, rather than thousands or millions, of container images on any +given node. + + From 0bc85bf4166843634ab973d4d7eb64bb5307c17d Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 4 Dec 2023 10:17:23 -0700 Subject: [PATCH 324/588] Address PR suggestions --- docs/admin/index.rst | 2 +- .../kubernetes-node-status-max-images.rst | 35 ++++++++++++ docs/admin/infrastructure/nublado/index.rst | 56 ------------------- docs/admin/troubleshooting.rst | 18 +++--- docs/applications/nublado/troubleshoot.rst | 7 +++ 5 files changed, 53 insertions(+), 65 deletions(-) create mode 100644 docs/admin/infrastructure/kubernetes-node-status-max-images.rst delete mode 100644 docs/admin/infrastructure/nublado/index.rst diff --git a/docs/admin/index.rst b/docs/admin/index.rst index a6f0621388..d107bd9114 100644 --- a/docs/admin/index.rst +++ b/docs/admin/index.rst @@ -37,7 +37,7 @@ Administrators operate infrastructure, manage secrets, and are involved in the d :maxdepth: 2 infrastructure/filestore/index - infrastructure/nublado/index + infrastructure/kubernetes-node-status-max-images .. 
toctree:: :caption: Reference diff --git a/docs/admin/infrastructure/kubernetes-node-status-max-images.rst b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst new file mode 100644 index 0000000000..11c7650c35 --- /dev/null +++ b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst @@ -0,0 +1,35 @@ +########################################################## +Kubernetes kubelet nodeStatusMaxImages setting for Nublado +########################################################## + +Setting nodeStatusMaxImages +=========================== + +The image prepuller in the :px-app:`nublado` application requires Kubernetes to keep track of a number of images and ensure each of those images are present on every node. This is required in order to provide a pleasant user experience, because the ``sciplat-lab`` images are large and typically take 3-5 minutes to pull and unpack when they are not already present on a node. +The default Kubernetes settings can in some circumstances result in the :px-app:`nublado` failing to display images in its spawner menu, as well as the image prepuller running continuously. +The solution, described here, is to set the ``nodeStatusMaxImages`` in the Kubernetes cluster's `kubelet config`_. + +.. _`kubelet config`: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/ + +The recommended remediation is to disable each node's cap on ``nodeStatusMaxImages`` by setting its value to ``-1`` in the node's kubelet configuration file. Typically this file is found at ``/var/lib/kubelet/config``. However, your Kubernetes deployment may have relocated it, you may be using a drop-in configuration directory, or you may be managing it with some other automation tool. + +After editing the configuration file, you must then restart kubelet on each node. + +Background +========== + +The fundamental problem is that the Kubernetes setting ``nodeStatusMaxImages`` is set to 50 by default. 
The only way to retrieve a list of which images are present on a node is to query the node, and look through its ``status.images`` information. + +In general, the Nublado prepulling strategy relies on the supposition that prepulled images are the freshest; that, eventually, people will stop using old images; and finally, when the disk pressure garbage collection threshold is exceeded, the images that have not been used in the longest time will be purged. + +When the ephemeral storage is sized such that there is not room for very many ``sciplat-lab`` images (but enough space to hold at least the full set that should be prepulled), and when ``sciplat-lab`` is the most common image found on nodes, this generally just works with no further attention: the menu stays populated with the current images, and since they are prepulled, they spawn quickly when selected. +Disk pressure cleans up outdated images, and everything works as it should. + +However, if the node has a large amount of ephemeral storage, and/or there is much non-Lab application use on the node, this can cause a problem for the prepuller: it is entirely possible for images that are indeed present on the node to not be in the first fifty images in the image list, and therefore not to be found when the prepuller determines which images need prepulling. + +This has two consequences: first, the prepuller will be constantly scheduling images as it prepulls the ones it wants, because even though the image is already resident on the node, the prepuller does not, and cannot, know that. +Second, these images, because the prepuller incorrectly believes they are not resident on all nodes, will not be visible in the JupyterHub spawner menu, although they will be available from the dropdown list. + +Fortunately there is a simple fix: increase the kubelet ``nodeStatusMaxImages`` setting. 
The default value of 50 should either be increased to something large enough that it's implausible that that many images would fit into ephemeral storage, or set to ``-1`` to remove the cap entirely. While disabling the cap could, in theory, make node status extremely large (which is the reason the cap exists in the first place), in practice it has never proven problematic in a Phalanx deployment. Those deployments have had at most hundreds, rather than thousands or millions, of container images on any given node, so the size of the status document has always remained modest. + +Should you go the route of choosing a larger positive value for ``nodeStatusMaxImages`` a reasonable rule of thumb is to pick a number one-third of the size of each node's ephemeral storage in gigabytes. Thus if you had a terabyte of ephemeral storage, a ``nodeStatusMaxImages`` of ``350`` would be a good starting guess. This value is also dependent on how broadly mixed your workload is, and how large the images for the other aspects of your workload are, which is why disabling the cap entirely is the initial recommendation. diff --git a/docs/admin/infrastructure/nublado/index.rst b/docs/admin/infrastructure/nublado/index.rst deleted file mode 100644 index dd7b28d738..0000000000 --- a/docs/admin/infrastructure/nublado/index.rst +++ /dev/null @@ -1,56 +0,0 @@ -####### -Nublado -####### - -Although Nublado is itself a well-behaved Kubernetes application, in -many environments it requires particular attention to the Kubernetes -configuration settings in order to display the menu of available images -correctly and to keep the prepuller from running continuously. - -Prepulled Images -================ - -The fundamental problem is that the Kubernetes setting -``nodeStatusMaxImages`` is set to 50 by default. The only way to -retrieve a list of which images are present on a node is to query the -node, and look through its ``status.images`` information. 
- -In general, our implementation relies on the supposition that prepulled -images are the freshest, and that, eventually, people will stop using -old images, and when the disk pressure garbage collection threshold is -exceeded, the images that have not been used in the longest time will be -purged. When the ephemeral storage is sized such that there is not room -for very many ``sciplat-lab`` images (but enough space to hold at least -the full set that should be prepulled), and when ``sciplat-lab`` is the -most common image found on nodes, this generally just works with no -further attention: the menu stays populated with the current images, and -since they are prepulled, they spawn quickly when selected. Disk -pressure cleans up outdated images, and everything works as it should. - -However, if the node has a large amount of ephemeral storage, and/or -there is much non-Lab application use on the node, this can cause a -problem for the prepuller: it is entirely possible for images that are -indeed present on the node to not be in the first fifty images in the -image list, and therefore not to be found when the prepuller determines -which images need prepulling. - -This has two consequences: first, the prepuller will be constantly -scheduling images as it prepulls the ones it wants, because even though -the image is already resident on the node, the prepuller doesn't know -that. Second, these images, because the prepuller thinks they are not -resident on all nodes, will not be visible in the JupyterHub spawner -menu, although they will be available from the dropdown list. - -Fortunately there is a simple fix, provided one has access to kubelet -configuration. The configuration is described in -https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/ -and includes the ``nodeStatusMaxImages`` setting. 
The default value of -50 should either be increased to something large enough that it's -implausible that that many images would fit into ephemeral storage, or -set to ``-1`` to remove the cap entirely. While disabling the cap could -in theory make node status extremely large, in practice, with the size -of nodes we've been running RSP instances on, we have had at most -hundreds, rather than thousands or millions, of container images on any -given node. - - diff --git a/docs/admin/troubleshooting.rst b/docs/admin/troubleshooting.rst index 1ee51aedeb..1372c483ab 100644 --- a/docs/admin/troubleshooting.rst +++ b/docs/admin/troubleshooting.rst @@ -18,6 +18,15 @@ When this happens, you may need to recreate the persistent volume. **Solution:** :ref:`recreate-postgres-pvc` +Spawner menu missing images, nublado stuck pulling the same image +================================================================= +**Symptoms: **When a user goes to the spawner page for the Notebook Aspect, the expected menu of images is not available. +Instead, the menu is missing one or more images. +The same image or set of images is pulled again each on each prepuller loop the nublado lab controller attempts. + +**Solution:** :doc:`infrastructure/kubernetes-node-status-max-images` + + Spawner menu missing images, cachemachine stuck pulling the same image ====================================================================== @@ -26,14 +35,7 @@ Instead, the menu is either empty or missing the right number of images of diffe The cachemachine application is continuously creating a ``DaemonSet`` for the same image without apparent forward progress. Querying the cachemachine ``/available`` API shows either nothing in ``images`` or not everything that was expected. -**Cause:** Cachemachine is responsible for generating the menu used for spawning new JupyterLab instances. 
-The list of available images is pulled from the list of images that are already cached on every non-cordoned node to ensure that spawning will be quick. -If the desired types of images are not present on each node, cachemachine will create a ``DaemonSet`` for that image to attempt to start a pod using that image on every node, which will cache it. -If this fails to change the reported images available on each node, it will keep retrying. - -The most common cause of this problem is a Kubernetes limitation. -By default, the Kubernetes list node API only returns the "first" (which usually means oldest) 50 cached images. -If more than 50 images are cached, images may go missing from that list even though they are cached, leading cachemachine to think they aren't cached and omitting them from the spawner menu. +**Cause:** This is the same problem as above, but with the older (cachemachine+moneypenny)-based infrastructure rather than nublado v3. The solution is the same: :doc:`infrastructure/kubernetes-node-status-max-images`. **Solution:** :doc:`/applications/cachemachine/pruning` diff --git a/docs/applications/nublado/troubleshoot.rst b/docs/applications/nublado/troubleshoot.rst index eeae4ddf3a..36438f8add 100644 --- a/docs/applications/nublado/troubleshoot.rst +++ b/docs/applications/nublado/troubleshoot.rst @@ -32,3 +32,10 @@ Recovery may require manually clearing the user's entry in the session database In some cases you may also need to remove the user from the spawner table. To do this, run ``select * from spawners`` and find the pod with the user's name in it, and then delete that row. + +.. 
_nublado_node_status_max_images: + +Prepuller is running continuously and/or expected menu items are missing +======================================================================== + +``nodeStatusMaxImages`` should be increased or disabled: :doc:`/admin/infrastructure/kubernetes-node-status-max-images` From 18eff3681f1592b70e0bfbbe0ff0f784d9f72f40 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 4 Dec 2023 12:48:51 -0700 Subject: [PATCH 325/588] Address second round of commentary --- .../infrastructure/kubernetes-node-status-max-images.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/admin/infrastructure/kubernetes-node-status-max-images.rst b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst index 11c7650c35..0136a376dc 100644 --- a/docs/admin/infrastructure/kubernetes-node-status-max-images.rst +++ b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst @@ -2,11 +2,12 @@ Kubernetes kubelet nodeStatusMaxImages setting for Nublado ########################################################## +The image prepuller in the :px-app:`nublado` application requires Kubernetes to keep track of a number of images and ensure each of those images are present on every node. This is required in order to provide a pleasant user experience, because the ``sciplat-lab`` images are large and typically take 3-5 minutes to pull and unpack when they are not already present on a node. +The default Kubernetes settings can in some circumstances result in the :px-app:`nublado` failing to display images in its spawner menu, as well as the image prepuller running continuously. + Setting nodeStatusMaxImages =========================== -The image prepuller in the :px-app:`nublado` application requires Kubernetes to keep track of a number of images and ensure each of those images are present on every node. 
This is required in order to provide a pleasant user experience, because the ``sciplat-lab`` images are large and typically take 3-5 minutes to pull and unpack when they are not already present on a node. -The default Kubernetes settings can in some circumstances result in the :px-app:`nublado` failing to display images in its spawner menu, as well as the image prepuller running continuously. The solution, described here, is to set the ``nodeStatusMaxImages`` in the Kubernetes cluster's `kubelet config`_. .. _`kubelet config`: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/ From 80d8998801179a9b469415029d41b78723c94af4 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 5 Dec 2023 08:53:05 -0800 Subject: [PATCH 326/588] Update Gafaelfawr to 9.6.0 Adds support for ingresses restricted to a specific user, which is required for the new Nublado user file servers. --- applications/gafaelfawr/Chart.yaml | 2 +- applications/gafaelfawr/crds/ingress.yaml | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index 6fc1197bea..fe2674600c 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,7 +5,7 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.5.1 +appVersion: 9.6.0 dependencies: - name: redis diff --git a/applications/gafaelfawr/crds/ingress.yaml b/applications/gafaelfawr/crds/ingress.yaml index d81837e8f4..89d7ddbe2a 100644 --- a/applications/gafaelfawr/crds/ingress.yaml +++ b/applications/gafaelfawr/crds/ingress.yaml @@ -172,6 +172,13 @@ spec: - true required: - anonymous + username: + type: string + description: >- + Restrict access to this ingress to the given username. All + other users, regardless of their scopes, will receive 403 + errors. The user's token must still satisfy any scope + constraints. 
template: type: object description: "The template used to create the ingress." From 534592c4d80264db773cff6918049cadbeaf3e99 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 5 Dec 2023 08:58:19 -0800 Subject: [PATCH 327/588] Restrict minikube testing Previously, we always ran minikube testing for changes to Phalanx applications even if that application was not deployed on minikube. That's pointless, wastes resources, and risks false positives. Skip minikube testing unless the change affects one of the applications deployed on minikube, and remove hips from that set since we don't run any meaningful tests against it. --- .github/workflows/ci.yaml | 10 +++++----- environments/values-minikube.yaml | 1 - 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 51dbad3218..94afac8961 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -89,12 +89,12 @@ jobs: filters: | minikube: - ".github/workflows/ci.yaml" - - "applications/*/Chart.yaml" - - "applications/*/templates/**" - - "applications/*/values.yaml" - - "applications/*/values-minikube.yaml" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/Chart.yaml" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/templates/**" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/values.yaml" + - "applications/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}/values-minikube.yaml" - "environments/Chart.yaml" - - "environments/templates/**" + - "environments/templates/{argocd,gafaelfawr,ingress-nginx,mobu,postgres,squareone,vault-secrets-operator}*" - "environments/values-minikube.yaml" - "installer/**" diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index aea3579661..309e43a461 100644 --- a/environments/values-minikube.yaml +++ 
b/environments/values-minikube.yaml @@ -11,7 +11,6 @@ vaultPathPrefix: secret/phalanx/minikube # currently, which substantially limits the applications that can be # meaningfully deployed. applications: - hips: true mobu: true postgres: true squareone: true From c9c6dc951b98128cc47504d34488bcd76f0bfd61 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 5 Dec 2023 09:00:51 -0800 Subject: [PATCH 328/588] Remove minikube configuration for some apps Delete the minikube configuration for nublado2, since we're unlikely to ever run nublado2 on minikube again. Also remove the configuration for hips (not meaningful) and schedview-prenight (confusing when adding that application). --- .../cachemachine/values-minikube.yaml | 17 -------------- applications/hips/values-minikube.yaml | 4 ---- applications/moneypenny/values-minikube.yaml | 0 applications/nublado2/values-minikube.yaml | 23 ------------------- .../schedview-prenight/values-minikube.yaml | 0 5 files changed, 44 deletions(-) delete mode 100644 applications/cachemachine/values-minikube.yaml delete mode 100644 applications/hips/values-minikube.yaml delete mode 100644 applications/moneypenny/values-minikube.yaml delete mode 100644 applications/nublado2/values-minikube.yaml delete mode 100644 applications/schedview-prenight/values-minikube.yaml diff --git a/applications/cachemachine/values-minikube.yaml b/applications/cachemachine/values-minikube.yaml deleted file mode 100644 index 4369a6be97..0000000000 --- a/applications/cachemachine/values-minikube.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 0, - "num_weeklies": 0, - "num_dailies": 0 - } - ] - } diff --git a/applications/hips/values-minikube.yaml b/applications/hips/values-minikube.yaml deleted file mode 100644 index 
44e7bb33bc..0000000000 --- a/applications/hips/values-minikube.yaml +++ /dev/null @@ -1,4 +0,0 @@ -config: - gcsProject: "bogus" - gcsBucket: "bogus" - serviceAccount: "bogus" diff --git a/applications/moneypenny/values-minikube.yaml b/applications/moneypenny/values-minikube.yaml deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/applications/nublado2/values-minikube.yaml b/applications/nublado2/values-minikube.yaml deleted file mode 100644 index 01245f89b7..0000000000 --- a/applications/nublado2/values-minikube.yaml +++ /dev/null @@ -1,23 +0,0 @@ -jupyterhub: - hub: - resources: {} - debug: - enabled: true - ingress: - hosts: ["minikube.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://minikube.lsst.codes/login" -config: - base_url: "https://minikube.lsst.codes" - butler_secret_path: "secret/k8s_operator/minikube.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/minikube.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - volumes: - - name: home - emptyDir: {} - volume_mounts: - - name: home - mountPath: /home diff --git a/applications/schedview-prenight/values-minikube.yaml b/applications/schedview-prenight/values-minikube.yaml deleted file mode 100644 index e69de29bb2..0000000000 From bb4f1caf65bce4c9fd33c984bc5ff8812bdde5f7 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 5 Dec 2023 15:54:32 -0700 Subject: [PATCH 329/588] Fix lsst.sal.Test topic regexp elsewhere - Make sure we are filtering the Test CSC topics correctly --- applications/sasquatch/values-idfint.yaml | 2 +- applications/sasquatch/values-tucson-teststand.yaml | 2 +- applications/sasquatch/values-usdfdev.yaml | 2 +- applications/sasquatch/values-usdfint.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/values-idfint.yaml 
b/applications/sasquatch/values-idfint.yaml index 12d9e206f3..d76b58e184 100644 --- a/applications/sasquatch/values-idfint.yaml +++ b/applications/sasquatch/values-idfint.yaml @@ -47,7 +47,7 @@ kafka-connect-manager: connectors: test: enabled: true - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" kafdrop: ingress: diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index cd2e0e01bd..8e93e98774 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -154,7 +154,7 @@ kafka-connect-manager: topicsRegex: ".*OCPS" test: enabled: true - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" pmd: enabled: true topicsRegex: ".*PMD" diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 16ecb7b735..f527fba509 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -60,7 +60,7 @@ kafka-connect-manager: topicsRegex: ".*OCPS" test: enabled: false - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" pmd: enabled: false topicsRegex: ".*PMD" diff --git a/applications/sasquatch/values-usdfint.yaml b/applications/sasquatch/values-usdfint.yaml index d0b57610d4..f710f33562 100644 --- a/applications/sasquatch/values-usdfint.yaml +++ b/applications/sasquatch/values-usdfint.yaml @@ -60,7 +60,7 @@ kafka-connect-manager: topicsRegex: ".*OCPS" test: enabled: false - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" pmd: enabled: false topicsRegex: ".*PMD" From 4f72c0bc9ecf352dc723b39e2c2c62bc10ffefa7 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 5 Dec 2023 20:45:16 -0700 Subject: [PATCH 330/588] Fix default connector configuration - Rename the connector used in the default configuration of kafka-connect-manager to avoid overriding the test connector configuration. 
--- applications/sasquatch/README.md | 24 +++++++++---------- .../charts/kafka-connect-manager/README.md | 12 +++++----- .../charts/kafka-connect-manager/values.yaml | 6 ++--- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index cc8f0f7165..9695beee8f 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -154,12 +154,12 @@ Rubin Observatory's telemetry service. | kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | -| kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | -| kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | -| kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. | -| kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | -| kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | -| kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. 
| +| kafka-connect-manager.influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. | +| kafka-connect-manager.influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. | +| kafka-connect-manager.influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. | +| kafka-connect-manager.influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| kafka-connect-manager.influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| kafka-connect-manager.influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. | | kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | | kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maxium number of tasks to run the connector. | | kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | @@ -240,12 +240,12 @@ Rubin Observatory's telemetry service. | source-kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | source-kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | source-kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. 
| -| source-kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | -| source-kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | -| source-kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. | -| source-kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | -| source-kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | -| source-kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. | +| source-kafka-connect-manager.influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. | +| source-kafka-connect-manager.influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. | +| source-kafka-connect-manager.influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. | +| source-kafka-connect-manager.influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| source-kafka-connect-manager.influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| source-kafka-connect-manager.influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. 
| | source-kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | | source-kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maxium number of tasks to run the connector. | | source-kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index c9ff922025..c5b9da41fc 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -21,12 +21,12 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | -| influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | -| influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | -| influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. | -| influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | -| influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. 
| -| influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. | +| influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. | +| influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. | +| influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. | +| influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. | | influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | | influxdbSink.tasksMax | int | `1` | Maxium number of tasks to run the connector. | | influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml b/applications/sasquatch/charts/kafka-connect-manager/values.yaml index e508350f03..2c534cd62d 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml @@ -34,17 +34,17 @@ influxdbSink: excludedTopicsRegex: "" # -- Connector instances to deploy. connectors: - test: + example: # -- Whether this connector instance is deployed. enabled: false # -- Whether to deploy a repairer connector in addition to the original connector instance. repairerConnector: false # -- Regex to select topics from Kafka. 
- topicsRegex: "source.lsst.sal.Test" + topicsRegex: "example.topic" # -- Fields in the Avro payload that are treated as InfluxDB tags. tags: "" # -- Remove prefix from topic name. - removePrefix: "source." + removePrefix: "" # The s3Sink connector assumes Parquet format with Snappy compression # and a time based partitioner. From 4be06b13e18c600cbbc6b45b2e1b95f0b1522477 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 5 Dec 2023 21:31:23 -0700 Subject: [PATCH 331/588] Disable InfluxDB v2 at the summit - Disable InfluxDB v2 in favor of InfluxDB Enterprise --- applications/sasquatch/values-summit.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 447a9f4583..d2756e97a9 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -33,7 +33,7 @@ influxdb: hostname: summit-lsp.lsst.codes influxdb2: - enabled: true + enabled: false persistence: storageClass: rook-ceph-block size: 5Ti From c4ead9ffa999297e3bb26aadaa16fa1868dff482 Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Tue, 5 Dec 2023 09:07:39 -0800 Subject: [PATCH 332/588] avoid redirect in schedview-snapshot probe --- applications/schedview-snapshot/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml index 4c71d3da8f..d85a2afde7 100644 --- a/applications/schedview-snapshot/templates/deployment.yaml +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -40,7 +40,7 @@ spec: protocol: "TCP" readinessProbe: httpGet: - path: "/schedview-snapshot" + path: "/schedview-snapshot/dashboard" port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} From c90994fbc3fae823fb42ee4427fe34096c12042e Mon Sep 17 00:00:00 2001 From: Eric Neilsen Date: Wed, 6 Dec 2023 07:34:27 -0800 
Subject: [PATCH 333/588] relax readinessProbe timing in schedview-snapshot --- applications/schedview-snapshot/templates/deployment.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/schedview-snapshot/templates/deployment.yaml b/applications/schedview-snapshot/templates/deployment.yaml index d85a2afde7..3d129a04c0 100644 --- a/applications/schedview-snapshot/templates/deployment.yaml +++ b/applications/schedview-snapshot/templates/deployment.yaml @@ -42,6 +42,9 @@ spec: httpGet: path: "/schedview-snapshot/dashboard" port: "http" + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 15 resources: {{- toYaml .Values.resources | nindent 12 }} volumeMounts: From 44a1ac2862f42334bdc8d0a4006680d1fa236fe0 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 7 Dec 2023 12:48:47 -0700 Subject: [PATCH 334/588] Fix default value for replication.policy.separator - This fixes the warning "replication.policy.separator: A null value is not allowed for this key" that's spamming the strimzi operator logs --- .../sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml index 39ae427957..5225d7feb7 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml @@ -57,7 +57,7 @@ spec: # Policy to define the remote topic naming convention. # The default is to preserve topic names in the target cluster. # To add the source cluster alias as a prefix to the topic name, use replication.policy.separator="." and replication.policy.class="org.apache.kafka.connect.mirror.DefaultReplicationPolicy" - replication.policy.separator: {{ default "" .Values.mirrormaker2.replication.policy.separator }} + replication.policy.separator: {{ default "." 
.Values.mirrormaker2.replication.policy.separator }} replication.policy.class: {{ default "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" .Values.mirrormaker2.replication.policy.class }} # Handling high volumes of messages # By increasing the batch size, produce requests are delayed and more messages are From 0bfd8c5b01df70354aaa18b46868846f3acee869 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 7 Dec 2023 14:33:28 -0700 Subject: [PATCH 335/588] Increase MM2 request timeouts - Seeing timeouts and DisconnectException errors in USDF -> Summit connection over the LHN --- .../charts/strimzi-kafka/templates/mirrormaker2.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml index 5225d7feb7..26ff37d788 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml @@ -75,8 +75,8 @@ spec: producer.max.request.size: 10485760 producer.buffer.memory: 10485760 # Increase request timeout - producer.request.timeout.ms: 120000 - consumer.request.timeout.ms: 120000 + producer.request.timeout.ms: 240000 + consumer.request.timeout.ms: 240000 heartbeatConnector: config: heartbeats.topic.replication.factor: 3 From 11eea778807ceb0169f08dbb709ba5e734b3993a Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Thu, 7 Dec 2023 15:32:28 -0800 Subject: [PATCH 336/588] Update prompt processing to use d_2023_12_05 --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 55edaf9936..b1023525f4 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ 
b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -7,7 +7,7 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: d_2023_11_22 + tag: d_2023_12_05 instrument: pipelines: >- From 9d9b38e016746437ab5b04f9a09cd41fff963fc6 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 8 Dec 2023 08:09:13 -0800 Subject: [PATCH 337/588] Switch target container for mobu on idfdev, idfint system-test is broken with the latest weekly image. Switch idfdev over to using recommended, switch idfint to recommended for tutorial notebooks, and turn off its latest weekly run with system-test for now. --- applications/mobu/values-idfdev.yaml | 8 ++------ applications/mobu/values-idfint.yaml | 20 -------------------- 2 files changed, 2 insertions(+), 26 deletions(-) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 37d3033f71..d9b6aa4eb7 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -1,10 +1,10 @@ config: debug: true autostart: - - name: "weekly" + - name: "recommended" count: 1 users: - - username: "bot-mobu-weekly" + - username: "bot-mobu-recommended" scopes: - "exec:notebook" - "exec:portal" @@ -13,8 +13,6 @@ config: business: type: "NotebookRunner" options: - image: - image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" use_cachemachine: false @@ -31,8 +29,6 @@ config: business: type: "NotebookRunner" options: - image: - image_class: "latest-weekly" repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" repo_branch: "prod" max_executions: 1 diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 03bf1450a6..42f01fe92e 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -16,24 +16,6 @@ config: repo_branch: "prod" use_cachemachine: false restart: 
true - - name: "weekly" - count: 1 - users: - - username: "bot-mobu-weekly" - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - image: - image_class: "latest-weekly" - repo_url: "https://github.com/lsst-sqre/system-test.git" - repo_branch: "prod" - use_cachemachine: false - restart: true - name: "tutorial" count: 1 users: @@ -46,8 +28,6 @@ config: business: type: "NotebookRunner" options: - image: - image_class: "latest-weekly" repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" repo_branch: "prod" max_executions: 1 From 60fd02abab46115108d344df09498da9c7fa973f Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Thu, 12 Oct 2023 10:40:02 -0700 Subject: [PATCH 338/588] Use the butler repo config directly from the remote butler Now that /repo/embargo's butler.yaml file lives in the bucket s3://rubin-summit-users, we no longer need to maintain our own copy in Vault. --- .../values-usdfprod-prompt-processing.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index b1023525f4..dd7ad2268e 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -25,7 +25,7 @@ prompt-proto-service: (survey="spec_pole")=[] (survey="spec_pole_with_rotation")=[] (survey="")=[] - calibRepo: /app/butler + calibRepo: s3://rubin-summit-users s3: imageBucket: rubin-summit @@ -41,7 +41,6 @@ prompt-proto-service: registry: ip: usdf-butler.slac.stanford.edu:5432 # TODO: remove on DM-40839 - centralRepoFile: true logLevel: lsst.resources=DEBUG From e3f3f20f7f0f3af93e313976f063d1c492cedb6f Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Thu, 12 Oct 2023 11:06:31 -0700 Subject: [PATCH 339/588] Make each 
deployment decide what calibRepo to use --- applications/prompt-proto-service-latiss/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index bd1b7dfb7f..227d480b4c 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -32,7 +32,7 @@ prompt-proto-service: # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set - calibRepo: s3://rubin-summit-users/ + calibRepo: "" s3: # -- Bucket containing the incoming raw images From c444fef0e4fef39d5287dde1b5a6e9bb6c962588 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Fri, 8 Dec 2023 10:16:54 -0800 Subject: [PATCH 340/588] Remove obsolete environment variables from prompt-proto-service. These variables were used to construct a `.pgpass` file programmatically. Now that we mount a .pgpass file directly from a secret, these variables are just clutter. 
--- .../prompt-proto-service-hsc/README.md | 6 ----- .../values-usdfdev-prompt-processing.yaml | 6 ----- .../prompt-proto-service-hsc/values.yaml | 16 ------------- .../prompt-proto-service-latiss/README.md | 6 ----- .../values-usdfdev-prompt-processing.yaml | 6 ----- .../values-usdfprod-prompt-processing.yaml | 4 ---- .../prompt-proto-service-latiss/values.yaml | 16 ------------- .../prompt-proto-service-lsstcam/README.md | 6 ----- .../values-usdfdev-prompt-processing.yaml | 4 ---- .../prompt-proto-service-lsstcam/values.yaml | 16 ------------- .../prompt-proto-service-lsstcomcam/README.md | 6 ----- .../values-usdfdev-prompt-processing.yaml | 4 ---- .../values.yaml | 16 ------------- charts/prompt-proto-service/README.md | 6 ----- .../templates/prompt-proto-service.yaml | 24 ------------------- charts/prompt-proto-service/values.yaml | 16 ------------- 16 files changed, 158 deletions(-) diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index e472f0abda..777795d6bb 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -13,11 +13,8 @@ Prompt Proto Service is an event driven service for processing camera images. Th | Key | Type | Default | Description | |-----|------|---------|-------------| | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. 
| -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -36,9 +33,6 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. 
| -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | | prompt-proto-service.s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index 99c7031c66..736e870385 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -25,11 +25,5 @@ prompt-proto-service: apdb: url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: # TODO: remove on DM-40839 - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 - db: ppcentralbutler - user: pp fullnameOverride: "prompt-proto-service-hsc" diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 658e9070a1..ed03e7407b 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -59,26 +59,10 @@ prompt-proto-service: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP 
address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: "" # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: "" # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 4209990b4f..1c09a6b8f3 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -13,11 +13,8 @@ Prompt Proto Service is an event driven service for processing camera images. Th | Key | Type | Default | Description | |-----|------|---------|-------------| | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. 
| -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -36,9 +33,6 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. 
| -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | | prompt-proto-service.s3.endpointUrl | string | `""` | | diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index acd5dd126b..c72b31150e 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -24,11 +24,5 @@ prompt-proto-service: apdb: url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu/lsst-devl - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: # TODO: remove on DM-40839 - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 - db: ppcentralbutler - user: pp fullnameOverride: "prompt-proto-service-latiss" diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index dd7ad2268e..393960ec60 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -37,10 +37,6 @@ prompt-proto-service: apdb: url: 
postgresql://rubin@usdf-prompt-processing.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: - ip: usdf-butler.slac.stanford.edu:5432 # TODO: remove on DM-40839 logLevel: lsst.resources=DEBUG diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 227d480b4c..3fdb3f7b20 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -59,26 +59,10 @@ prompt-proto-service: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: lsstdb1 # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: rubin # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. 
centralRepoFile: false diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index 9d54c3e0e2..98e9d304c8 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -13,11 +13,8 @@ Prompt Proto Service is an event driven service for processing camera images. Th | Key | Type | Default | Description | |-----|------|---------|-------------| | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -36,9 +33,6 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. 
| Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. 
| | prompt-proto-service.s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml index fa7596695c..5b2fdab778 100644 --- a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml @@ -24,9 +24,5 @@ prompt-proto-service: apdb: url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: # TODO: remove on DM-40839 - ip: usdf-butler.slac.stanford.edu:5432 fullnameOverride: "prompt-proto-service-lsstcam" diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index ac5ddb3110..af00f570e0 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -59,26 +59,10 @@ prompt-proto-service: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: lsstdb1 # TODO: remove on DM-40839 - # -- 
Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: rubin # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 70f26b1e79..a7e1a0fbfb 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -12,11 +12,8 @@ Prompt Proto Service is an event driven service for processing camera images. Th | Key | Type | Default | Description | |-----|------|---------|-------------| -| prompt-proto-service.apdb.db | string | `"lsst-devl"` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| prompt-proto-service.apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | prompt-proto-service.apdb.namespace | string | `"pp_apdb"` | Database namespace for the APDB | | prompt-proto-service.apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| prompt-proto-service.apdb.user | string | `"rubin"` | Database user for the APDB (deprecated for apdb.url) | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | @@ -35,9 +32,6 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| prompt-proto-service.registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| prompt-proto-service.registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| prompt-proto-service.registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. 
| | prompt-proto-service.s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml index 077f6293dd..ab8962cb4e 100644 --- a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml @@ -24,9 +24,5 @@ prompt-proto-service: apdb: url: postgresql://rubin@usdf-prompt-processing-dev.slac.stanford.edu:5432/lsst-devl - ip: usdf-prompt-processing-dev.slac.stanford.edu:5432 # TODO: remove on DM-40839 - - registry: # TODO: remove on DM-40839 - ip: usdf-butler.slac.stanford.edu:5432 fullnameOverride: "prompt-proto-service-lsstcomcam" diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index de786d31fc..620c8b1f3c 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -59,26 +59,10 @@ prompt-proto-service: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: lsst-devl # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: rubin # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: pp_apdb registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: lsstdb1 # TODO: 
remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: rubin # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index 134a32565e..95249f99fc 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -14,11 +14,8 @@ Event-driven processing of camera images |-----|------|---------|-------------| | additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | affinity | object | `{}` | | -| apdb.db | string | `""` | PostgreSQL database name for the APDB (deprecated for apdb.url) | -| apdb.ip | string | None, must be set | IP address or hostname and port of the APDB (deprecated for apdb.url) | | apdb.namespace | string | `""` | Database namespace for the APDB | | apdb.url | string | None, must be set | URL to the APDB, in any form recognized by SQLAlchemy | -| apdb.user | string | `""` | Database user for the APDB (deprecated for apdb.url) | | containerConcurrency | int | `1` | | | fullnameOverride | string | `"prompt-proto-service"` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | @@ -42,9 +39,6 @@ Event-driven processing of camera images | nodeSelector | object | `{}` | | | podAnnotations | object | See the `values.yaml` file. 
| Annotations for the prompt-proto-service pod | | registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. | -| registry.db | string | None, must be set | PostgreSQL database name for the Butler registry database (deprecated) | -| registry.ip | string | None, must be set | IP address or hostname and port of the Butler registry database (deprecated) | -| registry.user | string | None, must be set | Database user for the Butler registry database (deprecated) | | s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | s3.disableBucketValidation | string | `"0"` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | | s3.endpointUrl | string | None, must be set | S3 endpoint containing `imageBucket` | diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index 4b0a01bf7c..f3f8379241 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -59,36 +59,12 @@ spec: value: {{ .Values.instrument.calibRepo }} - name: LSST_DISABLE_BUCKET_VALIDATION value: {{ .Values.s3.disableBucketValidation | quote }} - - name: IP_APDB # TODO: remove on DM-40839 - # Need explicit port for make_pgpass.py - value: {{ .Values.apdb.ip }} - - name: DB_APDB # TODO: remove on DM-40839 - value: {{ .Values.apdb.db }} - - name: USER_APDB # TODO: remove on DM-40839 - value: {{ .Values.apdb.user }} - name: URL_APDB value: {{ .Values.apdb.url }} - name: NAMESPACE_APDB value: {{ .Values.apdb.namespace }} - - name: IP_REGISTRY # TODO: remove on DM-40839 - # Need explicit port for make_pgpass.py - value: {{ .Values.registry.ip }} - - name: 
DB_REGISTRY # TODO: remove on DM-40839 - value: {{ .Values.registry.db }} - - name: USER_REGISTRY # TODO: remove on DM-40839 - value: {{ .Values.registry.user }} - name: KAFKA_CLUSTER value: {{ .Values.imageNotifications.kafkaClusterAddress }} - - name: PSQL_REGISTRY_PASS # TODO: remove on DM-40839 - valueFrom: - secretKeyRef: - name: {{ template "prompt-proto-service.fullname" . }}-secret - key: registry_password - - name: PSQL_APDB_PASS # TODO: remove on DM-40839 - valueFrom: - secretKeyRef: - name: {{ template "prompt-proto-service.fullname" . }}-secret - key: apdb_password - name: S3_ENDPOINT_URL value: {{ .Values.s3.endpointUrl }} {{- if .Values.s3.auth_env }} diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 3d795d7148..4608ff1afb 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -61,26 +61,10 @@ apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy # @default -- None, must be set url: "" - # -- IP address or hostname and port of the APDB (deprecated for apdb.url) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the APDB (deprecated for apdb.url) - db: "" # TODO: remove on DM-40839 - # -- Database user for the APDB (deprecated for apdb.url) - user: "" # TODO: remove on DM-40839 # -- Database namespace for the APDB namespace: "" registry: - # -- IP address or hostname and port of the Butler registry database (deprecated) - # @default -- None, must be set - ip: "" # TODO: remove on DM-40839 - # -- PostgreSQL database name for the Butler registry database (deprecated) - # @default -- None, must be set - db: "" # TODO: remove on DM-40839 - # -- Database user for the Butler registry database (deprecated) - # @default -- None, must be set - user: "" # TODO: remove on DM-40839 # -- If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler 
configuration, and `instrument.calibRepo` is the local path where this file is mounted. centralRepoFile: false From 8505799bb1954cae3585c0275ac374d9d9a3cfd1 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Fri, 8 Dec 2023 11:07:58 -0800 Subject: [PATCH 341/588] Reduce default image timeout to 20 seconds. This is the high end of the timeout recommended by Kian-Tat Lim on DM-38594. In the future, we may want to select different values for -dev (USDF transfer) and -prod (Summit transfer) environments. --- applications/prompt-proto-service-hsc/README.md | 2 +- applications/prompt-proto-service-hsc/values.yaml | 2 +- applications/prompt-proto-service-latiss/README.md | 2 +- applications/prompt-proto-service-latiss/values.yaml | 2 +- applications/prompt-proto-service-lsstcam/README.md | 2 +- applications/prompt-proto-service-lsstcam/values.yaml | 2 +- applications/prompt-proto-service-lsstcomcam/README.md | 2 +- applications/prompt-proto-service-lsstcomcam/values.yaml | 2 +- charts/prompt-proto-service/README.md | 2 +- charts/prompt-proto-service/values.yaml | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index 777795d6bb..145a6d9cc3 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -18,7 +18,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. 
| -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index ed03e7407b..8bc1a6f9e6 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -53,7 +53,7 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). - imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 1c09a6b8f3..82a0056ddc 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -18,7 +18,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 3fdb3f7b20..a62ed6ce5b 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -53,7 +53,7 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). 
- imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index 98e9d304c8..ca3bd5b987 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -18,7 +18,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). | | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. 
| diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index af00f570e0..9e41e9a8b4 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -53,7 +53,7 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). - imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index a7e1a0fbfb..7b067af8a9 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -17,7 +17,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | prompt-proto-service.image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| prompt-proto-service.imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| prompt-proto-service.imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). 
| | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index 620c8b1f3c..6fa3d7c6ba 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -53,7 +53,7 @@ prompt-proto-service: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). - imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index 95249f99fc..8730d06c0e 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -21,7 +21,7 @@ Event-driven processing of camera images | image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | | image.tag | string | `"latest"` | Overrides the image tag whose default is the chart appVersion. | -| imageNotifications.imageTimeout | string | `"120"` | Timeout to wait after expected script completion for raw image arrival (seconds). | +| imageNotifications.imageTimeout | string | `"20"` | Timeout to wait after expected script completion for raw image arrival (seconds). 
| | imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | imagePullSecrets | list | `[]` | | diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 4608ff1afb..96fc9cf489 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -55,7 +55,7 @@ imageNotifications: # @default -- None, must be set topic: "" # -- Timeout to wait after expected script completion for raw image arrival (seconds). - imageTimeout: '120' + imageTimeout: '20' apdb: # -- URL to the APDB, in any form recognized by SQLAlchemy From d33b0ba075a390afb623c4144a1cd502da807565 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 8 Dec 2023 11:54:10 -0800 Subject: [PATCH 342/588] Update Gafaelafwr to 9.6.1 Includes a fix for recovery after the Redis server was restarted. 
--- applications/gafaelfawr/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index fe2674600c..b501a8635a 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,7 +5,7 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.6.0 +appVersion: 9.6.1 dependencies: - name: redis From b6707e14904c3855676e909444fbf9f90a78ef61 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 08:06:10 +0000 Subject: [PATCH 343/588] Update Helm release argo-workflows to v0.39.8 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 9f205f22ff..efde61b26f 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.39.5 + version: 0.39.8 repository: https://argoproj.github.io/argo-helm From f7850fe83ab13ffc1f79d3e1aff037d438cfeaa4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 12:20:40 +0000 Subject: [PATCH 344/588] Update gcr.io/cloudsql-docker/gce-proxy Docker tag to v1.33.15 --- applications/gafaelfawr/values.yaml | 2 +- applications/nublado/values.yaml | 2 +- applications/sqlproxy-cross-project/values.yaml | 2 +- applications/times-square/values.yaml | 2 +- applications/vo-cutouts/values.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index fba8421537..5dfa393634 100644 --- a/applications/gafaelfawr/values.yaml +++ 
b/applications/gafaelfawr/values.yaml @@ -316,7 +316,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.14" + tag: "1.33.15" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index bbeab11844..15aeffd678 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -469,7 +469,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.14" + tag: "1.33.15" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index f23980b684..ab096071df 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.33.14" + tag: "1.33.15" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 3e91e29499..fa3f53e24a 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -126,7 +126,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.14" + tag: "1.33.15" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index 658b5f08b8..4b2c230498 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.14" + tag: "1.33.15" # -- Pull policy for Cloud SQL Auth Proxy images 
pullPolicy: "IfNotPresent" From 76665350821630bd7566471f7519e4f12cb9bd4f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 12:20:43 +0000 Subject: [PATCH 345/588] Update lsstsqre/tap-schema-mock Docker tag to v2.1.6 --- charts/cadc-tap/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 2b26702bb8..c3b672af67 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -165,7 +165,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.1.4" + tag: "2.1.6" # -- Resource limits and requests for the TAP schema database pod resources: {} From 7a31ef46d5877044816c3f296dd9f83965980d8e Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 12:32:31 +0000 Subject: [PATCH 346/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 72 +++++++++++++++++++++---------------------- requirements/main.txt | 12 ++++---- 2 files changed, 42 insertions(+), 42 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index d690bd30e1..df37dddbf9 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -207,9 +207,9 @@ distlib==0.3.7 \ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -documenteer[guide]==1.0.0a15 \ - --hash=sha256:9590ba12c6aca7f76faef5605070059113b1d0a801875f42e69444848c3746ec \ - --hash=sha256:caa258ec5f5f68dca976e56098fa0d8a15974566ecca1df6962419ea27063c27 +documenteer[guide]==1.0.0a16 \ + --hash=sha256:188d3d48394f30e204526324cbd62d0fa930465f91d4519cca0e97aa57afe64c \ + --hash=sha256:2c4a588a0647955e0c711c85b871bd4a279126940eb4d00a944c4a37deef57c9 # via # -r requirements/dev.in # documenteer @@ -246,9 
+246,9 @@ graphviz==0.20.1 \ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8 # via diagrams -identify==2.5.32 \ - --hash=sha256:0b7656ef6cba81664b783352c73f8c24b39cf82f926f78f4550eda928e5e0545 \ - --hash=sha256:5d9979348ec1a21c768ae07e0a652924538e8bce67313a73cb0f681cf08ba407 +identify==2.5.33 \ + --hash=sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d \ + --hash=sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34 # via pre-commit idna==3.6 \ --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ @@ -418,17 +418,17 @@ packaging==23.2 \ # pydata-sphinx-theme # pytest # sphinx -platformdirs==4.0.0 \ - --hash=sha256:118c954d7e949b35437270383a3f2531e99dd93cf7ce4dc8340d3356d30f173b \ - --hash=sha256:cb633b2bcf10c51af60beb0ab06d2f1d69064b43abf4c185ca6b28865f3f9731 +platformdirs==4.1.0 \ + --hash=sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380 \ + --hash=sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420 # via virtualenv pluggy==1.3.0 \ --hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \ --hash=sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7 # via pytest -pre-commit==3.5.0 \ - --hash=sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32 \ - --hash=sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660 +pre-commit==3.6.0 \ + --hash=sha256:c255039ef399049a5544b6ce13d135caba8f2c28c3b4033277a788f434308376 \ + --hash=sha256:d30bad9abf165f7785c15a21a1f46da7d0677cb00ee7ff4c579fd38922efe15d # via -r requirements/dev.in pybtex==0.24.0 \ --hash=sha256:818eae35b61733e5c007c3fcd2cfb75ed1bc8b4173c1f70b56cc4c0802d34755 \ @@ -650,9 +650,9 @@ pyyaml==6.0.1 \ # pre-commit # pybtex # sphinxcontrib-redoc -referencing==0.31.1 \ - 
--hash=sha256:81a1471c68c9d5e3831c30ad1dd9815c45b558e596653db751a2bfdd17b3b9ec \ - --hash=sha256:c19c4d006f1757e3dd75c4f784d38f8698d87b649c54f9ace14e5e8c9667c01d +referencing==0.32.0 \ + --hash=sha256:689e64fe121843dcfd57b71933318ef1f91188ffb45367332700a86ac8fd6161 \ + --hash=sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99 # via # jsonschema # jsonschema-specifications @@ -770,24 +770,24 @@ rpds-py==0.13.2 \ # via # jsonschema # referencing -ruff==0.1.6 \ - --hash=sha256:03910e81df0d8db0e30050725a5802441c2022ea3ae4fe0609b76081731accbc \ - --hash=sha256:05991ee20d4ac4bb78385360c684e4b417edd971030ab12a4fbd075ff535050e \ - --hash=sha256:137852105586dcbf80c1717facb6781555c4e99f520c9c827bd414fac67ddfb6 \ - --hash=sha256:1610e14750826dfc207ccbcdd7331b6bd285607d4181df9c1c6ae26646d6848a \ - --hash=sha256:1b09f29b16c6ead5ea6b097ef2764b42372aebe363722f1605ecbcd2b9207184 \ - --hash=sha256:1cf5f701062e294f2167e66d11b092bba7af6a057668ed618a9253e1e90cfd76 \ - --hash=sha256:3a0cd909d25f227ac5c36d4e7e681577275fb74ba3b11d288aff7ec47e3ae745 \ - --hash=sha256:4558b3e178145491e9bc3b2ee3c4b42f19d19384eaa5c59d10acf6e8f8b57e33 \ - --hash=sha256:491262006e92f825b145cd1e52948073c56560243b55fb3b4ecb142f6f0e9543 \ - --hash=sha256:5c549ed437680b6105a1299d2cd30e4964211606eeb48a0ff7a93ef70b902248 \ - --hash=sha256:683aa5bdda5a48cb8266fcde8eea2a6af4e5700a392c56ea5fb5f0d4bfdc0240 \ - --hash=sha256:87455a0c1f739b3c069e2f4c43b66479a54dea0276dd5d4d67b091265f6fd1dc \ - --hash=sha256:88b8cdf6abf98130991cbc9f6438f35f6e8d41a02622cc5ee130a02a0ed28703 \ - --hash=sha256:bd98138a98d48a1c36c394fd6b84cd943ac92a08278aa8ac8c0fdefcf7138f35 \ - --hash=sha256:e8fd1c62a47aa88a02707b5dd20c5ff20d035d634aa74826b42a1da77861b5ff \ - --hash=sha256:ea284789861b8b5ca9d5443591a92a397ac183d4351882ab52f6296b4fdd5462 \ - --hash=sha256:fd89b45d374935829134a082617954120d7a1470a9f0ec0e7f3ead983edc48cc +ruff==0.1.7 \ + --hash=sha256:0683b7bfbb95e6df3c7c04fe9d78f631f8e8ba4868dfc932d43d690698057e2e \ + 
--hash=sha256:1ea109bdb23c2a4413f397ebd8ac32cb498bee234d4191ae1a310af760e5d287 \ + --hash=sha256:276a89bcb149b3d8c1b11d91aa81898fe698900ed553a08129b38d9d6570e717 \ + --hash=sha256:290ecab680dce94affebefe0bbca2322a6277e83d4f29234627e0f8f6b4fa9ce \ + --hash=sha256:416dfd0bd45d1a2baa3b1b07b1b9758e7d993c256d3e51dc6e03a5e7901c7d80 \ + --hash=sha256:45b38c3f8788a65e6a2cab02e0f7adfa88872696839d9882c13b7e2f35d64c5f \ + --hash=sha256:4af95fd1d3b001fc41325064336db36e3d27d2004cdb6d21fd617d45a172dd96 \ + --hash=sha256:69a4bed13bc1d5dabf3902522b5a2aadfebe28226c6269694283c3b0cecb45fd \ + --hash=sha256:6b05e3b123f93bb4146a761b7a7d57af8cb7384ccb2502d29d736eaade0db519 \ + --hash=sha256:6c64cb67b2025b1ac6d58e5ffca8f7b3f7fd921f35e78198411237e4f0db8e73 \ + --hash=sha256:7f80496854fdc65b6659c271d2c26e90d4d401e6a4a31908e7e334fab4645aac \ + --hash=sha256:8b0c2de9dd9daf5e07624c24add25c3a490dbf74b0e9bca4145c632457b3b42a \ + --hash=sha256:90c958fe950735041f1c80d21b42184f1072cc3975d05e736e8d66fc377119ea \ + --hash=sha256:9dcc6bb2f4df59cb5b4b40ff14be7d57012179d69c6565c1da0d1f013d29951b \ + --hash=sha256:de02ca331f2143195a712983a57137c5ec0f10acc4aa81f7c1f86519e52b92a1 \ + --hash=sha256:df2bb4bb6bbe921f6b4f5b6fdd8d8468c940731cb9406f274ae8c5ed7a78c478 \ + --hash=sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306 # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -964,9 +964,9 @@ types-pyyaml==6.0.12.12 \ --hash=sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062 \ --hash=sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24 # via -r requirements/dev.in -typing-extensions==4.8.0 \ - --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ - --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef +typing-extensions==4.9.0 \ + --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \ + 
--hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd # via # -c requirements/main.txt # mypy diff --git a/requirements/main.txt b/requirements/main.txt index cc2a1a8bb8..4f3b9ec47d 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -522,9 +522,9 @@ rfc3986[idna2008]==1.5.0 \ # via # httpx # rfc3986 -safir==5.0.0a5 \ - --hash=sha256:6a38dbdcfc63ea0261d25cefde0defc9f445a7da2a2612cd864d22bf1f292180 \ - --hash=sha256:73348465c732fb89ddbd3b73cb8dcaa1294611c49d5db225e1d2a8205558f29b +safir==5.1.0 \ + --hash=sha256:0e4162b3b1fca558b037c06d7221b96996d7a55c92108e2e28e744d224c0076d \ + --hash=sha256:e04019e7e914aefc5ce1a9ca73c227eb3a84255d952bcd4cf3746e11cf7b1a15 # via -r requirements/main.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -551,9 +551,9 @@ structlog==23.2.0 \ --hash=sha256:16a167e87b9fa7fae9a972d5d12805ef90e04857a93eba479d4be3801a6a1482 \ --hash=sha256:334666b94707f89dbc4c81a22a8ccd34449f0201d5b1ee097a030b577fa8c858 # via safir -typing-extensions==4.8.0 \ - --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ - --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef +typing-extensions==4.9.0 \ + --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \ + --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd # via # fastapi # pydantic From ef4dd7516ad055f931184d32d7d551e0dff73974 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 11 Dec 2023 08:22:42 -0800 Subject: [PATCH 347/588] Update Helm docs --- applications/gafaelfawr/README.md | 2 +- applications/nublado/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 
ed3427506c..f3b1f9ed93 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,7 +17,7 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 9763338186..2056529e1a 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -16,7 +16,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a separate service, because shoehorning it into Zero to Jupyterhub's extraContainers looks messy, and it's not necessary that it be very performant. 
| | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 04ba518112..5d53e76e72 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.33.14"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.33.15"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 9b8c7af307..7d0e998cb5 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index 8926e9ef7a..6a3d5959c6 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and 
ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | From fe2b70d35a9432b0a8cce28e8496374726456358 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 11 Dec 2023 08:23:23 -0800 Subject: [PATCH 348/588] Update Helm docs --- charts/cadc-tap/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index c787651f79..284dcd6975 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -56,7 +56,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | -| tapSchema.image.tag | string | `"2.1.4"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"2.1.6"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | From 4aff295d296ae6952757c6403b5bda887e513e20 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Dec 2023 10:55:46 -0700 Subject: [PATCH 349/588] Summit: Update nublado to Cycle 34. 
--- applications/nublado/values-summit.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 1bfd84b15b..3c2f173345 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -8,8 +8,8 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 33 - recommended_tag: "recommended_c0033" + cycle: 34 + recommended_tag: "recommended_c0034" lab: pullSecret: "pull-secret" extraAnnotations: From 2f7d9cad8a14a13fc348c5524a298c2c6052f62f Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Dec 2023 10:56:23 -0700 Subject: [PATCH 350/588] Summit: Update cachemachine to Cycle 34. --- applications/cachemachine/values-summit.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/cachemachine/values-summit.yaml b/applications/cachemachine/values-summit.yaml index 703f6e6a50..30816c3254 100644 --- a/applications/cachemachine/values-summit.yaml +++ b/applications/cachemachine/values-summit.yaml @@ -8,11 +8,11 @@ autostart: "type": "RubinRepoMan", "registry_url": "ts-dockerhub.lsst.org", "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0033", + "recommended_tag": "recommended_c0034", "num_releases": 0, "num_weeklies": 3, "num_dailies": 2, - "cycle": 33, + "cycle": 34, "alias_tags": [ "latest", "latest_daily", From 5c2e7ea742214b30e302483b790ff8fb88e7426d Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 12 Dec 2023 15:10:43 -0700 Subject: [PATCH 351/588] [DM-41951] Try to fix memory leaks in QServ TAP service --- charts/cadc-tap/templates/tap-deployment.yaml | 2 +- charts/cadc-tap/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/cadc-tap/templates/tap-deployment.yaml b/charts/cadc-tap/templates/tap-deployment.yaml index 077ff6aa88..960fdad306 100644 --- a/charts/cadc-tap/templates/tap-deployment.yaml +++ 
b/charts/cadc-tap/templates/tap-deployment.yaml @@ -55,7 +55,7 @@ spec: -Dqservuser.username=qsmaster -Dqservuser.password= -Dqservuser.driverClassName=com.mysql.cj.jdbc.Driver - -Dqservuser.url=jdbc:mysql://{{ .Values.config.qserv.host }}/ + -Dqservuser.url=jdbc:mysql://{{ .Values.config.qserv.host }}/?useCursorFetch=true -Dqservuser.maxActive=100 -Dca.nrc.cadc.auth.Authenticator=org.opencadc.tap.impl.AuthenticatorImpl {{- end }} diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index c3b672af67..93c4da7d3a 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -93,7 +93,7 @@ config: # -- Tag of tap image to use # @default -- Latest release - tag: "2.1.0" + tag: "2.1.1" # -- Address to a MySQL database containing TAP schema data tapSchemaAddress: "cadc-tap-schema-db:3306" From 9b3f2a78d9821a517846e4e62b58b0fbc3dca58d Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 12 Dec 2023 15:24:59 -0700 Subject: [PATCH 352/588] [DM-41951] Use cursor for tap-postgres (1.14.1) --- charts/cadc-tap/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 93c4da7d3a..ff4d468d23 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -77,7 +77,7 @@ config: # -- Tag of tap image to use # @default -- Latest release - tag: "1.14.0" + tag: "1.14.1" qserv: # -- QServ hostname:port to connect to From ef4e9bad846826bdca00dfa0fe77984ef03dc006 Mon Sep 17 00:00:00 2001 From: Krzysztof Findeisen Date: Wed, 13 Dec 2023 11:07:02 -0800 Subject: [PATCH 353/588] Give prompt-proto-service-latiss a long timeout in production. There can be a significant delay between when a LATISS visit is scheduled and when it is taken, let alone when the image arrives at USDF. The issue is expected to be specific to LATISS. 
--- .../values-usdfprod-prompt-processing.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 393960ec60..90882f9985 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -34,6 +34,10 @@ prompt-proto-service: imageNotifications: kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing-prod + # Scheduler adds an extra 60-80-second delay for first visit in a sequence, + # and files can take up to 20 seconds to arrive. Scheduler delay associated + # with CWFS engineering data, should not apply to other cameras. + imageTimeout: '110' apdb: url: postgresql://rubin@usdf-prompt-processing.slac.stanford.edu:5432/lsst-devl From 83dc8f664ebbef29d3565c48aef900f7de02c5dd Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 13 Dec 2023 12:52:51 -0800 Subject: [PATCH 354/588] Re-add mobu tests against latest weekly Test latest-weekly against system-test on IDF int, and test the tutorial notebooks against latest-weekly on IDF prod. Do the latter on prod because there is more data there. 
--- applications/mobu/values-idfint.yaml | 16 ++++++++++++++++ applications/mobu/values-idfprod.yaml | 20 ++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 42f01fe92e..09a3c64b8e 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -16,6 +16,22 @@ config: repo_branch: "prod" use_cachemachine: false restart: true + - name: "weekly" + count: 1 + users: + - username: "bot-mobu-weekly" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/lsst-sqre/system-test.git" + repo_branch: "prod" + use_cachemachine: false + restart: true - name: "tutorial" count: 1 users: diff --git a/applications/mobu/values-idfprod.yaml b/applications/mobu/values-idfprod.yaml index 0f6813639a..218c2021a8 100644 --- a/applications/mobu/values-idfprod.yaml +++ b/applications/mobu/values-idfprod.yaml @@ -53,6 +53,26 @@ config: working_directory: "notebooks/tutorial-notebooks" use_cachemachine: false restart: true + - name: "tutorial-weekly" + count: 1 + users: + - username: "bot-mobu-tutorial-weekly" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + image: + image_class: "latest-weekly" + repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" + repo_branch: "prod" + max_executions: 1 + working_directory: "notebooks/tutorial-notebooks" + use_cachemachine: false + restart: true - name: "tap" count: 1 users: From f2cb1e739ba04c053ca1e8dc3964d60bc0b6acec Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 8 Dec 2023 14:31:41 -0800 Subject: [PATCH 355/588] Add GCP information to documentation Add a way to fill out GCP hosting information for environments running on GKE, and expose that information in the generated environments documentation along with links to the 
Google Console and Log Explorer. --- docs/environments/_summary.rst.jinja | 22 ++++++++-- docs/extras/schemas/environment.json | 40 +++++++++++++++++++ environments/values-idfdev.yaml | 10 +++-- environments/values-idfint.yaml | 10 +++-- environments/values-idfprod.yaml | 10 +++-- environments/values-roundtable-dev.yaml | 11 +++-- environments/values-roundtable-prod.yaml | 10 +++-- src/phalanx/models/environments.py | 40 +++++++++++++++++++ .../input/environments/values-idfdev.yaml | 4 ++ tests/docs/jinja_test.py | 4 ++ 10 files changed, 141 insertions(+), 20 deletions(-) diff --git a/docs/environments/_summary.rst.jinja b/docs/environments/_summary.rst.jinja index 7f192f5df6..ecca4a2cc9 100644 --- a/docs/environments/_summary.rst.jinja +++ b/docs/environments/_summary.rst.jinja @@ -4,8 +4,24 @@ - ``{{ env.name }}`` * - Root domain - `{{ env.fqdn }} `__ + * - Identity provider + - {{ env.identity_provider.value }} * - Argo CD - {% if env.argocd_url %}{{ env.argocd_url }}{% else %}N/A{% endif %} + {%- if env.gcp %} + * - Google console + - - `Log Explorer `__ + - `Google Kubernetes Engine `__ + * - Google Cloud Platform + - .. list-table:: + + * - Project ID + - {{ env.gcp.project_id }} + * - Region + - {{ env.gcp.region }} + * - Cluster name + - {{ env.gcp.cluster_name }} + {%- endif %} * - Applications - .. list-table:: @@ -25,9 +41,7 @@ - {%- endif %} {% endfor %} - * - Identity provider - - {{ env.identity_provider.value }} - {% if env.gafaelfawr_scopes %} + {%- if env.gafaelfawr_scopes %} * - Gafaelfawr groups - .. list-table:: @@ -43,7 +57,7 @@ {%- endif %} {%- endfor %} {%- endif %} - {% if env.argocd_rbac %} + {%- if env.argocd_rbac %} * - Argo CD RBAC - .. 
csv-table:: {% for line in env.argocd_rbac_csv %} diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index bf0a22509e..f31d0a264b 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -1,5 +1,32 @@ { "$defs": { + "GCPMetadata": { + "description": "Google Cloud Platform hosting metadata.\n\nHolds information about where in Google Cloud Platform this Phalanx\nenvironment is hosted. This supports generating documentation that\nincludes this metadata, making it easier for administrators to know what\noptions to pass to :command:`gcloud` to do things such as get Kubernetes\ncredentials.", + "properties": { + "projectId": { + "description": "Project ID of GCP project hosting this environment", + "title": "GCP project ID", + "type": "string" + }, + "region": { + "description": "GCP region in which this environment is hosted", + "title": "GCP region", + "type": "string" + }, + "clusterName": { + "description": "Name of the GKE cluster hosting this environment", + "title": "Kubernetes cluster name", + "type": "string" + } + }, + "required": [ + "projectId", + "region", + "clusterName" + ], + "title": "GCPMetadata", + "type": "object" + }, "OnepasswordConfig": { "description": "Configuration for 1Password static secrets source.", "properties": { @@ -51,6 +78,19 @@ "description": "URL to Butler repository index", "title": "Butler repository index URL" }, + "gcp": { + "anyOf": [ + { + "$ref": "#/$defs/GCPMetadata" + }, + { + "type": "null" + } + ], + "default": null, + "description": "If this environment is hosted on Google Cloud Platform, metadata about the hosting project, location, and other details. 
Used to generate additional environment documentation.", + "title": "GCP hosting metadata" + }, "onepassword": { "anyOf": [ { diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index 97eb0ca6f0..801bc0196a 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -1,10 +1,14 @@ +name: "idfdev" +fqdn: "data-dev.lsst.cloud" butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" -fqdn: data-dev.lsst.cloud -name: idfdev +gcp: + projectId: "science-platform-dev-7696" + region: "us-central1" + clusterName: "science-platform-dev" onepassword: connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP data-dev.lsst.cloud" -vaultPathPrefix: secret/phalanx/idfdev +vaultPathPrefix: "secret/phalanx/idfdev" applications: argo-workflows: true diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 6f0df726ce..71d63833a8 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -1,10 +1,14 @@ +name: "idfint" +fqdn: "data-int.lsst.cloud" butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" -fqdn: data-int.lsst.cloud -name: idfint +gcp: + projectId: "science-platform-int-dc5d" + region: "us-central1" + clusterName: "science-platform-int" onepassword: connectUrl: "https://roundtable.lsst.cloud/1password" vaultTitle: "RSP data-int.lsst.cloud" -vaultPathPrefix: secret/phalanx/idfint +vaultPathPrefix: "secret/phalanx/idfint" applications: alert-stream-broker: true diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 2ff2dfe78a..f8f78da0f7 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -1,10 +1,14 @@ +name: "idfprod" +fqdn: "data.lsst.cloud" butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-repos.yaml" -fqdn: data.lsst.cloud -name: idfprod +gcp: + projectId: "science-platform-stable-6994" + region: 
"us-central1" + clusterName: "science-platform-stable" onepassword: connectUrl: "https://roundtable.lsst.cloud/1password" vaultTitle: "RSP data.lsst.cloud" -vaultPathPrefix: secret/phalanx/idfprod +vaultPathPrefix: "secret/phalanx/idfprod" applications: datalinker: true diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 514fe3f0a5..48709c90f2 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -1,10 +1,13 @@ -name: roundtable-dev -fqdn: roundtable-dev.lsst.cloud +name: "roundtable-dev" +fqdn: "roundtable-dev.lsst.cloud" +gcp: + projectId: "roundtable-dev-abe2" + region: "us-central1" + clusterName: "roundtable-dev" onepassword: connectUrl: "https://roundtable-dev.lsst.cloud/1password" vaultTitle: "RSP roundtable-dev.lsst.cloud" -vaultUrl: "https://vault.lsst.codes" -vaultPathPrefix: secret/phalanx/roundtable-dev +vaultPathPrefix: "secret/phalanx/roundtable-dev" applications: giftless: true diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index e0745ae962..e8af504612 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -1,9 +1,13 @@ -name: roundtable-prod -fqdn: roundtable.lsst.cloud +name: "roundtable-prod" +fqdn: "roundtable.lsst.cloud" +gcp: + projectId: "roundtable-prod-f6fd" + region: "us-central1" + clusterName: "roundtable-prod" onepassword: connectUrl: "https://roundtable.lsst.cloud/1password" vaultTitle: "RSP roundtable.lsst.cloud" -vaultPathPrefix: secret/phalanx/roundtable-prod +vaultPathPrefix: "secret/phalanx/roundtable-prod" applications: giftless: true diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index cb0180af15..db384cd8a0 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -24,6 +24,7 @@ "EnvironmentBaseConfig", "EnvironmentConfig", "EnvironmentDetails", + "GCPMetadata", 
"GafaelfawrGitHubGroup", "GafaelfawrGitHubTeam", "GafaelfawrScope", @@ -33,6 +34,35 @@ ] +class GCPMetadata(CamelCaseModel): + """Google Cloud Platform hosting metadata. + + Holds information about where in Google Cloud Platform this Phalanx + environment is hosted. This supports generating documentation that + includes this metadata, making it easier for administrators to know what + options to pass to :command:`gcloud` to do things such as get Kubernetes + credentials. + """ + + project_id: str = Field( + ..., + title="GCP project ID", + description="Project ID of GCP project hosting this environment", + ) + + region: str = Field( + ..., + title="GCP region", + description="GCP region in which this environment is hosted", + ) + + cluster_name: str = Field( + ..., + title="Kubernetes cluster name", + description="Name of the GKE cluster hosting this environment", + ) + + class OnepasswordConfig(CamelCaseModel): """Configuration for 1Password static secrets source.""" @@ -70,6 +100,16 @@ class EnvironmentBaseConfig(CamelCaseModel): description="URL to Butler repository index", ) + gcp: GCPMetadata | None = Field( + None, + title="GCP hosting metadata", + description=( + "If this environment is hosted on Google Cloud Platform," + " metadata about the hosting project, location, and other details." + " Used to generate additional environment documentation." 
+ ), + ) + onepassword: OnepasswordConfig | None = Field( None, title="1Password configuration", diff --git a/tests/data/input/environments/values-idfdev.yaml b/tests/data/input/environments/values-idfdev.yaml index 3abdd21a6f..e47fe975f8 100644 --- a/tests/data/input/environments/values-idfdev.yaml +++ b/tests/data/input/environments/values-idfdev.yaml @@ -1,5 +1,9 @@ name: idfdev fqdn: data-dev.lsst.cloud +gcp: + projectId: science-platform-dev-7696 + region: us-central1 + clusterName: science-platform vaultUrl: https://vault.lsst.codes/ vaultPathPrefix: secret/phalanx/idfdev diff --git a/tests/docs/jinja_test.py b/tests/docs/jinja_test.py index 040b3ae244..2fbb94a194 100644 --- a/tests/docs/jinja_test.py +++ b/tests/docs/jinja_test.py @@ -44,10 +44,14 @@ def test_build_jinja_contexts(factory: Factory) -> None: assert idfdev.fqdn == "data-dev.lsst.cloud" assert idfdev.argocd_url == "https://data-dev.lsst.cloud/argo-cd" assert idfdev.identity_provider == IdentityProvider.CILOGON + assert idfdev.gcp.project_id == "science-platform-dev-7696" + assert idfdev.gcp.region == "us-central1" + assert idfdev.gcp.cluster_name == "science-platform" assert minikube.name == "minikube" assert minikube.fqdn == "minikube.lsst.cloud" assert minikube.argocd_url is None assert minikube.identity_provider == IdentityProvider.GITHUB + assert minikube.gcp is None # Check some of the more complex data. expected = read_output_data("idfdev", "argocd-rbac-rst") From 30a02a5bb4493cf1d5fc2d3e4cb9e99c98532a13 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 8 Dec 2023 14:37:45 -0800 Subject: [PATCH 356/588] Fix typo in GitHub Actions docs rule Documentation builds for PRs that changed files under docs were not being uploaded due to a typo in referencing the output from the earlier filter stage. Fix it. 
--- .github/workflows/docs.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index e62632e811..15f430a5c6 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -69,4 +69,4 @@ jobs: if: >- (github.event_name == 'push' && github.head_ref == 'main' && steps.filter.outputs.docs == 'true') || (github.event_name == 'workflow_dispatch' && steps.filter.outputs.docs == 'true') - || (github.event_name == 'pull_request' && startsWith(github.head_ref, 'tickets/') && steps.filter.outputs.docSpecific == 'true') + || (github.event_name == 'pull_request' && startsWith(github.head_ref, 'tickets/') && steps.filter.outputs.docsSpecific == 'true') From 728b3447eb5afd2ca3cc2f97331c94ca1e0c28ff Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 8 Dec 2023 16:52:35 -0800 Subject: [PATCH 357/588] Add some basic Google infrastructure documentation Document how to get Kubernetes credentials for a cluster hosted on GKE, and add a stub page about using Terraform. For now, the latter contains only a link to the GitHub repository. 
--- docs/admin/index.rst | 1 + .../infrastructure/google/credentials.rst | 42 +++++++++++++++++++ docs/admin/infrastructure/google/index.rst | 13 ++++++ .../admin/infrastructure/google/terraform.rst | 9 ++++ 4 files changed, 65 insertions(+) create mode 100644 docs/admin/infrastructure/google/credentials.rst create mode 100644 docs/admin/infrastructure/google/index.rst create mode 100644 docs/admin/infrastructure/google/terraform.rst diff --git a/docs/admin/index.rst b/docs/admin/index.rst index d107bd9114..c5f6ae4d70 100644 --- a/docs/admin/index.rst +++ b/docs/admin/index.rst @@ -36,6 +36,7 @@ Administrators operate infrastructure, manage secrets, and are involved in the d :caption: Infrastructure :maxdepth: 2 + infrastructure/google/index infrastructure/filestore/index infrastructure/kubernetes-node-status-max-images diff --git a/docs/admin/infrastructure/google/credentials.rst b/docs/admin/infrastructure/google/credentials.rst new file mode 100644 index 0000000000..c879202fc5 --- /dev/null +++ b/docs/admin/infrastructure/google/credentials.rst @@ -0,0 +1,42 @@ +################################## +Getting GKE Kubernetes credentials +################################## + +In order to use the standard Kubernetes administrative command :command:`kubectl` or other commands built on the same protocol (such as Helm_ or the Phalanx installer), you must first have stored authentication credentials for the target Kubernetes cluster. +Google provides a mechanism to obtain those credentials using the :command:`gcloud` command. +Here are the steps: + +#. Ensure you have a Google account with access to the Google Cloud Platform project where your target Kubernetes cluster is running. + For Phalanx environments run by SQuaRE, this access must be via an ``lsst.cloud`` Google account that is used only for Rubin activities. 
+ If you do not already have such an account or permissions and need administrative access to a Phalanx environment maintained by SQuaRE, contact SQuaRE for access. + +#. `Install gcloud `__ on the system on which you want to run privileged Kubernetes commands. + +#. `Initialize gcloud `__. + You will need to have access to the Google Cloud Platform project where your target Kubernetes cluster is running. + + If you have access to multiple Google Cloud Platform projects, you will be asked to select one as your default project. + You may wish to choose the project for the Phalanx environment you use most often. + You can find the project ID of a Phalanx project hosted on GKE in its :doc:`environments page `. + +#. `Install kubectl and the GKE auth plugin `__. + As part of that installation, you will run the :command:`gcloud` command that obtains credentials usable by :command:`kubectl` and other privileged Kubernetes commands. + See below for assistance with the precise :command:`gcloud` command to run. + +You will only have to follow this process once on each machine from which you want to use Kubernetes. + +The final step has an example :command:`gcloud` command, but it assumes that you are getting credentials for your default project. +Rubin uses multiple Google Cloud Platform projects for different environments, so you may have to provide the project ID as well. +Here is the full command to run: + +.. prompt:: bash + + gcloud container clusters get-credentials --project --region + +You can get the cluster name, project ID, and region of a Phalanx environment hosted on Google Cloud Platform from its :doc:`environments page `. + +.. note:: + + If the control plane credentials of the Kubernetes cluster are rotated, you will have to re-run the above command to refresh your credentials. + The Kubernetes control plane credentials eventually expire and have to be rotated. 
+ If you discover that your credentials are no longer working, try running the above command again to refresh your credentials and see if the problem persists. diff --git a/docs/admin/infrastructure/google/index.rst b/docs/admin/infrastructure/google/index.rst new file mode 100644 index 0000000000..8555e0e5b5 --- /dev/null +++ b/docs/admin/infrastructure/google/index.rst @@ -0,0 +1,13 @@ +############################## +Using Google Kubernetes Engine +############################## + +Google Kubernetes Engine (GKE) is the Google Cloud Platform (GCP) implementation of Kubernetes. +It is an excellent hosting platform for Phalanx environments. + +This page collects advice and supplemental documentation for Phalanx administrators of environments hosted on GKE. + +.. toctree:: + + credentials + terraform diff --git a/docs/admin/infrastructure/google/terraform.rst b/docs/admin/infrastructure/google/terraform.rst new file mode 100644 index 0000000000..71a68c9283 --- /dev/null +++ b/docs/admin/infrastructure/google/terraform.rst @@ -0,0 +1,9 @@ +##################################### +Managing GCP resources with Terraform +##################################### + +All SQuaRE-managed Google Cloud Platform projects use Terraform to manage all GCP resources outside of Kubernetes. +These include Cloud SQL databases used by Phalanx applications, Google Firestore for UID and GID assignment, service accounts used with workload identity for authenticated access to Google services, and so forth. + +The Terraform configuration for all SQuaRE-managed projects and most other Rubin Observatory GCP projects is maintained in https://github.com/lsst/idf_deploy. +Changes to this repository are automatically applied to the relevant Google Cloud Platform project when the pull request has been reviewed and merged. 
From 70ce2264d3c19a17b01ccca1a3051d173a78e8f9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 8 Dec 2023 16:56:24 -0800 Subject: [PATCH 358/588] Document gcp.* settings for new environments Tell people to set the gcp.* settings in the environment values file when creating a new environment hosted on GKE. --- docs/admin/installation.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/admin/installation.rst b/docs/admin/installation.rst index 4837535918..38e129333a 100644 --- a/docs/admin/installation.rst +++ b/docs/admin/installation.rst @@ -25,6 +25,7 @@ To create a new Phalanx environment, take the following steps: Edit it so that ``name``, ``fqdn``, ``vaultUrl``, and ``vaultPathPrefix`` at the top match your new environment. You may omit ``vaultUrl`` for SQuaRE-managed environments. See :doc:`secrets-setup` for more information about the latter two settings and additional settings you may need. + If the environment will be hosted on Google Kubernetes Engine, also fill out ``gcp.projectId``, ``gcp.region``, and ``gcp.clusterName`` with metadata about where the environment will be hosted. Enable the applications this environment should include. #. Decide on your approach to TLS certificates. From 46fc72ce9022c288dbba35f36d5b0957f79065f7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 8 Dec 2023 17:26:55 -0800 Subject: [PATCH 359/588] Use bare URLs for more GitHub repositories Bare URLs for GitHub repositories are automatically nicely formatted with a GitHub icon and the repository name. Use them in more places where the link text wasn't adding anything that the automated formatting would not have also added. 
--- docs/about/repository.rst | 6 +++--- docs/applications/livetap/index.rst | 4 ++-- docs/applications/nublado/updating-recommended.rst | 4 ++-- docs/applications/semaphore/index.rst | 2 +- docs/applications/ssotap/index.rst | 4 ++-- docs/applications/tap/index.rst | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/about/repository.rst b/docs/about/repository.rst index 5546459183..216795a1a6 100644 --- a/docs/about/repository.rst +++ b/docs/about/repository.rst @@ -2,7 +2,7 @@ Phalanx Git repository structure ################################ -Phalanx is an open source Git repository hosted on `GitHub `__. +Phalanx is an open source Git repository hosted at https://github.com/lsst-sqre/phalanx. This page provides an overview of this repository's structure, for both application developers and environment administrators alike. For background on Phalanx and its technologies, see :doc:`introduction` first. @@ -73,7 +73,7 @@ This directory contains Helm charts shared by multiple Phalanx applications that In some cases, several Phalanx applications should use common Helm templates to avoid duplication. The best way to do this within Helm is to use a subchart. -This can be done by publishing a separate Helm chart using the `charts repository `__, but publication as a Helm chart implies that the chart may be useful outside of Phalanx. +This can be done by publishing a separate Helm chart in https://github.com/lsst-sqre/charts, but publication as a Helm chart implies that the chart may be useful outside of Phalanx. Sometimes these shared subcharts are merely artifacts of code organization and deduplication within Phalanx, and should not have an independent existence outside of Phalanx. In those cases, they're maintained in the :file:`charts` directory. @@ -118,7 +118,7 @@ The default branch is ``main``. This default branch is considered the source of truth for fullly synchronized Phalanx environments. 
Updates to Phalanx are introduced as pull requests on GitHub. -Repository members create branches directly in the `GitHub lsst-sqre/phalanx repository `__ (see the `Data Management workflow guide`_) +Repository members create branches directly in https://github.com/lsst-sqre/phalanx (see the `Data Management workflow guide`_) External collaborators should fork Phalanx and create pull requests. It is possible (particularly in non-production environments) to deploy applications from branches of Phalanx, which is useful for debugging new and updating applications before updating the ``main`` branch. diff --git a/docs/applications/livetap/index.rst b/docs/applications/livetap/index.rst index ce1e83c8b6..1e9da2a7e5 100644 --- a/docs/applications/livetap/index.rst +++ b/docs/applications/livetap/index.rst @@ -5,11 +5,11 @@ livetap — IVOA livetap Table Access Protocol ############################################ LIVETAP (Live Obscore Table Access Protocol) is an IVOA_ service that provides access to the live obscore table which is hosted on postgres. -On the Rubin Science Platform, it is provided by `tap-postgres `__, which is derived from the `CADC TAP service `__. +On the Rubin Science Platform, it is provided by https://github.com/lsst-sqre/tap-postgres, which is derived from the `CADC TAP service `__. This service provides access to the ObsCore tables that are created and served by the butler and updated live. The TAP data itself, apart from schema queries, comes from Postgres. -The TAP schema is provided by images built from the `sdm_schemas `__ repository. +The TAP schema is provided by images built from https://github.com/lsst/sdm_schemas. .. 
jinja:: tap :file: applications/_summary.rst.jinja diff --git a/docs/applications/nublado/updating-recommended.rst b/docs/applications/nublado/updating-recommended.rst index 09c6da77af..eb6eb8c829 100644 --- a/docs/applications/nublado/updating-recommended.rst +++ b/docs/applications/nublado/updating-recommended.rst @@ -12,7 +12,7 @@ Tagging a new container version When a new version has been approved (after passing through its prior QA and sign-off gates), the ``recommended`` tag must be updated to point to the new version. -To do this, run the GitHub retag workflow for the `sciplat-lab `__ repository, as follows: +To do this, run the GitHub retag workflow for https://github.com/lsst-sqre/sciplat-lab repository, as follows: #. Go to `the retag workflow page `__. #. Click :guilabel:`Run workflow`. @@ -38,7 +38,7 @@ If you do not find it, then that environment is currently using ``recommended`` Set this key (creating it if necessary) to whatever string represents the correct recommended-by-default image for that instance. For instance, for a Telescope and Site environment, this will likely look something like ``recommended_c0032``. -Create a pull request against `Phalanx `__ that updates the tag. +Create a pull request against https://github.com/lsst-sqre/phalanx that updates the tag. Once this change is merged, sync the nublado application (using Argo CD) in the affected environments. You do not have to wait for a maintenance window to do this, since the change is low risk, although it will result in a very brief outage for Notebook Aspect lab spawning while the JupyterLab Controller is restarted. diff --git a/docs/applications/semaphore/index.rst b/docs/applications/semaphore/index.rst index 438a37e47a..05755a6773 100644 --- a/docs/applications/semaphore/index.rst +++ b/docs/applications/semaphore/index.rst @@ -7,7 +7,7 @@ semaphore — User notification Semaphore is the user notification and messaging service for the Rubin Science Platform. 
UI applications like :px-app:`squareone` can display messages from Semaphore's API. -Edit broadcast messages for SQuaRE-managed environments at `lsst-sqre/rsp_broadcast `__. +Edit broadcast messages for SQuaRE-managed environments at https://github.com/lsst-sqre/rsp_broadcast. .. jinja:: semaphore :file: applications/_summary.rst.jinja diff --git a/docs/applications/ssotap/index.rst b/docs/applications/ssotap/index.rst index 9b4e1c2dac..a0e1262494 100644 --- a/docs/applications/ssotap/index.rst +++ b/docs/applications/ssotap/index.rst @@ -5,11 +5,11 @@ ssotap — IVOA DP03 Solar System Table Access Protocol ##################################################### SSOTAP (SSO Table Access Protocol) is an IVOA_ service that provides access to the ObsCore table which is hosted on postgres. -On the Rubin Science Platform, it is provided by `tap-postgres `__, which is derived from the `CADC TAP service `__. +On the Rubin Science Platform, it is provided by https://github.com/lsst-sqre/tap-postgres, which is derived from the `CADC TAP service `__. This service provides access to the Solar System tables that are created and served by the butler. The TAP data itself, apart from schema queries, comes from Postgres. -The TAP schema is provided by images built from the `sdm_schemas `__ repository. +The TAP schema is provided by images built from https://github.com/lsst/sdm_schemas. .. jinja:: tap :file: applications/_summary.rst.jinja diff --git a/docs/applications/tap/index.rst b/docs/applications/tap/index.rst index 73cd30d3b2..b289b88788 100644 --- a/docs/applications/tap/index.rst +++ b/docs/applications/tap/index.rst @@ -5,11 +5,11 @@ tap — IVOA Table Access Protocol ################################ TAP_ (Table Access Protocol) is an IVOA_ service that provides access to general table data, including astronomical catalogs. -On the Rubin Science Platform, it is provided by `lsst-tap-service `__, which is derived from the `CADC TAP service `__. 
+On the Rubin Science Platform, it is provided by https://github.com/lsst-sqre/lsst-tap-service, which is derived from the `CADC TAP service `__. The same service provides both TAP and ObsTAP_ schemas. The TAP data itself, apart from schema queries, comes from Qserv. -The TAP schema is provided by images built from the `sdm_schemas `__ repository. +The TAP schema is provided by images built from https://github.com/lsst/sdm_schemas. .. jinja:: tap :file: applications/_summary.rst.jinja From b8657258505b1e63137630161422610ae613a281 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 11 Dec 2023 11:09:49 -0800 Subject: [PATCH 360/588] Provide the gcloud command on environment pages We can generate the full command to run to refresh credentials for environments hosted at GCP, which seems better than giving people instructions for how to construct it. Do that and update the documentation to point people to it. --- docs/admin/infrastructure/google/credentials.rst | 12 +++--------- docs/environments/_summary.rst.jinja | 10 ++++++++++ 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/docs/admin/infrastructure/google/credentials.rst b/docs/admin/infrastructure/google/credentials.rst index c879202fc5..e856e9d231 100644 --- a/docs/admin/infrastructure/google/credentials.rst +++ b/docs/admin/infrastructure/google/credentials.rst @@ -27,16 +27,10 @@ You will only have to follow this process once on each machine from which you wa The final step has an example :command:`gcloud` command, but it assumes that you are getting credentials for your default project. Rubin uses multiple Google Cloud Platform projects for different environments, so you may have to provide the project ID as well. -Here is the full command to run: - -.. prompt:: bash - - gcloud container clusters get-credentials --project --region - -You can get the cluster name, project ID, and region of a Phalanx environment hosted on Google Cloud Platform from its :doc:`environments page `. 
+For the full command to run, see the bottom of the relevant :doc:`environments page `. .. note:: - If the control plane credentials of the Kubernetes cluster are rotated, you will have to re-run the above command to refresh your credentials. - The Kubernetes control plane credentials eventually expire and have to be rotated. - If you discover that your credentials are no longer working, try running the above command again to refresh your credentials and see if the problem persists. + If the control plane credentials of the Kubernetes cluster are rotated, you will have to re-run the :command:`gcloud` command to refresh your credentials. + If you discover that your credentials are no longer working, try that command and see if the problem persists. diff --git a/docs/environments/_summary.rst.jinja b/docs/environments/_summary.rst.jinja index ecca4a2cc9..9c47b905ef 100644 --- a/docs/environments/_summary.rst.jinja +++ b/docs/environments/_summary.rst.jinja @@ -64,3 +64,13 @@ {{ line }} {%- endfor %} {%- endif %} +{%- if env.gcp %} + +To obtain Kubernetes admin credentials for this cluster, run: + +.. prompt:: bash + + gcloud container clusters get-credentials {{ env.gcp.cluster_name }} --project {{ env.gcp.project_id }} --region {{ env.gcp.region }} + +For details on how to set up :command:`gcloud` and the necessary plugins, see :doc:`/admin/infrastructure/google/credentials`. 
+{%- endif %} From 37227c25be9db03f32cb6fe19b04a2fac80b0508 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 13 Dec 2023 13:02:46 -0800 Subject: [PATCH 361/588] Improve wording for getting Google credentials Co-authored-by: Jonathan Sick --- docs/admin/infrastructure/google/credentials.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/admin/infrastructure/google/credentials.rst b/docs/admin/infrastructure/google/credentials.rst index e856e9d231..64df5ed038 100644 --- a/docs/admin/infrastructure/google/credentials.rst +++ b/docs/admin/infrastructure/google/credentials.rst @@ -2,9 +2,8 @@ Getting GKE Kubernetes credentials ################################## -In order to use the standard Kubernetes administrative command :command:`kubectl` or other commands built on the same protocol (such as Helm_ or the Phalanx installer), you must first have stored authentication credentials for the target Kubernetes cluster. -Google provides a mechanism to obtain those credentials using the :command:`gcloud` command. -Here are the steps: +To use the standard Kubernetes administrative command :command:`kubectl` or other commands built on the same protocol (such as Helm_ or the Phalanx installer), you must have authentication credentials stored for the target Kubernetes cluster. +Google provides a mechanism to obtain those credentials using the :command:`gcloud` command: #. Ensure you have a Google account with access to the Google Cloud Platform project where your target Kubernetes cluster is running. For Phalanx environments run by SQuaRE, this access must be via an ``lsst.cloud`` Google account that is used only for Rubin activities. From 5bf53524451bcd54c91f05b7fbeb03e32fd223c7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 13 Dec 2023 13:02:09 -0800 Subject: [PATCH 362/588] Minor Google credentials fixes Remove an unnecessary sentence and reorder some text. 
--- docs/admin/infrastructure/google/credentials.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/admin/infrastructure/google/credentials.rst b/docs/admin/infrastructure/google/credentials.rst index 64df5ed038..da41abbf02 100644 --- a/docs/admin/infrastructure/google/credentials.rst +++ b/docs/admin/infrastructure/google/credentials.rst @@ -20,16 +20,15 @@ Google provides a mechanism to obtain those credentials using the :command:`gclo #. `Install kubectl and the GKE auth plugin `__. As part of that installation, you will run the :command:`gcloud` command that obtains credentials usable by :command:`kubectl` and other privileged Kubernetes commands. - See below for assistance with the precise :command:`gcloud` command to run. - -You will only have to follow this process once on each machine from which you want to use Kubernetes. The final step has an example :command:`gcloud` command, but it assumes that you are getting credentials for your default project. Rubin uses multiple Google Cloud Platform projects for different environments, so you may have to provide the project ID as well. For the full command to run, see the bottom of the relevant :doc:`environments page `. +Once you have followed this process on a system, the credentials will remain valid unless the Kubernetes control plane credentials are rotated. + .. note:: - The Kubernetes control plane credentials eventually expire and have to be rotated. + The Kubernetes control plane credentials eventually expire and have to periodically be rotated. If the control plane credentials of the Kubernetes cluster are rotated, you will have to re-run the :command:`gcloud` command to refresh your credentials. If you discover that your credentials are no longer working, try that command and see if the problem persists. 
From 775d9193e2bffba86ef9f729a2c82deb77267e82 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 13 Dec 2023 13:40:41 -0800 Subject: [PATCH 363/588] Update pre-commit and CI dependencies - Update all pre-commit plugins to the latest versions - Switch to using Ruff to format code instead of Black - Update the Ruff configuration based on Nublado - Update the versions of Vault and Argo CD used for minikube CI - Update Python to 3.12 everywhere in CI except the Python tests - Test Python code with both 3.11 and 3.12 --- .github/workflows/ci.yaml | 15 +++++------ .github/workflows/dependencies.yaml | 2 +- .github/workflows/docs.yaml | 2 +- .github/workflows/linkcheck.yaml | 2 +- .pre-commit-config.yaml | 16 +++++------ pyproject.toml | 41 +++++++++++++++++++++-------- tests/support/helm.py | 3 +-- 7 files changed, 47 insertions(+), 34 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 94afac8961..0cf4ae663e 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -15,14 +15,14 @@ jobs: uses: actions/setup-go@v4 - name: Install helm-docs - run: go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.0 + run: go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.3 env: GOBIN: /usr/local/bin/ - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Run pre-commit uses: pre-commit/action@v3.0.0 @@ -35,6 +35,7 @@ jobs: matrix: python: - "3.11" + - "3.12" steps: - uses: actions/checkout@v4 @@ -44,8 +45,6 @@ jobs: python-version: ${{ matrix.python }} tox-envs: "typing,py,coverage-report" cache-key-prefix: test - env: - COLUMNS: 120 helm: runs-on: ubuntu-latest @@ -63,7 +62,7 @@ jobs: - uses: lsst-sqre/run-tox@v1 with: - python-version: "3.11" + python-version: "3.12" tox-envs: phalanx-lint-change cache-key-prefix: test @@ -113,11 +112,11 @@ jobs: - name: Download installer dependencies if: steps.filter.outputs.minikube 
== 'true' run: | - curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.14.0/vault_1.14.0_linux_amd64.zip + curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.15.4/vault_1.15.4_linux_amd64.zip unzip /tmp/vault.zip sudo mv vault /usr/local/bin/vault sudo chmod +x /usr/local/bin/vault - sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.10/argocd-linux-amd64 + sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.8.6/argocd-linux-amd64 sudo chmod +x /usr/local/bin/argocd sudo apt-get install socat diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml index edca727e70..49b52fbb6d 100644 --- a/.github/workflows/dependencies.yaml +++ b/.github/workflows/dependencies.yaml @@ -18,7 +18,7 @@ jobs: - name: Run neophile uses: lsst-sqre/run-neophile@v1 with: - python-version: "3.11" + python-version: "3.12" mode: pr types: python app-id: ${{ secrets.NEOPHILE_APP_ID }} diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 15f430a5c6..3c37d2a750 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -52,7 +52,7 @@ jobs: if: steps.filter.outputs.docs == 'true' uses: lsst-sqre/run-tox@v1 with: - python-version: "3.11" + python-version: "3.12" tox-envs: docs # Upload docs: diff --git a/.github/workflows/linkcheck.yaml b/.github/workflows/linkcheck.yaml index 831f0d6436..458e5f558a 100644 --- a/.github/workflows/linkcheck.yaml +++ b/.github/workflows/linkcheck.yaml @@ -48,5 +48,5 @@ jobs: - name: Check links uses: lsst-sqre/run-tox@v1 with: - python-version: "3.11" + python-version: "3.12" tox-envs: docs-linkcheck diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2303624b95..6ebea0460f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,20 +1,20 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - 
id: check-merge-conflict - id: check-toml - id: trailing-whitespace - repo: https://github.com/adrienverge/yamllint - rev: v1.32.0 + rev: v1.33.0 hooks: - id: yamllint args: - -c=.yamllint.yml - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.26.3 + rev: 0.27.3 hooks: - id: check-jsonschema files: ^applications/.*/secrets(-[^./-]+)?\.yaml @@ -26,7 +26,7 @@ repos: files: ^docs/extras/schemas/.*\.json - repo: https://github.com/norwoodj/helm-docs - rev: v1.11.0 + rev: v1.11.3 hooks: - id: helm-docs args: @@ -46,15 +46,11 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.292 + rev: v0.1.8 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] - - - repo: https://github.com/psf/black - rev: 23.7.0 - hooks: - - id: black + - id: ruff-format - repo: https://github.com/adamchainz/blacken-docs rev: 1.16.0 diff --git a/pyproject.toml b/pyproject.toml index 6583388dd1..9b7834e8eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,8 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Intended Audience :: Developers", "Operating System :: POSIX", ] @@ -138,16 +140,21 @@ ignore = [ "D104", # don't see the point of documenting every package "D105", # our style doesn't require docstrings for magic methods "D106", # Pydantic uses a nested Config class that doesn't warrant docs + "D205", # our documentation style allows a folded first line "EM101", # justification (duplicate string in traceback) is silly "EM102", # justification (duplicate string in traceback) is silly "FBT003", # positional booleans are normal for Pydantic field defaults + "FIX002", # point of a TODO comment is that we're not ready to fix it "G004", # forbidding logging f-strings is appealing, but not our style "PD011", # false positive with 
non-NumPY code that uses .values + "RET505", # disagree that omitting else always makes code more readable + "PLR0911", # often many returns is clearer and simpler style "PLR0913", # factory pattern uses constructors with many arguments "PLR2004", # too aggressive about magic values - "RET505", # disagree that omitting else always makes code more readable + "PLW0603", # yes global is discouraged but if needed, it's needed "S105", # good idea but too many false positives on non-passwords "S106", # good idea but too many false positives on non-passwords + "S107", # good idea but too many false positives on non-passwords "S603", # impossible to write subprocess code without triggering this "S607", # searching for executables on PATH is often correct "SIM102", # sometimes the formatting of nested if statements is clearer @@ -158,14 +165,32 @@ ignore = [ "TCH003", # we decided to not maintain separate TYPE_CHECKING blocks "TID252", # if we're going to use relative imports, use them always "TRY003", # good general advice but lint is way too aggressive - - # Phalanx-specific exclusions. 
- "T201", # print makes sense to use because Phalanx is interactive + "TRY301", # sometimes raising exceptions inside try is the best flow + + # The following settings should be disabled when using ruff format + # per https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules + "W191", + "E111", + "E114", + "E117", + "D206", + "D300", + "Q000", + "Q001", + "Q002", + "Q003", + "COM812", + "COM819", + "ISC001", + "ISC002", ] select = ["ALL"] -target-version = "py311" +target-version = "py312" [tool.ruff.per-file-ignores] +"src/phalanx/**" = [ + "T201", # print makes sense to use because Phalanx is interactive +] "tests/**" = [ "D103", # tests don't need docstrings "PLR0915", # tests are allowed to be long, sometimes that's convenient @@ -203,11 +228,5 @@ builtins-ignorelist = [ fixture-parentheses = false mark-parentheses = false -[tool.ruff.pep8-naming] -classmethod-decorators = [ - "pydantic.root_validator", - "pydantic.validator", -] - [tool.ruff.pydocstyle] convention = "numpy" diff --git a/tests/support/helm.py b/tests/support/helm.py index 5c38e7bc86..7fafa728fe 100644 --- a/tests/support/helm.py +++ b/tests/support/helm.py @@ -5,10 +5,9 @@ import subprocess from collections.abc import Iterator from pathlib import Path +from typing import Protocol from unittest.mock import patch -from typing_extensions import Protocol - from phalanx.exceptions import HelmFailedError from phalanx.storage.helm import HelmStorage From 92be8e7f8ece095cf6b46ba5959eef3aa1c31d6a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 22:20:59 +0000 Subject: [PATCH 364/588] Update Helm release argo-workflows to v0.39.9 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index efde61b26f..e572f97f83 100644 --- a/applications/argo-workflows/Chart.yaml +++ 
b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.39.8 + version: 0.39.9 repository: https://argoproj.github.io/argo-helm From 8974d93d623bceea6fd0608081922506c4975258 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 22:21:02 +0000 Subject: [PATCH 365/588] Update Helm release cert-manager to v1.13.3 --- applications/cert-manager/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml index 5d9d661070..41d5841a87 100644 --- a/applications/cert-manager/Chart.yaml +++ b/applications/cert-manager/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/cert-manager/cert-manager dependencies: - name: cert-manager - version: v1.13.2 + version: v1.13.3 repository: https://charts.jetstack.io From c31b237b245c0e7ba83246a4ef795c65d7e50d7c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 22:21:06 +0000 Subject: [PATCH 366/588] Update Helm release telegraf to v1.8.39 --- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 5f28ebd25f..8f5ac9e5a6 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.38 + version: 1.8.39 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 0446bccd7cb80328596a476f03e1917c38d74074 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 22:21:08 +0000 Subject: [PATCH 367/588] Update Helm release telegraf-ds to v1.1.21 --- 
applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index a82062eb47..3483a73918 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.20 + version: 1.1.21 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 01c1e73ac83cd9f30e68ac3d09f7a10ef5e1059f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 22:21:14 +0000 Subject: [PATCH 368/588] Update ghcr.io/lsst-sqre/lsst-tap-uws-db Docker tag to v2.1.1 --- charts/cadc-tap/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index ff4d468d23..12ee98f35b 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -192,7 +192,7 @@ uws: # -- Tag of UWS database image to use # @default -- Version of QServ TAP image - tag: "2.1.0" + tag: "2.1.1" # -- Resource limits and requests for the UWS database pod resources: From bfbbd4ad3942ff285088097766c6e6ccbef7749e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 22:21:19 +0000 Subject: [PATCH 369/588] Update actions/setup-go action to v5 --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0cf4ae663e..9c7b97e803 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 - name: Install helm-docs run: go install github.com/norwoodj/helm-docs/cmd/helm-docs@v1.11.3 From 
f6dfe67ff73d15a90571021e55931759b224c0e1 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 13 Dec 2023 14:23:54 -0800 Subject: [PATCH 370/588] Fix the rule for upload of documentation The conditional deciding whether to upload changes after a merge to the main branch was using the wrong GitHub variable. Also unconditionally build documentation and upload if the workflow was triggered with workflow dispatch, instead of checking the documentation filter. --- .github/workflows/docs.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 3c37d2a750..8dc01ded18 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -45,11 +45,11 @@ jobs: - "docs/**" - name: Install graphviz - if: steps.filter.outputs.docs == 'true' + if: steps.filter.outputs.docs == 'true' || github.event_name == 'workflow_dispatch' run: sudo apt-get install graphviz - name: Build docs - if: steps.filter.outputs.docs == 'true' + if: steps.filter.outputs.docs == 'true' || github.event_name == 'workflow_dispatch' uses: lsst-sqre/run-tox@v1 with: python-version: "3.12" @@ -67,6 +67,6 @@ jobs: username: ${{ secrets.LTD_USERNAME }} password: ${{ secrets.LTD_PASSWORD }} if: >- - (github.event_name == 'push' && github.head_ref == 'main' && steps.filter.outputs.docs == 'true') - || (github.event_name == 'workflow_dispatch' && steps.filter.outputs.docs == 'true') + (github.event_name == 'push' && github.ref_name == 'main' && steps.filter.outputs.docs == 'true') + || (github.event_name == 'workflow_dispatch') || (github.event_name == 'pull_request' && startsWith(github.head_ref, 'tickets/') && steps.filter.outputs.docsSpecific == 'true') From b189e799c8fac85d8933795364ea69f450cf7569 Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Fri, 3 Nov 2023 16:22:03 -0700 Subject: [PATCH 371/588] Add fallback pipelines in prompt processing dev for LATISS --- .../values-usdfdev-prompt-processing.yaml | 5 
++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index c72b31150e..986d96fad9 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -11,7 +11,10 @@ prompt-proto-service: tag: latest instrument: - pipelines: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml] + pipelines: >- + (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] calibRepo: s3://rubin-pp-users/central_repo/ s3: From e0fed33389bdfa3cd2a55d1588310e23653cd992 Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Fri, 8 Dec 2023 16:43:50 -0800 Subject: [PATCH 372/588] Skip single frame processing in the dev fall back Currently one visit in dev is expected to fall in calibrate. The fallback cannot handle pipeline error today. When it can, add this config back for testing that fallback. 
--- .../values-usdfdev-prompt-processing.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index 986d96fad9..3d736c7bb2 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -13,7 +13,6 @@ prompt-proto-service: instrument: pipelines: >- (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, - ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/SingleFrame.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] calibRepo: s3://rubin-pp-users/central_repo/ From 8e6ba69ed4c5814845752b1ff36cfcbadc534672 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 12 Dec 2023 14:08:41 -0700 Subject: [PATCH 373/588] Enable repairer connectors at the Summit - Recover data from Kafka after InfluxDB restarts --- applications/sasquatch/values-summit.yaml | 34 +++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index d2756e97a9..eb43a4eba1 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -48,76 +48,76 @@ kafka-connect-manager: connectors: auxtel: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" maintel: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" mtmount: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*MTMount" tasksMax: "8" comcam: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" eas: enabled: true - 
repairerConnector: false + repairerConnector: true topicsRegex: ".*DIMM|.*DSM|.*ESS|.*HVAC|.*WeatherForecast" latiss: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" m1m3: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*MTM1M3" tasksMax: "8" m2: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" obssys: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" ocps: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*OCPS" test: enabled: true topicsRegex: "lsst.sal.Test" pmd: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*PMD" calsys: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" mtaircompressor: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*MTAirCompressor" genericcamera: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*GCHeaderService|.*GenericCamera" gis: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*GIS" mtvms: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*MTVMS" lasertracker: enabled: true - repairerConnector: false + repairerConnector: true topicsRegex: ".*LaserTracker" telegraf-kafka-consumer: From fda82794ffb51ae9b5f0140660d254a479a5f7e2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 12 Dec 2023 14:58:25 -0700 Subject: [PATCH 374/588] Constrain queries even further --- applications/sasquatch/README.md | 4 ++-- applications/sasquatch/values.yaml | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/sasquatch/README.md 
b/applications/sasquatch/README.md index 9695beee8f..e2a6e25789 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -34,7 +34,7 @@ Rubin Observatory's telemetry service. | influxdb-staging.resources.requests.cpu | int | `8` | | | influxdb-staging.resources.requests.memory | string | `"96Gi"` | | | influxdb-staging.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. | -| influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | +| influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":1000,"query-timeout":"30s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb.enabled | bool | `true` | Enable InfluxDB. | | influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | | influxdb.ingress | object | disabled | InfluxDB ingress configuration. | @@ -85,7 +85,7 @@ Rubin Observatory's telemetry service. | kapacitor.resources.requests.cpu | int | `1` | | | kapacitor.resources.requests.memory | string | `"1Gi"` | | | rest-proxy | object | `{"enabled":false}` | Override rest-proxy configuration. 
| -| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | +| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":1000,"query-timeout":"30s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | source-influxdb.enabled | bool | `false` | Enable InfluxDB staging deployment. | | source-influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | | source-influxdb.ingress | object | disabled | InfluxDB ingress configuration. 
| diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 64bf6c24e1..02c94d48d3 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -52,8 +52,8 @@ influxdb: max-row-limit: 0 coordinator: write-timeout: "1h" - max-concurrent-queries: 0 - query-timeout: "60s" + max-concurrent-queries: 1000 + query-timeout: "30s" log-queries-after: "15s" continuous_queries: enabled: false @@ -178,8 +178,8 @@ source-influxdb: max-row-limit: 0 coordinator: write-timeout: "1h" - max-concurrent-queries: 0 - query-timeout: "60s" + max-concurrent-queries: 1000 + query-timeout: "30s" log-queries-after: "15s" continuous_queries: enabled: false From 4eccb5230e5383c420434bcc409e33d1bd6023d3 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 13 Dec 2023 16:11:20 -0700 Subject: [PATCH 375/588] Disable repairer connectors at the Summit - Finished syncing data from kafka --- applications/sasquatch/values-summit.yaml | 34 +++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index eb43a4eba1..d2756e97a9 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -48,76 +48,76 @@ kafka-connect-manager: connectors: auxtel: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" maintel: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" mtmount: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTMount" tasksMax: "8" comcam: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" eas: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: 
".*DIMM|.*DSM|.*ESS|.*HVAC|.*WeatherForecast" latiss: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" m1m3: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTM1M3" tasksMax: "8" m2: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" obssys: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" ocps: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*OCPS" test: enabled: true topicsRegex: "lsst.sal.Test" pmd: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*PMD" calsys: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" mtaircompressor: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTAirCompressor" genericcamera: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*GCHeaderService|.*GenericCamera" gis: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*GIS" mtvms: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTVMS" lasertracker: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*LaserTracker" telegraf-kafka-consumer: From ab41d7d806c7c94d29175d1934b687d2df8b9bfc Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 14 Dec 2023 07:25:40 -0700 Subject: [PATCH 376/588] Increase memory and cpu resources for InfluxDB at the Summit - We have noticed the pod restarting lately even with further constraint on query limits --- applications/sasquatch/values-summit.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git 
a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index d2756e97a9..b9f55d0364 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -31,6 +31,13 @@ influxdb: ingress: enabled: true hostname: summit-lsp.lsst.codes + resources: + requests: + memory: 128Gi + cpu: 16 + limits: + memory: 128Gi + cpu: 16 influxdb2: enabled: false From 47de4a632b65cf98f22ceb7094a968e792315881 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 14 Dec 2023 14:26:14 -0700 Subject: [PATCH 377/588] [DM-41951] Disable useCursorFetch for QServ This seems to confuse the mysql proxy and not return results although it looks like the query is executing. --- charts/cadc-tap/templates/tap-deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/cadc-tap/templates/tap-deployment.yaml b/charts/cadc-tap/templates/tap-deployment.yaml index 960fdad306..077ff6aa88 100644 --- a/charts/cadc-tap/templates/tap-deployment.yaml +++ b/charts/cadc-tap/templates/tap-deployment.yaml @@ -55,7 +55,7 @@ spec: -Dqservuser.username=qsmaster -Dqservuser.password= -Dqservuser.driverClassName=com.mysql.cj.jdbc.Driver - -Dqservuser.url=jdbc:mysql://{{ .Values.config.qserv.host }}/?useCursorFetch=true + -Dqservuser.url=jdbc:mysql://{{ .Values.config.qserv.host }}/ -Dqservuser.maxActive=100 -Dca.nrc.cadc.auth.Authenticator=org.opencadc.tap.impl.AuthenticatorImpl {{- end }} From 48076e24f79d813c7487d6de55d879d5ea14e4f8 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 14 Dec 2023 16:11:51 -0700 Subject: [PATCH 378/588] [DM-42069] Advance datalink to 2.1.6 tap schema release is already there. 
--- charts/cadc-tap/README.md | 2 +- charts/cadc-tap/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index 284dcd6975..0a2111b8fd 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -14,7 +14,7 @@ IVOA TAP service |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the TAP pod | | config.backend | string | None, must be set to "pg" or "qserv" | What type of backend are we connecting to? | -| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"` | Datalink payload URL | +| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/2.1.6/datalink-snippets.zip"` | Datalink payload URL | | config.gcsBucket | string | The common GCS bucket | Name of GCS bucket in which to store results | | config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | The common GCS bucket | Base URL for results stored in GCS bucket | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index 12ee98f35b..9af72fe1f0 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -99,7 +99,7 @@ config: tapSchemaAddress: "cadc-tap-schema-db:3306" # -- Datalink payload URL - datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip" + datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/2.1.6/datalink-snippets.zip" # -- Name of GCS bucket in which to store results # @default -- The common GCS bucket From c11caebf0c09c0ea1fa8217358ae5f12e5b10eda Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Sanchez Date: Thu, 14 Dec 2023 20:34:32 -0300 Subject: [PATCH 379/588] Add rubintv to base --- environments/values-base.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-base.yaml 
b/environments/values-base.yaml index 1b0108032f..92025c349c 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -8,6 +8,7 @@ applications: narrativelog: true nublado: true portal: true + rubintv: true sasquatch: true squareone: true strimzi: true From dac68ff87980aefc23e497b6d1051da181b582c8 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 15 Dec 2023 15:32:15 -0800 Subject: [PATCH 380/588] Upgrade mobu to 7.0.0 Update the Phalanx configuration for the mobu 7.0.0 release, which changes environment variable names, drops support for cachemachine, and renames some monkey businesses. --- applications/mobu/Chart.yaml | 6 +++--- applications/mobu/templates/deployment.yaml | 12 ++++++------ applications/mobu/values-idfdev.yaml | 4 +--- applications/mobu/values-idfint.yaml | 5 +---- applications/mobu/values-idfprod.yaml | 6 +----- applications/mobu/values-usdfdev.yaml | 3 +-- applications/mobu/values-usdfint.yaml | 1 - applications/mobu/values-usdfprod.yaml | 3 +-- 8 files changed, 14 insertions(+), 26 deletions(-) diff --git a/applications/mobu/Chart.yaml b/applications/mobu/Chart.yaml index 9d1709f040..c348afc8e8 100644 --- a/applications/mobu/Chart.yaml +++ b/applications/mobu/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: mobu version: 1.0.0 -description: Continuous integration testing +description: "Continuous integration testing" sources: - - https://github.com/lsst-sqre/mobu -appVersion: 6.1.1 + - "https://github.com/lsst-sqre/mobu" +appVersion: 7.0.0 diff --git a/applications/mobu/templates/deployment.yaml b/applications/mobu/templates/deployment.yaml index d80bb97975..95b7299cb9 100644 --- a/applications/mobu/templates/deployment.yaml +++ b/applications/mobu/templates/deployment.yaml @@ -25,27 +25,27 @@ spec: - name: {{ .Chart.Name }} env: {{- if .Values.config.slackAlerts }} - - name: "ALERT_HOOK" + - name: "MOBU_ALERT_HOOK" valueFrom: secretKeyRef: name: {{ template "mobu.fullname" . 
}}-secret key: "ALERT_HOOK" {{- end }} {{- if .Values.config.autostart }} - - name: "AUTOSTART" + - name: "MOBU_AUTOSTART_PATH" value: "/etc/mobu/autostart.yaml" {{- end }} - - name: "ENVIRONMENT_URL" + - name: "MOBU_ENVIRONMENT_URL" value: {{ .Values.global.baseUrl }} - - name: "GAFAELFAWR_TOKEN" + - name: "MOBU_GAFAELFAWR_TOKEN" valueFrom: secretKeyRef: name: {{ template "mobu.fullname" . }}-gafaelfawr-token key: "token" - - name: "SAFIR_PATH_PREFIX" + - name: "MOBU_PATH_PREFIX" value: {{ .Values.config.pathPrefix | quote }} {{- if (not .Values.config.debug) }} - - name: "SAFIR_PROFILE" + - name: "MOBU_LOGGING_PROFILE" value: "production" {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index d9b6aa4eb7..19c702a5af 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -15,7 +15,6 @@ config: options: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "tutorial" count: 1 @@ -33,7 +32,6 @@ config: repo_branch: "prod" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -41,7 +39,7 @@ config: - username: "bot-mobu-tap" scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 09a3c64b8e..6a0cdf5515 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -14,7 +14,6 @@ config: options: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "weekly" count: 1 @@ -30,7 +29,6 @@ config: options: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" 
- use_cachemachine: false restart: true - name: "tutorial" count: 1 @@ -48,7 +46,6 @@ config: repo_branch: "prod" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -56,7 +53,7 @@ config: - username: "bot-mobu-tap" scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-idfprod.yaml b/applications/mobu/values-idfprod.yaml index 218c2021a8..ca438e299c 100644 --- a/applications/mobu/values-idfprod.yaml +++ b/applications/mobu/values-idfprod.yaml @@ -15,7 +15,6 @@ config: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" max_executions: 1 - use_cachemachine: false restart: true - name: "quickbeam" count: 1 @@ -33,7 +32,6 @@ config: repo_branch: "prod" idle_time: 900 delete_lab: false - use_cachemachine: false restart: true - name: "tutorial" count: 1 @@ -51,7 +49,6 @@ config: repo_branch: "prod" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false restart: true - name: "tutorial-weekly" count: 1 @@ -71,7 +68,6 @@ config: repo_branch: "prod" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -79,7 +75,7 @@ config: - username: "bot-mobu-tap" scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml index f2b77ee023..facf862243 100644 --- a/applications/mobu/values-usdfdev.yaml +++ b/applications/mobu/values-usdfdev.yaml @@ -19,7 +19,6 @@ config: image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -29,7 +28,7 @@ config: gidnumber: 1126 scopes: ["read:tap"] 
business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true diff --git a/applications/mobu/values-usdfint.yaml b/applications/mobu/values-usdfint.yaml index 0bd165194f..84c264637d 100644 --- a/applications/mobu/values-usdfint.yaml +++ b/applications/mobu/values-usdfint.yaml @@ -19,5 +19,4 @@ config: image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true diff --git a/applications/mobu/values-usdfprod.yaml b/applications/mobu/values-usdfprod.yaml index 3bd79a6fe1..b04c82af58 100644 --- a/applications/mobu/values-usdfprod.yaml +++ b/applications/mobu/values-usdfprod.yaml @@ -17,7 +17,6 @@ config: options: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false restart: true - name: "tap" count: 1 @@ -27,7 +26,7 @@ config: gidnumber: 1126 scopes: ["read:tap"] business: - type: "TAPQueryRunner" + type: "TAPQuerySetRunner" options: query_set: "dp0.2" restart: true From dbf86560137ab8d49c6bc22b35839f6efe778eb1 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 12:32:14 +0000 Subject: [PATCH 381/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 375 +++++++++++++++++++++--------------------- requirements/main.txt | 54 +++--- 2 files changed, 223 insertions(+), 206 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index df37dddbf9..87bef5ef30 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.12 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/dev.txt requirements/dev.in @@ -24,9 +24,9 @@ autodoc-pydantic==2.0.1 \ 
--hash=sha256:7a125a4ff18e4903e27be71e4ddb3269380860eacab4a584d6cc2e212fa96991 \ --hash=sha256:d3c302fdb6d37edb5b721f0f540252fa79cea7018bc1a9a85bf70f33a68b0ce4 # via -r requirements/dev.in -babel==2.13.1 \ - --hash=sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900 \ - --hash=sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed +babel==2.14.0 \ + --hash=sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363 \ + --hash=sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287 # via sphinx beautifulsoup4==4.12.2 \ --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ @@ -143,59 +143,59 @@ click==8.1.7 \ # -c requirements/main.txt # documenteer # sphinx-click -coverage[toml]==7.3.2 \ - --hash=sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1 \ - --hash=sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63 \ - --hash=sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9 \ - --hash=sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312 \ - --hash=sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3 \ - --hash=sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb \ - --hash=sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25 \ - --hash=sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92 \ - --hash=sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda \ - --hash=sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148 \ - --hash=sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6 \ - --hash=sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216 \ - --hash=sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a \ - --hash=sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640 \ - 
--hash=sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836 \ - --hash=sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c \ - --hash=sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f \ - --hash=sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2 \ - --hash=sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901 \ - --hash=sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed \ - --hash=sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a \ - --hash=sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074 \ - --hash=sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc \ - --hash=sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84 \ - --hash=sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083 \ - --hash=sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f \ - --hash=sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c \ - --hash=sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c \ - --hash=sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637 \ - --hash=sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2 \ - --hash=sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82 \ - --hash=sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f \ - --hash=sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce \ - --hash=sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef \ - --hash=sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f \ - --hash=sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611 \ - --hash=sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c \ - --hash=sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76 \ - 
--hash=sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9 \ - --hash=sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce \ - --hash=sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9 \ - --hash=sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf \ - --hash=sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf \ - --hash=sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9 \ - --hash=sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6 \ - --hash=sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2 \ - --hash=sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a \ - --hash=sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a \ - --hash=sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf \ - --hash=sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738 \ - --hash=sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a \ - --hash=sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4 +coverage[toml]==7.3.3 \ + --hash=sha256:007a7e49831cfe387473e92e9ff07377f6121120669ddc39674e7244350a6a29 \ + --hash=sha256:1191270b06ecd68b1d00897b2daddb98e1719f63750969614ceb3438228c088e \ + --hash=sha256:1367aa411afb4431ab58fd7ee102adb2665894d047c490649e86219327183134 \ + --hash=sha256:1f0f8f0c497eb9c9f18f21de0750c8d8b4b9c7000b43996a094290b59d0e7523 \ + --hash=sha256:222b038f08a7ebed1e4e78ccf3c09a1ca4ac3da16de983e66520973443b546bc \ + --hash=sha256:243576944f7c1a1205e5cd658533a50eba662c74f9be4c050d51c69bd4532936 \ + --hash=sha256:2e9223a18f51d00d3ce239c39fc41410489ec7a248a84fab443fbb39c943616c \ + --hash=sha256:307aecb65bb77cbfebf2eb6e12009e9034d050c6c69d8a5f3f737b329f4f15fb \ + --hash=sha256:31c0b1b8b5a4aebf8fcd227237fc4263aa7fa0ddcd4d288d42f50eff18b0bac4 \ + 
--hash=sha256:3b15e03b8ee6a908db48eccf4e4e42397f146ab1e91c6324da44197a45cb9132 \ + --hash=sha256:3c854c1d2c7d3e47f7120b560d1a30c1ca221e207439608d27bc4d08fd4aeae8 \ + --hash=sha256:475de8213ed95a6b6283056d180b2442eee38d5948d735cd3d3b52b86dd65b92 \ + --hash=sha256:50c472c1916540f8b2deef10cdc736cd2b3d1464d3945e4da0333862270dcb15 \ + --hash=sha256:593efa42160c15c59ee9b66c5f27a453ed3968718e6e58431cdfb2d50d5ad284 \ + --hash=sha256:65d716b736f16e250435473c5ca01285d73c29f20097decdbb12571d5dfb2c94 \ + --hash=sha256:733537a182b5d62184f2a72796eb6901299898231a8e4f84c858c68684b25a70 \ + --hash=sha256:757453848c18d7ab5d5b5f1827293d580f156f1c2c8cef45bfc21f37d8681069 \ + --hash=sha256:79c32f875fd7c0ed8d642b221cf81feba98183d2ff14d1f37a1bbce6b0347d9f \ + --hash=sha256:7f3bad1a9313401ff2964e411ab7d57fb700a2d5478b727e13f156c8f89774a0 \ + --hash=sha256:7fbf3f5756e7955174a31fb579307d69ffca91ad163467ed123858ce0f3fd4aa \ + --hash=sha256:811ca7373da32f1ccee2927dc27dc523462fd30674a80102f86c6753d6681bc6 \ + --hash=sha256:89400aa1752e09f666cc48708eaa171eef0ebe3d5f74044b614729231763ae69 \ + --hash=sha256:8c944cf1775235c0857829c275c777a2c3e33032e544bcef614036f337ac37bb \ + --hash=sha256:9437a4074b43c177c92c96d051957592afd85ba00d3e92002c8ef45ee75df438 \ + --hash=sha256:9e17d9cb06c13b4f2ef570355fa45797d10f19ca71395910b249e3f77942a837 \ + --hash=sha256:9ede881c7618f9cf93e2df0421ee127afdfd267d1b5d0c59bcea771cf160ea4a \ + --hash=sha256:a1f76cfc122c9e0f62dbe0460ec9cc7696fc9a0293931a33b8870f78cf83a327 \ + --hash=sha256:a2ac4245f18057dfec3b0074c4eb366953bca6787f1ec397c004c78176a23d56 \ + --hash=sha256:a702e66483b1fe602717020a0e90506e759c84a71dbc1616dd55d29d86a9b91f \ + --hash=sha256:ad2453b852a1316c8a103c9c970db8fbc262f4f6b930aa6c606df9b2766eee06 \ + --hash=sha256:af75cf83c2d57717a8493ed2246d34b1f3398cb8a92b10fd7a1858cad8e78f59 \ + --hash=sha256:afdcc10c01d0db217fc0a64f58c7edd635b8f27787fea0a3054b856a6dff8717 \ + --hash=sha256:c59a3e59fb95e6d72e71dc915e6d7fa568863fad0a80b33bc7b82d6e9f844973 \ + 
--hash=sha256:cad9afc1644b979211989ec3ff7d82110b2ed52995c2f7263e7841c846a75348 \ + --hash=sha256:d299d379b676812e142fb57662a8d0d810b859421412b4d7af996154c00c31bb \ + --hash=sha256:d31650d313bd90d027f4be7663dfa2241079edd780b56ac416b56eebe0a21aab \ + --hash=sha256:d874434e0cb7b90f7af2b6e3309b0733cde8ec1476eb47db148ed7deeb2a9494 \ + --hash=sha256:db0338c4b0951d93d547e0ff8d8ea340fecf5885f5b00b23be5aa99549e14cfd \ + --hash=sha256:df04c64e58df96b4427db8d0559e95e2df3138c9916c96f9f6a4dd220db2fdb7 \ + --hash=sha256:e995efb191f04b01ced307dbd7407ebf6e6dc209b528d75583277b10fd1800ee \ + --hash=sha256:eda7f6e92358ac9e1717ce1f0377ed2b9320cea070906ece4e5c11d172a45a39 \ + --hash=sha256:ee453085279df1bac0996bc97004771a4a052b1f1e23f6101213e3796ff3cb85 \ + --hash=sha256:ee6621dccce8af666b8c4651f9f43467bfbf409607c604b840b78f4ff3619aeb \ + --hash=sha256:eee5e741b43ea1b49d98ab6e40f7e299e97715af2488d1c77a90de4a663a86e2 \ + --hash=sha256:f3bfd2c2f0e5384276e12b14882bf2c7621f97c35320c3e7132c156ce18436a1 \ + --hash=sha256:f501e36ac428c1b334c41e196ff6bd550c0353c7314716e80055b1f0a32ba394 \ + --hash=sha256:f9191be7af41f0b54324ded600e8ddbcabea23e1e8ba419d9a53b241dece821d \ + --hash=sha256:fbd8a5fe6c893de21a3c6835071ec116d79334fbdf641743332e442a3466f7ea \ + --hash=sha256:fc200cec654311ca2c3f5ab3ce2220521b3d4732f68e1b1e79bef8fcfc1f2b97 \ + --hash=sha256:ff4800783d85bff132f2cc7d007426ec698cdce08c3062c8d501ad3f4ea3d16c \ + --hash=sha256:ffb0eacbadb705c0a6969b0adf468f126b064f3362411df95f6d4f31c40d31c1 \ + --hash=sha256:fff0b2f249ac642fd735f009b8363c2b46cf406d3caec00e4deeb79b5ff39b40 # via # -r requirements/dev.in # pytest-cov @@ -203,13 +203,13 @@ diagrams==0.23.4 \ --hash=sha256:1ba69d98fcf8d768dbddf07d2c77aba6cc95c2e6f90f37146c04c96bc6765450 \ --hash=sha256:b7ada0b119b5189dd021b1dc1467fad3704737452bb18b1e06d05e4d1fa48ed7 # via sphinx-diagrams -distlib==0.3.7 \ - --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ - 
--hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 +distlib==0.3.8 \ + --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ + --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -documenteer[guide]==1.0.0a16 \ - --hash=sha256:188d3d48394f30e204526324cbd62d0fa930465f91d4519cca0e97aa57afe64c \ - --hash=sha256:2c4a588a0647955e0c711c85b871bd4a279126940eb4d00a944c4a37deef57c9 +documenteer[guide]==1.0.0a18 \ + --hash=sha256:25f20e46408b4188f54d958557fc206512102425d72742bb6a46d57d032f6dff \ + --hash=sha256:6a3b4123eef3f8d5cbb8168b2cc897d95c590a0f280e0bff8b46da89c4257772 # via # -r requirements/dev.in # documenteer @@ -573,6 +573,9 @@ pygments==2.17.2 \ # rich # sphinx # sphinx-prompt +pylatexenc==2.10 \ + --hash=sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3 + # via documenteer pytest==7.4.3 \ --hash=sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac \ --hash=sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5 @@ -667,127 +670,127 @@ rich==13.7.0 \ --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \ --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235 # via pytest-pretty -rpds-py==0.13.2 \ - --hash=sha256:06d218e4464d31301e943b65b2c6919318ea6f69703a351961e1baaf60347276 \ - --hash=sha256:12ecf89bd54734c3c2c79898ae2021dca42750c7bcfb67f8fb3315453738ac8f \ - --hash=sha256:15253fff410873ebf3cfba1cc686a37711efcd9b8cb30ea21bb14a973e393f60 \ - --hash=sha256:188435794405c7f0573311747c85a96b63c954a5f2111b1df8018979eca0f2f0 \ - --hash=sha256:1ceebd0ae4f3e9b2b6b553b51971921853ae4eebf3f54086be0565d59291e53d \ - --hash=sha256:244e173bb6d8f3b2f0c4d7370a1aa341f35da3e57ffd1798e5b2917b91731fd3 \ - --hash=sha256:25b28b3d33ec0a78e944aaaed7e5e2a94ac811bcd68b557ca48a0c30f87497d2 \ - --hash=sha256:25ea41635d22b2eb6326f58e608550e55d01df51b8a580ea7e75396bafbb28e9 \ - 
--hash=sha256:29d311e44dd16d2434d5506d57ef4d7036544fc3c25c14b6992ef41f541b10fb \ - --hash=sha256:2a1472956c5bcc49fb0252b965239bffe801acc9394f8b7c1014ae9258e4572b \ - --hash=sha256:2a7bef6977043673750a88da064fd513f89505111014b4e00fbdd13329cd4e9a \ - --hash=sha256:2ac26f50736324beb0282c819668328d53fc38543fa61eeea2c32ea8ea6eab8d \ - --hash=sha256:2e72f750048b32d39e87fc85c225c50b2a6715034848dbb196bf3348aa761fa1 \ - --hash=sha256:31e220a040b89a01505128c2f8a59ee74732f666439a03e65ccbf3824cdddae7 \ - --hash=sha256:35f53c76a712e323c779ca39b9a81b13f219a8e3bc15f106ed1e1462d56fcfe9 \ - --hash=sha256:38d4f822ee2f338febcc85aaa2547eb5ba31ba6ff68d10b8ec988929d23bb6b4 \ - --hash=sha256:38f9bf2ad754b4a45b8210a6c732fe876b8a14e14d5992a8c4b7c1ef78740f53 \ - --hash=sha256:3a44c8440183b43167fd1a0819e8356692bf5db1ad14ce140dbd40a1485f2dea \ - --hash=sha256:3ab96754d23372009638a402a1ed12a27711598dd49d8316a22597141962fe66 \ - --hash=sha256:3c55d7f2d817183d43220738270efd3ce4e7a7b7cbdaefa6d551ed3d6ed89190 \ - --hash=sha256:46e1ed994a0920f350a4547a38471217eb86f57377e9314fbaaa329b71b7dfe3 \ - --hash=sha256:4a5375c5fff13f209527cd886dc75394f040c7d1ecad0a2cb0627f13ebe78a12 \ - --hash=sha256:4c2d26aa03d877c9730bf005621c92da263523a1e99247590abbbe252ccb7824 \ - --hash=sha256:4c4e314d36d4f31236a545696a480aa04ea170a0b021e9a59ab1ed94d4c3ef27 \ - --hash=sha256:4d0c10d803549427f427085ed7aebc39832f6e818a011dcd8785e9c6a1ba9b3e \ - --hash=sha256:4dcc5ee1d0275cb78d443fdebd0241e58772a354a6d518b1d7af1580bbd2c4e8 \ - --hash=sha256:51967a67ea0d7b9b5cd86036878e2d82c0b6183616961c26d825b8c994d4f2c8 \ - --hash=sha256:530190eb0cd778363bbb7596612ded0bb9fef662daa98e9d92a0419ab27ae914 \ - --hash=sha256:5379e49d7e80dca9811b36894493d1c1ecb4c57de05c36f5d0dd09982af20211 \ - --hash=sha256:5493569f861fb7b05af6d048d00d773c6162415ae521b7010197c98810a14cab \ - --hash=sha256:5a4c1058cdae6237d97af272b326e5f78ee7ee3bbffa6b24b09db4d828810468 \ - --hash=sha256:5d75d6d220d55cdced2f32cc22f599475dbe881229aeddba6c79c2e9df35a2b3 \ - 
--hash=sha256:5d97e9ae94fb96df1ee3cb09ca376c34e8a122f36927230f4c8a97f469994bff \ - --hash=sha256:5feae2f9aa7270e2c071f488fab256d768e88e01b958f123a690f1cc3061a09c \ - --hash=sha256:603d5868f7419081d616dab7ac3cfa285296735e7350f7b1e4f548f6f953ee7d \ - --hash=sha256:61d42d2b08430854485135504f672c14d4fc644dd243a9c17e7c4e0faf5ed07e \ - --hash=sha256:61dbc1e01dc0c5875da2f7ae36d6e918dc1b8d2ce04e871793976594aad8a57a \ - --hash=sha256:65cfed9c807c27dee76407e8bb29e6f4e391e436774bcc769a037ff25ad8646e \ - --hash=sha256:67a429520e97621a763cf9b3ba27574779c4e96e49a27ff8a1aa99ee70beb28a \ - --hash=sha256:6aadae3042f8e6db3376d9e91f194c606c9a45273c170621d46128f35aef7cd0 \ - --hash=sha256:6ba8858933f0c1a979781272a5f65646fca8c18c93c99c6ddb5513ad96fa54b1 \ - --hash=sha256:6bc568b05e02cd612be53900c88aaa55012e744930ba2eeb56279db4c6676eb3 \ - --hash=sha256:729408136ef8d45a28ee9a7411917c9e3459cf266c7e23c2f7d4bb8ef9e0da42 \ - --hash=sha256:751758d9dd04d548ec679224cc00e3591f5ebf1ff159ed0d4aba6a0746352452 \ - --hash=sha256:76d59d4d451ba77f08cb4cd9268dec07be5bc65f73666302dbb5061989b17198 \ - --hash=sha256:79bf58c08f0756adba691d480b5a20e4ad23f33e1ae121584cf3a21717c36dfa \ - --hash=sha256:7de12b69d95072394998c622cfd7e8cea8f560db5fca6a62a148f902a1029f8b \ - --hash=sha256:7f55cd9cf1564b7b03f238e4c017ca4794c05b01a783e9291065cb2858d86ce4 \ - --hash=sha256:80e5acb81cb49fd9f2d5c08f8b74ffff14ee73b10ca88297ab4619e946bcb1e1 \ - --hash=sha256:87a90f5545fd61f6964e65eebde4dc3fa8660bb7d87adb01d4cf17e0a2b484ad \ - --hash=sha256:881df98f0a8404d32b6de0fd33e91c1b90ed1516a80d4d6dc69d414b8850474c \ - --hash=sha256:8a776a29b77fe0cc28fedfd87277b0d0f7aa930174b7e504d764e0b43a05f381 \ - --hash=sha256:8c2a61c0e4811012b0ba9f6cdcb4437865df5d29eab5d6018ba13cee1c3064a0 \ - --hash=sha256:8fa6bd071ec6d90f6e7baa66ae25820d57a8ab1b0a3c6d3edf1834d4b26fafa2 \ - --hash=sha256:96f2975fb14f39c5fe75203f33dd3010fe37d1c4e33177feef1107b5ced750e3 \ - --hash=sha256:96fb0899bb2ab353f42e5374c8f0789f54e0a94ef2f02b9ac7149c56622eaf31 \ - 
--hash=sha256:97163a1ab265a1073a6372eca9f4eeb9f8c6327457a0b22ddfc4a17dcd613e74 \ - --hash=sha256:9c95a1a290f9acf7a8f2ebbdd183e99215d491beea52d61aa2a7a7d2c618ddc6 \ - --hash=sha256:9d94d78418203904730585efa71002286ac4c8ac0689d0eb61e3c465f9e608ff \ - --hash=sha256:a6ba2cb7d676e9415b9e9ac7e2aae401dc1b1e666943d1f7bc66223d3d73467b \ - --hash=sha256:aa0379c1935c44053c98826bc99ac95f3a5355675a297ac9ce0dfad0ce2d50ca \ - --hash=sha256:ac96d67b37f28e4b6ecf507c3405f52a40658c0a806dffde624a8fcb0314d5fd \ - --hash=sha256:ade2ccb937060c299ab0dfb2dea3d2ddf7e098ed63ee3d651ebfc2c8d1e8632a \ - --hash=sha256:aefbdc934115d2f9278f153952003ac52cd2650e7313750390b334518c589568 \ - --hash=sha256:b07501b720cf060c5856f7b5626e75b8e353b5f98b9b354a21eb4bfa47e421b1 \ - --hash=sha256:b5267feb19070bef34b8dea27e2b504ebd9d31748e3ecacb3a4101da6fcb255c \ - --hash=sha256:b5f6328e8e2ae8238fc767703ab7b95785521c42bb2b8790984e3477d7fa71ad \ - --hash=sha256:b8996ffb60c69f677245f5abdbcc623e9442bcc91ed81b6cd6187129ad1fa3e7 \ - --hash=sha256:b981a370f8f41c4024c170b42fbe9e691ae2dbc19d1d99151a69e2c84a0d194d \ - --hash=sha256:b9d121be0217787a7d59a5c6195b0842d3f701007333426e5154bf72346aa658 \ - --hash=sha256:bcef4f2d3dc603150421de85c916da19471f24d838c3c62a4f04c1eb511642c1 \ - --hash=sha256:bed0252c85e21cf73d2d033643c945b460d6a02fc4a7d644e3b2d6f5f2956c64 \ - --hash=sha256:bfdfbe6a36bc3059fff845d64c42f2644cf875c65f5005db54f90cdfdf1df815 \ - --hash=sha256:c0095b8aa3e432e32d372e9a7737e65b58d5ed23b9620fea7cb81f17672f1fa1 \ - --hash=sha256:c1f41d32a2ddc5a94df4b829b395916a4b7f103350fa76ba6de625fcb9e773ac \ - --hash=sha256:c45008ca79bad237cbc03c72bc5205e8c6f66403773929b1b50f7d84ef9e4d07 \ - --hash=sha256:c82bbf7e03748417c3a88c1b0b291288ce3e4887a795a3addaa7a1cfd9e7153e \ - --hash=sha256:c918621ee0a3d1fe61c313f2489464f2ae3d13633e60f520a8002a5e910982ee \ - --hash=sha256:d204957169f0b3511fb95395a9da7d4490fb361763a9f8b32b345a7fe119cb45 \ - --hash=sha256:d329896c40d9e1e5c7715c98529e4a188a1f2df51212fd65102b32465612b5dc \ - 
--hash=sha256:d3a61e928feddc458a55110f42f626a2a20bea942ccedb6fb4cee70b4830ed41 \ - --hash=sha256:d48db29bd47814671afdd76c7652aefacc25cf96aad6daefa82d738ee87461e2 \ - --hash=sha256:d5593855b5b2b73dd8413c3fdfa5d95b99d657658f947ba2c4318591e745d083 \ - --hash=sha256:d79c159adea0f1f4617f54aa156568ac69968f9ef4d1e5fefffc0a180830308e \ - --hash=sha256:db09b98c7540df69d4b47218da3fbd7cb466db0fb932e971c321f1c76f155266 \ - --hash=sha256:ddf23960cb42b69bce13045d5bc66f18c7d53774c66c13f24cf1b9c144ba3141 \ - --hash=sha256:e06cfea0ece444571d24c18ed465bc93afb8c8d8d74422eb7026662f3d3f779b \ - --hash=sha256:e7c564c58cf8f248fe859a4f0fe501b050663f3d7fbc342172f259124fb59933 \ - --hash=sha256:e86593bf8637659e6a6ed58854b6c87ec4e9e45ee8a4adfd936831cef55c2d21 \ - --hash=sha256:eaffbd8814bb1b5dc3ea156a4c5928081ba50419f9175f4fc95269e040eff8f0 \ - --hash=sha256:ee353bb51f648924926ed05e0122b6a0b1ae709396a80eb583449d5d477fcdf7 \ - --hash=sha256:ee6faebb265e28920a6f23a7d4c362414b3f4bb30607141d718b991669e49ddc \ - --hash=sha256:efe093acc43e869348f6f2224df7f452eab63a2c60a6c6cd6b50fd35c4e075ba \ - --hash=sha256:f03a1b3a4c03e3e0161642ac5367f08479ab29972ea0ffcd4fa18f729cd2be0a \ - --hash=sha256:f0d320e70b6b2300ff6029e234e79fe44e9dbbfc7b98597ba28e054bd6606a57 \ - --hash=sha256:f252dfb4852a527987a9156cbcae3022a30f86c9d26f4f17b8c967d7580d65d2 \ - --hash=sha256:f5f4424cb87a20b016bfdc157ff48757b89d2cc426256961643d443c6c277007 \ - --hash=sha256:f8eae66a1304de7368932b42d801c67969fd090ddb1a7a24f27b435ed4bed68f \ - --hash=sha256:fdb82eb60d31b0c033a8e8ee9f3fc7dfbaa042211131c29da29aea8531b4f18f +rpds-py==0.15.2 \ + --hash=sha256:02744236ac1895d7be837878e707a5c35fb8edc5137602f253b63623d7ad5c8c \ + --hash=sha256:03f9c5875515820633bd7709a25c3e60c1ea9ad1c5d4030ce8a8c203309c36fd \ + --hash=sha256:044f6f46d62444800402851afa3c3ae50141f12013060c1a3a0677e013310d6d \ + --hash=sha256:07a2e1d78d382f7181789713cdf0c16edbad4fe14fe1d115526cb6f0eef0daa3 \ + 
--hash=sha256:082e0e55d73690ffb4da4352d1b5bbe1b5c6034eb9dc8c91aa2a3ee15f70d3e2 \ + --hash=sha256:13152dfe7d7c27c40df8b99ac6aab12b978b546716e99f67e8a67a1d441acbc3 \ + --hash=sha256:13716e53627ad97babf72ac9e01cf9a7d4af2f75dd5ed7b323a7a9520e948282 \ + --hash=sha256:13ff62d3561a23c17341b4afc78e8fcfd799ab67c0b1ca32091d71383a98ba4b \ + --hash=sha256:1607cda6129f815493a3c184492acb5ae4aa6ed61d3a1b3663aa9824ed26f7ac \ + --hash=sha256:164fcee32f15d04d61568c9cb0d919e37ff3195919cd604039ff3053ada0461b \ + --hash=sha256:1c24e30d720c0009b6fb2e1905b025da56103c70a8b31b99138e4ed1c2a6c5b0 \ + --hash=sha256:1e6fcd0a0f62f2997107f758bb372397b8d5fd5f39cc6dcb86f7cb98a2172d6c \ + --hash=sha256:1fd0f0b1ccd7d537b858a56355a250108df692102e08aa2036e1a094fd78b2dc \ + --hash=sha256:2181e86d4e1cdf49a7320cb72a36c45efcb7670d0a88f09fd2d3a7967c0540fd \ + --hash=sha256:2974e6dff38afafd5ccf8f41cb8fc94600b3f4fd9b0a98f6ece6e2219e3158d5 \ + --hash=sha256:2dccc623725d0b298f557d869a68496a2fd2a9e9c41107f234fa5f7a37d278ac \ + --hash=sha256:2df3d07a16a3bef0917b28cd564778fbb31f3ffa5b5e33584470e2d1b0f248f0 \ + --hash=sha256:2e7e5633577b3bd56bf3af2ef6ae3778bbafb83743989d57f0e7edbf6c0980e4 \ + --hash=sha256:2ee066a64f0d2ba45391cac15b3a70dcb549e968a117bd0500634754cfe0e5fc \ + --hash=sha256:2f1f295a5c28cfa74a7d48c95acc1c8a7acd49d7d9072040d4b694fe11cd7166 \ + --hash=sha256:2faa97212b0dc465afeedf49045cdd077f97be1188285e646a9f689cb5dfff9e \ + --hash=sha256:30479a9f1fce47df56b07460b520f49fa2115ec2926d3b1303c85c81f8401ed1 \ + --hash=sha256:337a8653fb11d2fbe7157c961cc78cb3c161d98cf44410ace9a3dc2db4fad882 \ + --hash=sha256:3423007fc0661827e06f8a185a3792c73dda41f30f3421562f210cf0c9e49569 \ + --hash=sha256:373b76eeb79e8c14f6d82cb1d4d5293f9e4059baec6c1b16dca7ad13b6131b39 \ + --hash=sha256:3b79c63d29101cbaa53a517683557bb550462394fb91044cc5998dd2acff7340 \ + --hash=sha256:3bbc89ce2a219662ea142f0abcf8d43f04a41d5b1880be17a794c39f0d609cb0 \ + --hash=sha256:3c11bc5814554b018f6c5d6ae0969e43766f81e995000b53a5d8c8057055e886 \ + 
--hash=sha256:3cd61e759c4075510052d1eca5cddbd297fe1164efec14ef1fce3f09b974dfe4 \ + --hash=sha256:3d40fb3ca22e3d40f494d577441b263026a3bd8c97ae6ce89b2d3c4b39ac9581 \ + --hash=sha256:3db0c998c92b909d7c90b66c965590d4f3cd86157176a6cf14aa1f867b77b889 \ + --hash=sha256:422b0901878a31ef167435c5ad46560362891816a76cc0d150683f3868a6f0d1 \ + --hash=sha256:46b4f3d47d1033db569173be62365fbf7808c2bd3fb742314d251f130d90d44c \ + --hash=sha256:485fbdd23becb822804ed05622907ee5c8e8a5f43f6f43894a45f463b2217045 \ + --hash=sha256:53304cc14b1d94487d70086e1cb0cb4c29ec6da994d58ae84a4d7e78c6a6d04d \ + --hash=sha256:5595c80dd03d7e6c6afb73f3594bf3379a7d79fa57164b591d012d4b71d6ac4c \ + --hash=sha256:56b51ba29a18e5f5810224bcf00747ad931c0716e3c09a76b4a1edd3d4aba71f \ + --hash=sha256:580182fa5b269c2981e9ce9764367cb4edc81982ce289208d4607c203f44ffde \ + --hash=sha256:5e99d6510c8557510c220b865d966b105464740dcbebf9b79ecd4fbab30a13d9 \ + --hash=sha256:5eb05b654a41e0f81ab27a7c3e88b6590425eb3e934e1d533ecec5dc88a6ffff \ + --hash=sha256:62b292fff4739c6be89e6a0240c02bda5a9066a339d90ab191cf66e9fdbdc193 \ + --hash=sha256:6a5122b17a4faf5d7a6d91fa67b479736c0cacc7afe791ddebb7163a8550b799 \ + --hash=sha256:6a8ff8e809da81363bffca2b965cb6e4bf6056b495fc3f078467d1f8266fe27f \ + --hash=sha256:6c43e1b89099279cc03eb1c725c5de12af6edcd2f78e2f8a022569efa639ada3 \ + --hash=sha256:709dc11af2f74ba89c68b1592368c6edcbccdb0a06ba77eb28c8fe08bb6997da \ + --hash=sha256:7e072f5da38d6428ba1fc1115d3cc0dae895df671cb04c70c019985e8c7606be \ + --hash=sha256:813a65f95bfcb7c8f2a70dd6add9b51e9accc3bdb3e03d0ff7a9e6a2d3e174bf \ + --hash=sha256:86c01299942b0f4b5b5f28c8701689181ad2eab852e65417172dbdd6c5b3ccc8 \ + --hash=sha256:893e38d0f4319dfa70c0f36381a37cc418985c87b11d9784365b1fff4fa6973b \ + --hash=sha256:8a5f574b92b3ee7d254e56d56e37ec0e1416acb1ae357c4956d76a1788dc58fb \ + --hash=sha256:8b9650f92251fdef843e74fc252cdfd6e3c700157ad686eeb0c6d7fdb2d11652 \ + --hash=sha256:8ec464f20fe803ae00419bd1610934e3bda963aeba1e6181dfc9033dc7e8940c \ + 
--hash=sha256:8f333bfe782a2d05a67cfaa0cc9cd68b36b39ee6acfe099f980541ed973a7093 \ + --hash=sha256:8ffdeb7dbd0160d4e391e1f857477e4762d00aa2199c294eb95dfb9451aa1d9f \ + --hash=sha256:911e600e798374c0d86235e7ef19109cf865d1336942d398ff313375a25a93ba \ + --hash=sha256:9235be95662559141934fced8197de6fee8c58870f36756b0584424b6d708393 \ + --hash=sha256:938518a11780b39998179d07f31a4a468888123f9b00463842cd40f98191f4d3 \ + --hash=sha256:93c18a1696a8e0388ed84b024fe1a188a26ba999b61d1d9a371318cb89885a8c \ + --hash=sha256:97532802f14d383f37d603a56e226909f825a83ff298dc1b6697de00d2243999 \ + --hash=sha256:98ee201a52a7f65608e5494518932e1473fd43535f12cade0a1b4ab32737fe28 \ + --hash=sha256:9d2ae79f31da5143e020a8d4fc74e1f0cbcb8011bdf97453c140aa616db51406 \ + --hash=sha256:9d38494a8d21c246c535b41ecdb2d562c4b933cf3d68de03e8bc43a0d41be652 \ + --hash=sha256:9d41ebb471a6f064c0d1c873c4f7dded733d16ca5db7d551fb04ff3805d87802 \ + --hash=sha256:9e09d017e3f4d9bd7d17a30d3f59e4d6d9ba2d2ced280eec2425e84112cf623f \ + --hash=sha256:a6945c2d61c42bb7e818677f43638675b8c1c43e858b67a96df3eb2426a86c9d \ + --hash=sha256:a72e00826a2b032dda3eb25aa3e3579c6d6773d22d8446089a57a123481cc46c \ + --hash=sha256:aa1e626c524d2c7972c0f3a8a575d654a3a9c008370dc2a97e46abd0eaa749b9 \ + --hash=sha256:ab095edf1d840a6a6a4307e1a5b907a299a94e7b90e75436ee770b8c35d22a25 \ + --hash=sha256:ac2ac84a4950d627d84b61f082eba61314373cfab4b3c264b62efab02ababe83 \ + --hash=sha256:ac7187bee72384b9cfedf09a29a3b2b6e8815cc64c095cdc8b5e6aec81e9fd5f \ + --hash=sha256:ae9d83a81b09ce3a817e2cbb23aabc07f86a3abc664c613cd283ce7a03541e95 \ + --hash=sha256:afeabb382c1256a7477b739820bce7fe782bb807d82927102cee73e79b41b38b \ + --hash=sha256:b2a4cd924d0e2f4b1a68034abe4cadc73d69ad5f4cf02db6481c0d4d749f548f \ + --hash=sha256:b414ef79f1f06fb90b5165db8aef77512c1a5e3ed1b4807da8476b7e2c853283 \ + --hash=sha256:b4ecbba7efd82bd2a4bb88aab7f984eb5470991c1347bdd1f35fb34ea28dba6e \ + --hash=sha256:b61d5096e75fd71018b25da50b82dd70ec39b5e15bb2134daf7eb7bbbc103644 \ + 
--hash=sha256:b629db53fe17e6ce478a969d30bd1d0e8b53238c46e3a9c9db39e8b65a9ef973 \ + --hash=sha256:b70b45a40ad0798b69748b34d508259ef2bdc84fb2aad4048bc7c9cafb68ddb3 \ + --hash=sha256:b88c3ab98556bc351b36d6208a6089de8c8db14a7f6e1f57f82a334bd2c18f0b \ + --hash=sha256:baf744e5f9d5ee6531deea443be78b36ed1cd36c65a0b95ea4e8d69fa0102268 \ + --hash=sha256:bbc7421cbd28b4316d1d017db338039a7943f945c6f2bb15e1439b14b5682d28 \ + --hash=sha256:c31272c674f725dfe0f343d73b0abe8c878c646967ec1c6106122faae1efc15b \ + --hash=sha256:c51a899792ee2c696072791e56b2020caff58b275abecbc9ae0cb71af0645c95 \ + --hash=sha256:c61e42b4ceb9759727045765e87d51c1bb9f89987aca1fcc8a040232138cad1c \ + --hash=sha256:c7cd0841a586b7105513a7c8c3d5c276f3adc762a072d81ef7fae80632afad1e \ + --hash=sha256:c827a931c6b57f50f1bb5de400dcfb00bad8117e3753e80b96adb72d9d811514 \ + --hash=sha256:d2aa3ca9552f83b0b4fa6ca8c6ce08da6580f37e3e0ab7afac73a1cfdc230c0e \ + --hash=sha256:d46ee458452727a147d7897bb33886981ae1235775e05decae5d5d07f537695a \ + --hash=sha256:d64a657de7aae8db2da60dc0c9e4638a0c3893b4d60101fd564a3362b2bfeb34 \ + --hash=sha256:d800a8e2ac62db1b9ea5d6d1724f1a93c53907ca061de4d05ed94e8dfa79050c \ + --hash=sha256:d9d7ebcd11ea76ba0feaae98485cd8e31467c3d7985210fab46983278214736b \ + --hash=sha256:dd7d3608589072f63078b4063a6c536af832e76b0b3885f1bfe9e892abe6c207 \ + --hash=sha256:ec19e823b4ccd87bd69e990879acbce9e961fc7aebe150156b8f4418d4b27b7f \ + --hash=sha256:ee40206d1d6e95eaa2b7b919195e3689a5cf6ded730632de7f187f35a1b6052c \ + --hash=sha256:f138f550b83554f5b344d6be35d3ed59348510edc3cb96f75309db6e9bfe8210 \ + --hash=sha256:f3e6e2e502c4043c52a99316d89dc49f416acda5b0c6886e0dd8ea7bb35859e8 \ + --hash=sha256:fb10bb720348fe1647a94eb605accb9ef6a9b1875d8845f9e763d9d71a706387 \ + --hash=sha256:fc066395e6332da1e7525d605b4c96055669f8336600bef8ac569d5226a7c76f \ + --hash=sha256:fc33267d58dfbb2361baed52668c5d8c15d24bc0372cecbb79fed77339b55e0d # via # jsonschema # referencing -ruff==0.1.7 \ - 
--hash=sha256:0683b7bfbb95e6df3c7c04fe9d78f631f8e8ba4868dfc932d43d690698057e2e \ - --hash=sha256:1ea109bdb23c2a4413f397ebd8ac32cb498bee234d4191ae1a310af760e5d287 \ - --hash=sha256:276a89bcb149b3d8c1b11d91aa81898fe698900ed553a08129b38d9d6570e717 \ - --hash=sha256:290ecab680dce94affebefe0bbca2322a6277e83d4f29234627e0f8f6b4fa9ce \ - --hash=sha256:416dfd0bd45d1a2baa3b1b07b1b9758e7d993c256d3e51dc6e03a5e7901c7d80 \ - --hash=sha256:45b38c3f8788a65e6a2cab02e0f7adfa88872696839d9882c13b7e2f35d64c5f \ - --hash=sha256:4af95fd1d3b001fc41325064336db36e3d27d2004cdb6d21fd617d45a172dd96 \ - --hash=sha256:69a4bed13bc1d5dabf3902522b5a2aadfebe28226c6269694283c3b0cecb45fd \ - --hash=sha256:6b05e3b123f93bb4146a761b7a7d57af8cb7384ccb2502d29d736eaade0db519 \ - --hash=sha256:6c64cb67b2025b1ac6d58e5ffca8f7b3f7fd921f35e78198411237e4f0db8e73 \ - --hash=sha256:7f80496854fdc65b6659c271d2c26e90d4d401e6a4a31908e7e334fab4645aac \ - --hash=sha256:8b0c2de9dd9daf5e07624c24add25c3a490dbf74b0e9bca4145c632457b3b42a \ - --hash=sha256:90c958fe950735041f1c80d21b42184f1072cc3975d05e736e8d66fc377119ea \ - --hash=sha256:9dcc6bb2f4df59cb5b4b40ff14be7d57012179d69c6565c1da0d1f013d29951b \ - --hash=sha256:de02ca331f2143195a712983a57137c5ec0f10acc4aa81f7c1f86519e52b92a1 \ - --hash=sha256:df2bb4bb6bbe921f6b4f5b6fdd8d8468c940731cb9406f274ae8c5ed7a78c478 \ - --hash=sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306 +ruff==0.1.8 \ + --hash=sha256:05ffe9dbd278965271252704eddb97b4384bf58b971054d517decfbf8c523f05 \ + --hash=sha256:5daaeaf00ae3c1efec9742ff294b06c3a2a9db8d3db51ee4851c12ad385cda30 \ + --hash=sha256:7d076717c67b34c162da7c1a5bda16ffc205e0e0072c03745275e7eab888719f \ + --hash=sha256:7de792582f6e490ae6aef36a58d85df9f7a0cfd1b0d4fe6b4fb51803a3ac96fa \ + --hash=sha256:a05b0ddd7ea25495e4115a43125e8a7ebed0aa043c3d432de7e7d6e8e8cd6448 \ + --hash=sha256:aa8ee4f8440023b0a6c3707f76cadce8657553655dcbb5fc9b2f9bb9bee389f6 \ + 
--hash=sha256:b6a21ab023124eafb7cef6d038f835cb1155cd5ea798edd8d9eb2f8b84be07d9 \ + --hash=sha256:bd8ee69b02e7bdefe1e5da2d5b6eaaddcf4f90859f00281b2333c0e3a0cc9cd6 \ + --hash=sha256:c8e3255afd186c142eef4ec400d7826134f028a85da2146102a1172ecc7c3696 \ + --hash=sha256:ce697c463458555027dfb194cb96d26608abab920fa85213deb5edf26e026664 \ + --hash=sha256:db6cedd9ffed55548ab313ad718bc34582d394e27a7875b4b952c2d29c001b26 \ + --hash=sha256:e49fbdfe257fa41e5c9e13c79b9e79a23a79bd0e40b9314bc53840f520c2c0b3 \ + --hash=sha256:e6f08ca730f4dc1b76b473bdf30b1b37d42da379202a059eae54ec7fc1fbcfed \ + --hash=sha256:f35960b02df6b827c1b903091bb14f4b003f6cf102705efc4ce78132a0aa5af3 \ + --hash=sha256:f41f692f1691ad87f51708b823af4bb2c5c87c9248ddd3191c8f088e66ce590a \ + --hash=sha256:f7ee467677467526cfe135eab86a40a0e8db43117936ac4f9b469ce9cdb3fb62 \ + --hash=sha256:ff78a7583020da124dd0deb835ece1d87bb91762d40c514ee9b67a087940528b # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -909,14 +912,18 @@ sphinxcontrib-serializinghtml==1.1.9 \ --hash=sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54 \ --hash=sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1 # via sphinx -sphinxext-opengraph==0.9.0 \ - --hash=sha256:4e57e25b6d56f47b9c06a5a5d68a2a00ed3577c8a39e459b52118c6bfe5e8c8b \ - --hash=sha256:ab1eb2ffb531fb85b695e719dba7b0245b0643f6b6c0d1cc258d15a81e72a9f1 +sphinxext-opengraph==0.9.1 \ + --hash=sha256:b3b230cc6a5b5189139df937f0d9c7b23c7c204493b22646273687969dcb760e \ + --hash=sha256:dd2868a1e7c9497977fbbf44cc0844a42af39ca65fe1bb0272518af225d06fc5 # via documenteer sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer +tomlkit==0.12.3 \ + --hash=sha256:75baf5012d06501f07bee5bf8e801b9f343e7aac5a92581f20f80ce632e6b5a4 \ + 
--hash=sha256:b0a645a9156dc7cb5d3a1f0d4bab66db287fcb8e0430bdd4664a095ea16414ba + # via documenteer typed-ast==1.5.5 \ --hash=sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10 \ --hash=sha256:045f9930a1550d9352464e5149710d56a2aed23a2ffe78946478f7b5416f1ede \ diff --git a/requirements/main.txt b/requirements/main.txt index 4f3b9ec47d..0eeabf6f06 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.12 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/main.txt requirements/main.in @@ -15,24 +15,34 @@ anyio==3.7.1 \ # fastapi # httpcore # starlette -bcrypt==4.1.1 \ - --hash=sha256:12611c4b0a8b1c461646228344784a1089bc0c49975680a2f54f516e71e9b79e \ - --hash=sha256:12f40f78dcba4aa7d1354d35acf45fae9488862a4fb695c7eeda5ace6aae273f \ - --hash=sha256:14d41933510717f98aac63378b7956bbe548986e435df173c841d7f2bd0b2de7 \ - --hash=sha256:196008d91201bbb1aa4e666fee5e610face25d532e433a560cabb33bfdff958b \ - --hash=sha256:24c2ebd287b5b11016f31d506ca1052d068c3f9dc817160628504690376ff050 \ - --hash=sha256:2ade10e8613a3b8446214846d3ddbd56cfe9205a7d64742f0b75458c868f7492 \ - --hash=sha256:2e197534c884336f9020c1f3a8efbaab0aa96fc798068cb2da9c671818b7fbb0 \ - --hash=sha256:3d6c4e0d6963c52f8142cdea428e875042e7ce8c84812d8e5507bd1e42534e07 \ - --hash=sha256:476aa8e8aca554260159d4c7a97d6be529c8e177dbc1d443cb6b471e24e82c74 \ - --hash=sha256:755b9d27abcab678e0b8fb4d0abdebeea1f68dd1183b3f518bad8d31fa77d8be \ - --hash=sha256:a7a7b8a87e51e5e8ca85b9fdaf3a5dc7aaf123365a09be7a27883d54b9a0c403 \ - --hash=sha256:bab33473f973e8058d1b2df8d6e095d237c49fbf7a02b527541a86a5d1dc4444 \ - --hash=sha256:c6450538a0fc32fb7ce4c6d511448c54c4ff7640b2ed81badf9898dcb9e5b737 \ - --hash=sha256:d573885b637815a7f3a3cd5f87724d7d0822da64b0ab0aa7f7c78bae534e86dc \ - 
--hash=sha256:df37f5418d4f1cdcff845f60e747a015389fa4e63703c918330865e06ad80007 \ - --hash=sha256:f33b385c3e80b5a26b3a5e148e6165f873c1c202423570fdf45fe34e00e5f3e5 \ - --hash=sha256:fb931cd004a7ad36a89789caf18a54c20287ec1cd62161265344b9c4554fdb2e +bcrypt==4.1.2 \ + --hash=sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f \ + --hash=sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5 \ + --hash=sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb \ + --hash=sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258 \ + --hash=sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4 \ + --hash=sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc \ + --hash=sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2 \ + --hash=sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326 \ + --hash=sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483 \ + --hash=sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a \ + --hash=sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966 \ + --hash=sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63 \ + --hash=sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c \ + --hash=sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551 \ + --hash=sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d \ + --hash=sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e \ + --hash=sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0 \ + --hash=sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c \ + --hash=sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb \ + --hash=sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1 \ + 
--hash=sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42 \ + --hash=sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946 \ + --hash=sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab \ + --hash=sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1 \ + --hash=sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c \ + --hash=sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7 \ + --hash=sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369 # via -r requirements/main.in certifi==2023.11.17 \ --hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ @@ -221,9 +231,9 @@ cryptography==41.0.7 \ # -r requirements/main.in # pyjwt # safir -fastapi==0.104.1 \ - --hash=sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241 \ - --hash=sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae +fastapi==0.105.0 \ + --hash=sha256:4d12838819aa52af244580675825e750ad67c9df4614f557a769606af902cf22 \ + --hash=sha256:f19ebf6fdc82a3281d10f2cb4774bdfa90238e3b40af3525a0c09fd08ad1c480 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ From cdf6e43ad3442d76f1e457448efae15573a45163 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 18 Dec 2023 13:54:26 -0700 Subject: [PATCH 382/588] Add the lsst.prompt namespace to Sasquatch - Configure the REST Proxy to expose topics with the lsst.prompt prefix - Configure the InfluxDB connector to write these topics to the lsst.prompt database - To start use the same set of tags defined for lsst.dm --- applications/sasquatch/values-usdfdev.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index f527fba509..3418ad7d31 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ 
b/applications/sasquatch/values-usdfdev.yaml @@ -123,6 +123,12 @@ kafka-connect-manager: connectInfluxDb: "lsst.lf" topicsRegex: "lsst.lf.*" tags: benchmark_env,module,benchmark_type + lsstprompt: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.prompt" + topicsRegex: "lsst.prompt.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run kafdrop: ingress: @@ -146,6 +152,7 @@ rest-proxy: - lsst.camera - lsst.verify - lsst.lf + - lsst.prompt chronograf: ingress: From c80d02c2bc27e05fffe640fc171b3c9425385fef Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 18 Dec 2023 14:18:14 -0700 Subject: [PATCH 383/588] Remove the `run` tag and add the `group` tag per review suggestion --- applications/sasquatch/values-usdfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 3418ad7d31..0b04006e61 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -128,7 +128,7 @@ kafka-connect-manager: timestamp: "timestamp" connectInfluxDb: "lsst.prompt" topicsRegex: "lsst.prompt.*" - tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,group kafdrop: ingress: From 7bd015bd9d0fcbe4bb12147fe52ad9bf73792a81 Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Mon, 18 Dec 2023 15:01:45 -0800 Subject: [PATCH 384/588] Update prompt processing to use d_2023_12_18 --- .../values-usdfprod-prompt-processing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 90882f9985..196d694efc 100644 --- 
a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -7,7 +7,7 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: d_2023_12_05 + tag: d_2023_12_18 instrument: pipelines: >- From 406d0a1c239a42e036994cada133feedf5aa991d Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 20 Dec 2023 17:36:41 -0700 Subject: [PATCH 385/588] Switch to updated giftless server --- applications/giftless/values-roundtable-dev.yaml | 2 +- applications/giftless/values-roundtable-prod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 7bd41a2b77..b7bf3c778f 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "upstream-master" + tag: "tickets-dm-42009" server: debug: true ingress: diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index d33d08f5ea..cac6b243db 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "upstream-master" + tag: "tickets-dm-42009" server: debug: true readonly: From 0e224c6ec36461d056e24dbca8fae39c13c803e3 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 8 Dec 2023 16:28:43 -0700 Subject: [PATCH 386/588] Drop support to InfluxDB v2 in favor of Enterprise - In Sasquatch we decided to migrate to InfluxDB Enterprise and won't support InfluxDB v2 anymore. 
- Bucketmapper can be removed since its job is to create the DBRP mapping required in InfluxDB v2 - The only place with v2 enabled at the moment is TTS and so it will be removed next time we sync that environment. --- applications/sasquatch/Chart.yaml | 4 -- applications/sasquatch/README.md | 38 ++------------ .../charts/telegraf-kafka-consumer/README.md | 12 ++--- .../telegraf-kafka-consumer/values.yaml | 10 ---- applications/sasquatch/secrets.yaml | 10 ---- .../sasquatch/templates/bucketmapper.yaml | 41 --------------- applications/sasquatch/values-idfdev.yaml | 14 ------ .../sasquatch/values-roundtable-dev.yaml | 3 -- .../sasquatch/values-roundtable-prod.yaml | 3 -- applications/sasquatch/values-summit.yaml | 9 ---- .../sasquatch/values-tucson-teststand.yaml | 9 ---- applications/sasquatch/values.yaml | 50 ------------------- 12 files changed, 8 insertions(+), 195 deletions(-) delete mode 100644 applications/sasquatch/templates/bucketmapper.yaml diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index edb149fcd3..8e8a024ed3 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -25,10 +25,6 @@ dependencies: condition: source-influxdb.enabled version: 4.12.5 repository: https://helm.influxdata.com/ - - name: influxdb2 - condition: influxdb2.enabled - version: 2.1.1 - repository: https://helm.influxdata.com/ - name: kafka-connect-manager alias: kafka-connect-manager condition: kafka-connect-manager.enabled diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index e2a6e25789..eae8e91aba 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -46,32 +46,6 @@ Rubin Observatory's telemetry service. 
| influxdb.resources.requests.cpu | int | `8` | | | influxdb.resources.requests.memory | string | `"96Gi"` | | | influxdb.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. | -| influxdb2.adminUser.bucket | string | `"default"` | Admin default bucket. | -| influxdb2.adminUser.existingSecret | string | `"sasquatch"` | Get admin-password/admin-token keys from secret. | -| influxdb2.adminUser.organization | string | `"default"` | Admin default organization. | -| influxdb2.enabled | bool | `false` | | -| influxdb2.env[0].name | string | `"INFLUXD_STORAGE_WAL_FSYNC_DELAY"` | | -| influxdb2.env[0].value | string | `"100ms"` | | -| influxdb2.env[1].name | string | `"INFLUXD_HTTP_IDLE_TIMEOUT"` | | -| influxdb2.env[1].value | string | `"0"` | | -| influxdb2.env[2].name | string | `"INFLUXD_FLUX_LOG_ENABLED"` | | -| influxdb2.env[2].value | string | `"true"` | | -| influxdb2.env[3].name | string | `"INFLUXD_LOG_LEVEL"` | | -| influxdb2.env[3].value | string | `"debug"` | | -| influxdb2.image.tag | string | `"2.7.1-alpine"` | | -| influxdb2.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/api/v2/$2"` | | -| influxdb2.ingress.className | string | `"nginx"` | | -| influxdb2.ingress.enabled | bool | `false` | InfluxDB2 ingress configuration | -| influxdb2.ingress.hostname | string | `""` | | -| influxdb2.ingress.path | string | `"/influxdb2(/|$)(.*)"` | | -| influxdb2.initScripts.enabled | bool | `true` | InfluxDB2 initialization scripts | -| influxdb2.initScripts.scripts."init.sh" | string | `"#!/bin/bash\ninflux bucket create --name telegraf-kafka-consumer --org default\n"` | | -| influxdb2.persistence.enabled | bool | `true` | Enable persistent volume claim. By default storageClass is undefined choosing the default provisioner (standard on GKE). | -| influxdb2.persistence.size | string | `"1Ti"` | Persistent volume size. 
@default 1Ti for teststand deployments. | -| influxdb2.resources.limits.cpu | int | `8` | | -| influxdb2.resources.limits.memory | string | `"96Gi"` | | -| influxdb2.resources.requests.cpu | int | `8` | | -| influxdb2.resources.requests.memory | string | `"16Gi"` | | | kafdrop.enabled | bool | `true` | Enable Kafdrop. | | kafka-connect-manager | object | `{}` | Override kafka-connect-manager configuration. | | kapacitor.enabled | bool | `true` | Enable Kapacitor. | @@ -343,21 +317,17 @@ Rubin Observatory's telemetry service. | telegraf-kafka-consumer.env[0].name | string | `"TELEGRAF_PASSWORD"` | | | telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.key | string | `"telegraf-password"` | Telegraf KafkaUser password. | | telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| telegraf-kafka-consumer.env[1].name | string | `"INFLUXDB_TOKEN"` | | -| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.key | string | `"admin-token"` | InfluxDB v2 admin token. 
| +| telegraf-kafka-consumer.env[1].name | string | `"INFLUXDB_USER"` | | +| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | | telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| telegraf-kafka-consumer.env[2].name | string | `"INFLUXDB_USER"` | | -| telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | +| telegraf-kafka-consumer.env[2].name | string | `"INFLUXDB_PASSWORD"` | | +| telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| telegraf-kafka-consumer.env[3].name | string | `"INFLUXDB_PASSWORD"` | | -| telegraf-kafka-consumer.env[3].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | -| telegraf-kafka-consumer.env[3].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | telegraf-kafka-consumer.image.pullPolicy | string | IfNotPresent | Image pull policy. | | telegraf-kafka-consumer.image.repo | string | `"quay.io/influxdb/telegraf-nightly:latest"` | Telegraf image repository. | | telegraf-kafka-consumer.image.tag | string | `"latest"` | Telegraf image tag. | | telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. | -| telegraf-kafka-consumer.influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. | | telegraf-kafka-consumer.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | telegraf-kafka-consumer.kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. 
If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | | telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index bd40c0c75e..a399f84152 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -13,21 +13,17 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | env[0].name | string | `"TELEGRAF_PASSWORD"` | | | env[0].valueFrom.secretKeyRef.key | string | `"telegraf-password"` | Telegraf KafkaUser password. | | env[0].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| env[1].name | string | `"INFLUXDB_TOKEN"` | | -| env[1].valueFrom.secretKeyRef.key | string | `"admin-token"` | InfluxDB v2 admin token. | +| env[1].name | string | `"INFLUXDB_USER"` | | +| env[1].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | | env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| env[2].name | string | `"INFLUXDB_USER"` | | -| env[2].valueFrom.secretKeyRef.key | string | `"influxdb-user"` | InfluxDB v1 user | +| env[2].name | string | `"INFLUXDB_PASSWORD"` | | +| env[2].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | env[2].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | -| env[3].name | string | `"INFLUXDB_PASSWORD"` | | -| env[3].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | -| env[3].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | image.pullPolicy | string | IfNotPresent | Image pull policy. | | image.repo | string | `"quay.io/influxdb/telegraf-nightly:latest"` | Telegraf image repository. | | image.tag | string | `"latest"` | Telegraf image tag. 
| | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. | -| influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. | | kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | kafkaConsumers.test.fields | list | `[]` | List of Avro fields to be recorded as InfluxDB fields. If not specified, any Avro field that is not marked as a tag will become an InfluxDB field. | | kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index b716e36d35..82095b397b 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -31,12 +31,6 @@ env: name: sasquatch # -- Telegraf KafkaUser password. key: telegraf-password - - name: INFLUXDB_TOKEN - valueFrom: - secretKeyRef: - name: sasquatch - # -- InfluxDB v2 admin token. - key: admin-token - name: INFLUXDB_USER valueFrom: secretKeyRef: @@ -122,10 +116,6 @@ influxdb: # -- Name of the InfluxDB v1 database to write to. database: "telegraf-kafka-consumer-v1" -influxdb2: - # -- Name of the InfluxDB v2 bucket to write to. - bucket: "telegraf-kafka-consumer" - # -- Kubernetes resources requests and limits. resources: {} diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 52bd82d4cc..ac7f00b61f 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -12,16 +12,6 @@ TOKEN_SECRET: ? generate: type: password -admin-password: - description: >- - ? - generate: - type: password -admin-token: - description: >- - ? - generate: - type: password influxdb-password: description: >- ? 
diff --git a/applications/sasquatch/templates/bucketmapper.yaml b/applications/sasquatch/templates/bucketmapper.yaml deleted file mode 100644 index de676abaf0..0000000000 --- a/applications/sasquatch/templates/bucketmapper.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.influxdb2.enabled }} -apiVersion: batch/v1 -kind: CronJob -metadata: - name: sasquatch-bucketmapper - namespace: sasquatch -spec: - schedule: "3-59/15 * * * *" - successfulJobsHistoryLimit: 1 - jobTemplate: - spec: - template: - spec: - restartPolicy: Never - automountServiceAccountToken: false - containers: - - name: bucketmapper - image: "{{ .Values.bucketmapper.image.repository }}:{{ .Values.bucketmapper.image.tag }}" - securityContext: - allowPrivilegeEscalation: false - runAsNonRoot: true - runAsUser: 405 - runAsGroup: 100 - capabilities: - drop: - - all - readOnlyRootFilesystem: true - env: - - name: "INFLUXDB_TOKEN" - valueFrom: - secretKeyRef: - name: "sasquatch" - key: "admin-token" - - name: "INFLUXDB_ORG" - value: "default" - - name: "INFLUXDB_URL" - value: "http://sasquatch-influxdb2.sasquatch:80" - - name: "DEBUG" - value: "true" - command: [ "bucketmapper" ] -{{- end }} diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index a4690b321c..49da473bbb 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -36,20 +36,6 @@ influxdb: memory: 16Gi cpu: 2 -influxdb2: - enabled: true - ingress: - enabled: true - hostname: data-dev.lsst.cloud - resources: - requests: - memory: 16Gi - cpu: 2 - limits: - memory: 16Gi - cpu: 2 - - telegraf-kafka-consumer: enabled: true image: diff --git a/applications/sasquatch/values-roundtable-dev.yaml b/applications/sasquatch/values-roundtable-dev.yaml index 39606b67ca..de45e0b1b9 100644 --- a/applications/sasquatch/values-roundtable-dev.yaml +++ b/applications/sasquatch/values-roundtable-dev.yaml @@ -85,9 +85,6 @@ strimzi-kafka: influxdb: enabled: false 
-influxdb2: - enabled: false - telegraf-kafka-consumer: enabled: false diff --git a/applications/sasquatch/values-roundtable-prod.yaml b/applications/sasquatch/values-roundtable-prod.yaml index 39606b67ca..de45e0b1b9 100644 --- a/applications/sasquatch/values-roundtable-prod.yaml +++ b/applications/sasquatch/values-roundtable-prod.yaml @@ -85,9 +85,6 @@ strimzi-kafka: influxdb: enabled: false -influxdb2: - enabled: false - telegraf-kafka-consumer: enabled: false diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index b9f55d0364..ed40f707b0 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -39,15 +39,6 @@ influxdb: memory: 128Gi cpu: 16 -influxdb2: - enabled: false - persistence: - storageClass: rook-ceph-block - size: 5Ti - ingress: - enabled: true - hostname: summit-lsp.lsst.codes - kafka-connect-manager: influxdbSink: # Based on the kafka producers configuration for the Summit diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 8e93e98774..92b28c6441 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -33,15 +33,6 @@ influxdb: enabled: true hostname: tucson-teststand.lsst.codes -influxdb2: - enabled: true - persistence: - storageClass: rook-ceph-block - ingress: - # -- InfluxDB2 ingress configuration - enabled: true - hostname: tucson-teststand.lsst.codes - telegraf-kafka-consumer: enabled: false kafkaConsumers: diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 02c94d48d3..f50bd800c0 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -199,56 +199,6 @@ source-influxdb: memory: 96Gi cpu: 8 -influxdb2: - enabled: false - image: - tag: 2.7.1-alpine - adminUser: - # -- Admin default organization. 
- organization: "default" - # -- Admin default bucket. - bucket: "default" - # -- Get admin-password/admin-token keys from secret. - existingSecret: sasquatch - persistence: - # -- Enable persistent volume claim. - # By default storageClass is undefined choosing the default provisioner (standard on GKE). - enabled: true - # -- Persistent volume size. - # @default 1Ti for teststand deployments. - size: 1Ti - ingress: - # -- InfluxDB2 ingress configuration - enabled: false - hostname: "" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: /api/v2/$2 - className: "nginx" - path: /influxdb2(/|$)(.*) - env: - - name: INFLUXD_STORAGE_WAL_FSYNC_DELAY - value: "100ms" - - name: INFLUXD_HTTP_IDLE_TIMEOUT - value: "0" - - name: INFLUXD_FLUX_LOG_ENABLED - value: "true" - - name: INFLUXD_LOG_LEVEL - value: "debug" - initScripts: - # -- InfluxDB2 initialization scripts - enabled: true - scripts: - init.sh: |+ - #!/bin/bash - influx bucket create --name telegraf-kafka-consumer --org default - resources: - requests: - memory: 16Gi - cpu: 8 - limits: - memory: 96Gi - cpu: 8 - # -- Override kafka-connect-manager configuration. kafka-connect-manager: {} From b724be0c90de105744fe7d862b0b8a7420aecf22 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 15 Dec 2023 15:23:20 -0700 Subject: [PATCH 387/588] Add InfluxDB Enterprise chart - Add InfluxDB Enterprise chart to sasquatch - Get InfluxDB Enterprise license file from a secret - Enable InfluxDB Enterprise at USDF prod and add environment specific configuration - Enable persistence for meta and data nodes, data nodes use the zfs--rubin-efd storage class at USDF to provision their volumes on the local nodes - Deploy 3 meta pods in the the "c" nodes using default storage class - Deploy 2 data pods, they are automatically scheduled to the "n" nodes when using the zfs--rubin-efd storage class - Add pod anti affinity configuration to avoid deploying meta pods or data pods in the same node. 
- Allow meta nodes to use a different storage class and persistence configuration - Set resources requests and limits for meta and data pods - Configure a LoadBalancer service for the meta pods. That's required if we want to run the influxd-ctl tool remotely. --- applications/sasquatch/Chart.yaml | 3 + applications/sasquatch/README.md | 44 +++ .../charts/influxdb-enterprise/.helmignore | 23 ++ .../charts/influxdb-enterprise/Chart.yaml | 7 + .../charts/influxdb-enterprise/README.md | 55 ++++ .../influxdb-enterprise/templates/NOTES.txt | 0 .../templates/_helpers.tpl | 76 +++++ .../templates/bootstrap-job.yaml | 159 +++++++++++ .../templates/certmanager-issuer.yaml | 10 + .../templates/data-certmanager.yaml | 28 ++ .../templates/data-configmap.yaml | 120 ++++++++ .../templates/data-service.yaml | 59 ++++ .../templates/data-statefulset.yaml | 188 ++++++++++++ .../templates/meta-certmanager.yaml | 28 ++ .../templates/meta-configmap.yaml | 96 +++++++ .../templates/meta-service.yaml | 40 +++ .../templates/meta-statefulset.yaml | 178 ++++++++++++ .../templates/serviceaccount.yaml | 12 + .../charts/influxdb-enterprise/values.yaml | 270 ++++++++++++++++++ applications/sasquatch/values-usdfprod.yaml | 37 +++ applications/sasquatch/values.yaml | 3 + 21 files changed, 1436 insertions(+) create mode 100644 applications/sasquatch/charts/influxdb-enterprise/.helmignore create mode 100644 applications/sasquatch/charts/influxdb-enterprise/Chart.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/README.md create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/NOTES.txt create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/_helpers.tpl create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml create mode 100644 
applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/data-service.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/meta-service.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/serviceaccount.yaml create mode 100644 applications/sasquatch/charts/influxdb-enterprise/values.yaml diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index 8e8a024ed3..b1d09650e2 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -25,6 +25,9 @@ dependencies: condition: source-influxdb.enabled version: 4.12.5 repository: https://helm.influxdata.com/ + - name: influxdb-enterprise + condition: influxdb-enterprise.enabled + version: 1.0.0 - name: kafka-connect-manager alias: kafka-connect-manager condition: kafka-connect-manager.enabled diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index eae8e91aba..9d361ea09c 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -22,6 +22,7 @@ Rubin Observatory's telemetry service. | chronograf.resources.limits.memory | string | `"64Gi"` | | | chronograf.resources.requests.cpu | int | `1` | | | chronograf.resources.requests.memory | string | `"4Gi"` | | +| influxdb-enterprise | object | `{}` | Override influxdb-enterprise configuration. 
| | influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb-staging.enabled | bool | `false` | Enable InfluxDB staging deployment. | | influxdb-staging.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | @@ -86,6 +87,49 @@ Rubin Observatory's telemetry service. | strimzi-kafka | object | `{}` | Override strimzi-kafka configuration. | | strimzi-registry-operator | object | `{"clusterName":"sasquatch","clusterNamespace":"sasquatch","operatorNamespace":"sasquatch"}` | strimzi-registry-operator configuration. | | telegraf-kafka-consumer | object | `{}` | Override telegraf-kafka-consumer configuration. 
| +| influxdb-enterprise.bootstrap.auth.secretName | string | `"sasquatch"` | | +| influxdb-enterprise.bootstrap.ddldml | object | `{}` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | +| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| influxdb-enterprise.data.env | object | `{}` | | +| influxdb-enterprise.data.flux.enabled | bool | `true` | | +| influxdb-enterprise.data.https.enabled | bool | `false` | | +| influxdb-enterprise.data.https.insecure | bool | `true` | | +| influxdb-enterprise.data.https.secret.name | string | `"influxdb-tls"` | | +| influxdb-enterprise.data.https.useCertManager | bool | `false` | | +| influxdb-enterprise.data.image | object | `{}` | | +| influxdb-enterprise.data.persistence.enabled | bool | `false` | | +| influxdb-enterprise.data.replicas | int | `1` | | +| influxdb-enterprise.data.resources | object | `{}` | | +| influxdb-enterprise.data.service.type | string | `"ClusterIP"` | | +| influxdb-enterprise.fullnameOverride | string | `""` | | +| influxdb-enterprise.imagePullSecrets | list | `[]` | | +| influxdb-enterprise.license.secret.key | string | `"json"` | | +| influxdb-enterprise.license.secret.name | string | `"influxdb-enterprise-license"` | | +| 
influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | | +| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"meta"` | | +| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | +| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| influxdb-enterprise.meta.env | object | `{}` | | +| influxdb-enterprise.meta.https.enabled | bool | `false` | | +| influxdb-enterprise.meta.https.insecure | bool | `true` | | +| influxdb-enterprise.meta.https.secret.name | string | `"influxdb-tls"` | | +| influxdb-enterprise.meta.https.useCertManager | bool | `false` | | +| influxdb-enterprise.meta.image | object | `{}` | | +| influxdb-enterprise.meta.persistence.enabled | bool | `false` | | +| influxdb-enterprise.meta.podDisruptionBudget.minAvailable | int | `2` | | +| influxdb-enterprise.meta.replicas | int | `3` | | +| influxdb-enterprise.meta.resources | object | `{}` | | +| influxdb-enterprise.meta.service.type | string | `"ClusterIP"` | | +| influxdb-enterprise.meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | | +| influxdb-enterprise.nameOverride | string | `""` | | +| influxdb-enterprise.serviceAccount.annotations | object | `{}` | | +| influxdb-enterprise.serviceAccount.create | bool | `false` | | +| influxdb-enterprise.serviceAccount.name | string | `""` | | | kafdrop.affinity | object | `{}` | Affinity 
configuration. | | kafdrop.cmdArgs | string | `"--message.format=AVRO --topic.deleteEnabled=false --topic.createEnabled=false"` | Command line arguments to Kafdrop. | | kafdrop.existingSecret | string | `""` | Existing k8s secrect use to set kafdrop environment variables. Set SCHEMAREGISTRY_AUTH for basic auth credentials in the form username:password | diff --git a/applications/sasquatch/charts/influxdb-enterprise/.helmignore b/applications/sasquatch/charts/influxdb-enterprise/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/sasquatch/charts/influxdb-enterprise/Chart.yaml b/applications/sasquatch/charts/influxdb-enterprise/Chart.yaml new file mode 100644 index 0000000000..462b948693 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +name: influxdb-enterprise +version: 1.0.0 +description: Run InfluxDB Enterprise on Kubernetes +sources: + - https://github.com/influxdata/influxdb +appVersion: 1.11.3 diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md new file mode 100644 index 0000000000..060e6b19ea --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -0,0 +1,55 @@ +# influxdb-enterprise + +Run InfluxDB Enterprise on Kubernetes + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| bootstrap.auth.secretName | string | 
`"sasquatch"` | | +| bootstrap.ddldml | object | `{}` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | +| data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| data.env | object | `{}` | | +| data.flux.enabled | bool | `true` | | +| data.https.enabled | bool | `false` | | +| data.https.insecure | bool | `true` | | +| data.https.secret.name | string | `"influxdb-tls"` | | +| data.https.useCertManager | bool | `false` | | +| data.image | object | `{}` | | +| data.persistence.enabled | bool | `false` | | +| data.replicas | int | `1` | | +| data.resources | object | `{}` | | +| data.service.type | string | `"ClusterIP"` | | +| fullnameOverride | string | `""` | | +| imagePullSecrets | list | `[]` | | +| license.secret.key | string | `"json"` | | +| license.secret.name | string | `"influxdb-enterprise-license"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].key | string | `"influxdb.influxdata.com/component"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].operator | string | `"In"` | | +| 
meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"meta"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | +| meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| meta.env | object | `{}` | | +| meta.https.enabled | bool | `false` | | +| meta.https.insecure | bool | `true` | | +| meta.https.secret.name | string | `"influxdb-tls"` | | +| meta.https.useCertManager | bool | `false` | | +| meta.image | object | `{}` | | +| meta.persistence.enabled | bool | `false` | | +| meta.podDisruptionBudget.minAvailable | int | `2` | | +| meta.replicas | int | `3` | | +| meta.resources | object | `{}` | | +| meta.service.type | string | `"ClusterIP"` | | +| meta.sharedSecret.secretName | string | `"influxdb-enterprise-shared-secret"` | | +| nameOverride | string | `""` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `false` | | +| serviceAccount.name | string | `""` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/NOTES.txt b/applications/sasquatch/charts/influxdb-enterprise/templates/NOTES.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/_helpers.tpl b/applications/sasquatch/charts/influxdb-enterprise/templates/_helpers.tpl new file mode 100644 index 0000000000..581879ec0a --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "influxdb-enterprise.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "influxdb-enterprise.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "influxdb-enterprise.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "influxdb-enterprise.labels" -}} +helm.sh/chart: {{ include "influxdb-enterprise.chart" . }} +{{ include "influxdb-enterprise.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "influxdb-enterprise.selectorLabels" -}} +app.kubernetes.io/name: {{ include "influxdb-enterprise.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account +*/}} +{{- define "influxdb-enterprise.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "influxdb-enterprise.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "influxdb-enterprise.image" -}} +{{- $dataTagName := (printf "%s-%s" .chart.AppVersion .podtype) -}} +{{- if (.imageroot) }} +{{- if (.imageroot.tag) -}} +{{- $dataTagName = .imageroot.tag -}} +{{- end -}} +{{- if (.imageroot.addsuffix) -}} +{{- $dataTagName = printf "%s-%s" $dataTagName .podtype -}} +{{- end -}} +{{- end }} +image: "{{ .podvals.image.repository | default "influxdb" }}:{{ $dataTagName }}" +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml new file mode 100644 index 0000000000..384bfc49a1 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml @@ -0,0 +1,159 @@ +{{- if or .Values.bootstrap.auth.secretName (or .Values.bootstrap.ddldml.raw .Values.bootstrap.ddldml.configMap) -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-bootstrap + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": post-install + "helm.sh/hook-delete-policy": hook-succeeded +spec: + activeDeadlineSeconds: 300 + backoffLimit: 10 + template: + metadata: + labels: + {{- include "influxdb-enterprise.selectorLabels" . | nindent 8 }} + spec: + {{- if .Values.bootstrap.ddldml.configMap }} + volumes: + - name: ddldml + configMap: + name: {{ .Values.bootstrap.ddldml.configMap }} + {{ end }} + restartPolicy: OnFailure + serviceAccountName: {{ template "influxdb-enterprise.serviceAccountName" . }} + # Consider this a middleware of setup components. + # Each is executed in-order until all of them complete successfully. + # This means that each command must be idempotent.
+ initContainers: + {{- if .Values.bootstrap.auth.secretName }} + - name: auth + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + # Exposing these environment variables makes this command idempotent + # as even if the authentication has been setup, we can still execute the command + # and it won't error as nothing has changed + env: + - name: INFLUX_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-user" + - name: INFLUX_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-password" + command: + - influx + args: + {{- if .Values.data.https.enabled }} + - -ssl + {{- if .Values.data.https.insecure }} + - -unsafeSsl + {{ end }} + {{ end }} + - -host + - {{ include "influxdb-enterprise.fullname" . }}-data + - -execute + - CREATE USER $(INFLUX_USERNAME) WITH PASSWORD '$(INFLUX_PASSWORD)' WITH ALL PRIVILEGES + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 100m + memory: 50Mi + {{ end }} + {{- if .Values.bootstrap.ddldml.configMap }} + - name: ddl + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + {{- if .Values.bootstrap.auth.secretName }} + env: + - name: INFLUX_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-user" + - name: INFLUX_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-password" + {{ end }} + volumeMounts: + - name: ddldml + mountPath: /ddldml + command: + - influx + args: + {{- if .Values.data.https.enabled }} + - -ssl + {{- if .Values.data.https.insecure }} + - -unsafeSsl + {{ end }} + {{ end }} + - -host + - {{ include 
"influxdb-enterprise.fullname" . }}-data + - -import + - -path + - /ddldml/ddl + resources: + {{- toYaml .Values.bootstrap.ddldml.resources | nindent 10 }} + {{ end }} + {{- if .Values.bootstrap.ddldml.configMap }} + - name: dml + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + {{- if .Values.bootstrap.auth.secretName }} + env: + - name: INFLUX_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-user" + - name: INFLUX_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.bootstrap.auth.secretName }} + key: "influxdb-password" + {{ end }} + volumeMounts: + - name: ddldml + mountPath: /ddldml + command: + - influx + args: + {{- if .Values.data.https.enabled }} + - -ssl + {{- if .Values.data.https.insecure }} + - -unsafeSsl + {{ end }} + {{ end }} + - -host + - {{ include "influxdb-enterprise.fullname" . 
}}-data + - -import + - -path + - /ddldml/dml + resources: + {{- toYaml .Values.bootstrap.ddldml.resources | nindent 10 }} + {{ end }} + containers: + - name: success + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 8 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + command: + - echo + args: + - "Bootstrap Success" + resources: + requests: + cpu: 100m + memory: 50Mi + limits: + cpu: 100m + memory: 50Mi +{{ end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml new file mode 100644 index 0000000000..72cbe9a68c --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml @@ -0,0 +1,10 @@ +{{- if or .Values.data.https.useCertManager .Values.meta.https.useCertManager -}} +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ include "influxdb-enterprise.fullname" . }} + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +spec: + selfSigned: {} +{{- end -}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml new file mode 100644 index 0000000000..ea53ddd991 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.data.https.enabled .Values.data.https.useCertManager -}} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +spec: + {{- $replicas := (int $.Values.data.replicas) }} + {{- $fullname := include "influxdb-enterprise.fullname" . 
}} + {{- $namespace := .Release.Namespace }} + + dnsNames: + - {{ $fullname }}-data + - {{ $fullname }}-data.{{ .Release.Namespace }}.svc + + {{- range $i := until $replicas }} + - {{ $fullname }}-data-{{ $i | toString }}.{{ $fullname }}-data + - {{ $fullname }}-data-{{ $i | toString }}.{{ $fullname }}-data.{{ $namespace }} + - {{ $fullname }}-data-{{ $i | toString }}.{{ $fullname }}-data.{{ $namespace }}.svc + {{ end }} + + isCA: true + issuerRef: + kind: Issuer + name: {{ include "influxdb-enterprise.fullname" . }} + secretName: {{ include "influxdb-enterprise.fullname" . }}-data-tls +{{- end -}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml new file mode 100644 index 0000000000..b8fb74a5df --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml @@ -0,0 +1,120 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data + labels: + app.kubernetes.io/component: data + {{- include "influxdb-enterprise.labels" . 
| nindent 4 }} +data: + influxdb.conf: |+ + bind-address = ":8088" + reporting-disabled = false + + {{ if .Values.data.https.enabled }} + https-enabled = true + + https-certificate = "/var/run/secrets/tls/tls.crt" + https-private-key = "/var/run/secrets/tls/tls.key" + + {{ end }} + + {{ if .Values.data.flux.enabled }} + flux-enabled = true + {{ end }} + + [enterprise] + {{ if .Values.license.key }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-key = "{{ .Values.license.key }}" #✨ mutually exclusive with license-path + {{ else if .Values.license.secret }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-path = "/var/run/secrets/influxdb/license.json" + {{ end }} + + [cluster] + {{ if .Values.data.https.enabled }} + https-enabled = true + + https-certificate = "/var/run/secrets/tls/tls.crt" + https-private-key = "/var/run/secrets/tls/tls.key" + + {{ if .Values.data.https.insecure }} + https-insecure-tls = true + {{ end }} + {{ end }} + + [meta] + dir = "/var/lib/influxdb/meta" + + {{ if and .Values.meta.https.enabled }} + meta-tls-enabled = true + + {{ if .Values.meta.https.insecure }} + meta-insecure-tls = true + {{ end }} + + {{ end }} + + [hinted-handoff] + dir = "/var/lib/influxdb/hh" + + [data] + dir = "/var/lib/influxdb/data" + wal-dir = "/var/lib/influxdb/wal" + + entrypoint.pl: |+ + #!/usr/bin/env perl + $ENV{INFLUXDB_HOSTNAME} = `hostname -f`; + $ENV{INFLUXDB_HOSTNAME} =~ s/\n$//; + + {{ if .Values.data.preruncmds }} + # These are commands that will run before influxdb is initialized + {{- range .Values.data.preruncmds }} + {{ if .description }} + # {{ .description }} + {{- end }} + system('{{ .cmd }}'); + {{- end }} + {{ end }} + + $pid = fork(); + + # Inside this conditional is our child process, which + # will return `influxd-meta` + if($pid == 0) { + exec('influxd') or die("Failed to execute influxd: $!\n"); + } + + $SIG{HUP} = sub { kill 
'HUP', $pid }; + $SIG{TERM} = sub { kill 'TERM', $pid }; + $SIG{KILL} = sub { kill 'KILL', $pid }; + + # Register data node with meta leader + {{ if .Values.meta.https.enabled }} + my $protocol = "https"; + {{ else }} + my $protocol = "http"; + {{ end }} + my $meta_service = $ENV{RELEASE_NAME} . "-meta"; + + # We're not going to define an exit strategy for failure here. + # This should be handled by the probes on the pods + while (true) { + # There's no LWP/Simple available in our images, so forking out to curl 😥 + print "\n\n\nREGISTER WITH META SERVICE\n\n\n"; + $exit_code = system('curl', {{ if .Values.meta.https.insecure }}'-k',{{ end }} '-XPOST', '--silent', '--fail', '--retry', '5', '--retry-delay', '0', "-Faddr=$ENV{INFLUXDB_HOSTNAME}:8088", "$protocol://$meta_service:8091/add-data"); + # $exit_code = system('curl', {{ if .Values.meta.https.insecure }}'-k',{{ end }} '-XPOST', '-v', '--silent', '--fail', '--retry', '5', '--retry-delay', '0', "-Faddr=$ENV{INFLUXDB_HOSTNAME}:8088", "$protocol://$meta_service:8091/add-data"); + + + if ($exit_code == 0) { + $| = 1; + last; + } + print "\n\n\nFailed: $!\n\n\n"; + $| = 1; + + exit 255 + } + + waitpid($pid, 0); + exit $? diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-service.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-service.yaml new file mode 100644 index 0000000000..fabcbc596c --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-service.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.data.service.annotations }} + annotations: +{{ toYaml .Values.data.service.annotations | indent 4 }} +{{- end }} + name: {{ template "influxdb-enterprise.fullname" . }}-data + labels: + influxdb.influxdata.com/component: data + {{- include "influxdb-enterprise.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.data.service.type }} +{{- if (eq "ClusterIP" .Values.data.service.type) }} + clusterIP: None +{{- end }} + publishNotReadyAddresses: true + ports: + - port: 8086 + protocol: TCP + name: http +{{- if .Values.data.service.nodePort }} + nodePort: {{ .Values.data.service.nodePort }} +{{- end }} + - port: 8088 + protocol: TCP + name: rpc + - port: 2003 + # Graphite supports TCP and UDP, + # so this should __maybe__ be configurable + # Though most use TCP + protocol: TCP + name: graphite + - port: 4242 + protocol: TCP + name: opentsdb + # LoadBalancer service type only allows for one protocol + # disabling UDP ports +{{- if (ne "LoadBalancer" .Values.data.service.type) }} + - port: 25826 + protocol: UDP + name: collectd + - port: 8089 + protocol: UDP + name: udp +{{- end }} + selector: + influxdb.influxdata.com/component: data +{{- include "influxdb-enterprise.selectorLabels" . | nindent 4 }} +{{- if .Values.data.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.data.service.loadBalancerIP }} +{{- end }} +{{- if .Values.data.service.externalIPs }} + externalIPs: +{{ toYaml .Values.data.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.data.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.data.service.externalTrafficPolicy }} +{{- end }} \ No newline at end of file diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml new file mode 100644 index 0000000000..7239b4e7cd --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml @@ -0,0 +1,188 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data + labels: + {{- include "influxdb-enterprise.labels" . 
| nindent 4 }} +spec: + replicas: {{ .Values.data.replicas | default 3 }} + podManagementPolicy: Parallel + serviceName: {{ include "influxdb-enterprise.fullname" . }}-data + selector: + matchLabels: + influxdb.influxdata.com/component: data + {{- include "influxdb-enterprise.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.data.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + influxdb.influxdata.com/component: data + {{- include "influxdb-enterprise.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.data.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "influxdb-enterprise.serviceAccountName" . }} + volumes: + {{ if not .Values.data.persistence.enabled }} + - name: {{ include "influxdb-enterprise.fullname" . }}-data-data + emptyDir: {} + {{ end }} + - name: config + configMap: + name: {{ include "influxdb-enterprise.fullname" . }}-data + {{- if .Values.license.secret }} + - name: license + secret: + secretName: {{ .Values.license.secret.name }} + items: + - key: {{ .Values.license.secret.key }} + path: license.json + {{- end }} + {{- if .Values.data.https.enabled }} + - name: tls + secret: + {{- if .Values.data.https.useCertManager }} + secretName: {{ include "influxdb-enterprise.fullname" . 
}}-data-tls + {{ else }} + secretName: {{ .Values.data.https.secret.name }} + {{ if or .Values.data.https.secret.crt .Values.data.https.secret.key }} + items: + - key: {{ .Values.data.https.secret.crt }} + path: tls.crt + - key: {{ .Values.data.https.secret.key }} + path: tls.key + {{ end }} + {{ end }} + {{ end }} + {{- if and .Values.data.https.enabled .Values.data.https.secret }} + {{- if .Values.data.https.secret.ca -}} + - name: tls-ca + secret: + {{ if .Values.data.https.secret.caSecret -}} + secretName: {{ .Values.data.https.secret.caSecret }} + {{ else }} + secretName: {{ .Values.data.https.secret.name }} + {{ end }} + items: + - key: {{ .Values.data.https.secret.ca }} + path: ca.crt + {{ end }} + {{ end }} + containers: + - name: {{ .Chart.Name }} + command: + - "/usr/bin/perl" + args: + - "/etc/influxdb/entrypoint.pl" + securityContext: + {{- toYaml .Values.data.securityContext | nindent 12 }} + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.data "podtype" "data") | indent 10 }} + imagePullPolicy: {{ .Values.data.image.pullPolicy }} + env: + - name: RELEASE_NAME + value: {{ include "influxdb-enterprise.fullname" . 
}} + {{- if .Values.data.env }} +{{ toYaml .Values.data.env | indent 10 }} + {{- end}} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + ports: + - name: http + containerPort: 8086 + protocol: TCP + - name: raft + containerPort: 8088 + protocol: TCP + - name: udp + containerPort: 8089 + protocol: UDP + - name: graphite + containerPort: 2003 + protocol: TCP + - name: opentsdb + containerPort: 4242 + protocol: TCP + - name: collectd + containerPort: 25826 + protocol: UDP + livenessProbe: + httpGet: + path: /ping + port: http + {{- if .Values.data.https.enabled }} + scheme: HTTPS + {{- end }} + readinessProbe: + initialDelaySeconds: 30 + httpGet: + path: /ping + port: http + {{- if .Values.data.https.enabled }} + scheme: HTTPS + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/influxdb + - name: {{ include "influxdb-enterprise.fullname" . }}-data-data + mountPath: /var/lib/influxdb + {{- if .Values.license.secret }} + - name: license + mountPath: /var/run/secrets/influxdb/ + {{- end }} + {{- if .Values.data.https.enabled }} + - name: tls + mountPath: /var/run/secrets/tls/ + {{ end }} + {{- if and .Values.data.https.enabled .Values.data.https.secret }} + {{- if .Values.data.https.secret.ca -}} + - name: tls-ca + mountPath: /usr/share/ca-certificates/selfsigned/ca.crt + subPath: ca.crt + {{ end }} + {{ end }} + resources: + {{- toYaml .Values.data.resources | nindent 12 }} + {{- with .Values.data.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.data.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.data.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if and .Values.data.persistence.enabled (not .Values.data.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: {{ include "influxdb-enterprise.fullname" . 
}}-data-data + annotations: + {{- range $key, $value := .Values.data.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + - {{ .Values.data.persistence.accessMode | quote}} + resources: + requests: + storage: {{ .Values.data.persistence.size | quote }} + {{- if .Values.data.persistence.storageClass }} + {{- if (eq "-" .Values.data.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.data.persistence.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml new file mode 100644 index 0000000000..92330b7de9 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.meta.https.enabled .Values.meta.https.useCertManager -}} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +spec: + {{- $replicas := (int $.Values.meta.replicas) }} + {{- $fullname := include "influxdb-enterprise.fullname" . }} + {{- $namespace := .Release.Namespace }} + + dnsNames: + - {{ $fullname }}-meta + - {{ $fullname }}-meta.{{ .Release.Namespace }}.svc + + {{- range $i := until $replicas }} + - {{ $fullname }}-meta-{{ $i | toString }}.{{ $fullname }}-meta + - {{ $fullname }}-meta-{{ $i | toString }}.{{ $fullname }}-meta.{{ $namespace }} + - {{ $fullname }}-meta-{{ $i | toString }}.{{ $fullname }}-meta.{{ $namespace }}.svc + {{ end }} + + isCA: true + issuerRef: + kind: Issuer + name: {{ include "influxdb-enterprise.fullname" . }} + secretName: {{ include "influxdb-enterprise.fullname" . 
}}-meta-tls +{{- end -}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml new file mode 100644 index 0000000000..14275ab774 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml @@ -0,0 +1,96 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + labels: + app.kubernetes.io/component: meta + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +data: + influxdb-meta.conf: |+ + bind-address = ":8091" + reporting-disabled = false + + [enterprise] + {{ if .Values.license.key }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-key = "{{ .Values.license.key }}" #✨ mutually exclusive with license-path + {{ else if .Values.license.secret }} + # license-key and license-path are mutually exclusive, use only one and leave the other blank + license-path = "/var/run/secrets/influxdb/license.json" + {{ end }} + + [meta] + dir = "/var/lib/influxdb/meta" + + {{ if .Values.meta.https.enabled }} + https-enabled = true + + https-certificate = "/var/run/secrets/tls/tls.crt" + https-private-key = "/var/run/secrets/tls/tls.key" + + {{ if .Values.meta.https.insecure }} + https-insecure-tls = true + {{ end }} + + {{ end }} + + {{ if and .Values.data.https.enabled }} + data-use-tls = true + + {{ if .Values.data.https.insecure }} + data-insecure-tls = true + {{ end }} + + {{ end }} + + entrypoint.pl: |+ + #!/usr/bin/env perl + $ENV{INFLUXDB_HOSTNAME} = `hostname -f`; + $ENV{INFLUXDB_HOSTNAME} =~ s/\n$//; + + {{ if .Values.meta.preruncmds }} + # These are commands that will run before influxdb is initialized + {{- range .Values.meta.preruncmds }} + {{ if .description }} + # {{ .description }} + {{- end }} + system('{{ .cmd }}'); + {{- end }} + {{ end }} + + $pid = fork(); + + # Inside this conditional is our 
child process, which + # will return `influxd-meta` + if($pid == 0) { + exec('influxd-meta') or die("Failed to execute influxd-meta: $!\n"); + } + + $SIG{HUP} = sub { kill 'HUP', $pid }; + $SIG{TERM} = sub { kill 'TERM', $pid }; + $SIG{KILL} = sub { kill 'KILL', $pid }; + + # Register meta node + my $meta_leader = $ENV{INFLUXDB_HOSTNAME}; + $meta_leader =~ s/-[0-9]+./-0./; + + # We're not going to define an exit strategy for failure here. + # This should be handled by the probes on the pods + while (true) { + if($meta_leader eq $ENV{INFLUXDB_HOSTNAME}) { + system('influxd-ctl', {{ if .Values.meta.https.enabled }}'-bind-tls',{{ end }}{{ if .Values.meta.https.insecure }}'-k',{{ end }} 'add-meta', "$ENV{INFLUXDB_HOSTNAME}:8091"); + } else { + system('influxd-ctl', {{ if .Values.meta.https.enabled }}'-bind-tls',{{ end }}{{ if .Values.meta.https.insecure }}'-k',{{ end }} 'join', "$meta_leader:8091"); + } + + if ($? == 0) { + last; + } + + # Wait a few seconds and try again + # Maybe we should implement some rudamentary backoff + sleep(2); + } + + waitpid($pid, 0); + exit $? diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-service.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-service.yaml new file mode 100644 index 0000000000..177f6f172c --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-service.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.meta.service.annotations }} + annotations: +{{ toYaml .Values.meta.service.annotations | indent 4 }} +{{- end }} + name: {{ template "influxdb-enterprise.fullname" . }}-meta + labels: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.meta.service.type }} +{{- if (eq "ClusterIP" .Values.meta.service.type) }} + clusterIP: None +{{- end }} + publishNotReadyAddresses: true + ports: + - port: 8089 + protocol: TCP + name: raft + - port: 8091 + protocol: TCP + name: http +{{- if .Values.meta.service.nodePort }} + nodePort: {{ .Values.meta.service.nodePort }} +{{- end }} + selector: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.selectorLabels" . | nindent 4 }} +{{- if .Values.meta.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.meta.service.loadBalancerIP }} +{{- end }} +{{- if .Values.meta.service.externalIPs }} + externalIPs: +{{ toYaml .Values.meta.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.meta.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.meta.service.externalTrafficPolicy }} +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml new file mode 100644 index 0000000000..39995c7b35 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml @@ -0,0 +1,178 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.meta.replicas | default 3 }} + podManagementPolicy: Parallel + serviceName: {{ include "influxdb-enterprise.fullname" . }}-meta + selector: + matchLabels: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.meta.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + influxdb.influxdata.com/component: meta + {{- include "influxdb-enterprise.selectorLabels" . 
| nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.meta.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "influxdb-enterprise.serviceAccountName" . }} + volumes: + {{ if not .Values.meta.persistence.enabled }} + - name: {{ include "influxdb-enterprise.fullname" . }}-meta-data + emptyDir: {} + {{ end }} + - name: config + configMap: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + {{- if .Values.license.secret }} + - name: license + secret: + secretName: {{ .Values.license.secret.name }} + items: + - key: {{ .Values.license.secret.key }} + path: license.json + {{- end }} + {{- if .Values.meta.https.enabled }} + - name: tls + secret: + {{- if .Values.meta.https.useCertManager }} + secretName: {{ include "influxdb-enterprise.fullname" . }}-meta-tls + {{ else }} + secretName: {{ .Values.meta.https.secret.name }} + {{ if or .Values.meta.https.secret.crt .Values.meta.https.secret.key }} + items: + - key: {{ .Values.meta.https.secret.crt }} + path: tls.crt + - key: {{ .Values.meta.https.secret.key }} + path: tls.key + {{ end }} + {{ end }} + {{ end }} + {{- if and .Values.meta.https.enabled .Values.meta.https.secret }} + {{- if .Values.meta.https.secret.ca -}} + - name: tls-ca + secret: + {{ if .Values.meta.https.secret.caSecret -}} + secretName: {{ .Values.meta.https.secret.caSecret }} + {{ else }} + secretName: {{ .Values.meta.https.secret.name }} + {{ end }} + items: + - key: {{ .Values.meta.https.secret.ca }} + path: ca.crt + {{ end }} + {{ end }} + containers: + - name: {{ .Chart.Name }} + command: + - "/usr/bin/perl" + args: + - "/etc/influxdb/entrypoint.pl" + securityContext: + {{- toYaml .Values.meta.securityContext | nindent 12 }} + {{- include "influxdb-enterprise.image" (dict "chart" .Chart "imageroot" .Values.image "podvals" .Values.meta "podtype" "meta") | indent 10 }} + imagePullPolicy: {{ 
.Values.meta.image.pullPolicy }} + env: + - name: INFLUXDB_META_INTERNAL_SHARED_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.meta.sharedSecret.secretName }} + key: secret + {{- if .Values.meta.env }} +{{ toYaml .Values.meta.env | indent 12 }} + {{- end}} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ .Values.envFromSecret }} + {{- end }} + ports: + - name: http + containerPort: 8091 + protocol: TCP + - name: raft + containerPort: 8089 + protocol: TCP + livenessProbe: + httpGet: + path: /ping + port: http + {{- if .Values.meta.https.enabled }} + scheme: HTTPS + {{- end }} + readinessProbe: + httpGet: + path: /ping + port: http + {{- if .Values.meta.https.enabled }} + scheme: HTTPS + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/influxdb + - name: {{ include "influxdb-enterprise.fullname" . }}-meta-data + mountPath: /var/lib/influxdb + {{- if .Values.license.secret }} + - name: license + mountPath: /var/run/secrets/influxdb/ + {{- end }} + {{- if .Values.meta.https.enabled }} + - name: tls + mountPath: /var/run/secrets/tls/ + {{ end }} + {{- if and .Values.meta.https.enabled .Values.meta.https.secret }} + {{- if .Values.meta.https.secret.ca -}} + - name: tls-ca + mountPath: /usr/share/ca-certificates/selfsigned/ca.crt + subPath: ca.crt + {{ end }} + {{ end }} + resources: + {{- toYaml .Values.meta.resources | nindent 12 }} + {{- with .Values.meta.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.meta.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.meta.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if and .Values.meta.persistence.enabled (not .Values.meta.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: {{ include "influxdb-enterprise.fullname" . 
}}-meta-data + annotations: + {{- range $key, $value := .Values.meta.persistence.annotations }} + {{ $key }}: "{{ $value }}" + {{- end }} + spec: + accessModes: + - {{ .Values.meta.persistence.accessMode | quote}} + resources: + requests: + storage: {{ .Values.meta.persistence.size | quote }} + {{- if .Values.meta.persistence.storageClass }} + {{- if (eq "-" .Values.meta.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.meta.persistence.storageClass }}" + {{- end }} + {{- end }} + {{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/serviceaccount.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/serviceaccount.yaml new file mode 100644 index 0000000000..9e1fc427a1 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "influxdb-enterprise.labels" . | nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "influxdb-enterprise.serviceAccountName" . }} +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml new file mode 100644 index 0000000000..6b591fbbbc --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -0,0 +1,270 @@ +# Default values for influxdb-enterprise. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +nameOverride: "" +fullnameOverride: "" +imagePullSecrets: [] + +# License-key and license-path are mutually exclusive. Use only one and leave the other blank. +license: + # You can put your license key here for testing this chart out, + # but we STRONGLY recommend using a license file stored in a secret + # when you ship to production. 
+ # key: "" + # secret: + # name: influxdb-license + # key: json + secret: + name: influxdb-enterprise-license + key: json +# Service account to use for deployment +# If the name is not specified default account will be used +serviceAccount: + create: false + name: '' + annotations: {} + +## The name of a secret in the same kubernetes namespace which contain values +## to be added to the environment. +## This can be used, for example, to set the INFLUXDB_ENTERPRISE_LICENSE_KEY +## or INFLUXDB_ENTERPRISE_LICENSE_PATH environment variable. +# envFromSecret: influxdb-license + +# A secret with keys "username" and "password" is required +# This bootstrap configuration allows you to configure +# some parts of the InfluxDB system at install time. +# +# This job ONLY runs once, after the first `helm upgrade --install` +# or `helm install` +# +# This job WILL NOT run on upgrades +# +bootstrap: + # This section allows you to enable authentication' + # of the data nodes, which will create a username + # and password for your "admin" account. + # A secret should be provided, which will have the keys + # "username" and "password" available. + auth: + secretName: sasquatch + # This section allows you to use DDL and DML to define + # databases, retention policies, and inject some data. + # When using the configMap setting, the keys "ddl" and "dml" + # must exist, even if one of them is empty. + # DDL is executed before DML, to enforce databases and retention policies + # to exist. 
+ ddldml: {} + # configMap: influxdb-ddl-dml + # resources: {} + + +# Sets the tagged version of the docker image that you want to run, will default to latest +# The suffix is if you are pulling from influx repo, example images will be influxdb:1.8.0-meta and influxdb:1.8.0-data +# If set to true, the suffix won't be added +#image: +# tag: 1.11.3 +# ignoresuffix: false + +meta: + replicas: 3 + image: {} + # override: true + # pullPolicy: IfNotPresent + # repository: influxdb + # nodeSelector: {} + # tolerations: [] + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: influxdb.influxdata.com/component + operator: In + values: + - meta + topologyKey: kubernetes.io/hostname + # podAnnotations: {} + # + # podSecurityContext: {} + # fsGroup: 2000 + # + # This allows you to run the pods as a non-privileged user, set to the uid + # securityContext: {} + # runAsUser: 2000 + # runAsGroup: 2000 + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + # + # These are the commands that will be run before influxdb is started + # preruncmds: + # - cmd: ls -l + # description: We want to see what's in the directory + # - cmd: stat $HOME/somefile + # description: And we run a second command + # This secret needs a key called "secret" and it should be a long random string + # Please see docs for shared-internal-secret: + # https://docs.influxdata.com/enterprise_influxdb/v1.8/administration/config-data-nodes/#meta-internal-shared-secret + sharedSecret: + secretName: influxdb-enterprise-shared-secret + # + service: + ## Specify a service type + ## ClusterIP is default + ## ref: http://kubernetes.io/docs/user-guide/services/ + type: ClusterIP + # loadBalancerIP: "" + # externalIPs: [] + # externalTrafficPolicy: "" + # nodePort: 30086 + ## Add annotations to service + # annotations: {} + # + ## Persist data to a persistent 
volume + persistence: + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + ## influxdb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + # annotations: + # accessMode: ReadWriteOnce + # size: 8Gi + # Pick one + podDisruptionBudget: + # maxUnavailable: 2 + minAvailable: 2 + https: + # If you need to debug the data nodes registration with the meta nodes, we recommend + # that you comment out the active curl command in the data-configmap and uncomment the following + # line, which has -v / debugging enabled. + enabled: false + # The `useCertManager` option, when set to true, will + # automatically create the certificate resources for you. + # You do not need to set the secret.name when using this flag. 
+ useCertManager: false + secret: + name: influxdb-tls + # crt: tls.crt + # key: tls.key + # ca: ca.crt + # caSecret: secret-name # only use if different from the above + insecure: true + ## Additional data container environment variables e.g.: + ## INFLUXDB_HTTP_FLUX_ENABLED: "true" + env: {} + resources: {} + +data: + replicas: 1 + image: {} + # override: true + # pullPolicy: IfNotPresent + # repository: influxdb + # nodeSelector: {} + # tolerations: [] + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: influxdb.influxdata.com/component + operator: In + values: + - data + topologyKey: kubernetes.io/hostname + # podAnnotations: {} + # + # podSecurityContext: {} + # fsGroup: 2000 + # + # This allows you to run the pods as a non-privileged user, set to the uid + # securityContext: {} + # runAsUser: 2000 + # runAsGroup: 2000 + # capabilities: + # drop: + # - ALL + # capabilities: + # drop: + # - ALL + # + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + # + # These are the commands that will be run before influxdb is started + # preruncmds: + # - cmd: ls -l + # description: We want to see what's in the directory + # - cmd: stat $HOME/somefile + # description: And we run a second command + # + service: + ## Specify a service type + ## ClusterIP is default + ## ref: http://kubernetes.io/docs/user-guide/services/ + type: ClusterIP + # loadBalancerIP: "" + # externalIPs: [] + # externalTrafficPolicy: "" + # nodePort: 30091 + ## Add annotations to service + # annotations: {} + # + ## Persist data to a persistent volume + persistence: + enabled: false + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + # existingClaim: + ## influxdb data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", 
storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + # annotations: + # accessMode: ReadWriteOnce + # size: 8Gi + https: + # If you need to debug the data nodes registration with the meta nodes, we recommend + # that you comment out the active curl command in the data-configmap and uncomment the following + # line, which has -v / debugging enabled. + enabled: false + # The `useCertManager` option, when set to true, will + # automatically create the certificate resources for you. + # You do not need to set the secret.name when using this flag. + useCertManager: false + secret: + name: influxdb-tls + # crt: tls.crt + # key: tls.key + # ca: ca.crt + # caSecret: secret-name # only use if different from the above + insecure: true + flux: + enabled: true + ## Additional data container environment variables e.g.: + ## INFLUXDB_HTTP_FLUX_ENABLED: "true" + env: {} + resources: {} diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 16a7da7592..ba6b4a1459 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -44,6 +44,43 @@ source-influxdb: coordinator: query-timeout: "300s" +influxdb-enterprise: + enabled: true + meta: + service: + type: LoadBalancer + annotations: + metallb.universe.tf/address-pool: sdf-services + persistence: + # -- Enable InfluxDB Enterprise meta pod persistence + enabled: true + accessMode: ReadWriteOnce + size: 16Gi + # -- InfluxDB Enterprise meta pod resources + resources: + requests: + memory: 2Gi + cpu: 2 + limits: + memory: 4Gi + cpu: 4 + data: + replicas: 2 + # -- Enable InfluxDB Enterprise data pod persistence + persistence: + enabled: true + accessMode: ReadWriteOnce + storageClass: zfs--rubin-efd + size: 30Ti + # -- InfluxDB 
Enterprise data pod resources + resources: + requests: + memory: 192Gi + cpu: 8 + limits: + memory: 192Gi + cpu: 8 + kafka-connect-manager: influxdbSink: # Based on the kafka producers configuration for the Summit diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index f50bd800c0..c5666261ba 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -199,6 +199,9 @@ source-influxdb: memory: 96Gi cpu: 8 +# -- Override influxdb-enterprise configuration. +influxdb-enterprise: {} + # -- Override kafka-connect-manager configuration. kafka-connect-manager: {} From 13970aa497f45b6ef8df93555ba09450bdd2c83e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 24 Dec 2023 06:54:02 +0000 Subject: [PATCH 388/588] Update Helm release argo-workflows to v0.40.3 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index e572f97f83..9f0756acb4 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.39.9 + version: 0.40.3 repository: https://argoproj.github.io/argo-helm From 94880de209ac7270ad8bb5a09bac872def22aca5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 10:27:02 +0000 Subject: [PATCH 389/588] Update Helm release ingress-nginx to v4.9.0 --- applications/ingress-nginx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index 101c089ecc..0e0d226198 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - 
https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.8.4 + version: 4.9.0 repository: https://kubernetes.github.io/ingress-nginx From 1914eb5569d03192831ccbf4c6a833c495b78d19 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 12:29:48 +0000 Subject: [PATCH 390/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 428 +++++++++++++++++++++--------------------- requirements/main.txt | 218 ++++++++++----------- 2 files changed, 323 insertions(+), 323 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 87bef5ef30..e97aebcbff 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -143,59 +143,59 @@ click==8.1.7 \ # -c requirements/main.txt # documenteer # sphinx-click -coverage[toml]==7.3.3 \ - --hash=sha256:007a7e49831cfe387473e92e9ff07377f6121120669ddc39674e7244350a6a29 \ - --hash=sha256:1191270b06ecd68b1d00897b2daddb98e1719f63750969614ceb3438228c088e \ - --hash=sha256:1367aa411afb4431ab58fd7ee102adb2665894d047c490649e86219327183134 \ - --hash=sha256:1f0f8f0c497eb9c9f18f21de0750c8d8b4b9c7000b43996a094290b59d0e7523 \ - --hash=sha256:222b038f08a7ebed1e4e78ccf3c09a1ca4ac3da16de983e66520973443b546bc \ - --hash=sha256:243576944f7c1a1205e5cd658533a50eba662c74f9be4c050d51c69bd4532936 \ - --hash=sha256:2e9223a18f51d00d3ce239c39fc41410489ec7a248a84fab443fbb39c943616c \ - --hash=sha256:307aecb65bb77cbfebf2eb6e12009e9034d050c6c69d8a5f3f737b329f4f15fb \ - --hash=sha256:31c0b1b8b5a4aebf8fcd227237fc4263aa7fa0ddcd4d288d42f50eff18b0bac4 \ - --hash=sha256:3b15e03b8ee6a908db48eccf4e4e42397f146ab1e91c6324da44197a45cb9132 \ - --hash=sha256:3c854c1d2c7d3e47f7120b560d1a30c1ca221e207439608d27bc4d08fd4aeae8 \ - --hash=sha256:475de8213ed95a6b6283056d180b2442eee38d5948d735cd3d3b52b86dd65b92 \ - --hash=sha256:50c472c1916540f8b2deef10cdc736cd2b3d1464d3945e4da0333862270dcb15 \ - 
--hash=sha256:593efa42160c15c59ee9b66c5f27a453ed3968718e6e58431cdfb2d50d5ad284 \ - --hash=sha256:65d716b736f16e250435473c5ca01285d73c29f20097decdbb12571d5dfb2c94 \ - --hash=sha256:733537a182b5d62184f2a72796eb6901299898231a8e4f84c858c68684b25a70 \ - --hash=sha256:757453848c18d7ab5d5b5f1827293d580f156f1c2c8cef45bfc21f37d8681069 \ - --hash=sha256:79c32f875fd7c0ed8d642b221cf81feba98183d2ff14d1f37a1bbce6b0347d9f \ - --hash=sha256:7f3bad1a9313401ff2964e411ab7d57fb700a2d5478b727e13f156c8f89774a0 \ - --hash=sha256:7fbf3f5756e7955174a31fb579307d69ffca91ad163467ed123858ce0f3fd4aa \ - --hash=sha256:811ca7373da32f1ccee2927dc27dc523462fd30674a80102f86c6753d6681bc6 \ - --hash=sha256:89400aa1752e09f666cc48708eaa171eef0ebe3d5f74044b614729231763ae69 \ - --hash=sha256:8c944cf1775235c0857829c275c777a2c3e33032e544bcef614036f337ac37bb \ - --hash=sha256:9437a4074b43c177c92c96d051957592afd85ba00d3e92002c8ef45ee75df438 \ - --hash=sha256:9e17d9cb06c13b4f2ef570355fa45797d10f19ca71395910b249e3f77942a837 \ - --hash=sha256:9ede881c7618f9cf93e2df0421ee127afdfd267d1b5d0c59bcea771cf160ea4a \ - --hash=sha256:a1f76cfc122c9e0f62dbe0460ec9cc7696fc9a0293931a33b8870f78cf83a327 \ - --hash=sha256:a2ac4245f18057dfec3b0074c4eb366953bca6787f1ec397c004c78176a23d56 \ - --hash=sha256:a702e66483b1fe602717020a0e90506e759c84a71dbc1616dd55d29d86a9b91f \ - --hash=sha256:ad2453b852a1316c8a103c9c970db8fbc262f4f6b930aa6c606df9b2766eee06 \ - --hash=sha256:af75cf83c2d57717a8493ed2246d34b1f3398cb8a92b10fd7a1858cad8e78f59 \ - --hash=sha256:afdcc10c01d0db217fc0a64f58c7edd635b8f27787fea0a3054b856a6dff8717 \ - --hash=sha256:c59a3e59fb95e6d72e71dc915e6d7fa568863fad0a80b33bc7b82d6e9f844973 \ - --hash=sha256:cad9afc1644b979211989ec3ff7d82110b2ed52995c2f7263e7841c846a75348 \ - --hash=sha256:d299d379b676812e142fb57662a8d0d810b859421412b4d7af996154c00c31bb \ - --hash=sha256:d31650d313bd90d027f4be7663dfa2241079edd780b56ac416b56eebe0a21aab \ - --hash=sha256:d874434e0cb7b90f7af2b6e3309b0733cde8ec1476eb47db148ed7deeb2a9494 \ - 
--hash=sha256:db0338c4b0951d93d547e0ff8d8ea340fecf5885f5b00b23be5aa99549e14cfd \ - --hash=sha256:df04c64e58df96b4427db8d0559e95e2df3138c9916c96f9f6a4dd220db2fdb7 \ - --hash=sha256:e995efb191f04b01ced307dbd7407ebf6e6dc209b528d75583277b10fd1800ee \ - --hash=sha256:eda7f6e92358ac9e1717ce1f0377ed2b9320cea070906ece4e5c11d172a45a39 \ - --hash=sha256:ee453085279df1bac0996bc97004771a4a052b1f1e23f6101213e3796ff3cb85 \ - --hash=sha256:ee6621dccce8af666b8c4651f9f43467bfbf409607c604b840b78f4ff3619aeb \ - --hash=sha256:eee5e741b43ea1b49d98ab6e40f7e299e97715af2488d1c77a90de4a663a86e2 \ - --hash=sha256:f3bfd2c2f0e5384276e12b14882bf2c7621f97c35320c3e7132c156ce18436a1 \ - --hash=sha256:f501e36ac428c1b334c41e196ff6bd550c0353c7314716e80055b1f0a32ba394 \ - --hash=sha256:f9191be7af41f0b54324ded600e8ddbcabea23e1e8ba419d9a53b241dece821d \ - --hash=sha256:fbd8a5fe6c893de21a3c6835071ec116d79334fbdf641743332e442a3466f7ea \ - --hash=sha256:fc200cec654311ca2c3f5ab3ce2220521b3d4732f68e1b1e79bef8fcfc1f2b97 \ - --hash=sha256:ff4800783d85bff132f2cc7d007426ec698cdce08c3062c8d501ad3f4ea3d16c \ - --hash=sha256:ffb0eacbadb705c0a6969b0adf468f126b064f3362411df95f6d4f31c40d31c1 \ - --hash=sha256:fff0b2f249ac642fd735f009b8363c2b46cf406d3caec00e4deeb79b5ff39b40 +coverage[toml]==7.3.4 \ + --hash=sha256:020d56d2da5bc22a0e00a5b0d54597ee91ad72446fa4cf1b97c35022f6b6dbf0 \ + --hash=sha256:11ab62d0ce5d9324915726f611f511a761efcca970bd49d876cf831b4de65be5 \ + --hash=sha256:183c16173a70caf92e2dfcfe7c7a576de6fa9edc4119b8e13f91db7ca33a7923 \ + --hash=sha256:27ee94f088397d1feea3cb524e4313ff0410ead7d968029ecc4bc5a7e1d34fbf \ + --hash=sha256:3024ec1b3a221bd10b5d87337d0373c2bcaf7afd86d42081afe39b3e1820323b \ + --hash=sha256:309ed6a559bc942b7cc721f2976326efbfe81fc2b8f601c722bff927328507dc \ + --hash=sha256:33e63c578f4acce1b6cd292a66bc30164495010f1091d4b7529d014845cd9bee \ + --hash=sha256:36797b3625d1da885b369bdaaa3b0d9fb8865caed3c2b8230afaa6005434aa2f \ + 
--hash=sha256:36d75ef2acab74dc948d0b537ef021306796da551e8ac8b467810911000af66a \ + --hash=sha256:38d0b307c4d99a7aca4e00cad4311b7c51b7ac38fb7dea2abe0d182dd4008e05 \ + --hash=sha256:3d892a19ae24b9801771a5a989fb3e850bd1ad2e2b6e83e949c65e8f37bc67a1 \ + --hash=sha256:3f477fb8a56e0c603587b8278d9dbd32e54bcc2922d62405f65574bd76eba78a \ + --hash=sha256:47ee56c2cd445ea35a8cc3ad5c8134cb9bece3a5cb50bb8265514208d0a65928 \ + --hash=sha256:4a4184dcbe4f98d86470273e758f1d24191ca095412e4335ff27b417291f5964 \ + --hash=sha256:5214362abf26e254d749fc0c18af4c57b532a4bfde1a057565616dd3b8d7cc94 \ + --hash=sha256:607b6c6b35aa49defaebf4526729bd5238bc36fe3ef1a417d9839e1d96ee1e4c \ + --hash=sha256:610afaf929dc0e09a5eef6981edb6a57a46b7eceff151947b836d869d6d567c1 \ + --hash=sha256:6879fe41c60080aa4bb59703a526c54e0412b77e649a0d06a61782ecf0853ee1 \ + --hash=sha256:74397a1263275bea9d736572d4cf338efaade2de9ff759f9c26bcdceb383bb49 \ + --hash=sha256:758ebaf74578b73f727acc4e8ab4b16ab6f22a5ffd7dd254e5946aba42a4ce76 \ + --hash=sha256:782693b817218169bfeb9b9ba7f4a9f242764e180ac9589b45112571f32a0ba6 \ + --hash=sha256:7c4277ddaad9293454da19121c59f2d850f16bcb27f71f89a5c4836906eb35ef \ + --hash=sha256:85072e99474d894e5df582faec04abe137b28972d5e466999bc64fc37f564a03 \ + --hash=sha256:8a9c5bc5db3eb4cd55ecb8397d8e9b70247904f8eca718cc53c12dcc98e59fc8 \ + --hash=sha256:8ce03e25e18dd9bf44723e83bc202114817f3367789052dc9e5b5c79f40cf59d \ + --hash=sha256:93698ac0995516ccdca55342599a1463ed2e2d8942316da31686d4d614597ef9 \ + --hash=sha256:997aa14b3e014339d8101b9886063c5d06238848905d9ad6c6eabe533440a9a7 \ + --hash=sha256:9ac17b94ab4ca66cf803f2b22d47e392f0977f9da838bf71d1f0db6c32893cb9 \ + --hash=sha256:a02ac7c51819702b384fea5ee033a7c202f732a2a2f1fe6c41e3d4019828c8d3 \ + --hash=sha256:a1c3e9d2bbd6f3f79cfecd6f20854f4dc0c6e0ec317df2b265266d0dc06535f1 \ + --hash=sha256:a877810ef918d0d345b783fc569608804f3ed2507bf32f14f652e4eaf5d8f8d0 \ + --hash=sha256:a8e258dcc335055ab59fe79f1dec217d9fb0cdace103d6b5c6df6b75915e7959 \ + 
--hash=sha256:aefbb29dc56317a4fcb2f3857d5bce9b881038ed7e5aa5d3bcab25bd23f57328 \ + --hash=sha256:aff2bd3d585969cc4486bfc69655e862028b689404563e6b549e6a8244f226df \ + --hash=sha256:b1e0f25ae99cf247abfb3f0fac7ae25739e4cd96bf1afa3537827c576b4847e5 \ + --hash=sha256:b710869a15b8caf02e31d16487a931dbe78335462a122c8603bb9bd401ff6fb2 \ + --hash=sha256:bfed0ec4b419fbc807dec417c401499ea869436910e1ca524cfb4f81cf3f60e7 \ + --hash=sha256:c15fdfb141fcf6a900e68bfa35689e1256a670db32b96e7a931cab4a0e1600e5 \ + --hash=sha256:c6a23ae9348a7a92e7f750f9b7e828448e428e99c24616dec93a0720342f241d \ + --hash=sha256:c75738ce13d257efbb6633a049fb2ed8e87e2e6c2e906c52d1093a4d08d67c6b \ + --hash=sha256:d1d0ce6c6947a3a4aa5479bebceff2c807b9f3b529b637e2b33dea4468d75fc7 \ + --hash=sha256:d5b14abde6f8d969e6b9dd8c7a013d9a2b52af1235fe7bebef25ad5c8f47fa18 \ + --hash=sha256:d6ed790728fb71e6b8247bd28e77e99d0c276dff952389b5388169b8ca7b1c28 \ + --hash=sha256:e0d84099ea7cba9ff467f9c6f747e3fc3906e2aadac1ce7b41add72e8d0a3712 \ + --hash=sha256:e4353923f38d752ecfbd3f1f20bf7a3546993ae5ecd7c07fd2f25d40b4e54571 \ + --hash=sha256:e91029d7f151d8bf5ab7d8bfe2c3dbefd239759d642b211a677bc0709c9fdb96 \ + --hash=sha256:ea473c37872f0159294f7073f3fa72f68b03a129799f3533b2bb44d5e9fa4f82 \ + --hash=sha256:f154bd866318185ef5865ace5be3ac047b6d1cc0aeecf53bf83fe846f4384d5d \ + --hash=sha256:f97ff5a9fc2ca47f3383482858dd2cb8ddbf7514427eecf5aa5f7992d0571429 \ + --hash=sha256:f99b7d3f7a7adfa3d11e3a48d1a91bb65739555dd6a0d3fa68aa5852d962e5b1 \ + --hash=sha256:fb220b3596358a86361139edce40d97da7458412d412e1e10c8e1970ee8c09ab \ + --hash=sha256:fd2f8a641f8f193968afdc8fd1697e602e199931012b574194052d132a79be13 # via # -r requirements/dev.in # pytest-cov @@ -207,9 +207,9 @@ distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -documenteer[guide]==1.0.0a18 \ - 
--hash=sha256:25f20e46408b4188f54d958557fc206512102425d72742bb6a46d57d032f6dff \ - --hash=sha256:6a3b4123eef3f8d5cbb8168b2cc897d95c590a0f280e0bff8b46da89c4257772 +documenteer[guide]==1.0.0 \ + --hash=sha256:6d6f1f97fa7636591c8cb885fadce6055fe5e57a0b694182ac9980d9fd2f69a6 \ + --hash=sha256:76b0e074833ac3941e4479fa79982f32b09e60fc1c6993e8749b99c9c4f8e1de # via # -r requirements/dev.in # documenteer @@ -370,34 +370,34 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.7.1 \ - --hash=sha256:12cce78e329838d70a204293e7b29af9faa3ab14899aec397798a4b41be7f340 \ - --hash=sha256:1484b8fa2c10adf4474f016e09d7a159602f3239075c7bf9f1627f5acf40ad49 \ - --hash=sha256:204e0d6de5fd2317394a4eff62065614c4892d5a4d1a7ee55b765d7a3d9e3f82 \ - --hash=sha256:2643d145af5292ee956aa0a83c2ce1038a3bdb26e033dadeb2f7066fb0c9abce \ - --hash=sha256:2c6e4464ed5f01dc44dc9821caf67b60a4e5c3b04278286a85c067010653a0eb \ - --hash=sha256:2f7f6985d05a4e3ce8255396df363046c28bea790e40617654e91ed580ca7c51 \ - --hash=sha256:31902408f4bf54108bbfb2e35369877c01c95adc6192958684473658c322c8a5 \ - --hash=sha256:40716d1f821b89838589e5b3106ebbc23636ffdef5abc31f7cd0266db936067e \ - --hash=sha256:4b901927f16224d0d143b925ce9a4e6b3a758010673eeded9b748f250cf4e8f7 \ - --hash=sha256:4fc3d14ee80cd22367caaaf6e014494415bf440980a3045bf5045b525680ac33 \ - --hash=sha256:5cf3f0c5ac72139797953bd50bc6c95ac13075e62dbfcc923571180bebb662e9 \ - --hash=sha256:6dbdec441c60699288adf051f51a5d512b0d818526d1dcfff5a41f8cd8b4aaf1 \ - --hash=sha256:72cf32ce7dd3562373f78bd751f73c96cfb441de147cc2448a92c1a308bd0ca6 \ - --hash=sha256:75aa828610b67462ffe3057d4d8a4112105ed211596b750b53cbfe182f44777a \ - --hash=sha256:75c4d2a6effd015786c87774e04331b6da863fc3fc4e8adfc3b40aa55ab516fe \ - --hash=sha256:78e25b2fd6cbb55ddfb8058417df193f0129cad5f4ee75d1502248e588d9e0d7 \ - 
--hash=sha256:84860e06ba363d9c0eeabd45ac0fde4b903ad7aa4f93cd8b648385a888e23200 \ - --hash=sha256:8c5091ebd294f7628eb25ea554852a52058ac81472c921150e3a61cdd68f75a7 \ - --hash=sha256:944bdc21ebd620eafefc090cdf83158393ec2b1391578359776c00de00e8907a \ - --hash=sha256:9c7ac372232c928fff0645d85f273a726970c014749b924ce5710d7d89763a28 \ - --hash=sha256:d9b338c19fa2412f76e17525c1b4f2c687a55b156320acb588df79f2e6fa9fea \ - --hash=sha256:ee5d62d28b854eb61889cde4e1dbc10fbaa5560cb39780c3995f6737f7e82120 \ - --hash=sha256:f2c2521a8e4d6d769e3234350ba7b65ff5d527137cdcde13ff4d99114b0c8e7d \ - --hash=sha256:f6efc9bd72258f89a3816e3a98c09d36f079c223aa345c659622f056b760ab42 \ - --hash=sha256:f7c5d642db47376a0cc130f0de6d055056e010debdaf0707cd2b0fc7e7ef30ea \ - --hash=sha256:fcb6d9afb1b6208b4c712af0dafdc650f518836065df0d4fb1d800f5d6773db2 \ - --hash=sha256:fcd2572dd4519e8a6642b733cd3a8cfc1ef94bafd0c1ceed9c94fe736cb65b6a +mypy==1.8.0 \ + --hash=sha256:028cf9f2cae89e202d7b6593cd98db6759379f17a319b5faf4f9978d7084cdc6 \ + --hash=sha256:2afecd6354bbfb6e0160f4e4ad9ba6e4e003b767dd80d85516e71f2e955ab50d \ + --hash=sha256:2b5b6c721bd4aabaadead3a5e6fa85c11c6c795e0c81a7215776ef8afc66de02 \ + --hash=sha256:42419861b43e6962a649068a61f4a4839205a3ef525b858377a960b9e2de6e0d \ + --hash=sha256:42c6680d256ab35637ef88891c6bd02514ccb7e1122133ac96055ff458f93fc3 \ + --hash=sha256:485a8942f671120f76afffff70f259e1cd0f0cfe08f81c05d8816d958d4577d3 \ + --hash=sha256:4c886c6cce2d070bd7df4ec4a05a13ee20c0aa60cb587e8d1265b6c03cf91da3 \ + --hash=sha256:4e6d97288757e1ddba10dd9549ac27982e3e74a49d8d0179fc14d4365c7add66 \ + --hash=sha256:4ef4be7baf08a203170f29e89d79064463b7fc7a0908b9d0d5114e8009c3a259 \ + --hash=sha256:51720c776d148bad2372ca21ca29256ed483aa9a4cdefefcef49006dff2a6835 \ + --hash=sha256:52825b01f5c4c1c4eb0db253ec09c7aa17e1a7304d247c48b6f3599ef40db8bd \ + --hash=sha256:538fd81bb5e430cc1381a443971c0475582ff9f434c16cd46d2c66763ce85d9d \ + 
--hash=sha256:5c1538c38584029352878a0466f03a8ee7547d7bd9f641f57a0f3017a7c905b8 \ + --hash=sha256:6ff8b244d7085a0b425b56d327b480c3b29cafbd2eff27316a004f9a7391ae07 \ + --hash=sha256:7178def594014aa6c35a8ff411cf37d682f428b3b5617ca79029d8ae72f5402b \ + --hash=sha256:720a5ca70e136b675af3af63db533c1c8c9181314d207568bbe79051f122669e \ + --hash=sha256:7f1478736fcebb90f97e40aff11a5f253af890c845ee0c850fe80aa060a267c6 \ + --hash=sha256:855fe27b80375e5c5878492f0729540db47b186509c98dae341254c8f45f42ae \ + --hash=sha256:8963b83d53ee733a6e4196954502b33567ad07dfd74851f32be18eb932fb1cb9 \ + --hash=sha256:9261ed810972061388918c83c3f5cd46079d875026ba97380f3e3978a72f503d \ + --hash=sha256:99b00bc72855812a60d253420d8a2eae839b0afa4938f09f4d2aa9bb4654263a \ + --hash=sha256:ab3c84fa13c04aeeeabb2a7f67a25ef5d77ac9d6486ff33ded762ef353aa5592 \ + --hash=sha256:afe3fe972c645b4632c563d3f3eff1cdca2fa058f730df2b93a35e3b0c538218 \ + --hash=sha256:d19c413b3c07cbecf1f991e2221746b0d2a9410b59cb3f4fb9557f0365a1a817 \ + --hash=sha256:df9824ac11deaf007443e7ed2a4a26bebff98d2bc43c6da21b2b64185da011c4 \ + --hash=sha256:e46f44b54ebddbeedbd3d5b289a893219065ef805d95094d16a0af6630f5d410 \ + --hash=sha256:f5ac9a4eeb1ec0f1ccdc6f326bcdb464de5f80eb07fb38b5ddd7b0de6bc61e55 # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ @@ -440,120 +440,120 @@ pybtex-docutils==1.0.3 \ --hash=sha256:3a7ebdf92b593e00e8c1c538aa9a20bca5d92d84231124715acc964d51d93c6b \ --hash=sha256:8fd290d2ae48e32fcb54d86b0efb8d573198653c7e2447d5bec5847095f430b9 # via sphinxcontrib-bibtex -pydantic==2.5.2 \ - --hash=sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0 \ - --hash=sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd +pydantic==2.5.3 \ + --hash=sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a \ + --hash=sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4 # via # -c 
requirements/main.txt # autodoc-pydantic # documenteer # pydantic-settings -pydantic-core==2.14.5 \ - --hash=sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b \ - --hash=sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b \ - --hash=sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d \ - --hash=sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8 \ - --hash=sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124 \ - --hash=sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189 \ - --hash=sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c \ - --hash=sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d \ - --hash=sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f \ - --hash=sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520 \ - --hash=sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4 \ - --hash=sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6 \ - --hash=sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955 \ - --hash=sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3 \ - --hash=sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b \ - --hash=sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a \ - --hash=sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68 \ - --hash=sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3 \ - --hash=sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd \ - --hash=sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de \ - --hash=sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b \ - --hash=sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634 \ - 
--hash=sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7 \ - --hash=sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459 \ - --hash=sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7 \ - --hash=sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3 \ - --hash=sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331 \ - --hash=sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf \ - --hash=sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d \ - --hash=sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36 \ - --hash=sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59 \ - --hash=sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937 \ - --hash=sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc \ - --hash=sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093 \ - --hash=sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753 \ - --hash=sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706 \ - --hash=sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca \ - --hash=sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260 \ - --hash=sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997 \ - --hash=sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588 \ - --hash=sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71 \ - --hash=sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb \ - --hash=sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e \ - --hash=sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69 \ - --hash=sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5 \ - --hash=sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07 \ - 
--hash=sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1 \ - --hash=sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0 \ - --hash=sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd \ - --hash=sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8 \ - --hash=sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944 \ - --hash=sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26 \ - --hash=sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda \ - --hash=sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4 \ - --hash=sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9 \ - --hash=sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00 \ - --hash=sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe \ - --hash=sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6 \ - --hash=sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada \ - --hash=sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4 \ - --hash=sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7 \ - --hash=sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325 \ - --hash=sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4 \ - --hash=sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b \ - --hash=sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88 \ - --hash=sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04 \ - --hash=sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863 \ - --hash=sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0 \ - --hash=sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911 \ - --hash=sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b \ - 
--hash=sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e \ - --hash=sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144 \ - --hash=sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5 \ - --hash=sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720 \ - --hash=sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab \ - --hash=sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d \ - --hash=sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789 \ - --hash=sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec \ - --hash=sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2 \ - --hash=sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db \ - --hash=sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f \ - --hash=sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef \ - --hash=sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3 \ - --hash=sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209 \ - --hash=sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc \ - --hash=sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651 \ - --hash=sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8 \ - --hash=sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e \ - --hash=sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66 \ - --hash=sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7 \ - --hash=sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550 \ - --hash=sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd \ - --hash=sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405 \ - --hash=sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27 \ - 
--hash=sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093 \ - --hash=sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077 \ - --hash=sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113 \ - --hash=sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3 \ - --hash=sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6 \ - --hash=sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf \ - --hash=sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed \ - --hash=sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88 \ - --hash=sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe \ - --hash=sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18 \ - --hash=sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867 +pydantic-core==2.14.6 \ + --hash=sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556 \ + --hash=sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e \ + --hash=sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411 \ + --hash=sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245 \ + --hash=sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c \ + --hash=sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66 \ + --hash=sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd \ + --hash=sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d \ + --hash=sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b \ + --hash=sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06 \ + --hash=sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948 \ + --hash=sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341 \ + 
--hash=sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0 \ + --hash=sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f \ + --hash=sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a \ + --hash=sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2 \ + --hash=sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51 \ + --hash=sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80 \ + --hash=sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8 \ + --hash=sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d \ + --hash=sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8 \ + --hash=sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb \ + --hash=sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590 \ + --hash=sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87 \ + --hash=sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534 \ + --hash=sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b \ + --hash=sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145 \ + --hash=sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba \ + --hash=sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b \ + --hash=sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2 \ + --hash=sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e \ + --hash=sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052 \ + --hash=sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622 \ + --hash=sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab \ + --hash=sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b \ + --hash=sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66 \ + 
--hash=sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e \ + --hash=sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4 \ + --hash=sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e \ + --hash=sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec \ + --hash=sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c \ + --hash=sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed \ + --hash=sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937 \ + --hash=sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f \ + --hash=sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9 \ + --hash=sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4 \ + --hash=sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96 \ + --hash=sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277 \ + --hash=sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23 \ + --hash=sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7 \ + --hash=sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b \ + --hash=sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91 \ + --hash=sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d \ + --hash=sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e \ + --hash=sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1 \ + --hash=sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2 \ + --hash=sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160 \ + --hash=sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9 \ + --hash=sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670 \ + --hash=sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7 \ + 
--hash=sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c \ + --hash=sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb \ + --hash=sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42 \ + --hash=sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d \ + --hash=sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8 \ + --hash=sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1 \ + --hash=sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6 \ + --hash=sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8 \ + --hash=sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf \ + --hash=sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e \ + --hash=sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a \ + --hash=sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9 \ + --hash=sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1 \ + --hash=sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40 \ + --hash=sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2 \ + --hash=sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d \ + --hash=sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f \ + --hash=sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f \ + --hash=sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af \ + --hash=sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7 \ + --hash=sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda \ + --hash=sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a \ + --hash=sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95 \ + --hash=sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0 \ + 
--hash=sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60 \ + --hash=sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149 \ + --hash=sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975 \ + --hash=sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4 \ + --hash=sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe \ + --hash=sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94 \ + --hash=sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03 \ + --hash=sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c \ + --hash=sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b \ + --hash=sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a \ + --hash=sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24 \ + --hash=sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391 \ + --hash=sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c \ + --hash=sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab \ + --hash=sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd \ + --hash=sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786 \ + --hash=sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08 \ + --hash=sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8 \ + --hash=sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6 \ + --hash=sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0 \ + --hash=sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421 # via # -c requirements/main.txt # pydantic @@ -773,24 +773,24 @@ rpds-py==0.15.2 \ # via # jsonschema # referencing -ruff==0.1.8 \ - --hash=sha256:05ffe9dbd278965271252704eddb97b4384bf58b971054d517decfbf8c523f05 \ - 
--hash=sha256:5daaeaf00ae3c1efec9742ff294b06c3a2a9db8d3db51ee4851c12ad385cda30 \ - --hash=sha256:7d076717c67b34c162da7c1a5bda16ffc205e0e0072c03745275e7eab888719f \ - --hash=sha256:7de792582f6e490ae6aef36a58d85df9f7a0cfd1b0d4fe6b4fb51803a3ac96fa \ - --hash=sha256:a05b0ddd7ea25495e4115a43125e8a7ebed0aa043c3d432de7e7d6e8e8cd6448 \ - --hash=sha256:aa8ee4f8440023b0a6c3707f76cadce8657553655dcbb5fc9b2f9bb9bee389f6 \ - --hash=sha256:b6a21ab023124eafb7cef6d038f835cb1155cd5ea798edd8d9eb2f8b84be07d9 \ - --hash=sha256:bd8ee69b02e7bdefe1e5da2d5b6eaaddcf4f90859f00281b2333c0e3a0cc9cd6 \ - --hash=sha256:c8e3255afd186c142eef4ec400d7826134f028a85da2146102a1172ecc7c3696 \ - --hash=sha256:ce697c463458555027dfb194cb96d26608abab920fa85213deb5edf26e026664 \ - --hash=sha256:db6cedd9ffed55548ab313ad718bc34582d394e27a7875b4b952c2d29c001b26 \ - --hash=sha256:e49fbdfe257fa41e5c9e13c79b9e79a23a79bd0e40b9314bc53840f520c2c0b3 \ - --hash=sha256:e6f08ca730f4dc1b76b473bdf30b1b37d42da379202a059eae54ec7fc1fbcfed \ - --hash=sha256:f35960b02df6b827c1b903091bb14f4b003f6cf102705efc4ce78132a0aa5af3 \ - --hash=sha256:f41f692f1691ad87f51708b823af4bb2c5c87c9248ddd3191c8f088e66ce590a \ - --hash=sha256:f7ee467677467526cfe135eab86a40a0e8db43117936ac4f9b469ce9cdb3fb62 \ - --hash=sha256:ff78a7583020da124dd0deb835ece1d87bb91762d40c514ee9b67a087940528b +ruff==0.1.9 \ + --hash=sha256:0e17f53bcbb4fff8292dfd84cf72d767b5e146f009cccd40c2fad27641f8a7a9 \ + --hash=sha256:104aa9b5e12cb755d9dce698ab1b97726b83012487af415a4512fedd38b1459e \ + --hash=sha256:1e63bf5a4a91971082a4768a0aba9383c12392d0d6f1e2be2248c1f9054a20da \ + --hash=sha256:28d920e319783d5303333630dae46ecc80b7ba294aeffedf946a02ac0b7cc3db \ + --hash=sha256:2aec598fb65084e41a9c5d4b95726173768a62055aafb07b4eff976bac72a592 \ + --hash=sha256:331aae2cd4a0554667ac683243b151c74bd60e78fb08c3c2a4ac05ee1e606a39 \ + --hash=sha256:479ca4250cab30f9218b2e563adc362bd6ae6343df7c7b5a7865300a5156d5a6 \ + 
--hash=sha256:4d0738917c203246f3e275b37006faa3aa96c828b284ebfe3e99a8cb413c8c4b \ + --hash=sha256:69dac82d63a50df2ab0906d97a01549f814b16bc806deeac4f064ff95c47ddf5 \ + --hash=sha256:744dfe4b35470fa3820d5fe45758aace6269c578f7ddc43d447868cfe5078bcb \ + --hash=sha256:8151425a60878e66f23ad47da39265fc2fad42aed06fb0a01130e967a7a064f4 \ + --hash=sha256:837c739729394df98f342319f5136f33c65286b28b6b70a87c28f59354ec939b \ + --hash=sha256:aa8344310f1ae79af9ccd6e4b32749e93cddc078f9b5ccd0e45bd76a6d2e8bb6 \ + --hash=sha256:b041dee2734719ddbb4518f762c982f2e912e7f28b8ee4fe1dee0b15d1b6e800 \ + --hash=sha256:c497d769164df522fdaf54c6eba93f397342fe4ca2123a2e014a5b8fc7df81c7 \ + --hash=sha256:e6837202c2859b9f22e43cb01992373c2dbfeae5c0c91ad691a4a2e725392464 \ + --hash=sha256:e6a212f436122ac73df851f0cf006e0c6612fe6f9c864ed17ebefce0eff6a5fd # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -996,7 +996,7 @@ virtualenv==20.25.0 \ # via pre-commit # The following packages are considered to be unsafe in a requirements file: -setuptools==69.0.2 \ - --hash=sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2 \ - --hash=sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6 +setuptools==69.0.3 \ + --hash=sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05 \ + --hash=sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78 # via nodeenv diff --git a/requirements/main.txt b/requirements/main.txt index 0eeabf6f06..63a30a5244 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -346,119 +346,119 @@ pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pydantic==2.5.2 \ - --hash=sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0 \ - 
--hash=sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd +pydantic==2.5.3 \ + --hash=sha256:b3ef57c62535b0941697cce638c08900d87fcb67e29cfa99e8a68f747f393f7a \ + --hash=sha256:d0caf5954bee831b6bfe7e338c32b9e30c85dfe080c843680783ac2b631673b4 # via # -r requirements/main.in # fastapi # safir -pydantic-core==2.14.5 \ - --hash=sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b \ - --hash=sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b \ - --hash=sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d \ - --hash=sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8 \ - --hash=sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124 \ - --hash=sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189 \ - --hash=sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c \ - --hash=sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d \ - --hash=sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f \ - --hash=sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520 \ - --hash=sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4 \ - --hash=sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6 \ - --hash=sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955 \ - --hash=sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3 \ - --hash=sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b \ - --hash=sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a \ - --hash=sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68 \ - --hash=sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3 \ - --hash=sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd \ - --hash=sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de \ - 
--hash=sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b \ - --hash=sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634 \ - --hash=sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7 \ - --hash=sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459 \ - --hash=sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7 \ - --hash=sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3 \ - --hash=sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331 \ - --hash=sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf \ - --hash=sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d \ - --hash=sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36 \ - --hash=sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59 \ - --hash=sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937 \ - --hash=sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc \ - --hash=sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093 \ - --hash=sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753 \ - --hash=sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706 \ - --hash=sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca \ - --hash=sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260 \ - --hash=sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997 \ - --hash=sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588 \ - --hash=sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71 \ - --hash=sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb \ - --hash=sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e \ - --hash=sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69 \ - 
--hash=sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5 \ - --hash=sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07 \ - --hash=sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1 \ - --hash=sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0 \ - --hash=sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd \ - --hash=sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8 \ - --hash=sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944 \ - --hash=sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26 \ - --hash=sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda \ - --hash=sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4 \ - --hash=sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9 \ - --hash=sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00 \ - --hash=sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe \ - --hash=sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6 \ - --hash=sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada \ - --hash=sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4 \ - --hash=sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7 \ - --hash=sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325 \ - --hash=sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4 \ - --hash=sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b \ - --hash=sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88 \ - --hash=sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04 \ - --hash=sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863 \ - --hash=sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0 \ - 
--hash=sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911 \ - --hash=sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b \ - --hash=sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e \ - --hash=sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144 \ - --hash=sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5 \ - --hash=sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720 \ - --hash=sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab \ - --hash=sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d \ - --hash=sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789 \ - --hash=sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec \ - --hash=sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2 \ - --hash=sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db \ - --hash=sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f \ - --hash=sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef \ - --hash=sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3 \ - --hash=sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209 \ - --hash=sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc \ - --hash=sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651 \ - --hash=sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8 \ - --hash=sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e \ - --hash=sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66 \ - --hash=sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7 \ - --hash=sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550 \ - --hash=sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd \ - 
--hash=sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405 \ - --hash=sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27 \ - --hash=sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093 \ - --hash=sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077 \ - --hash=sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113 \ - --hash=sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3 \ - --hash=sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6 \ - --hash=sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf \ - --hash=sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed \ - --hash=sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88 \ - --hash=sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe \ - --hash=sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18 \ - --hash=sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867 +pydantic-core==2.14.6 \ + --hash=sha256:00646784f6cd993b1e1c0e7b0fdcbccc375d539db95555477771c27555e3c556 \ + --hash=sha256:00b1087dabcee0b0ffd104f9f53d7d3eaddfaa314cdd6726143af6bc713aa27e \ + --hash=sha256:0348b1dc6b76041516e8a854ff95b21c55f5a411c3297d2ca52f5528e49d8411 \ + --hash=sha256:036137b5ad0cb0004c75b579445a1efccd072387a36c7f217bb8efd1afbe5245 \ + --hash=sha256:095b707bb287bfd534044166ab767bec70a9bba3175dcdc3371782175c14e43c \ + --hash=sha256:0c08de15d50fa190d577e8591f0329a643eeaed696d7771760295998aca6bc66 \ + --hash=sha256:1302a54f87b5cd8528e4d6d1bf2133b6aa7c6122ff8e9dc5220fbc1e07bffebd \ + --hash=sha256:172de779e2a153d36ee690dbc49c6db568d7b33b18dc56b69a7514aecbcf380d \ + --hash=sha256:1b027c86c66b8627eb90e57aee1f526df77dc6d8b354ec498be9a757d513b92b \ + --hash=sha256:1ce830e480f6774608dedfd4a90c42aac4a7af0a711f1b52f807130c2e434c06 \ + 
--hash=sha256:1fd0c1d395372843fba13a51c28e3bb9d59bd7aebfeb17358ffaaa1e4dbbe948 \ + --hash=sha256:23598acb8ccaa3d1d875ef3b35cb6376535095e9405d91a3d57a8c7db5d29341 \ + --hash=sha256:24368e31be2c88bd69340fbfe741b405302993242ccb476c5c3ff48aeee1afe0 \ + --hash=sha256:26a92ae76f75d1915806b77cf459811e772d8f71fd1e4339c99750f0e7f6324f \ + --hash=sha256:27e524624eace5c59af499cd97dc18bb201dc6a7a2da24bfc66ef151c69a5f2a \ + --hash=sha256:2b8719037e570639e6b665a4050add43134d80b687288ba3ade18b22bbb29dd2 \ + --hash=sha256:2c5bcf3414367e29f83fd66f7de64509a8fd2368b1edf4351e862910727d3e51 \ + --hash=sha256:2dbe357bc4ddda078f79d2a36fc1dd0494a7f2fad83a0a684465b6f24b46fe80 \ + --hash=sha256:2f5fa187bde8524b1e37ba894db13aadd64faa884657473b03a019f625cee9a8 \ + --hash=sha256:2f6ffc6701a0eb28648c845f4945a194dc7ab3c651f535b81793251e1185ac3d \ + --hash=sha256:314ccc4264ce7d854941231cf71b592e30d8d368a71e50197c905874feacc8a8 \ + --hash=sha256:36026d8f99c58d7044413e1b819a67ca0e0b8ebe0f25e775e6c3d1fabb3c38fb \ + --hash=sha256:36099c69f6b14fc2c49d7996cbf4f87ec4f0e66d1c74aa05228583225a07b590 \ + --hash=sha256:36fa402dcdc8ea7f1b0ddcf0df4254cc6b2e08f8cd80e7010d4c4ae6e86b2a87 \ + --hash=sha256:370ffecb5316ed23b667d99ce4debe53ea664b99cc37bfa2af47bc769056d534 \ + --hash=sha256:3860c62057acd95cc84044e758e47b18dcd8871a328ebc8ccdefd18b0d26a21b \ + --hash=sha256:399ac0891c284fa8eb998bcfa323f2234858f5d2efca3950ae58c8f88830f145 \ + --hash=sha256:3a0b5db001b98e1c649dd55afa928e75aa4087e587b9524a4992316fa23c9fba \ + --hash=sha256:3dcf1978be02153c6a31692d4fbcc2a3f1db9da36039ead23173bc256ee3b91b \ + --hash=sha256:4241204e4b36ab5ae466ecec5c4c16527a054c69f99bba20f6f75232a6a534e2 \ + --hash=sha256:438027a975cc213a47c5d70672e0d29776082155cfae540c4e225716586be75e \ + --hash=sha256:43e166ad47ba900f2542a80d83f9fc65fe99eb63ceec4debec160ae729824052 \ + --hash=sha256:478e9e7b360dfec451daafe286998d4a1eeaecf6d69c427b834ae771cad4b622 \ + --hash=sha256:4ce8299b481bcb68e5c82002b96e411796b844d72b3e92a3fbedfe8e19813eab \ + 
--hash=sha256:4f86f1f318e56f5cbb282fe61eb84767aee743ebe32c7c0834690ebea50c0a6b \ + --hash=sha256:55a23dcd98c858c0db44fc5c04fc7ed81c4b4d33c653a7c45ddaebf6563a2f66 \ + --hash=sha256:599c87d79cab2a6a2a9df4aefe0455e61e7d2aeede2f8577c1b7c0aec643ee8e \ + --hash=sha256:5aa90562bc079c6c290f0512b21768967f9968e4cfea84ea4ff5af5d917016e4 \ + --hash=sha256:64634ccf9d671c6be242a664a33c4acf12882670b09b3f163cd00a24cffbd74e \ + --hash=sha256:667aa2eac9cd0700af1ddb38b7b1ef246d8cf94c85637cbb03d7757ca4c3fdec \ + --hash=sha256:6a31d98c0d69776c2576dda4b77b8e0c69ad08e8b539c25c7d0ca0dc19a50d6c \ + --hash=sha256:6af4b3f52cc65f8a0bc8b1cd9676f8c21ef3e9132f21fed250f6958bd7223bed \ + --hash=sha256:6c8edaea3089bf908dd27da8f5d9e395c5b4dc092dbcce9b65e7156099b4b937 \ + --hash=sha256:71d72ca5eaaa8d38c8df16b7deb1a2da4f650c41b58bb142f3fb75d5ad4a611f \ + --hash=sha256:72f9a942d739f09cd42fffe5dc759928217649f070056f03c70df14f5770acf9 \ + --hash=sha256:747265448cb57a9f37572a488a57d873fd96bf51e5bb7edb52cfb37124516da4 \ + --hash=sha256:75ec284328b60a4e91010c1acade0c30584f28a1f345bc8f72fe8b9e46ec6a96 \ + --hash=sha256:78d0768ee59baa3de0f4adac9e3748b4b1fffc52143caebddfd5ea2961595277 \ + --hash=sha256:78ee52ecc088c61cce32b2d30a826f929e1708f7b9247dc3b921aec367dc1b23 \ + --hash=sha256:7be719e4d2ae6c314f72844ba9d69e38dff342bc360379f7c8537c48e23034b7 \ + --hash=sha256:7e1f4744eea1501404b20b0ac059ff7e3f96a97d3e3f48ce27a139e053bb370b \ + --hash=sha256:7e90d6cc4aad2cc1f5e16ed56e46cebf4877c62403a311af20459c15da76fd91 \ + --hash=sha256:7ebe3416785f65c28f4f9441e916bfc8a54179c8dea73c23023f7086fa601c5d \ + --hash=sha256:7f41533d7e3cf9520065f610b41ac1c76bc2161415955fbcead4981b22c7611e \ + --hash=sha256:7f5025db12fc6de7bc1104d826d5aee1d172f9ba6ca936bf6474c2148ac336c1 \ + --hash=sha256:86c963186ca5e50d5c8287b1d1c9d3f8f024cbe343d048c5bd282aec2d8641f2 \ + --hash=sha256:86ce5fcfc3accf3a07a729779d0b86c5d0309a4764c897d86c11089be61da160 \ + --hash=sha256:8a14c192c1d724c3acbfb3f10a958c55a2638391319ce8078cb36c02283959b9 \ + 
--hash=sha256:8b93785eadaef932e4fe9c6e12ba67beb1b3f1e5495631419c784ab87e975670 \ + --hash=sha256:8ed1af8692bd8d2a29d702f1a2e6065416d76897d726e45a1775b1444f5928a7 \ + --hash=sha256:92879bce89f91f4b2416eba4429c7b5ca22c45ef4a499c39f0c5c69257522c7c \ + --hash=sha256:94fc0e6621e07d1e91c44e016cc0b189b48db053061cc22d6298a611de8071bb \ + --hash=sha256:982487f8931067a32e72d40ab6b47b1628a9c5d344be7f1a4e668fb462d2da42 \ + --hash=sha256:9862bf828112e19685b76ca499b379338fd4c5c269d897e218b2ae8fcb80139d \ + --hash=sha256:99b14dbea2fdb563d8b5a57c9badfcd72083f6006caf8e126b491519c7d64ca8 \ + --hash=sha256:9c6a5c79b28003543db3ba67d1df336f253a87d3112dac3a51b94f7d48e4c0e1 \ + --hash=sha256:a19b794f8fe6569472ff77602437ec4430f9b2b9ec7a1105cfd2232f9ba355e6 \ + --hash=sha256:a306cdd2ad3a7d795d8e617a58c3a2ed0f76c8496fb7621b6cd514eb1532cae8 \ + --hash=sha256:a3dde6cac75e0b0902778978d3b1646ca9f438654395a362cb21d9ad34b24acf \ + --hash=sha256:a874f21f87c485310944b2b2734cd6d318765bcbb7515eead33af9641816506e \ + --hash=sha256:a983cca5ed1dd9a35e9e42ebf9f278d344603bfcb174ff99a5815f953925140a \ + --hash=sha256:aca48506a9c20f68ee61c87f2008f81f8ee99f8d7f0104bff3c47e2d148f89d9 \ + --hash=sha256:b2602177668f89b38b9f84b7b3435d0a72511ddef45dc14446811759b82235a1 \ + --hash=sha256:b3e5fe4538001bb82e2295b8d2a39356a84694c97cb73a566dc36328b9f83b40 \ + --hash=sha256:b6ca36c12a5120bad343eef193cc0122928c5c7466121da7c20f41160ba00ba2 \ + --hash=sha256:b89f4477d915ea43b4ceea6756f63f0288941b6443a2b28c69004fe07fde0d0d \ + --hash=sha256:b9a9d92f10772d2a181b5ca339dee066ab7d1c9a34ae2421b2a52556e719756f \ + --hash=sha256:c99462ffc538717b3e60151dfaf91125f637e801f5ab008f81c402f1dff0cd0f \ + --hash=sha256:cb92f9061657287eded380d7dc455bbf115430b3aa4741bdc662d02977e7d0af \ + --hash=sha256:cdee837710ef6b56ebd20245b83799fce40b265b3b406e51e8ccc5b85b9099b7 \ + --hash=sha256:cf10b7d58ae4a1f07fccbf4a0a956d705356fea05fb4c70608bb6fa81d103cda \ + --hash=sha256:d15687d7d7f40333bd8266f3814c591c2e2cd263fa2116e314f60d82086e353a \ + 
--hash=sha256:d5c28525c19f5bb1e09511669bb57353d22b94cf8b65f3a8d141c389a55dec95 \ + --hash=sha256:d5f916acf8afbcab6bacbb376ba7dc61f845367901ecd5e328fc4d4aef2fcab0 \ + --hash=sha256:dab03ed811ed1c71d700ed08bde8431cf429bbe59e423394f0f4055f1ca0ea60 \ + --hash=sha256:db453f2da3f59a348f514cfbfeb042393b68720787bbef2b4c6068ea362c8149 \ + --hash=sha256:de2a0645a923ba57c5527497daf8ec5df69c6eadf869e9cd46e86349146e5975 \ + --hash=sha256:dea7fcd62915fb150cdc373212141a30037e11b761fbced340e9db3379b892d4 \ + --hash=sha256:dfcbebdb3c4b6f739a91769aea5ed615023f3c88cb70df812849aef634c25fbe \ + --hash=sha256:dfcebb950aa7e667ec226a442722134539e77c575f6cfaa423f24371bb8d2e94 \ + --hash=sha256:e0641b506486f0b4cd1500a2a65740243e8670a2549bb02bc4556a83af84ae03 \ + --hash=sha256:e33b0834f1cf779aa839975f9d8755a7c2420510c0fa1e9fa0497de77cd35d2c \ + --hash=sha256:e4ace1e220b078c8e48e82c081e35002038657e4b37d403ce940fa679e57113b \ + --hash=sha256:e4cf2d5829f6963a5483ec01578ee76d329eb5caf330ecd05b3edd697e7d768a \ + --hash=sha256:e574de99d735b3fc8364cba9912c2bec2da78775eba95cbb225ef7dda6acea24 \ + --hash=sha256:e646c0e282e960345314f42f2cea5e0b5f56938c093541ea6dbf11aec2862391 \ + --hash=sha256:e8a5ac97ea521d7bde7621d86c30e86b798cdecd985723c4ed737a2aa9e77d0c \ + --hash=sha256:eedf97be7bc3dbc8addcef4142f4b4164066df0c6f36397ae4aaed3eb187d8ab \ + --hash=sha256:ef633add81832f4b56d3b4c9408b43d530dfca29e68fb1b797dcb861a2c734cd \ + --hash=sha256:f27207e8ca3e5e021e2402ba942e5b4c629718e665c81b8b306f3c8b1ddbb786 \ + --hash=sha256:f85f3843bdb1fe80e8c206fe6eed7a1caeae897e496542cee499c374a85c6e08 \ + --hash=sha256:f8e81e4b55930e5ffab4a68db1af431629cf2e4066dbdbfef65348b8ab804ea8 \ + --hash=sha256:f96ae96a060a8072ceff4cfde89d261837b4294a4f28b84a28765470d502ccc6 \ + --hash=sha256:fd9e98b408384989ea4ab60206b8e100d8687da18b5c813c11e92fd8212a98e0 \ + --hash=sha256:ffff855100bc066ff2cd3aa4a60bc9534661816b110f0243e59503ec2df38421 # via pydantic pyjwt[crypto]==2.8.0 \ 
--hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ From e242b506ae49e7f63b1a37ef22ec41cafdd8beee Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Wed, 27 Dec 2023 15:51:08 -0700 Subject: [PATCH 391/588] Update Butler for multi-repo server Butler server is now able to serve multiple repositories from a single service. Updated configuration to point to a repository index file instead of a single Butler configuration file. Added ingresses for each of the repositories. --- applications/butler/Chart.yaml | 2 +- applications/butler/README.md | 3 ++- applications/butler/templates/deployment.yaml | 4 ++-- applications/butler/templates/ingress-anonymous.yaml | 7 ++++--- .../butler/templates/ingress-authenticated.yaml | 4 +++- applications/butler/values-idfdev.yaml | 4 +++- applications/butler/values.yaml | 10 +++++++--- 7 files changed, 22 insertions(+), 12 deletions(-) diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml index 15503e4066..817bcdfc83 100644 --- a/applications/butler/Chart.yaml +++ b/applications/butler/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.0.1 +appVersion: 0.0.2 description: Server for Butler data abstraction service name: butler sources: diff --git a/applications/butler/README.md b/applications/butler/README.md index 3043ea5172..46acdc866a 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -15,8 +15,9 @@ Server for Butler data abstraction service | autoscaling.maxReplicas | int | `100` | Maximum number of butler deployment pods | | autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | -| config.configUri | string | `""` | URI to the file specifying the DirectButler configuration to be used by the butler server | +| config.indexUri | string | `""` | URI to the DirectButler repository index file listing the 
configurations for each repository to be hosted by this server. | | config.pathPrefix | string | `"/api/butler"` | The prefix of the path portion of the URL where the Butler service will be exposed. For example, if the service should be exposed at `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` | +| config.repositoryLabels | list | `[]` | List of Butler repository labels which will be hosted by this server, matching those from the index file. | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml index eb22fc1cc3..7bca29eb03 100644 --- a/applications/butler/templates/deployment.yaml +++ b/applications/butler/templates/deployment.yaml @@ -50,8 +50,8 @@ spec: value: "/opt/lsst/butler/secrets/butler-gcs-creds.json" - name: S3_ENDPOINT_URL value: "https://storage.googleapis.com" - - name: BUTLER_SERVER_CONFIG_URI - value: {{ .Values.config.configUri | quote }} + - name: DAF_BUTLER_REPOSITORY_INDEX + value: {{ .Values.config.indexUri | quote }} volumeMounts: - name: "butler-secrets" mountPath: "/opt/lsst/butler/secrets" diff --git a/applications/butler/templates/ingress-anonymous.yaml b/applications/butler/templates/ingress-anonymous.yaml index d3b79b4ae5..3c58b89c78 100644 --- a/applications/butler/templates/ingress-anonymous.yaml +++ b/applications/butler/templates/ingress-anonymous.yaml @@ -20,6 +20,7 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: + {{- range $repositoryLabel := .Values.config.repositoryLabels }} # For direct end-user use of the Butler client library, the # Butler() convenience constructor must be able to load a # configuration file via unauthenticated HTTP. 
This exists for @@ -27,18 +28,18 @@ template: # to the existence of the Butler server -- they are passed the URI # for a repository root on the filesystem or HTTP, from which a # configuration file is loaded. - - path: "{{ .Values.config.pathPrefix }}/butler.yaml" + - path: "{{ $.Values.config.pathPrefix }}/repo/{{ $repositoryLabel }}/butler.yaml" pathType: "Exact" backend: service: name: "butler" port: number: 8080 - - path: "{{ .Values.config.pathPrefix }}/butler.json" + - path: "{{ $.Values.config.pathPrefix }}/repo/{{ $repositoryLabel }}/butler.json" pathType: "Exact" backend: service: name: "butler" port: number: 8080 - + {{- end }} diff --git a/applications/butler/templates/ingress-authenticated.yaml b/applications/butler/templates/ingress-authenticated.yaml index 2868813397..d12b573eb0 100644 --- a/applications/butler/templates/ingress-authenticated.yaml +++ b/applications/butler/templates/ingress-authenticated.yaml @@ -29,10 +29,12 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: {{ .Values.config.pathPrefix | quote }} + {{- range $repositoryLabel := .Values.config.repositoryLabels }} + - path: "{{ $.Values.config.pathPrefix }}/repo/{{ $repositoryLabel }}" pathType: "Prefix" backend: service: name: "butler" port: number: 8080 + {{- end }} diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml index 0e46edf219..1d5d57e000 100644 --- a/applications/butler/values-idfdev.yaml +++ b/applications/butler/values-idfdev.yaml @@ -2,4 +2,6 @@ image: pullPolicy: Always config: - configUri: "s3://butler-us-central1-panda-dev/dc2/butler-external-idfdev.yaml" + indexUri: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" + repositoryLabels: + - dp02 diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index 00a45ff8b7..92d382b98b 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -64,9 +64,13 @@ 
global: vaultSecretsPath: "" config: - # -- URI to the file specifying the DirectButler configuration to be used - # by the butler server - configUri: "" + # -- URI to the DirectButler repository index file listing the configurations + # for each repository to be hosted by this server. + indexUri: "" + + # -- List of Butler repository labels which will be hosted by this server, + # matching those from the index file. + repositoryLabels: [] # -- The prefix of the path portion of the URL where the Butler service will # be exposed. For example, if the service should be exposed at From ad7b00a18ca580957faeb3bbe2c58d26a1aeb621 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Thu, 28 Dec 2023 10:20:05 -0700 Subject: [PATCH 392/588] Add butler to idfint Added a deployment of the butler server to idfint. Also added a configuration for idfprod but it is not yet enabled. --- applications/butler/values-idfint.yaml | 4 ++++ applications/butler/values-idfprod.yaml | 5 +++++ environments/values-idfint.yaml | 1 + 3 files changed, 10 insertions(+) create mode 100644 applications/butler/values-idfint.yaml create mode 100644 applications/butler/values-idfprod.yaml diff --git a/applications/butler/values-idfint.yaml b/applications/butler/values-idfint.yaml new file mode 100644 index 0000000000..2aa6066c53 --- /dev/null +++ b/applications/butler/values-idfint.yaml @@ -0,0 +1,4 @@ +config: + indexUri: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" + repositoryLabels: + - dp02 diff --git a/applications/butler/values-idfprod.yaml b/applications/butler/values-idfprod.yaml new file mode 100644 index 0000000000..3ae3da39fd --- /dev/null +++ b/applications/butler/values-idfprod.yaml @@ -0,0 +1,5 @@ +config: + indexUri: "s3://butler-us-central1-repo-locations/data-repos.yaml" + repositoryLabels: + - dp01 + - dp02 diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 71d63833a8..b7f6751c54 100644 --- a/environments/values-idfint.yaml +++ 
b/environments/values-idfint.yaml @@ -12,6 +12,7 @@ vaultPathPrefix: "secret/phalanx/idfint" applications: alert-stream-broker: true + butler: true datalinker: true hips: true linters: true From 5972b5c1d33cdcd75a67c97ca616a510da41604c Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 1 Jan 2024 12:30:07 +0000 Subject: [PATCH 393/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 324 +++++++++++++++++++++--------------------- requirements/main.txt | 25 ++-- 2 files changed, 174 insertions(+), 175 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index e97aebcbff..bf2879087f 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -14,9 +14,9 @@ annotated-types==0.6.0 \ # via # -c requirements/main.txt # pydantic -attrs==23.1.0 \ - --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ - --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 +attrs==23.2.0 \ + --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ + --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 # via # jsonschema # referencing @@ -143,59 +143,59 @@ click==8.1.7 \ # -c requirements/main.txt # documenteer # sphinx-click -coverage[toml]==7.3.4 \ - --hash=sha256:020d56d2da5bc22a0e00a5b0d54597ee91ad72446fa4cf1b97c35022f6b6dbf0 \ - --hash=sha256:11ab62d0ce5d9324915726f611f511a761efcca970bd49d876cf831b4de65be5 \ - --hash=sha256:183c16173a70caf92e2dfcfe7c7a576de6fa9edc4119b8e13f91db7ca33a7923 \ - --hash=sha256:27ee94f088397d1feea3cb524e4313ff0410ead7d968029ecc4bc5a7e1d34fbf \ - --hash=sha256:3024ec1b3a221bd10b5d87337d0373c2bcaf7afd86d42081afe39b3e1820323b \ - --hash=sha256:309ed6a559bc942b7cc721f2976326efbfe81fc2b8f601c722bff927328507dc \ - --hash=sha256:33e63c578f4acce1b6cd292a66bc30164495010f1091d4b7529d014845cd9bee \ - 
--hash=sha256:36797b3625d1da885b369bdaaa3b0d9fb8865caed3c2b8230afaa6005434aa2f \ - --hash=sha256:36d75ef2acab74dc948d0b537ef021306796da551e8ac8b467810911000af66a \ - --hash=sha256:38d0b307c4d99a7aca4e00cad4311b7c51b7ac38fb7dea2abe0d182dd4008e05 \ - --hash=sha256:3d892a19ae24b9801771a5a989fb3e850bd1ad2e2b6e83e949c65e8f37bc67a1 \ - --hash=sha256:3f477fb8a56e0c603587b8278d9dbd32e54bcc2922d62405f65574bd76eba78a \ - --hash=sha256:47ee56c2cd445ea35a8cc3ad5c8134cb9bece3a5cb50bb8265514208d0a65928 \ - --hash=sha256:4a4184dcbe4f98d86470273e758f1d24191ca095412e4335ff27b417291f5964 \ - --hash=sha256:5214362abf26e254d749fc0c18af4c57b532a4bfde1a057565616dd3b8d7cc94 \ - --hash=sha256:607b6c6b35aa49defaebf4526729bd5238bc36fe3ef1a417d9839e1d96ee1e4c \ - --hash=sha256:610afaf929dc0e09a5eef6981edb6a57a46b7eceff151947b836d869d6d567c1 \ - --hash=sha256:6879fe41c60080aa4bb59703a526c54e0412b77e649a0d06a61782ecf0853ee1 \ - --hash=sha256:74397a1263275bea9d736572d4cf338efaade2de9ff759f9c26bcdceb383bb49 \ - --hash=sha256:758ebaf74578b73f727acc4e8ab4b16ab6f22a5ffd7dd254e5946aba42a4ce76 \ - --hash=sha256:782693b817218169bfeb9b9ba7f4a9f242764e180ac9589b45112571f32a0ba6 \ - --hash=sha256:7c4277ddaad9293454da19121c59f2d850f16bcb27f71f89a5c4836906eb35ef \ - --hash=sha256:85072e99474d894e5df582faec04abe137b28972d5e466999bc64fc37f564a03 \ - --hash=sha256:8a9c5bc5db3eb4cd55ecb8397d8e9b70247904f8eca718cc53c12dcc98e59fc8 \ - --hash=sha256:8ce03e25e18dd9bf44723e83bc202114817f3367789052dc9e5b5c79f40cf59d \ - --hash=sha256:93698ac0995516ccdca55342599a1463ed2e2d8942316da31686d4d614597ef9 \ - --hash=sha256:997aa14b3e014339d8101b9886063c5d06238848905d9ad6c6eabe533440a9a7 \ - --hash=sha256:9ac17b94ab4ca66cf803f2b22d47e392f0977f9da838bf71d1f0db6c32893cb9 \ - --hash=sha256:a02ac7c51819702b384fea5ee033a7c202f732a2a2f1fe6c41e3d4019828c8d3 \ - --hash=sha256:a1c3e9d2bbd6f3f79cfecd6f20854f4dc0c6e0ec317df2b265266d0dc06535f1 \ - --hash=sha256:a877810ef918d0d345b783fc569608804f3ed2507bf32f14f652e4eaf5d8f8d0 \ - 
--hash=sha256:a8e258dcc335055ab59fe79f1dec217d9fb0cdace103d6b5c6df6b75915e7959 \ - --hash=sha256:aefbb29dc56317a4fcb2f3857d5bce9b881038ed7e5aa5d3bcab25bd23f57328 \ - --hash=sha256:aff2bd3d585969cc4486bfc69655e862028b689404563e6b549e6a8244f226df \ - --hash=sha256:b1e0f25ae99cf247abfb3f0fac7ae25739e4cd96bf1afa3537827c576b4847e5 \ - --hash=sha256:b710869a15b8caf02e31d16487a931dbe78335462a122c8603bb9bd401ff6fb2 \ - --hash=sha256:bfed0ec4b419fbc807dec417c401499ea869436910e1ca524cfb4f81cf3f60e7 \ - --hash=sha256:c15fdfb141fcf6a900e68bfa35689e1256a670db32b96e7a931cab4a0e1600e5 \ - --hash=sha256:c6a23ae9348a7a92e7f750f9b7e828448e428e99c24616dec93a0720342f241d \ - --hash=sha256:c75738ce13d257efbb6633a049fb2ed8e87e2e6c2e906c52d1093a4d08d67c6b \ - --hash=sha256:d1d0ce6c6947a3a4aa5479bebceff2c807b9f3b529b637e2b33dea4468d75fc7 \ - --hash=sha256:d5b14abde6f8d969e6b9dd8c7a013d9a2b52af1235fe7bebef25ad5c8f47fa18 \ - --hash=sha256:d6ed790728fb71e6b8247bd28e77e99d0c276dff952389b5388169b8ca7b1c28 \ - --hash=sha256:e0d84099ea7cba9ff467f9c6f747e3fc3906e2aadac1ce7b41add72e8d0a3712 \ - --hash=sha256:e4353923f38d752ecfbd3f1f20bf7a3546993ae5ecd7c07fd2f25d40b4e54571 \ - --hash=sha256:e91029d7f151d8bf5ab7d8bfe2c3dbefd239759d642b211a677bc0709c9fdb96 \ - --hash=sha256:ea473c37872f0159294f7073f3fa72f68b03a129799f3533b2bb44d5e9fa4f82 \ - --hash=sha256:f154bd866318185ef5865ace5be3ac047b6d1cc0aeecf53bf83fe846f4384d5d \ - --hash=sha256:f97ff5a9fc2ca47f3383482858dd2cb8ddbf7514427eecf5aa5f7992d0571429 \ - --hash=sha256:f99b7d3f7a7adfa3d11e3a48d1a91bb65739555dd6a0d3fa68aa5852d962e5b1 \ - --hash=sha256:fb220b3596358a86361139edce40d97da7458412d412e1e10c8e1970ee8c09ab \ - --hash=sha256:fd2f8a641f8f193968afdc8fd1697e602e199931012b574194052d132a79be13 +coverage[toml]==7.4.0 \ + --hash=sha256:04387a4a6ecb330c1878907ce0dc04078ea72a869263e53c72a1ba5bbdf380ca \ + --hash=sha256:0676cd0ba581e514b7f726495ea75aba3eb20899d824636c6f59b0ed2f88c471 \ + 
--hash=sha256:0e8d06778e8fbffccfe96331a3946237f87b1e1d359d7fbe8b06b96c95a5407a \ + --hash=sha256:0eb3c2f32dabe3a4aaf6441dde94f35687224dfd7eb2a7f47f3fd9428e421058 \ + --hash=sha256:109f5985182b6b81fe33323ab4707011875198c41964f014579cf82cebf2bb85 \ + --hash=sha256:13eaf476ec3e883fe3e5fe3707caeb88268a06284484a3daf8250259ef1ba143 \ + --hash=sha256:164fdcc3246c69a6526a59b744b62e303039a81e42cfbbdc171c91a8cc2f9446 \ + --hash=sha256:26776ff6c711d9d835557ee453082025d871e30b3fd6c27fcef14733f67f0590 \ + --hash=sha256:26f66da8695719ccf90e794ed567a1549bb2644a706b41e9f6eae6816b398c4a \ + --hash=sha256:29f3abe810930311c0b5d1a7140f6395369c3db1be68345638c33eec07535105 \ + --hash=sha256:316543f71025a6565677d84bc4df2114e9b6a615aa39fb165d697dba06a54af9 \ + --hash=sha256:36b0ea8ab20d6a7564e89cb6135920bc9188fb5f1f7152e94e8300b7b189441a \ + --hash=sha256:3cc9d4bc55de8003663ec94c2f215d12d42ceea128da8f0f4036235a119c88ac \ + --hash=sha256:485e9f897cf4856a65a57c7f6ea3dc0d4e6c076c87311d4bc003f82cfe199d25 \ + --hash=sha256:5040148f4ec43644702e7b16ca864c5314ccb8ee0751ef617d49aa0e2d6bf4f2 \ + --hash=sha256:51456e6fa099a8d9d91497202d9563a320513fcf59f33991b0661a4a6f2ad450 \ + --hash=sha256:53d7d9158ee03956e0eadac38dfa1ec8068431ef8058fe6447043db1fb40d932 \ + --hash=sha256:5a10a4920def78bbfff4eff8a05c51be03e42f1c3735be42d851f199144897ba \ + --hash=sha256:5b14b4f8760006bfdb6e08667af7bc2d8d9bfdb648351915315ea17645347137 \ + --hash=sha256:5b2ccb7548a0b65974860a78c9ffe1173cfb5877460e5a229238d985565574ae \ + --hash=sha256:697d1317e5290a313ef0d369650cfee1a114abb6021fa239ca12b4849ebbd614 \ + --hash=sha256:6ae8c9d301207e6856865867d762a4b6fd379c714fcc0607a84b92ee63feff70 \ + --hash=sha256:707c0f58cb1712b8809ece32b68996ee1e609f71bd14615bd8f87a1293cb610e \ + --hash=sha256:74775198b702868ec2d058cb92720a3c5a9177296f75bd97317c787daf711505 \ + --hash=sha256:756ded44f47f330666843b5781be126ab57bb57c22adbb07d83f6b519783b870 \ + --hash=sha256:76f03940f9973bfaee8cfba70ac991825611b9aac047e5c80d499a44079ec0bc \ + 
--hash=sha256:79287fd95585ed36e83182794a57a46aeae0b64ca53929d1176db56aacc83451 \ + --hash=sha256:799c8f873794a08cdf216aa5d0531c6a3747793b70c53f70e98259720a6fe2d7 \ + --hash=sha256:7d360587e64d006402b7116623cebf9d48893329ef035278969fa3bbf75b697e \ + --hash=sha256:80b5ee39b7f0131ebec7968baa9b2309eddb35b8403d1869e08f024efd883566 \ + --hash=sha256:815ac2d0f3398a14286dc2cea223a6f338109f9ecf39a71160cd1628786bc6f5 \ + --hash=sha256:83c2dda2666fe32332f8e87481eed056c8b4d163fe18ecc690b02802d36a4d26 \ + --hash=sha256:846f52f46e212affb5bcf131c952fb4075b55aae6b61adc9856222df89cbe3e2 \ + --hash=sha256:936d38794044b26c99d3dd004d8af0035ac535b92090f7f2bb5aa9c8e2f5cd42 \ + --hash=sha256:9864463c1c2f9cb3b5db2cf1ff475eed2f0b4285c2aaf4d357b69959941aa555 \ + --hash=sha256:995ea5c48c4ebfd898eacb098164b3cc826ba273b3049e4a889658548e321b43 \ + --hash=sha256:a1526d265743fb49363974b7aa8d5899ff64ee07df47dd8d3e37dcc0818f09ed \ + --hash=sha256:a56de34db7b7ff77056a37aedded01b2b98b508227d2d0979d373a9b5d353daa \ + --hash=sha256:a7c97726520f784239f6c62506bc70e48d01ae71e9da128259d61ca5e9788516 \ + --hash=sha256:b8e99f06160602bc64da35158bb76c73522a4010f0649be44a4e167ff8555952 \ + --hash=sha256:bb1de682da0b824411e00a0d4da5a784ec6496b6850fdf8c865c1d68c0e318dd \ + --hash=sha256:bf477c355274a72435ceb140dc42de0dc1e1e0bf6e97195be30487d8eaaf1a09 \ + --hash=sha256:bf635a52fc1ea401baf88843ae8708591aa4adff875e5c23220de43b1ccf575c \ + --hash=sha256:bfd5db349d15c08311702611f3dccbef4b4e2ec148fcc636cf8739519b4a5c0f \ + --hash=sha256:c530833afc4707fe48524a44844493f36d8727f04dcce91fb978c414a8556cc6 \ + --hash=sha256:cc6d65b21c219ec2072c1293c505cf36e4e913a3f936d80028993dd73c7906b1 \ + --hash=sha256:cd3c1e4cb2ff0083758f09be0f77402e1bdf704adb7f89108007300a6da587d0 \ + --hash=sha256:cfd2a8b6b0d8e66e944d47cdec2f47c48fef2ba2f2dff5a9a75757f64172857e \ + --hash=sha256:d0ca5c71a5a1765a0f8f88022c52b6b8be740e512980362f7fdbb03725a0d6b9 \ + --hash=sha256:e7defbb9737274023e2d7af02cac77043c86ce88a907c58f42b580a97d5bcca9 \ + 
--hash=sha256:e9d1bf53c4c8de58d22e0e956a79a5b37f754ed1ffdbf1a260d9dcfa2d8a325e \ + --hash=sha256:ea81d8f9691bb53f4fb4db603203029643caffc82bf998ab5b59ca05560f4c06 # via # -r requirements/dev.in # pytest-cov @@ -278,9 +278,9 @@ jsonschema==4.20.0 \ --hash=sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa \ --hash=sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3 # via sphinxcontrib-redoc -jsonschema-specifications==2023.11.2 \ - --hash=sha256:9472fc4fea474cd74bea4a2b190daeccb5a9e4db2ea80efcf7a1b582fc9a81b8 \ - --hash=sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93 +jsonschema-specifications==2023.12.1 \ + --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \ + --hash=sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c # via jsonschema latexcodec==2.0.1 \ --hash=sha256:2aa2551c373261cefe2ad3a8953a6d6533e68238d180eb4bb91d7964adb3fe9a \ @@ -576,9 +576,9 @@ pygments==2.17.2 \ pylatexenc==2.10 \ --hash=sha256:3dd8fd84eb46dc30bee1e23eaab8d8fb5a7f507347b23e5f38ad9675c84f40d3 # via documenteer -pytest==7.4.3 \ - --hash=sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac \ - --hash=sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5 +pytest==7.4.4 \ + --hash=sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280 \ + --hash=sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8 # via # -r requirements/dev.in # pytest-cov @@ -670,106 +670,106 @@ rich==13.7.0 \ --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \ --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235 # via pytest-pretty -rpds-py==0.15.2 \ - --hash=sha256:02744236ac1895d7be837878e707a5c35fb8edc5137602f253b63623d7ad5c8c \ - --hash=sha256:03f9c5875515820633bd7709a25c3e60c1ea9ad1c5d4030ce8a8c203309c36fd \ - 
--hash=sha256:044f6f46d62444800402851afa3c3ae50141f12013060c1a3a0677e013310d6d \ - --hash=sha256:07a2e1d78d382f7181789713cdf0c16edbad4fe14fe1d115526cb6f0eef0daa3 \ - --hash=sha256:082e0e55d73690ffb4da4352d1b5bbe1b5c6034eb9dc8c91aa2a3ee15f70d3e2 \ - --hash=sha256:13152dfe7d7c27c40df8b99ac6aab12b978b546716e99f67e8a67a1d441acbc3 \ - --hash=sha256:13716e53627ad97babf72ac9e01cf9a7d4af2f75dd5ed7b323a7a9520e948282 \ - --hash=sha256:13ff62d3561a23c17341b4afc78e8fcfd799ab67c0b1ca32091d71383a98ba4b \ - --hash=sha256:1607cda6129f815493a3c184492acb5ae4aa6ed61d3a1b3663aa9824ed26f7ac \ - --hash=sha256:164fcee32f15d04d61568c9cb0d919e37ff3195919cd604039ff3053ada0461b \ - --hash=sha256:1c24e30d720c0009b6fb2e1905b025da56103c70a8b31b99138e4ed1c2a6c5b0 \ - --hash=sha256:1e6fcd0a0f62f2997107f758bb372397b8d5fd5f39cc6dcb86f7cb98a2172d6c \ - --hash=sha256:1fd0f0b1ccd7d537b858a56355a250108df692102e08aa2036e1a094fd78b2dc \ - --hash=sha256:2181e86d4e1cdf49a7320cb72a36c45efcb7670d0a88f09fd2d3a7967c0540fd \ - --hash=sha256:2974e6dff38afafd5ccf8f41cb8fc94600b3f4fd9b0a98f6ece6e2219e3158d5 \ - --hash=sha256:2dccc623725d0b298f557d869a68496a2fd2a9e9c41107f234fa5f7a37d278ac \ - --hash=sha256:2df3d07a16a3bef0917b28cd564778fbb31f3ffa5b5e33584470e2d1b0f248f0 \ - --hash=sha256:2e7e5633577b3bd56bf3af2ef6ae3778bbafb83743989d57f0e7edbf6c0980e4 \ - --hash=sha256:2ee066a64f0d2ba45391cac15b3a70dcb549e968a117bd0500634754cfe0e5fc \ - --hash=sha256:2f1f295a5c28cfa74a7d48c95acc1c8a7acd49d7d9072040d4b694fe11cd7166 \ - --hash=sha256:2faa97212b0dc465afeedf49045cdd077f97be1188285e646a9f689cb5dfff9e \ - --hash=sha256:30479a9f1fce47df56b07460b520f49fa2115ec2926d3b1303c85c81f8401ed1 \ - --hash=sha256:337a8653fb11d2fbe7157c961cc78cb3c161d98cf44410ace9a3dc2db4fad882 \ - --hash=sha256:3423007fc0661827e06f8a185a3792c73dda41f30f3421562f210cf0c9e49569 \ - --hash=sha256:373b76eeb79e8c14f6d82cb1d4d5293f9e4059baec6c1b16dca7ad13b6131b39 \ - --hash=sha256:3b79c63d29101cbaa53a517683557bb550462394fb91044cc5998dd2acff7340 \ - 
--hash=sha256:3bbc89ce2a219662ea142f0abcf8d43f04a41d5b1880be17a794c39f0d609cb0 \ - --hash=sha256:3c11bc5814554b018f6c5d6ae0969e43766f81e995000b53a5d8c8057055e886 \ - --hash=sha256:3cd61e759c4075510052d1eca5cddbd297fe1164efec14ef1fce3f09b974dfe4 \ - --hash=sha256:3d40fb3ca22e3d40f494d577441b263026a3bd8c97ae6ce89b2d3c4b39ac9581 \ - --hash=sha256:3db0c998c92b909d7c90b66c965590d4f3cd86157176a6cf14aa1f867b77b889 \ - --hash=sha256:422b0901878a31ef167435c5ad46560362891816a76cc0d150683f3868a6f0d1 \ - --hash=sha256:46b4f3d47d1033db569173be62365fbf7808c2bd3fb742314d251f130d90d44c \ - --hash=sha256:485fbdd23becb822804ed05622907ee5c8e8a5f43f6f43894a45f463b2217045 \ - --hash=sha256:53304cc14b1d94487d70086e1cb0cb4c29ec6da994d58ae84a4d7e78c6a6d04d \ - --hash=sha256:5595c80dd03d7e6c6afb73f3594bf3379a7d79fa57164b591d012d4b71d6ac4c \ - --hash=sha256:56b51ba29a18e5f5810224bcf00747ad931c0716e3c09a76b4a1edd3d4aba71f \ - --hash=sha256:580182fa5b269c2981e9ce9764367cb4edc81982ce289208d4607c203f44ffde \ - --hash=sha256:5e99d6510c8557510c220b865d966b105464740dcbebf9b79ecd4fbab30a13d9 \ - --hash=sha256:5eb05b654a41e0f81ab27a7c3e88b6590425eb3e934e1d533ecec5dc88a6ffff \ - --hash=sha256:62b292fff4739c6be89e6a0240c02bda5a9066a339d90ab191cf66e9fdbdc193 \ - --hash=sha256:6a5122b17a4faf5d7a6d91fa67b479736c0cacc7afe791ddebb7163a8550b799 \ - --hash=sha256:6a8ff8e809da81363bffca2b965cb6e4bf6056b495fc3f078467d1f8266fe27f \ - --hash=sha256:6c43e1b89099279cc03eb1c725c5de12af6edcd2f78e2f8a022569efa639ada3 \ - --hash=sha256:709dc11af2f74ba89c68b1592368c6edcbccdb0a06ba77eb28c8fe08bb6997da \ - --hash=sha256:7e072f5da38d6428ba1fc1115d3cc0dae895df671cb04c70c019985e8c7606be \ - --hash=sha256:813a65f95bfcb7c8f2a70dd6add9b51e9accc3bdb3e03d0ff7a9e6a2d3e174bf \ - --hash=sha256:86c01299942b0f4b5b5f28c8701689181ad2eab852e65417172dbdd6c5b3ccc8 \ - --hash=sha256:893e38d0f4319dfa70c0f36381a37cc418985c87b11d9784365b1fff4fa6973b \ - --hash=sha256:8a5f574b92b3ee7d254e56d56e37ec0e1416acb1ae357c4956d76a1788dc58fb \ - 
--hash=sha256:8b9650f92251fdef843e74fc252cdfd6e3c700157ad686eeb0c6d7fdb2d11652 \ - --hash=sha256:8ec464f20fe803ae00419bd1610934e3bda963aeba1e6181dfc9033dc7e8940c \ - --hash=sha256:8f333bfe782a2d05a67cfaa0cc9cd68b36b39ee6acfe099f980541ed973a7093 \ - --hash=sha256:8ffdeb7dbd0160d4e391e1f857477e4762d00aa2199c294eb95dfb9451aa1d9f \ - --hash=sha256:911e600e798374c0d86235e7ef19109cf865d1336942d398ff313375a25a93ba \ - --hash=sha256:9235be95662559141934fced8197de6fee8c58870f36756b0584424b6d708393 \ - --hash=sha256:938518a11780b39998179d07f31a4a468888123f9b00463842cd40f98191f4d3 \ - --hash=sha256:93c18a1696a8e0388ed84b024fe1a188a26ba999b61d1d9a371318cb89885a8c \ - --hash=sha256:97532802f14d383f37d603a56e226909f825a83ff298dc1b6697de00d2243999 \ - --hash=sha256:98ee201a52a7f65608e5494518932e1473fd43535f12cade0a1b4ab32737fe28 \ - --hash=sha256:9d2ae79f31da5143e020a8d4fc74e1f0cbcb8011bdf97453c140aa616db51406 \ - --hash=sha256:9d38494a8d21c246c535b41ecdb2d562c4b933cf3d68de03e8bc43a0d41be652 \ - --hash=sha256:9d41ebb471a6f064c0d1c873c4f7dded733d16ca5db7d551fb04ff3805d87802 \ - --hash=sha256:9e09d017e3f4d9bd7d17a30d3f59e4d6d9ba2d2ced280eec2425e84112cf623f \ - --hash=sha256:a6945c2d61c42bb7e818677f43638675b8c1c43e858b67a96df3eb2426a86c9d \ - --hash=sha256:a72e00826a2b032dda3eb25aa3e3579c6d6773d22d8446089a57a123481cc46c \ - --hash=sha256:aa1e626c524d2c7972c0f3a8a575d654a3a9c008370dc2a97e46abd0eaa749b9 \ - --hash=sha256:ab095edf1d840a6a6a4307e1a5b907a299a94e7b90e75436ee770b8c35d22a25 \ - --hash=sha256:ac2ac84a4950d627d84b61f082eba61314373cfab4b3c264b62efab02ababe83 \ - --hash=sha256:ac7187bee72384b9cfedf09a29a3b2b6e8815cc64c095cdc8b5e6aec81e9fd5f \ - --hash=sha256:ae9d83a81b09ce3a817e2cbb23aabc07f86a3abc664c613cd283ce7a03541e95 \ - --hash=sha256:afeabb382c1256a7477b739820bce7fe782bb807d82927102cee73e79b41b38b \ - --hash=sha256:b2a4cd924d0e2f4b1a68034abe4cadc73d69ad5f4cf02db6481c0d4d749f548f \ - --hash=sha256:b414ef79f1f06fb90b5165db8aef77512c1a5e3ed1b4807da8476b7e2c853283 \ - 
--hash=sha256:b4ecbba7efd82bd2a4bb88aab7f984eb5470991c1347bdd1f35fb34ea28dba6e \ - --hash=sha256:b61d5096e75fd71018b25da50b82dd70ec39b5e15bb2134daf7eb7bbbc103644 \ - --hash=sha256:b629db53fe17e6ce478a969d30bd1d0e8b53238c46e3a9c9db39e8b65a9ef973 \ - --hash=sha256:b70b45a40ad0798b69748b34d508259ef2bdc84fb2aad4048bc7c9cafb68ddb3 \ - --hash=sha256:b88c3ab98556bc351b36d6208a6089de8c8db14a7f6e1f57f82a334bd2c18f0b \ - --hash=sha256:baf744e5f9d5ee6531deea443be78b36ed1cd36c65a0b95ea4e8d69fa0102268 \ - --hash=sha256:bbc7421cbd28b4316d1d017db338039a7943f945c6f2bb15e1439b14b5682d28 \ - --hash=sha256:c31272c674f725dfe0f343d73b0abe8c878c646967ec1c6106122faae1efc15b \ - --hash=sha256:c51a899792ee2c696072791e56b2020caff58b275abecbc9ae0cb71af0645c95 \ - --hash=sha256:c61e42b4ceb9759727045765e87d51c1bb9f89987aca1fcc8a040232138cad1c \ - --hash=sha256:c7cd0841a586b7105513a7c8c3d5c276f3adc762a072d81ef7fae80632afad1e \ - --hash=sha256:c827a931c6b57f50f1bb5de400dcfb00bad8117e3753e80b96adb72d9d811514 \ - --hash=sha256:d2aa3ca9552f83b0b4fa6ca8c6ce08da6580f37e3e0ab7afac73a1cfdc230c0e \ - --hash=sha256:d46ee458452727a147d7897bb33886981ae1235775e05decae5d5d07f537695a \ - --hash=sha256:d64a657de7aae8db2da60dc0c9e4638a0c3893b4d60101fd564a3362b2bfeb34 \ - --hash=sha256:d800a8e2ac62db1b9ea5d6d1724f1a93c53907ca061de4d05ed94e8dfa79050c \ - --hash=sha256:d9d7ebcd11ea76ba0feaae98485cd8e31467c3d7985210fab46983278214736b \ - --hash=sha256:dd7d3608589072f63078b4063a6c536af832e76b0b3885f1bfe9e892abe6c207 \ - --hash=sha256:ec19e823b4ccd87bd69e990879acbce9e961fc7aebe150156b8f4418d4b27b7f \ - --hash=sha256:ee40206d1d6e95eaa2b7b919195e3689a5cf6ded730632de7f187f35a1b6052c \ - --hash=sha256:f138f550b83554f5b344d6be35d3ed59348510edc3cb96f75309db6e9bfe8210 \ - --hash=sha256:f3e6e2e502c4043c52a99316d89dc49f416acda5b0c6886e0dd8ea7bb35859e8 \ - --hash=sha256:fb10bb720348fe1647a94eb605accb9ef6a9b1875d8845f9e763d9d71a706387 \ - --hash=sha256:fc066395e6332da1e7525d605b4c96055669f8336600bef8ac569d5226a7c76f \ - 
--hash=sha256:fc33267d58dfbb2361baed52668c5d8c15d24bc0372cecbb79fed77339b55e0d +rpds-py==0.16.2 \ + --hash=sha256:0474df4ade9a3b4af96c3d36eb81856cb9462e4c6657d4caecfd840d2a13f3c9 \ + --hash=sha256:071980663c273bf3d388fe5c794c547e6f35ba3335477072c713a3176bf14a60 \ + --hash=sha256:07aab64e2808c3ebac2a44f67e9dc0543812b715126dfd6fe4264df527556cb6 \ + --hash=sha256:088396c7c70e59872f67462fcac3ecbded5233385797021976a09ebd55961dfe \ + --hash=sha256:162d7cd9cd311c1b0ff1c55a024b8f38bd8aad1876b648821da08adc40e95734 \ + --hash=sha256:19f00f57fdd38db4bb5ad09f9ead1b535332dbf624200e9029a45f1f35527ebb \ + --hash=sha256:1bdbc5fcb04a7309074de6b67fa9bc4b418ab3fc435fec1f2779a0eced688d04 \ + --hash=sha256:1be2f033df1b8be8c3167ba3c29d5dca425592ee31e35eac52050623afba5772 \ + --hash=sha256:24f7a2eb3866a9e91f4599851e0c8d39878a470044875c49bd528d2b9b88361c \ + --hash=sha256:290a81cfbe4673285cdf140ec5cd1658ffbf63ab359f2b352ebe172e7cfa5bf0 \ + --hash=sha256:2946b120718eba9af2b4dd103affc1164a87b9e9ebff8c3e4c05d7b7a7e274e2 \ + --hash=sha256:2bd82db36cd70b3628c0c57d81d2438e8dd4b7b32a6a9f25f24ab0e657cb6c4e \ + --hash=sha256:2ddef620e70eaffebed5932ce754d539c0930f676aae6212f8e16cd9743dd365 \ + --hash=sha256:2e53b9b25cac9065328901713a7e9e3b12e4f57ef4280b370fbbf6fef2052eef \ + --hash=sha256:302bd4983bbd47063e452c38be66153760112f6d3635c7eeefc094299fa400a9 \ + --hash=sha256:349cb40897fd529ca15317c22c0eab67f5ac5178b5bd2c6adc86172045210acc \ + --hash=sha256:358dafc89ce3894c7f486c615ba914609f38277ef67f566abc4c854d23b997fa \ + --hash=sha256:35953f4f2b3216421af86fd236b7c0c65935936a94ea83ddbd4904ba60757773 \ + --hash=sha256:35ae5ece284cf36464eb160880018cf6088a9ac5ddc72292a6092b6ef3f4da53 \ + --hash=sha256:3b811d182ad17ea294f2ec63c0621e7be92a1141e1012383461872cead87468f \ + --hash=sha256:3da5a4c56953bdbf6d04447c3410309616c54433146ccdb4a277b9cb499bc10e \ + --hash=sha256:3dc6a7620ba7639a3db6213da61312cb4aa9ac0ca6e00dc1cbbdc21c2aa6eb57 \ + 
--hash=sha256:3f91df8e6dbb7360e176d1affd5fb0246d2b88d16aa5ebc7db94fd66b68b61da \ + --hash=sha256:4022b9dc620e14f30201a8a73898a873c8e910cb642bcd2f3411123bc527f6ac \ + --hash=sha256:413b9c17388bbd0d87a329d8e30c1a4c6e44e2bb25457f43725a8e6fe4161e9e \ + --hash=sha256:43d4dd5fb16eb3825742bad8339d454054261ab59fed2fbac84e1d84d5aae7ba \ + --hash=sha256:44627b6ca7308680a70766454db5249105fa6344853af6762eaad4158a2feebe \ + --hash=sha256:44a54e99a2b9693a37ebf245937fd6e9228b4cbd64b9cc961e1f3391ec6c7391 \ + --hash=sha256:47713dc4fce213f5c74ca8a1f6a59b622fc1b90868deb8e8e4d993e421b4b39d \ + --hash=sha256:495a14b72bbe217f2695dcd9b5ab14d4f8066a00f5d209ed94f0aca307f85f6e \ + --hash=sha256:4c46ad6356e1561f2a54f08367d1d2e70a0a1bb2db2282d2c1972c1d38eafc3b \ + --hash=sha256:4d6a9f052e72d493efd92a77f861e45bab2f6be63e37fa8ecf0c6fd1a58fedb0 \ + --hash=sha256:509b617ac787cd1149600e731db9274ebbef094503ca25158e6f23edaba1ca8f \ + --hash=sha256:5552f328eaef1a75ff129d4d0c437bf44e43f9436d3996e8eab623ea0f5fcf73 \ + --hash=sha256:5a80e2f83391ad0808b4646732af2a7b67550b98f0cae056cb3b40622a83dbb3 \ + --hash=sha256:5cf6af100ffb5c195beec11ffaa8cf8523057f123afa2944e6571d54da84cdc9 \ + --hash=sha256:5e6caa3809e50690bd92fa490f5c38caa86082c8c3315aa438bce43786d5e90d \ + --hash=sha256:5ef00873303d678aaf8b0627e111fd434925ca01c657dbb2641410f1cdaef261 \ + --hash=sha256:69ac7ea9897ec201ce68b48582f3eb34a3f9924488a5432a93f177bf76a82a7e \ + --hash=sha256:6a61226465bda9283686db8f17d02569a98e4b13c637be5a26d44aa1f1e361c2 \ + --hash=sha256:6d904c5693e08bad240f16d79305edba78276be87061c872a4a15e2c301fa2c0 \ + --hash=sha256:6dace7b26a13353e24613417ce2239491b40a6ad44e5776a18eaff7733488b44 \ + --hash=sha256:6df15846ee3fb2e6397fe25d7ca6624af9f89587f3f259d177b556fed6bebe2c \ + --hash=sha256:703d95c75a72e902544fda08e965885525e297578317989fd15a6ce58414b41d \ + --hash=sha256:726ac36e8a3bb8daef2fd482534cabc5e17334052447008405daca7ca04a3108 \ + --hash=sha256:781ef8bfc091b19960fc0142a23aedadafa826bc32b433fdfe6fd7f964d7ef44 \ + 
--hash=sha256:80443fe2f7b3ea3934c5d75fb0e04a5dbb4a8e943e5ff2de0dec059202b70a8b \ + --hash=sha256:83640a5d7cd3bff694747d50436b8b541b5b9b9782b0c8c1688931d6ee1a1f2d \ + --hash=sha256:84c5a4d1f9dd7e2d2c44097fb09fffe728629bad31eb56caf97719e55575aa82 \ + --hash=sha256:882ce6e25e585949c3d9f9abd29202367175e0aab3aba0c58c9abbb37d4982ff \ + --hash=sha256:888a97002e986eca10d8546e3c8b97da1d47ad8b69726dcfeb3e56348ebb28a3 \ + --hash=sha256:8aad80645a011abae487d356e0ceb359f4938dfb6f7bcc410027ed7ae4f7bb8b \ + --hash=sha256:8cb6fe8ecdfffa0e711a75c931fb39f4ba382b4b3ccedeca43f18693864fe850 \ + --hash=sha256:8d6b6937ae9eac6d6c0ca3c42774d89fa311f55adff3970fb364b34abde6ed3d \ + --hash=sha256:90123853fc8b1747f80b0d354be3d122b4365a93e50fc3aacc9fb4c2488845d6 \ + --hash=sha256:96f957d6ab25a78b9e7fc9749d754b98eac825a112b4e666525ce89afcbd9ed5 \ + --hash=sha256:981d135c7cdaf6cd8eadae1c950de43b976de8f09d8e800feed307140d3d6d00 \ + --hash=sha256:9b32f742ce5b57201305f19c2ef7a184b52f6f9ba6871cc042c2a61f0d6b49b8 \ + --hash=sha256:9f0350ef2fba5f34eb0c9000ea328e51b9572b403d2f7f3b19f24085f6f598e8 \ + --hash=sha256:a297a4d08cc67c7466c873c78039d87840fb50d05473db0ec1b7b03d179bf322 \ + --hash=sha256:a3d7e2ea25d3517c6d7e5a1cc3702cffa6bd18d9ef8d08d9af6717fc1c700eed \ + --hash=sha256:a4b682c5775d6a3d21e314c10124599976809455ee67020e8e72df1769b87bc3 \ + --hash=sha256:a4ebb8b20bd09c5ce7884c8f0388801100f5e75e7f733b1b6613c713371feefc \ + --hash=sha256:a61f659665a39a4d17d699ab3593d7116d66e1e2e3f03ef3fb8f484e91908808 \ + --hash=sha256:a9880b4656efe36ccad41edc66789e191e5ee19a1ea8811e0aed6f69851a82f4 \ + --hash=sha256:ac08472f41ea77cd6a5dae36ae7d4ed3951d6602833af87532b556c1b4601d63 \ + --hash=sha256:adc0c3d6fc6ae35fee3e4917628983f6ce630d513cbaad575b4517d47e81b4bb \ + --hash=sha256:af27423662f32d7501a00c5e7342f7dbd1e4a718aea7a239781357d15d437133 \ + --hash=sha256:b2e75e17bd0bb66ee34a707da677e47c14ee51ccef78ed6a263a4cc965a072a1 \ + --hash=sha256:b634c5ec0103c5cbebc24ebac4872b045cccb9456fc59efdcf6fe39775365bd2 \ + 
--hash=sha256:b6f5549d6ed1da9bfe3631ca9483ae906f21410be2445b73443fa9f017601c6f \ + --hash=sha256:bd4b677d929cf1f6bac07ad76e0f2d5de367e6373351c01a9c0a39f6b21b4a8b \ + --hash=sha256:bf721ede3eb7b829e4a9b8142bd55db0bdc82902720548a703f7e601ee13bdc3 \ + --hash=sha256:c647ca87fc0ebe808a41de912e9a1bfef9acb85257e5d63691364ac16b81c1f0 \ + --hash=sha256:ca57468da2d9a660bcf8961637c85f2fbb2aa64d9bc3f9484e30c3f9f67b1dd7 \ + --hash=sha256:cad0f59ee3dc35526039f4bc23642d52d5f6616b5f687d846bfc6d0d6d486db0 \ + --hash=sha256:cc97f0640e91d7776530f06e6836c546c1c752a52de158720c4224c9e8053cad \ + --hash=sha256:ccd4e400309e1f34a5095bf9249d371f0fd60f8a3a5c4a791cad7b99ce1fd38d \ + --hash=sha256:cffa76b385dfe1e38527662a302b19ffb0e7f5cf7dd5e89186d2c94a22dd9d0c \ + --hash=sha256:d0dd7ed2f16df2e129496e7fbe59a34bc2d7fc8db443a606644d069eb69cbd45 \ + --hash=sha256:d452817e0d9c749c431a1121d56a777bd7099b720b3d1c820f1725cb40928f58 \ + --hash=sha256:d8dda2a806dfa4a9b795950c4f5cc56d6d6159f7d68080aedaff3bdc9b5032f5 \ + --hash=sha256:dcbe1f8dd179e4d69b70b1f1d9bb6fd1e7e1bdc9c9aad345cdeb332e29d40748 \ + --hash=sha256:e0441fb4fdd39a230477b2ca9be90868af64425bfe7b122b57e61e45737a653b \ + --hash=sha256:e04e56b4ca7a770593633556e8e9e46579d66ec2ada846b401252a2bdcf70a6d \ + --hash=sha256:e061de3b745fe611e23cd7318aec2c8b0e4153939c25c9202a5811ca911fd733 \ + --hash=sha256:e93ec1b300acf89730cf27975ef574396bc04edecc358e9bd116fb387a123239 \ + --hash=sha256:e9e557db6a177470316c82f023e5d571811c9a4422b5ea084c85da9aa3c035fc \ + --hash=sha256:eab36eae3f3e8e24b05748ec9acc66286662f5d25c52ad70cadab544e034536b \ + --hash=sha256:ec23fcad480e77ede06cf4127a25fc440f7489922e17fc058f426b5256ee0edb \ + --hash=sha256:ec2e1cf025b2c0f48ec17ff3e642661da7ee332d326f2e6619366ce8e221f018 \ + --hash=sha256:ed99b4f7179d2111702020fd7d156e88acd533f5a7d3971353e568b6051d5c97 \ + --hash=sha256:ee94cb58c0ba2c62ee108c2b7c9131b2c66a29e82746e8fa3aa1a1effbd3dcf1 \ + --hash=sha256:f19afcfc0dd0dca35694df441e9b0f95bc231b512f51bded3c3d8ca32153ec19 \ + 
--hash=sha256:f1b9d9260e06ea017feb7172976ab261e011c1dc2f8883c7c274f6b2aabfe01a \ + --hash=sha256:f28ac0e8e7242d140f99402a903a2c596ab71550272ae9247ad78f9a932b5698 \ + --hash=sha256:f42e25c016927e2a6b1ce748112c3ab134261fc2ddc867e92d02006103e1b1b7 \ + --hash=sha256:f4bd4578e44f26997e9e56c96dedc5f1af43cc9d16c4daa29c771a00b2a26851 \ + --hash=sha256:f811771019f063bbd0aa7bb72c8a934bc13ebacb4672d712fc1639cfd314cccc # via # jsonschema # referencing diff --git a/requirements/main.txt b/requirements/main.txt index 63a30a5244..3034ee4dd8 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -8,11 +8,10 @@ annotated-types==0.6.0 \ --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ --hash=sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d # via pydantic -anyio==3.7.1 \ - --hash=sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780 \ - --hash=sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5 +anyio==4.2.0 \ + --hash=sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee \ + --hash=sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f # via - # fastapi # httpcore # starlette bcrypt==4.1.2 \ @@ -231,9 +230,9 @@ cryptography==41.0.7 \ # -r requirements/main.in # pyjwt # safir -fastapi==0.105.0 \ - --hash=sha256:4d12838819aa52af244580675825e750ad67c9df4614f557a769606af902cf22 \ - --hash=sha256:f19ebf6fdc82a3281d10f2cb4774bdfa90238e3b40af3525a0c09fd08ad1c480 +fastapi==0.108.0 \ + --hash=sha256:5056e504ac6395bf68493d71fcfc5352fdbd5fda6f88c21f6420d80d81163296 \ + --hash=sha256:8c7bc6d315da963ee4cdb605557827071a9a7f95aeb8fcdd3bde48cdc8764dd7 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -551,15 +550,15 @@ sniffio==1.3.0 \ # anyio # httpcore # httpx -starlette==0.27.0 \ - --hash=sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75 \ - 
--hash=sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91 +starlette==0.32.0.post1 \ + --hash=sha256:cd0cb10ddb49313f609cedfac62c8c12e56c7314b66d89bb077ba228bada1b09 \ + --hash=sha256:e54e2b7e2fb06dff9eac40133583f10dfa05913f5a85bf26f427c7a40a9a3d02 # via # fastapi # safir -structlog==23.2.0 \ - --hash=sha256:16a167e87b9fa7fae9a972d5d12805ef90e04857a93eba479d4be3801a6a1482 \ - --hash=sha256:334666b94707f89dbc4c81a22a8ccd34449f0201d5b1ee097a030b577fa8c858 +structlog==23.3.0 \ + --hash=sha256:24b42b914ac6bc4a4e6f716e82ac70d7fb1e8c3b1035a765591953bfc37101a5 \ + --hash=sha256:d6922a88ceabef5b13b9eda9c4043624924f60edbb00397f4d193bd754cde60a # via safir typing-extensions==4.9.0 \ --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \ From 3d4921a4f243a281256f31964ead14508410b489 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 17:00:31 +0000 Subject: [PATCH 394/588] Update Helm release argo-cd to v5.52.0 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 574f794f5d..ff30957329 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.51.6 + version: 5.52.0 repository: https://argoproj.github.io/argo-helm From d3e090760427055a6192fd8806a1ca81fe1a789e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 2 Jan 2024 11:02:06 -0800 Subject: [PATCH 395/588] Fix documentation errors - Fix a permanent redirect of a GitHub link - Fix formatting errors in the troubleshooting page - Do not check the validity of console.cloud.google.com links - Add empty values-base.yaml file for rubintv --- applications/rubintv/values-base.yaml | 0 docs/about/local-environment-setup.rst | 2 +- .../infrastructure/google/credentials.rst | 
4 ++-- docs/admin/troubleshooting.rst | 19 ++----------------- docs/documenteer.toml | 1 + 5 files changed, 6 insertions(+), 20 deletions(-) create mode 100644 applications/rubintv/values-base.yaml diff --git a/applications/rubintv/values-base.yaml b/applications/rubintv/values-base.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/about/local-environment-setup.rst b/docs/about/local-environment-setup.rst index f480087128..144bbce630 100644 --- a/docs/about/local-environment-setup.rst +++ b/docs/about/local-environment-setup.rst @@ -23,7 +23,7 @@ You will likely need to make changes to Phalanx and create pull requests, so you Members of the `lsst-sqre/phalanx`_ repository on GitHub can clone the repository directly and create a ticket branch, per the `Data Management workflow guide`_. -Otherwise, fork lsst-sqre/phalanx `following GitHub's guide `__. +Otherwise, fork lsst-sqre/phalanx `following GitHub's guide `__. .. _about-venv: diff --git a/docs/admin/infrastructure/google/credentials.rst b/docs/admin/infrastructure/google/credentials.rst index da41abbf02..e8cda7125b 100644 --- a/docs/admin/infrastructure/google/credentials.rst +++ b/docs/admin/infrastructure/google/credentials.rst @@ -16,14 +16,14 @@ Google provides a mechanism to obtain those credentials using the :command:`gclo If you have access to multiple Google Cloud Platform projects, you will be asked to select one as your default project. You may wish to choose the project for the Phalanx environment you use most often. - You can find the project ID of a Phalanx project hosted on GKE in its :doc:`environments page `. + You can find the project ID of a Phalanx project hosted on GKE in its :doc:`environments page `. #. `Install kubectl and the GKE auth plugin `__. As part of that installation, you will run the :command:`gcloud` command that obtains credentials usable by :command:`kubectl` and other privileged Kubernetes commands. 
The final step has an example :command:`gcloud` command, but it assumes that you are getting credentials for your default project. Rubin uses multiple Google Cloud Platform projects for different environments, so you may have to provide the project ID as well. -For the full command to run, see the bottom of the relevant :doc:`environments page `. +For the full command to run, see the bottom of the relevant :doc:`environments page `. Once you have followed this process on a system, the credentials will remain valid unless the Kubernetes control plane credentials are rotated. diff --git a/docs/admin/troubleshooting.rst b/docs/admin/troubleshooting.rst index 1372c483ab..377a094527 100644 --- a/docs/admin/troubleshooting.rst +++ b/docs/admin/troubleshooting.rst @@ -20,28 +20,13 @@ When this happens, you may need to recreate the persistent volume. Spawner menu missing images, nublado stuck pulling the same image ================================================================= -**Symptoms: **When a user goes to the spawner page for the Notebook Aspect, the expected menu of images is not available. + +**Symptoms:** When a user goes to the spawner page for the Notebook Aspect, the expected menu of images is not available. Instead, the menu is missing one or more images. The same image or set of images is pulled again each on each prepuller loop the nublado lab controller attempts. **Solution:** :doc:`infrastructure/kubernetes-node-status-max-images` - -Spawner menu missing images, cachemachine stuck pulling the same image -====================================================================== - -**Symptoms:** When a user goes to the spawner page for the Notebook Aspect, the expected menu of images is not available. -Instead, the menu is either empty or missing the right number of images of different classes. -The cachemachine application is continuously creating a ``DaemonSet`` for the same image without apparent forward progress. 
-Querying the cachemachine ``/available`` API shows either nothing in ``images`` or not everything that was expected. - -**Cause:** This is the same problem as above, but with the older (cachemachine+moneypenny)-based infrastructure rather than nublado v3. The solution is the same: :doc:`infrastructure/kubernetes-node-status-max-images`. - -**Solution:** :doc:`/applications/cachemachine/pruning` - -If this doesn't work, another possibility is that there is a node that cachemachine thinks is available for JupyterLab images but which is not eligible for its ``DaemonSet``. -This would be a bug in cachemachine, which should ignore cordoned nodes, but it's possible there is a new iteration of node state or a new rule for where ``DaemonSets`` are allowed to run that it does not know about. - Spawning a notebook fails with a pending error ============================================== diff --git a/docs/documenteer.toml b/docs/documenteer.toml index b688c5b5e9..bb0dc716ef 100644 --- a/docs/documenteer.toml +++ b/docs/documenteer.toml @@ -66,4 +66,5 @@ ignore = [ '^https://usdf-rsp-int.slac.stanford.edu', '^https://usdf-tel-rsp.slac.stanford.edu', '^https://github.com/orgs/', + '^https://console.cloud.google.com/', ] From ef998810ddc003fd917a93def17a402c9dda82e2 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 13 Nov 2023 08:38:32 -0800 Subject: [PATCH 396/588] Update for new Nublado configuration syntax Many elements of the Nublado controller configuration have moved around, been renamed, or have changed syntax. Update the Phalanx configuration accordingly. Set the nublado chart to install the new alpha release. Remove DOCKER_SECRET_NAME from the Nublado controller deployment, since it appears to be unused, and stop mounting an emptyDir on /tmp for the controller. It shouldn't need to use /tmp. Update the Nublado controller ingresses to reflect the reorganized configuration, and do not install the files ingress unless the file server is enabled. 
--- applications/nublado/Chart.yaml | 2 +- applications/nublado/README.md | 41 ++- .../templates/controller-deployment.yaml | 6 - .../templates/controller-ingress-admin.yaml | 2 +- .../controller-ingress-anonymous.yaml | 6 +- .../templates/controller-ingress-files.yaml | 4 +- .../templates/controller-ingress-user.yaml | 8 +- .../nublado/templates/hub-configmap.yaml | 9 +- applications/nublado/values-base.yaml | 98 +++--- applications/nublado/values-idfdev.yaml | 20 +- applications/nublado/values-idfint.yaml | 13 +- applications/nublado/values-idfprod.yaml | 13 +- applications/nublado/values-summit.yaml | 137 ++++----- .../nublado/values-tucson-teststand.yaml | 118 ++++---- applications/nublado/values-usdfdev.yaml | 106 +++---- applications/nublado/values-usdfint.yaml | 106 +++---- applications/nublado/values-usdfprod.yaml | 106 +++---- applications/nublado/values.yaml | 281 ++++++++++-------- 18 files changed, 502 insertions(+), 574 deletions(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 4c83bb821e..8c38ef0df6 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -5,7 +5,7 @@ description: JupyterHub and custom spawner for the Rubin Science Platform sources: - https://github.com/lsst-sqre/nublado home: https://nublado.lsst.io/ -appVersion: 0.9.0 +appVersion: 4.0.0 dependencies: - name: jupyterhub diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 2056529e1a..7c9914915e 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -13,7 +13,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | Key | Type | Default | Description | |-----|------|---------|-------------| | cloudsql.affinity | object | `{}` | Affinity rules for the Cloud SQL Proxy pod | -| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. 
This will be run as a separate service, because shoehorning it into Zero to Jupyterhub's extraContainers looks messy, and it's not necessary that it be very performant. | +| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | | cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | @@ -24,12 +24,16 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | | controller.affinity | object | `{}` | Affinity rules for the lab controller pod | +| controller.config.fileserver.application | string | `"fileservers"` | ArgcoCD application in which to collect user file servers | +| controller.config.fileserver.creationTimeout | int | `120` | Timeout to wait for Kubernetes to create file servers, in seconds | | controller.config.fileserver.enabled | bool | `false` | Enable fileserver management | -| controller.config.fileserver.image | string | `"ghcr.io/lsst-sqre/worblehat"` | Image for fileserver container | +| controller.config.fileserver.idleTimeout | int | `3600` | Timeout for idle user fileservers, in seconds | +| controller.config.fileserver.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for file server image | +| controller.config.fileserver.image.repository | string | `"ghcr.io/lsst-sqre/worblehat"` | File server image to use | +| controller.config.fileserver.image.tag | string | `"0.1.0"` | Tag of file server image to use | | 
controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user fileservers | -| controller.config.fileserver.pullPolicy | string | `"IfNotPresent"` | Pull policy for fileserver container | -| controller.config.fileserver.tag | string | `"0.1.0"` | Tag for fileserver container | -| controller.config.fileserver.timeout | int | `3600` | Timeout for user fileservers, in seconds | +| controller.config.fileserver.pathPrefix | string | `"/files"` | Path prefix for user file servers | +| controller.config.fileserver.resources | object | See `values.yaml` | Resource requests and limits for user file servers | | controller.config.images.aliasTags | list | `[]` | Additional tags besides `recommendedTag` that should be recognized as aliases. | | controller.config.images.cycle | string | `nil` | Restrict images to this SAL cycle, if given. | | controller.config.images.numDailies | int | `3` | Number of most-recent dailies to prepull. | @@ -38,19 +42,24 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.images.pin | list | `[]` | List of additional image tags to prepull. Listing the image tagged as recommended here is recommended when using a Docker image source to ensure its name can be expanded properly in the menu. | | controller.config.images.recommendedTag | string | `"recommended"` | Tag marking the recommended image (shown first in the menu) | | controller.config.images.source | object | None, must be specified | Source for prepulled images. For Docker, set `type` to `docker`, `registry` to the hostname and `repository` to the name of the repository. For Google Artifact Repository, set `type` to `google`, `location` to the region, `projectId` to the Google project, `repository` to the name of the repository, and `image` to the name of the image. | -| controller.config.lab.application | string | See `values.yaml` | ArgcoCD application in which to collect user lab objects. 
| -| controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab. | +| controller.config.lab.application | string | `"nublado-users"` | ArgoCD application in which to collect user lab objects | +| controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab | +| controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | -| controller.config.lab.initcontainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image reference), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with `allowPrivilegeEscalation` true. Otherwise it will, run as UID 1000. | +| controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | +| controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. | +| controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | +| controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. 
Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | -| controller.config.lab.sizes | object | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI prefixes for memory are supported. `large`) | -| controller.config.lab.volumes | list | `[]` | Volumes that should be mounted in lab pods. This supports NFS, HostPath, and PVC volume types (differentiated in source.type) | -| controller.config.safir.logLevel | string | `"INFO"` | Level of Python logging | -| controller.config.safir.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | +| controller.config.lab.sizes | object | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | +| controller.config.lab.spawnTimeout | int | `600` | How long to wait for Kubernetes to spawn a lab in seconds. This should generally be shorter than the spawn timeout set in JupyterHub. | +| controller.config.lab.volumes | list | `[]` | Volumes that should be mounted in lab pods. This supports NFS, HostPath, and PVC volume types (differentiated in source.type). 
| +| controller.config.logLevel | string | `"INFO"` | Level of Python logging | +| controller.config.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | | controller.googleServiceAccount | string | None, must be set when using Google Artifact Registry | If Google Artifact Registry is used as the image source, the Google service account that has an IAM binding to the `nublado-controller` Kubernetes service account and has the Artifact Registry reader role | | controller.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the nublado image | -| controller.image.repository | string | `"ghcr.io/lsst-sqre/jupyterlab-controller"` | nublado image to use | +| controller.image.repository | string | `"ghcr.io/lsst-sqre/nublado-controller"` | nublado image to use | | controller.image.tag | string | The appVersion of the chart | Tag of nublado image to use | | controller.ingress.annotations | object | `{}` | Additional annotations to add for the lab controller pod ingress | | controller.nodeSelector | object | `{}` | Node selector rules for the lab controller pod | @@ -80,12 +89,12 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.hub.extraEnv | object | Gets `JUPYTERHUB_CRYPT_KEY` from `nublado-secret` | Additional environment variables to set | | jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | | jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | -| jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"0.5.0"` | Tag of image to use for JupyterHub | +| jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/nublado-jupyterhub"` | Image to use for JupyterHub | +| jupyterhub.hub.image.tag | string | `"4.0.0"` | Tag of image to use for 
JupyterHub | | jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | | jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | | jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | -| jupyterhub.ingress.enabled | bool | `false` | Whether to enable the default ingress | +| jupyterhub.ingress.enabled | bool | `false` | Whether to enable the default ingress. Should always be disabled since we install our own `GafaelfawrIngress` | | jupyterhub.prePuller.continuous.enabled | bool | `false` | Whether to run the JupyterHub continuous prepuller (the Nublado controller does its own prepulling) | | jupyterhub.prePuller.hook.enabled | bool | `false` | Whether to run the JupyterHub hook prepuller (the Nublado controller does its own prepulling) | | jupyterhub.proxy.chp.networkPolicy.interNamespaceAccessLabels | string | `"accept"` | Enable access to the proxy from other namespaces, since we put each user's lab environment in its own namespace | diff --git a/applications/nublado/templates/controller-deployment.yaml b/applications/nublado/templates/controller-deployment.yaml index 660b274c03..800fc2cb41 100644 --- a/applications/nublado/templates/controller-deployment.yaml +++ b/applications/nublado/templates/controller-deployment.yaml @@ -36,8 +36,6 @@ spec: image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.controller.image.pullPolicy | quote }} env: - - name: DOCKER_SECRET_NAME - value: "pull-secret" - name: EXTERNAL_INSTANCE_URL value: {{ .Values.global.baseUrl | quote }} {{- if .Values.controller.slackAlerts }} @@ -70,8 +68,6 @@ spec: {{- end }} - name: "podinfo" mountPath: "/etc/podinfo" - - name: 
"tmp" - mountPath: "/tmp" {{- with .Values.controller.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} @@ -106,5 +102,3 @@ spec: - path: "uid" fieldRef: fieldPath: "metadata.uid" - - name: "tmp" - emptyDir: {} diff --git a/applications/nublado/templates/controller-ingress-admin.yaml b/applications/nublado/templates/controller-ingress-admin.yaml index 3d797d9a5a..8e85999384 100644 --- a/applications/nublado/templates/controller-ingress-admin.yaml +++ b/applications/nublado/templates/controller-ingress-admin.yaml @@ -17,7 +17,7 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: {{ .Values.controller.config.safir.pathPrefix | quote }} + - path: {{ .Values.controller.config.pathPrefix | quote }} pathType: "Prefix" backend: service: diff --git a/applications/nublado/templates/controller-ingress-anonymous.yaml b/applications/nublado/templates/controller-ingress-anonymous.yaml index c41858c1e9..5148c0f253 100644 --- a/applications/nublado/templates/controller-ingress-anonymous.yaml +++ b/applications/nublado/templates/controller-ingress-anonymous.yaml @@ -16,21 +16,21 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: "{{ .Values.controller.config.safir.pathPrefix }}/openapi.json" + - path: "{{ .Values.controller.config.pathPrefix }}/openapi.json" pathType: "Exact" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/docs" + - path: "{{ .Values.controller.config.pathPrefix }}/docs" pathType: "Exact" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/redoc" + - path: "{{ .Values.controller.config.pathPrefix }}/redoc" pathType: "Exact" backend: service: diff --git a/applications/nublado/templates/controller-ingress-files.yaml b/applications/nublado/templates/controller-ingress-files.yaml index 77b125044c..03abf181c9 100644 --- 
a/applications/nublado/templates/controller-ingress-files.yaml +++ b/applications/nublado/templates/controller-ingress-files.yaml @@ -1,3 +1,4 @@ +{{- if .Values.controller.config.fileserver.enabled -}} apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: @@ -21,10 +22,11 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: "/files" + - path: {{ .Values.controller.config.fileserver.pathPrefix | quote }} pathType: "Prefix" backend: service: name: "nublado-controller" port: number: 80 +{{- end }} diff --git a/applications/nublado/templates/controller-ingress-user.yaml b/applications/nublado/templates/controller-ingress-user.yaml index 2f6894df1d..45549f4703 100644 --- a/applications/nublado/templates/controller-ingress-user.yaml +++ b/applications/nublado/templates/controller-ingress-user.yaml @@ -21,28 +21,28 @@ template: - host: {{ .Values.global.host | quote }} http: paths: - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/labs/.*/create" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/labs/.*/create" pathType: "ImplementationSpecific" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/labs/.*/events" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/labs/.*/events" pathType: "ImplementationSpecific" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/lab-form" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/lab-form" pathType: "Prefix" backend: service: name: "nublado-controller" port: number: 80 - - path: "{{ .Values.controller.config.safir.pathPrefix }}/spawner/v1/user-status" + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/user-status" pathType: "Exact" backend: service: diff --git a/applications/nublado/templates/hub-configmap.yaml 
b/applications/nublado/templates/hub-configmap.yaml index 22bf56bc36..c191e92b1b 100644 --- a/applications/nublado/templates/hub-configmap.yaml +++ b/applications/nublado/templates/hub-configmap.yaml @@ -6,11 +6,12 @@ metadata: {{- include "nublado.labels" . | nindent 4 }} data: 00_nublado.py: | - import rsp_restspawner + import rubin.nublado.authenticator + import rubin.nublado.spawner # Use our authenticator and spawner. - c.JupyterHub.authenticator_class = "rsp_restspawner.GafaelfawrAuthenticator" - c.JupyterHub.spawner_class = "rsp_restspawner.RSPRestSpawner" + c.JupyterHub.authenticator_class = "rubin.nublado.authenticator.GafaelfawrAuthenticator" + c.JupyterHub.spawner_class = "rubin.nublado.spawner.RSPRestSpawner" # Set internal Hub API URL. c.JupyterHub.hub_connect_url = ( @@ -39,4 +40,4 @@ data: c.Spawner.http_timeout = {{ .Values.hub.timeout.startup }} # Configure the URL to the lab controller. - c.RSPRestSpawner.controller_url = "{{ .Values.global.baseUrl }}{{ .Values.controller.config.safir.pathPrefix }}" + c.RSPRestSpawner.controller_url = "{{ .Values.global.baseUrl }}{{ .Values.controller.config.pathPrefix }}" diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index d026a96805..f2bedaf0d6 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -11,8 +11,6 @@ controller: cycle: 34 recommended_tag: "recommended_c0034" lab: - application: "nublado-users" - pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: @@ -23,61 +21,57 @@ controller: PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" - privileged: true - volumes: + - name: "initdir" + image: + repository: "ghcr.io/lsst-sqre/initdir" + tag: "0.0.4" + privileged: true + volumes: + - containerPath: "/home" + source: + serverPath: "/rsphome" + server: 
"nfs-rsphome.ls.lsst.org" + type: "nfs" + pullSecret: "pull-secret" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + volumes: - containerPath: "/home" - mode: "rw" source: + type: "nfs" serverPath: "/rsphome" server: "nfs-rsphome.ls.lsst.org" + - containerPath: "/project" + source: type: "nfs" - secrets: - - secretName: "nublado-lab-secret" - secretKey: "postgres-credentials.txt" - volumes: - - containerPath: "/home" - mode: "rw" - source: - type: "nfs" - serverPath: "/rsphome" - server: "nfs-rsphome.ls.lsst.org" - - containerPath: "/project" - mode: "rw" - source: - type: "nfs" - serverPath: "/project" - server: "nfs-project.ls.lsst.org" - - containerPath: "/scratch" - mode: "rw" - source: - type: "nfs" - serverPath: "/scratch" - server: "nfs-scratch.ls.lsst.org" - - containerPath: "/datasets" - mode: "rw" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "nfs-lsstdata.ls.lsst.org" - - containerPath: "/repo/LATISS" - mode: "rw" - source: - type: "nfs" - serverPath: "/auxtel/repo/LATISS" - server: "nfs-auxtel.ls.lsst.org" - - containerPath: "/net/obs-env" - mode: "rw" - source: - type: "nfs" - serverPath: "/obs-env" - server: "nfs-obsenv.ls.lsst.org" - - containerPath: "/data/lsstdata/BTS/auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata/BTS/auxtel" - server: "nfs-auxtel.ls.lsst.org" + serverPath: "/project" + server: "nfs-project.ls.lsst.org" + - containerPath: "/scratch" + source: + type: "nfs" + serverPath: "/scratch" + server: "nfs-scratch.ls.lsst.org" + - containerPath: "/datasets" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "nfs-lsstdata.ls.lsst.org" + - containerPath: "/repo/LATISS" + source: + type: "nfs" + serverPath: "/auxtel/repo/LATISS" + server: "nfs-auxtel.ls.lsst.org" + - containerPath: "/net/obs-env" + source: + type: "nfs" + serverPath: "/obs-env" + server: "nfs-obsenv.ls.lsst.org" + - containerPath: "/data/lsstdata/BTS/auxtel" + source: + type: "nfs" + serverPath: 
"/auxtel/lsstdata/BTS/auxtel" + server: "nfs-auxtel.ls.lsst.org" jupyterhub: cull: diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 5ec2b3ba5f..ae36dad104 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -2,11 +2,10 @@ controller: googleServiceAccount: "nublado-controller@science-platform-dev-7696.iam.gserviceaccount.com" slackAlerts: true config: - safir: - logLevel: "DEBUG" + logLevel: "DEBUG" fileserver: enabled: true - timeout: 43200 + idleTimeout: 43200 # 12 hours images: source: type: "google" @@ -14,12 +13,7 @@ controller: projectId: "rubin-shared-services-71ec" repository: "sciplat" image: "sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 lab: - application: "nublado-users" env: AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" @@ -29,16 +23,16 @@ controller: S3_ENDPOINT_URL: "https://storage.googleapis.com" initContainers: - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" + image: + repository: "ghcr.io/lsst-sqre/initdir" + tag: "0.0.4" privileged: true volumes: - containerPath: "/home" - mode: "rw" source: type: nfs serverPath: "/share1/home" server: "10.87.86.26" - secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" @@ -50,19 +44,16 @@ controller: secretKey: "postgres-credentials.txt" volumes: - containerPath: "/home" - mode: "rw" source: type: nfs serverPath: "/share1/home" server: "10.87.86.26" - containerPath: "/project" - mode: "rw" source: type: nfs serverPath: "/share1/project" server: "10.87.86.26" - containerPath: "/scratch" - mode: "rw" source: type: nfs serverPath: "/share1/scratch" @@ -71,7 +62,6 @@ jupyterhub: hub: db: url: "postgresql://nublado@cloud-sql-proxy.nublado/nublado" - upgrade: true hub: 
internalDatabase: false cloudsql: diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 0a27c25b73..bab0814f16 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -11,10 +11,6 @@ controller: projectId: "rubin-shared-services-71ec" repository: "sciplat" image: "sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 lab: env: AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -33,7 +29,6 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - sizes: small: cpu: 1.0 @@ -49,11 +44,12 @@ controller: memory: 32Gi initContainers: - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" + image: + repository: "ghcr.io/lsst-sqre/initdir" + tag: "0.0.4" privileged: true volumes: - containerPath: "/home" - mode: "rw" source: serverPath: "/share1/home" server: "10.22.240.130" @@ -69,19 +65,16 @@ controller: secretKey: "postgres-credentials.txt" volumes: - containerPath: "/home" - mode: "rw" source: serverPath: "/share1/home" server: "10.22.240.130" type: "nfs" - containerPath: "/project" - mode: "rw" source: serverPath: "/share1/project" server: "10.22.240.130" type: "nfs" - containerPath: "/scratch" - mode: "rw" source: serverPath: "/share1/scratch" server: "10.22.240.130" diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index d11d632476..6d7c86ef5b 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -9,10 +9,6 @@ controller: projectId: "rubin-shared-services-71ec" repository: "sciplat" image: "sciplat-lab" - recommendedTag: "recommended" - numReleases: 1 - numWeeklies: 2 - numDailies: 3 lab: env: AUTO_REPO_SPECS: 
"https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -24,7 +20,6 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - sizes: small: cpu: 1.0 @@ -37,11 +32,12 @@ controller: memory: 16Gi initContainers: - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" + image: + repository: "ghcr.io/lsst-sqre/initdir" + tag: "0.0.4" privileged: true volumes: - containerPath: "/home" - mode: "rw" source: serverPath: "/share1/home" server: "10.13.105.122" @@ -57,19 +53,16 @@ controller: secretKey: "postgres-credentials.txt" volumes: - containerPath: "/home" - mode: "rw" source: serverPath: "/share1/home" server: "10.13.105.122" type: "nfs" - containerPath: "/project" - mode: "rw" source: serverPath: "/share1/project" server: "10.13.105.122" type: "nfs" - containerPath: "/scratch" - mode: "rw" source: serverPath: "/share1/scratch" server: "10.13.105.122" diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 3c2f173345..98bdc3ea15 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -11,7 +11,6 @@ controller: cycle: 34 recommended_tag: "recommended_c0034" lab: - pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: @@ -22,81 +21,77 @@ controller: PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" - privileged: true - volumes: + - name: "initdir" + image: + repository: "ghcr.io/lsst-sqre/initdir" + tag: "0.0.4" + privileged: true + volumes: + - containerPath: "/home" + source: + serverPath: "/jhome" + server: "nfs1.cp.lsst.org" + type: "nfs" + pullSecret: "pull-secret" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + volumes: - containerPath: "/home" - mode: "rw" 
source: + type: "nfs" serverPath: "/jhome" server: "nfs1.cp.lsst.org" + - containerPath: "/project" + source: type: "nfs" - secrets: - - secretName: "nublado-lab-secret" - secretKey: "postgres-credentials.txt" - volumes: - - containerPath: "/home" - mode: "rw" - source: - type: "nfs" - serverPath: "/jhome" - server: "nfs1.cp.lsst.org" - - containerPath: "/project" - mode: "rw" - source: - type: "nfs" - serverPath: "/project" - server: "nfs1.cp.lsst.org" - - containerPath: "/scratch" - mode: "rw" - source: - type: "nfs" - serverPath: "/scratch" - server: "nfs1.cp.lsst.org" - - containerPath: "/repo/LATISS" - mode: "rw" - source: - type: "nfs" - serverPath: "/auxtel/repo/LATISS" - server: "nfs-auxtel.cp.lsst.org" - - containerPath: "/repo/LSSTComCam" - mode: "rw" - source: - type: "nfs" - serverPath: "/repo/LSSTComCam" - server: "comcam-archiver.cp.lsst.org" - - containerPath: "/net/obs-env" - mode: "rw" - source: - type: "nfs" - serverPath: "/obs-env" - server: "nfs-obsenv.cp.lsst.org" - - containerPath: "/readonly/lsstdata/other" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "nfs1.cp.lsst.org" - - containerPath: "/readonly/lsstdata/comcam" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "comcam-archiver.cp.lsst.org" - - containerPath: "/readonly/lsstdata/auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata" - server: "nfs-auxtel.cp.lsst.org" - - containerPath: "/data/lsstdata/base/comcam" - source: - type: "nfs" - serverPath: "/lsstdata/base/comcam" - server: "comcam-archiver.cp.lsst.org" - - containerPath: "/data/lsstdata/base/auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata/base/auxtel" - server: "nfs-auxtel.cp.lsst.org" + serverPath: "/project" + server: "nfs1.cp.lsst.org" + - containerPath: "/scratch" + source: + type: "nfs" + serverPath: "/scratch" + server: "nfs1.cp.lsst.org" + - containerPath: "/repo/LATISS" + source: + type: "nfs" + serverPath: "/auxtel/repo/LATISS" + server: "nfs-auxtel.cp.lsst.org" + - 
containerPath: "/repo/LSSTComCam" + source: + type: "nfs" + serverPath: "/repo/LSSTComCam" + server: "comcam-archiver.cp.lsst.org" + - containerPath: "/net/obs-env" + source: + type: "nfs" + serverPath: "/obs-env" + server: "nfs-obsenv.cp.lsst.org" + - containerPath: "/readonly/lsstdata/other" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "nfs1.cp.lsst.org" + - containerPath: "/readonly/lsstdata/comcam" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "comcam-archiver.cp.lsst.org" + - containerPath: "/readonly/lsstdata/auxtel" + source: + type: "nfs" + serverPath: "/auxtel/lsstdata" + server: "nfs-auxtel.cp.lsst.org" + - containerPath: "/data/lsstdata/base/comcam" + source: + type: "nfs" + serverPath: "/lsstdata/base/comcam" + server: "comcam-archiver.cp.lsst.org" + - containerPath: "/data/lsstdata/base/auxtel" + source: + type: "nfs" + serverPath: "/auxtel/lsstdata/base/auxtel" + server: "nfs-auxtel.cp.lsst.org" jupyterhub: cull: diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 5392070d26..6732f59a89 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -11,7 +11,6 @@ controller: cycle: 32 recommended_tag: "recommended_c0032" lab: - pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: @@ -22,72 +21,67 @@ controller: PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - - name: "initdir" - image: "ghcr.io/lsst-sqre/initdir:0.0.4" - privileged: true - volumes: + - name: "initdir" + image: + repository: "ghcr.io/lsst-sqre/initdir" + tag: "0.0.4" + privileged: true + volumes: + - containerPath: "/home" + source: + serverPath: "/jhome" + server: "nfs-jhome.tu.lsst.org" + type: "nfs" + pullSecret: "pull-secret" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + volumes: - 
containerPath: "/home" - mode: "rw" source: + type: "nfs" serverPath: "/jhome" server: "nfs-jhome.tu.lsst.org" + - containerPath: "/project" + source: type: "nfs" - secrets: - - secretName: "nublado-lab-secret" - secretKey: "postgres-credentials.txt" - volumes: - - containerPath: "/home" - mode: "rw" - source: - type: "nfs" - serverPath: "/jhome" - server: "nfs-jhome.tu.lsst.org" - - containerPath: "/project" - mode: "rw" - source: - type: "nfs" - serverPath: "/project" - server: "nfs-project.tu.lsst.org" - - containerPath: "/scratch" - mode: "rw" - source: - type: "nfs" - serverPath: "/scratch" - server: "nfs-scratch.tu.lsst.org" - - containerPath: "/datasets" - mode: "rw" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "nfs-lsstdata.tu.lsst.org" - - containerPath: "/repo/LATISS" - mode: "rw" - source: - type: "nfs" - serverPath: "/auxtel/repo/LATISS" - server: "nfs-auxtel.tu.lsst.org" - - containerPath: "/net/obs-env" - mode: "rw" - source: - type: "nfs" - serverPath: "/obs-env" - server: "nfs-obsenv.tu.lsst.org" - - containerPath: "/repo/LSSTComCam" - mode: "rw" - source: - type: "nfs" - serverPath: "/repo/LSSTComCam" - server: "comcam-archiver.tu.lsst.org" - - containerPath: "/data/lsstdata/TTS/auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata/TTS/auxtel" - server: "nfs-auxtel.tu.lsst.org" - - containerPath: "/data/lsstdata/TTS/comcam" - source: - type: "nfs" - serverPath: "/lsstdata/TTS/comcam" - server: "comcam-archiver.tu.lsst.org" + serverPath: "/project" + server: "nfs-project.tu.lsst.org" + - containerPath: "/scratch" + source: + type: "nfs" + serverPath: "/scratch" + server: "nfs-scratch.tu.lsst.org" + - containerPath: "/datasets" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "nfs-lsstdata.tu.lsst.org" + - containerPath: "/repo/LATISS" + source: + type: "nfs" + serverPath: "/auxtel/repo/LATISS" + server: "nfs-auxtel.tu.lsst.org" + - containerPath: "/net/obs-env" + source: + type: "nfs" + serverPath: "/obs-env" + 
server: "nfs-obsenv.tu.lsst.org" + - containerPath: "/repo/LSSTComCam" + source: + type: "nfs" + serverPath: "/repo/LSSTComCam" + server: "comcam-archiver.tu.lsst.org" + - containerPath: "/data/lsstdata/TTS/auxtel" + source: + type: "nfs" + serverPath: "/auxtel/lsstdata/TTS/auxtel" + server: "nfs-auxtel.tu.lsst.org" + - containerPath: "/data/lsstdata/TTS/comcam" + source: + type: "nfs" + serverPath: "/lsstdata/TTS/comcam" + server: "comcam-archiver.tu.lsst.org" jupyterhub: cull: diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 9c2cfd95c6..86b04f5b59 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -1,27 +1,14 @@ controller: config: - safir: - logLevel: "DEBUG" - fileserver: - enabled: false - timeout: 21600 - + logLevel: "DEBUG" images: source: type: "docker" registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" - recommendedTag: "recommended" - pin: ["w_2023_47"] - numReleases: 1 - numWeeklies: 2 - numDailies: 3 - + pin: + - "w_2023_47" lab: - pullSecret: "pull-secret" - - homedirSchema: "initialThenUsername" - env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -33,56 +20,53 @@ controller: http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" - - files: + homedirSchema: "initialThenUsername" + nss: # Add rubin_users group (there is not yet a simpler way to do this). 
- /etc/group: - contents: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - utmp:x:22: - tape:x:33: - utempter:x:35: - video:x:39: - ftp:x:50: - lock:x:54: - tss:x:59: - audio:x:63: - dbus:x:81: - screen:x:84: - nobody:x:99: - users:x:100: - systemd-journal:x:190: - systemd-network:x:192: - cgred:x:997: - ssh_keys:x:998: - input:x:999: - rubin_users:x:4085: - + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" - volumes: - containerPath: "/home" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -93,7 +77,6 @@ controller: storage: "1Gi" - containerPath: "/project" subPath: "g" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -103,7 +86,6 @@ controller: requests: storage: "1Gi" - containerPath: "/sdf/group/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -113,7 +95,6 @@ controller: requests: storage: "1Gi" - containerPath: "/sdf/data/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -123,7 +104,6 @@ controller: requests: storage: "1Gi" - containerPath: "/scratch" - mode: "rw" source: type: 
"persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -133,7 +113,6 @@ controller: requests: storage: "1Gi" - containerPath: "/fs/ddn/sdf/group/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -143,7 +122,6 @@ controller: requests: storage: "1Gi" - containerPath: "/fs/ddn/sdf/group/lsst" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" diff --git a/applications/nublado/values-usdfint.yaml b/applications/nublado/values-usdfint.yaml index 9c2cfd95c6..86b04f5b59 100644 --- a/applications/nublado/values-usdfint.yaml +++ b/applications/nublado/values-usdfint.yaml @@ -1,27 +1,14 @@ controller: config: - safir: - logLevel: "DEBUG" - fileserver: - enabled: false - timeout: 21600 - + logLevel: "DEBUG" images: source: type: "docker" registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" - recommendedTag: "recommended" - pin: ["w_2023_47"] - numReleases: 1 - numWeeklies: 2 - numDailies: 3 - + pin: + - "w_2023_47" lab: - pullSecret: "pull-secret" - - homedirSchema: "initialThenUsername" - env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -33,56 +20,53 @@ controller: http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" - - files: + homedirSchema: "initialThenUsername" + nss: # Add rubin_users group (there is not yet a simpler way to do this). 
- /etc/group: - contents: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - utmp:x:22: - tape:x:33: - utempter:x:35: - video:x:39: - ftp:x:50: - lock:x:54: - tss:x:59: - audio:x:63: - dbus:x:81: - screen:x:84: - nobody:x:99: - users:x:100: - systemd-journal:x:190: - systemd-network:x:192: - cgred:x:997: - ssh_keys:x:998: - input:x:999: - rubin_users:x:4085: - + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" - volumes: - containerPath: "/home" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -93,7 +77,6 @@ controller: storage: "1Gi" - containerPath: "/project" subPath: "g" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -103,7 +86,6 @@ controller: requests: storage: "1Gi" - containerPath: "/sdf/group/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -113,7 +95,6 @@ controller: requests: storage: "1Gi" - containerPath: "/sdf/data/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -123,7 +104,6 @@ controller: requests: storage: "1Gi" - containerPath: "/scratch" - mode: "rw" source: type: 
"persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -133,7 +113,6 @@ controller: requests: storage: "1Gi" - containerPath: "/fs/ddn/sdf/group/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -143,7 +122,6 @@ controller: requests: storage: "1Gi" - containerPath: "/fs/ddn/sdf/group/lsst" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index b0836ffa7b..c12900a045 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -1,27 +1,14 @@ controller: config: - safir: - logLevel: "DEBUG" - fileserver: - enabled: false - timeout: 21600 - + logLevel: "DEBUG" images: source: type: "docker" registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" - recommendedTag: "recommended" - pin: ["w_2023_47"] - numReleases: 1 - numWeeklies: 2 - numDailies: 3 - + pin: + - "w_2023_47" lab: - pullSecret: "pull-secret" - - homedirSchema: "initialThenUsername" - env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" @@ -33,56 +20,53 @@ controller: http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" - - files: + homedirSchema: "initialThenUsername" + nss: # Add rubin_users group (there is not yet a simpler way to do this). 
- /etc/group: - contents: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - utmp:x:22: - tape:x:33: - utempter:x:35: - video:x:39: - ftp:x:50: - lock:x:54: - tss:x:59: - audio:x:63: - dbus:x:81: - screen:x:84: - nobody:x:99: - users:x:100: - systemd-journal:x:190: - systemd-network:x:192: - cgred:x:997: - ssh_keys:x:998: - input:x:999: - rubin_users:x:4085: - + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" - volumes: - containerPath: "/home" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -93,7 +77,6 @@ controller: storage: "1Gi" - containerPath: "/project" subPath: "g" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -103,7 +86,6 @@ controller: requests: storage: "1Gi" - containerPath: "/sdf/group/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -113,7 +95,6 @@ controller: requests: storage: "1Gi" - containerPath: "/sdf/data/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -123,7 +104,6 @@ controller: requests: storage: "1Gi" - containerPath: "/scratch" - mode: "rw" source: type: 
"persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -133,7 +113,6 @@ controller: requests: storage: "1Gi" - containerPath: "/fs/ddn/sdf/group/rubin" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -143,7 +122,6 @@ controller: requests: storage: "1Gi" - containerPath: "/fs/ddn/sdf/group/lsst" - mode: "rw" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 15aeffd678..9c02f7d2cb 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -3,7 +3,7 @@ controller: image: # -- nublado image to use - repository: ghcr.io/lsst-sqre/jupyterlab-controller + repository: ghcr.io/lsst-sqre/nublado-controller # -- Pull policy for the nublado image pullPolicy: IfNotPresent @@ -43,25 +43,51 @@ controller: # Passed as YAML to the lab controller. config: + # -- Level of Python logging + logLevel: "INFO" + + # -- Path prefix that will be routed to the controller + pathPrefix: "/nublado" + fileserver: # -- Enable fileserver management enabled: false - # -- Image for fileserver container - image: ghcr.io/lsst-sqre/worblehat + # -- ArgcoCD application in which to collect user file servers + application: "fileservers" + + # -- Timeout to wait for Kubernetes to create file servers, in seconds + creationTimeout: 120 - # -- Tag for fileserver container - tag: 0.1.0 + # -- Timeout for idle user fileservers, in seconds + idleTimeout: 3600 - # -- Pull policy for fileserver container - pullPolicy: IfNotPresent + image: + # -- File server image to use + repository: ghcr.io/lsst-sqre/worblehat - # -- Timeout for user fileservers, in seconds - timeout: 3600 + # -- Pull policy for file server image + pullPolicy: IfNotPresent + + # -- Tag of file server image to use + tag: 0.1.0 # -- Namespace for user fileservers namespace: fileservers + # -- Path prefix for user file servers + pathPrefix: "/files" + + 
# -- Resource requests and limits for user file servers + # @default -- See `values.yaml` + resources: + requests: + cpu: 0.1 + memory: 1Gi + limits: + cpu: 1 + memory: 10Gi + images: # -- Source for prepulled images. For Docker, set `type` to `docker`, # `registry` to the hostname and `repository` to the name of the @@ -97,10 +123,10 @@ controller: aliasTags: [] lab: - # -- ArgcoCD application in which to collect user lab objects. - # @default -- See `values.yaml` + # -- ArgoCD application in which to collect user lab objects application: "nublado-users" - # -- Environment variables to set for every user lab. + + # -- Environment variables to set for every user lab # @default -- See `values.yaml` env: API_ROUTE: "/api" @@ -113,13 +139,109 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" # Also from group? TAP_ROUTE: "/api/tap" + # -- Extra annotations to add to user lab pods + extraAnnotations: {} + + # -- Files to be mounted as ConfigMaps inside the user lab pod. + # `contents` contains the file contents. Set `modify` to true to make + # the file writable in the pod. + # @default -- See `values.yaml` + files: + /opt/lsst/software/jupyterlab/lsst_dask.yml: | + # No longer used, but preserves compatibility with runlab.sh + dask_worker.yml: | + enabled: false + /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template: | + # Licensed under the Apache License, Version 2.0 (the "License"); + # You may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Authors: + # - Wen Guan, , 2020 + [common] + # if logdir is configured, idds will write to idds.log in this + # directory, else idds will go to stdout/stderr. With supervisord, + # it's good to write to stdout/stderr, then supervisord can manage + # and rotate logs. 
+ # logdir = /var/log/idds + loglevel = INFO + [rest] + host = https://iddsserver.cern.ch:443/idds + #url_prefix = /idds + #cacher_dir = /tmp + cacher_dir = /data/idds + # -- Containers run as init containers with each user pod. Each should - # set `name`, `image` (a Docker image reference), and `privileged`, and - # may contain `volumes` (similar to the main `volumes` - # configuration). If `privileged` is true, the container will run as - # root with `allowPrivilegeEscalation` true. Otherwise it will, run as - # UID 1000. - initcontainers: [] + # set `name`, `image` (a Docker image and pull policy specification), + # and `privileged`, and may contain `volumes` (similar to the main + # `volumes` configuration). If `privileged` is true, the container will + # run as root with all capabilities. Otherwise it will run as the user. + initContainers: [] + + # -- Prefix for namespaces for user labs. To this will be added a dash + # (`-`) and the user's username. + namespacePrefix: "nublado" + + nss: + # -- Base `/etc/passwd` file for lab containers + # @default -- See `values.yaml` + basePasswd: | + root:x:0:0:root:/root:/bin/bash + bin:x:1:1:bin:/bin:/sbin/nologin + daemon:x:2:2:daemon:/sbin:/sbin/nologin + adm:x:3:4:adm:/var/adm:/sbin/nologin + lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin + sync:x:5:0:sync:/sbin:/bin/sync + shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown + halt:x:7:0:halt:/sbin:/sbin/halt + mail:x:8:12:mail:/var/spool/mail:/sbin/nologin + operator:x:11:0:operator:/root:/sbin/nologin + games:x:12:100:games:/usr/games:/sbin/nologin + ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin + tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin + dbus:x:81:81:System message bus:/:/sbin/nologin + nobody:x:99:99:Nobody:/:/sbin/nologin + systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin + lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash + + # -- Base `/etc/group` file for lab containers + # @default -- See 
`values.yaml` + baseGroup: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: # -- Pull secret to use for labs. Set to the string `pull-secret` to use # the normal pull secret from Vault. @@ -135,10 +257,11 @@ controller: # -- Available lab sizes. Names must be chosen from `fine`, # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, # `gargantuan`, and `colossal` in that order. Each should specify the - # maximum CPU equivalents and memory. SI prefixes for memory are - # supported. + # maximum CPU equivalents and memory. SI suffixes for memory are + # supported. Sizes will be shown in the order defined here, and the + # first defined size will be the default. # @default -- See `values.yaml` (specifies `small`, `medium`, and - # `large`) + # `large` with `small` as the default) sizes: small: cpu: 1.0 @@ -150,113 +273,21 @@ controller: cpu: 4.0 memory: 16Gi + # -- How long to wait for Kubernetes to spawn a lab in seconds. This + # should generally be shorter than the spawn timeout set in JupyterHub. + spawnTimeout: 600 + # -- Volumes that should be mounted in lab pods. This supports NFS, - # HostPath, and PVC volume types (differentiated in source.type) + # HostPath, and PVC volume types (differentiated in source.type). volumes: [] # volumes: # - containerPath: "/project" - # mode: "rw" + # readOnly: true # source: # type: nfs # serverPath: "/share1/project" # server: "10.87.86.26" - # -- Files to be mounted as ConfigMaps inside the user lab pod. - # `contents` contains the file contents. 
Set `modify` to true to make - # the file writable in the pod. - # @default -- See `values.yaml` - files: - /etc/passwd: - modify: true - contents: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash - /etc/group: - modify: true - contents: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - utmp:x:22: - tape:x:33: - utempter:x:35: - video:x:39: - ftp:x:50: - lock:x:54: - tss:x:59: - audio:x:63: - dbus:x:81: - screen:x:84: - nobody:x:99: - users:x:100: - systemd-journal:x:190: - systemd-network:x:192: - cgred:x:997: - ssh_keys:x:998: - input:x:999: - /opt/lsst/software/jupyterlab/lsst_dask.yml: - modify: false - contents: | - # No longer used, but preserves compatibility with runlab.sh - dask_worker.yml: | - enabled: false - /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template: - modify: false - contents: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. 
- # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - safir: - # -- Level of Python logging - logLevel: "INFO" - - # -- Path prefix that will be routed to the controller - pathPrefix: "/nublado" - # JupyterHub configuration handled directly by this chart rather than by Zero # to JupyterHub. hub: @@ -301,10 +332,10 @@ jupyterhub: image: # -- Image to use for JupyterHub - name: ghcr.io/lsst-sqre/rsp-restspawner + name: "ghcr.io/lsst-sqre/nublado-jupyterhub" # -- Tag of image to use for JupyterHub - tag: 0.5.0 + tag: "4.0.0" # -- Resource limits and requests resources: @@ -421,7 +452,8 @@ jupyterhub: # repeat the global host name and manually configure authentication, we # instead install our own GafaelfawrIngress. ingress: - # -- Whether to enable the default ingress + # -- Whether to enable the default ingress. Should always be disabled + # since we install our own `GafaelfawrIngress` enabled: false cull: @@ -459,9 +491,7 @@ jupyterhub: cloudsql: # -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google - # Cloud. This will be run as a separate service, because shoehorning - # it into Zero to Jupyterhub's extraContainers looks messy, and it's - # not necessary that it be very performant. + # Cloud. enabled: false image: @@ -505,7 +535,6 @@ cloudsql: # -- Affinity rules for the Cloud SQL Proxy pod affinity: {} - # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. 
global: From de9416349a3510583fde8954bcf404ceab909881 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 14 Nov 2023 08:47:45 -0800 Subject: [PATCH 397/588] Switch to a list for Nublado lab sizes Argo CD or Helm doesn't preserve the ordering of dicts, so switch to a list for specifying lab size definitions. --- applications/nublado/README.md | 2 +- applications/nublado/values-idfint.yaml | 8 ++++---- applications/nublado/values-idfprod.yaml | 6 +++--- applications/nublado/values.yaml | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 7c9914915e..a3dd6842c4 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -52,7 +52,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | -| controller.config.lab.sizes | object | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | +| controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. 
Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | | controller.config.lab.spawnTimeout | int | `600` | How long to wait for Kubernetes to spawn a lab in seconds. This should generally be shorter than the spawn timeout set in JupyterHub. | | controller.config.lab.volumes | list | `[]` | Volumes that should be mounted in lab pods. This supports NFS, HostPath, and PVC volume types (differentiated in source.type). | | controller.config.logLevel | string | `"INFO"` | Level of Python logging | diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index bab0814f16..7eeea4925f 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -30,16 +30,16 @@ controller: CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" sizes: - small: + - size: small cpu: 1.0 memory: 4Gi - medium: + - size: medium cpu: 2.0 memory: 8Gi - large: + - size: large cpu: 4.0 memory: 16Gi - huge: + - size: huge cpu: 8.0 memory: 32Gi initContainers: diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 6d7c86ef5b..59e886fd34 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -21,13 +21,13 @@ controller: CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" sizes: - small: + - size: small cpu: 1.0 memory: 4Gi - medium: + - size: medium cpu: 2.0 memory: 8Gi - large: + - size: large cpu: 4.0 memory: 16Gi initContainers: diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 9c02f7d2cb..b3091fba9d 100644 --- 
a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -254,7 +254,7 @@ controller: # that key. secrets: [] - # -- Available lab sizes. Names must be chosen from `fine`, + # -- Available lab sizes. Sizes must be chosen from `fine`, # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, # `gargantuan`, and `colossal` in that order. Each should specify the # maximum CPU equivalents and memory. SI suffixes for memory are @@ -263,13 +263,13 @@ controller: # @default -- See `values.yaml` (specifies `small`, `medium`, and # `large` with `small` as the default) sizes: - small: + - size: small cpu: 1.0 memory: 4Gi - medium: + - size: medium cpu: 2.0 memory: 8Gi - large: + - size: large cpu: 4.0 memory: 16Gi From 1cb4dad10ebbcf2f58402aa57db160572067f701 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 17 Nov 2023 09:13:51 -0800 Subject: [PATCH 398/588] Update Nublado configuration for volume syntax Update the configuration for the Nubaldo application for the new separation between volumes and volume mounts. 
--- applications/nublado/README.md | 5 +- applications/nublado/values-base.yaml | 36 ++++++++----- applications/nublado/values-idfdev.yaml | 20 ++++--- applications/nublado/values-idfint.yaml | 20 ++++--- applications/nublado/values-idfprod.yaml | 20 ++++--- applications/nublado/values-summit.yaml | 52 +++++++++++++------ .../nublado/values-tucson-teststand.yaml | 44 +++++++++++----- applications/nublado/values-usdfdev.yaml | 38 ++++++++------ applications/nublado/values-usdfint.yaml | 38 ++++++++------ applications/nublado/values-usdfprod.yaml | 38 ++++++++------ applications/nublado/values.yaml | 22 +++++--- 11 files changed, 210 insertions(+), 123 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index a3dd6842c4..00d77b49d8 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -46,7 +46,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab | | controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | -| controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | +| controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. 
Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumeMounts` (similar to the main `volumeMountss` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | | controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. | | controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | | controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | @@ -54,7 +54,8 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | | controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | | controller.config.lab.spawnTimeout | int | `600` | How long to wait for Kubernetes to spawn a lab in seconds. This should generally be shorter than the spawn timeout set in JupyterHub. | -| controller.config.lab.volumes | list | `[]` | Volumes that should be mounted in lab pods. This supports NFS, HostPath, and PVC volume types (differentiated in source.type). | +| controller.config.lab.volumeMounts | list | `[]` | Volumes that should be mounted in lab pods. 
| +| controller.config.lab.volumes | list | `[]` | Volumes that will be in lab pods or init containers. This supports NFS, HostPath, and PVC volume types (differentiated in source.type). | | controller.config.logLevel | string | `"INFO"` | Level of Python logging | | controller.config.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | | controller.googleServiceAccount | string | None, must be set when using Google Artifact Registry | If Google Artifact Registry is used as the image source, the Google service account that has an IAM binding to the `nublado-controller` Kubernetes service account and has the Artifact Registry reader role | diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index f2bedaf0d6..c1fda37419 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -26,52 +26,64 @@ controller: repository: "ghcr.io/lsst-sqre/initdir" tag: "0.0.4" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - source: - serverPath: "/rsphome" - server: "nfs-rsphome.ls.lsst.org" - type: "nfs" + volumeName: "home" pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "home" source: type: "nfs" serverPath: "/rsphome" server: "nfs-rsphome.ls.lsst.org" - - containerPath: "/project" + - name: "project" source: type: "nfs" serverPath: "/project" server: "nfs-project.ls.lsst.org" - - containerPath: "/scratch" + - name: "scratch" source: type: "nfs" serverPath: "/scratch" server: "nfs-scratch.ls.lsst.org" - - containerPath: "/datasets" + - name: "datasets" source: type: "nfs" serverPath: "/lsstdata" server: "nfs-lsstdata.ls.lsst.org" - - containerPath: "/repo/LATISS" + - name: "latiss" source: type: "nfs" serverPath: "/auxtel/repo/LATISS" server: "nfs-auxtel.ls.lsst.org" - - containerPath: "/net/obs-env" + - name: "obs-env" source: type: "nfs" 
serverPath: "/obs-env" server: "nfs-obsenv.ls.lsst.org" - - containerPath: "/data/lsstdata/BTS/auxtel" + - name: "auxtel" source: type: "nfs" serverPath: "/auxtel/lsstdata/BTS/auxtel" server: "nfs-auxtel.ls.lsst.org" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" + - containerPath: "/datasets" + volumeName: "datasets" + - containerPath: "/repo/LATISS" + volumeName: "latiss" + - containerPath: "/net/obs-env" + volumeName: "obs-env" + - containerPath: "/data/lsstdata/BTS/auxtel" + volumeName: "auxtel" jupyterhub: cull: diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index ae36dad104..970cb6bc89 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -27,12 +27,9 @@ controller: repository: "ghcr.io/lsst-sqre/initdir" tag: "0.0.4" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - source: - type: nfs - serverPath: "/share1/home" - server: "10.87.86.26" + volumeName: "home" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" @@ -43,21 +40,28 @@ controller: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "home" source: type: nfs serverPath: "/share1/home" server: "10.87.86.26" - - containerPath: "/project" + - name: "project" source: type: nfs serverPath: "/share1/project" server: "10.87.86.26" - - containerPath: "/scratch" + - name: "scratch" source: type: nfs serverPath: "/share1/scratch" server: "10.87.86.26" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" jupyterhub: hub: db: diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 7eeea4925f..ce4c1c732b 100644 --- 
a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -48,12 +48,9 @@ controller: repository: "ghcr.io/lsst-sqre/initdir" tag: "0.0.4" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - source: - serverPath: "/share1/home" - server: "10.22.240.130" - type: "nfs" + volumeName: "home" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" @@ -64,21 +61,28 @@ controller: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "home" source: serverPath: "/share1/home" server: "10.22.240.130" type: "nfs" - - containerPath: "/project" + - name: "project" source: serverPath: "/share1/project" server: "10.22.240.130" type: "nfs" - - containerPath: "/scratch" + - name: "scratch" source: serverPath: "/share1/scratch" server: "10.22.240.130" type: "nfs" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" jupyterhub: hub: diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 59e886fd34..76aeaedea3 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -36,12 +36,9 @@ controller: repository: "ghcr.io/lsst-sqre/initdir" tag: "0.0.4" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - source: - serverPath: "/share1/home" - server: "10.13.105.122" - type: "nfs" + volumeName: "home" secrets: - secretName: "nublado-lab-secret" secretKey: "aws-credentials.ini" @@ -52,21 +49,28 @@ controller: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "home" source: serverPath: "/share1/home" server: "10.13.105.122" type: "nfs" - - containerPath: "/project" + - name: "project" source: serverPath: "/share1/project" server: "10.13.105.122" type: "nfs" - - 
containerPath: "/scratch" + - name: "scratch" source: serverPath: "/share1/scratch" server: "10.13.105.122" type: "nfs" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" jupyterhub: hub: diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 98bdc3ea15..2fbb0e3911 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -26,72 +26,92 @@ controller: repository: "ghcr.io/lsst-sqre/initdir" tag: "0.0.4" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - source: - serverPath: "/jhome" - server: "nfs1.cp.lsst.org" - type: "nfs" + volumeName: "home" pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "home" source: type: "nfs" serverPath: "/jhome" server: "nfs1.cp.lsst.org" - - containerPath: "/project" + - name: "project" source: type: "nfs" serverPath: "/project" server: "nfs1.cp.lsst.org" - - containerPath: "/scratch" + - name: "scratch" source: type: "nfs" serverPath: "/scratch" server: "nfs1.cp.lsst.org" - - containerPath: "/repo/LATISS" + - name: "latiss" source: type: "nfs" serverPath: "/auxtel/repo/LATISS" server: "nfs-auxtel.cp.lsst.org" - - containerPath: "/repo/LSSTComCam" + - name: "lsstcomcam" source: type: "nfs" serverPath: "/repo/LSSTComCam" server: "comcam-archiver.cp.lsst.org" - - containerPath: "/net/obs-env" + - name: "obs-env" source: type: "nfs" serverPath: "/obs-env" server: "nfs-obsenv.cp.lsst.org" - - containerPath: "/readonly/lsstdata/other" + - name: "lsstdata-other" source: type: "nfs" serverPath: "/lsstdata" server: "nfs1.cp.lsst.org" - - containerPath: "/readonly/lsstdata/comcam" + - name: "lsstdata-comcam" source: type: "nfs" serverPath: "/lsstdata" server: "comcam-archiver.cp.lsst.org" - - containerPath: 
"/readonly/lsstdata/auxtel" + - name: "lsstdata-auxtel" source: type: "nfs" serverPath: "/auxtel/lsstdata" server: "nfs-auxtel.cp.lsst.org" - - containerPath: "/data/lsstdata/base/comcam" + - name: "lsstdata-base-comcam" source: type: "nfs" serverPath: "/lsstdata/base/comcam" server: "comcam-archiver.cp.lsst.org" - - containerPath: "/data/lsstdata/base/auxtel" + - name: "lsstdata-base-auxtel" source: type: "nfs" serverPath: "/auxtel/lsstdata/base/auxtel" server: "nfs-auxtel.cp.lsst.org" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" + - containerPath: "/repo/LATISS" + volumeName: "latiss" + - containerPath: "/repo/LSSTComCam" + volumeName: "lsstcomcam" + - containerPath: "/net/obs-env" + volumeName: "obs-env" + - containerPath: "/readonly/lsstdata/other" + volumeName: "lsstdata-other" + - containerPath: "/readonly/lsstdata/comcam" + volumeName: "lsstdata-comcam" + - containerPath: "/readonly/lsstdata/auxtel" + volumeName: "lsstdata-auxtel" + - containerPath: "/data/lsstdata/base/comcam" + volumeName: "lsstdata-base-comcam" + - containerPath: "/data/lsstdata/base/auxtel" + volumeName: "lsstdata-base-auxtel" jupyterhub: cull: diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 6732f59a89..0652ade139 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -26,62 +26,78 @@ controller: repository: "ghcr.io/lsst-sqre/initdir" tag: "0.0.4" privileged: true - volumes: + volumeMounts: - containerPath: "/home" - source: - serverPath: "/jhome" - server: "nfs-jhome.tu.lsst.org" - type: "nfs" + volumeName: "home" pullSecret: "pull-secret" secrets: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "home" source: type: "nfs" serverPath: "/jhome" server: 
"nfs-jhome.tu.lsst.org" - - containerPath: "/project" + - name: "project" source: type: "nfs" serverPath: "/project" server: "nfs-project.tu.lsst.org" - - containerPath: "/scratch" + - name: "scratch" source: type: "nfs" serverPath: "/scratch" server: "nfs-scratch.tu.lsst.org" - - containerPath: "/datasets" + - name: "datasets" source: type: "nfs" serverPath: "/lsstdata" server: "nfs-lsstdata.tu.lsst.org" - - containerPath: "/repo/LATISS" + - name: "latiss" source: type: "nfs" serverPath: "/auxtel/repo/LATISS" server: "nfs-auxtel.tu.lsst.org" - - containerPath: "/net/obs-env" + - name: "obs-env" source: type: "nfs" serverPath: "/obs-env" server: "nfs-obsenv.tu.lsst.org" - - containerPath: "/repo/LSSTComCam" + - name: "lsstcomcan" source: type: "nfs" serverPath: "/repo/LSSTComCam" server: "comcam-archiver.tu.lsst.org" - - containerPath: "/data/lsstdata/TTS/auxtel" + - name: "auxtel" source: type: "nfs" serverPath: "/auxtel/lsstdata/TTS/auxtel" server: "nfs-auxtel.tu.lsst.org" - - containerPath: "/data/lsstdata/TTS/comcam" + - name: "comcam" source: type: "nfs" serverPath: "/lsstdata/TTS/comcam" server: "comcam-archiver.tu.lsst.org" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" + - containerPath: "/datasets" + volumeName: "datasets" + - containerPath: "/repo/LATISS" + volumeName: "latiss" + - containerPath: "/net/obs-env" + volumeName: "obs-env" + - containerPath: "/repo/LSSTComCam" + volumeName: "lsstcomcam" + - containerPath: "/data/lsstdata/TTS/auxtel" + volumeName: "auxtel" + - containerPath: "/data/lsstdata/TTS/comcam" + volumeName: "comcam" jupyterhub: cull: diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 86b04f5b59..cab1342b14 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -66,7 +66,7 @@ controller: - secretName: 
"nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "sdf-home" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -75,17 +75,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/project" - subPath: "g" - source: - type: "persistentVolumeClaim" - storageClassName: "sdf-group-rubin" - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: "1Gi" - - containerPath: "/sdf/group/rubin" + - name: "sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -94,7 +84,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/sdf/data/rubin" + - name: "sdf-data-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -103,7 +93,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/scratch" + - name: "sdf-scratch" source: type: "persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -112,7 +102,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/rubin" + - name: "fs-ddn-sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -121,7 +111,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/lsst" + - name: "fs-ddn-sdf-group-lsst" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" @@ -130,6 +120,22 @@ controller: resources: requests: storage: "1Gi" + volumeMounts: + - containerPath: "/home" + volumeName: "sdf-home" + - containerPath: "/project" + subPath: "g" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/group/rubin" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/data/rubin" + volumeName: "sdf-data-rubin" + - containerPath: "/scratch" + volumeName: "sdf-scratch" + - containerPath: "/fs/ddn/sdf/group/rubin" + volumeName: "fs-ddn-sdf-group-rubin" + - containerPath: "/fs/ddn/sdf/group/lsst" + volumeName: 
"fs-ddn-sdf-group-lsst" proxy: ingress: diff --git a/applications/nublado/values-usdfint.yaml b/applications/nublado/values-usdfint.yaml index 86b04f5b59..cab1342b14 100644 --- a/applications/nublado/values-usdfint.yaml +++ b/applications/nublado/values-usdfint.yaml @@ -66,7 +66,7 @@ controller: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "sdf-home" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -75,17 +75,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/project" - subPath: "g" - source: - type: "persistentVolumeClaim" - storageClassName: "sdf-group-rubin" - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: "1Gi" - - containerPath: "/sdf/group/rubin" + - name: "sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -94,7 +84,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/sdf/data/rubin" + - name: "sdf-data-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -103,7 +93,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/scratch" + - name: "sdf-scratch" source: type: "persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -112,7 +102,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/rubin" + - name: "fs-ddn-sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -121,7 +111,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/lsst" + - name: "fs-ddn-sdf-group-lsst" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" @@ -130,6 +120,22 @@ controller: resources: requests: storage: "1Gi" + volumeMounts: + - containerPath: "/home" + volumeName: "sdf-home" + - containerPath: "/project" + subPath: "g" + volumeName: "sdf-group-rubin" + - containerPath: 
"/sdf/group/rubin" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/data/rubin" + volumeName: "sdf-data-rubin" + - containerPath: "/scratch" + volumeName: "sdf-scratch" + - containerPath: "/fs/ddn/sdf/group/rubin" + volumeName: "fs-ddn-sdf-group-rubin" + - containerPath: "/fs/ddn/sdf/group/lsst" + volumeName: "fs-ddn-sdf-group-lsst" proxy: ingress: diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index c12900a045..e0c2309934 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -66,7 +66,7 @@ controller: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: - - containerPath: "/home" + - name: "sdf-home" source: type: "persistentVolumeClaim" storageClassName: "sdf-home" @@ -75,17 +75,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/project" - subPath: "g" - source: - type: "persistentVolumeClaim" - storageClassName: "sdf-group-rubin" - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: "1Gi" - - containerPath: "/sdf/group/rubin" + - name: "sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-group-rubin" @@ -94,7 +84,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/sdf/data/rubin" + - name: "sdf-data-rubin" source: type: "persistentVolumeClaim" storageClassName: "sdf-data-rubin" @@ -103,7 +93,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/scratch" + - name: "sdf-scratch" source: type: "persistentVolumeClaim" storageClassName: "sdf-scratch" @@ -112,7 +102,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/rubin" + - name: "fs-ddn-sdf-group-rubin" source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-rubin" @@ -121,7 +111,7 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/ddn/sdf/group/lsst" + - name: "fs-ddn-sdf-group-lsst" 
source: type: "persistentVolumeClaim" storageClassName: "fs-ddn-sdf-group-lsst" @@ -130,6 +120,22 @@ controller: resources: requests: storage: "1Gi" + volumeMounts: + - containerPath: "/home" + volumeName: "sdf-home" + - containerPath: "/project" + subPath: "g" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/group/rubin" + volumeName: "sdf-group-rubin" + - containerPath: "/sdf/data/rubin" + volumeName: "sdf-data-rubin" + - containerPath: "/scratch" + volumeName: "sdf-scratch" + - containerPath: "/fs/ddn/sdf/group/rubin" + volumeName: "fs-ddn-sdf-group-rubin" + - containerPath: "/fs/ddn/sdf/group/lsst" + volumeName: "fs-ddn-sdf-group-lsst" proxy: ingress: diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index b3091fba9d..cc3a60a199 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -174,9 +174,10 @@ controller: # -- Containers run as init containers with each user pod. Each should # set `name`, `image` (a Docker image and pull policy specification), - # and `privileged`, and may contain `volumes` (similar to the main - # `volumes` configuration). If `privileged` is true, the container will - # run as root with all capabilities. Otherwise it will run as the user. + # and `privileged`, and may contain `volumeMounts` (similar to the main + # `volumeMountss` configuration). If `privileged` is true, the container + # will run as root with all capabilities. Otherwise it will run as the + # user. initContainers: [] # -- Prefix for namespaces for user labs. To this will be added a dash @@ -277,17 +278,24 @@ controller: # should generally be shorter than the spawn timeout set in JupyterHub. spawnTimeout: 600 - # -- Volumes that should be mounted in lab pods. This supports NFS, - # HostPath, and PVC volume types (differentiated in source.type). + # -- Volumes that will be in lab pods or init containers. This supports + # NFS, HostPath, and PVC volume types (differentiated in source.type). 
volumes: [] # volumes: - # - containerPath: "/project" - # readOnly: true + # - name: "project" # source: # type: nfs + # readOnly: true # serverPath: "/share1/project" # server: "10.87.86.26" + # -- Volumes that should be mounted in lab pods. + volumeMounts: [] + # volumeMounts: + # - containerPath: "/project" + # readOnly: true + # volumeName: "project" + # JupyterHub configuration handled directly by this chart rather than by Zero # to JupyterHub. hub: From 8a88f2be9022c930c79becc090909b5bff18c57e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 20 Nov 2023 15:46:34 -0800 Subject: [PATCH 399/588] Document deleteTimeout for Nubaldo config File servers and labs now have a configurable deletion timeout. --- applications/nublado/README.md | 2 ++ applications/nublado/values.yaml | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 00d77b49d8..45906f166b 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -26,6 +26,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.affinity | object | `{}` | Affinity rules for the lab controller pod | | controller.config.fileserver.application | string | `"fileservers"` | ArgcoCD application in which to collect user file servers | | controller.config.fileserver.creationTimeout | int | `120` | Timeout to wait for Kubernetes to create file servers, in seconds | +| controller.config.fileserver.deleteTimeout | int | 60 (1 minute) | Timeout for deleting a user's file server from Kubernetes, in seconds | | controller.config.fileserver.enabled | bool | `false` | Enable fileserver management | | controller.config.fileserver.idleTimeout | int | `3600` | Timeout for idle user fileservers, in seconds | | controller.config.fileserver.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for file server image | @@ -43,6 +44,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | 
controller.config.images.recommendedTag | string | `"recommended"` | Tag marking the recommended image (shown first in the menu) | | controller.config.images.source | object | None, must be specified | Source for prepulled images. For Docker, set `type` to `docker`, `registry` to the hostname and `repository` to the name of the repository. For Google Artifact Repository, set `type` to `google`, `location` to the region, `projectId` to the Google project, `repository` to the name of the repository, and `image` to the name of the image. | | controller.config.lab.application | string | `"nublado-users"` | ArgoCD application in which to collect user lab objects | +| controller.config.lab.deleteTimeout | int | 60 (1 minute) | Timeout for deleting a user's lab resources from Kubernetes in seconds | | controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab | | controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. 
| diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index cc3a60a199..759a941eb3 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -59,6 +59,11 @@ controller: # -- Timeout to wait for Kubernetes to create file servers, in seconds creationTimeout: 120 + # -- Timeout for deleting a user's file server from Kubernetes, in + # seconds + # @default -- 60 (1 minute) + deleteTimeout: 60 + # -- Timeout for idle user fileservers, in seconds idleTimeout: 3600 @@ -126,6 +131,11 @@ controller: # -- ArgoCD application in which to collect user lab objects application: "nublado-users" + # -- Timeout for deleting a user's lab resources from Kubernetes in + # seconds + # @default -- 60 (1 minute) + deleteTimeout: 60 + # -- Environment variables to set for every user lab # @default -- See `values.yaml` env: From 59046d823833a7ca83aa377e7d4d3dd4ff05f823 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 30 Nov 2023 17:05:10 -0800 Subject: [PATCH 400/588] Nublado configuration cleanups Clean up various bits of configuration found while working on the Nublado manual. - Note that it's not clear that we need to import the authenticator and spawner modules, but it can't hurt. - Use the Nublado controller lab spawn timeout as the JupyterHub lab spawn timeout as well, rather than having two separate configuration settings. - Remove possibly-outdated information from the JupyterHub ConfigMap. - Add default resource limits for the Nublado controller. - Add affinity, nodeSelector, and tolerations settings for user labs and user file servers to values.yaml. - Delete the hopefully-now-obsolete singleuser Zero to JupyterHub settings. - Quote all strings in the Nublado values.yaml. 
--- applications/nublado/README.md | 36 +++--- .../nublado/templates/hub-configmap.yaml | 13 ++- applications/nublado/values.yaml | 106 ++++++++++-------- 3 files changed, 84 insertions(+), 71 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 45906f166b..37f39c4f31 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -23,18 +23,21 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy pod | | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | -| controller.affinity | object | `{}` | Affinity rules for the lab controller pod | -| controller.config.fileserver.application | string | `"fileservers"` | ArgcoCD application in which to collect user file servers | +| controller.affinity | object | `{}` | Affinity rules for the Nublado controller | +| controller.config.fileserver.affinity | object | `{}` | Affinity rules for user file server pods | +| controller.config.fileserver.application | string | `"fileservers"` | Argo CD application in which to collect user file servers | | controller.config.fileserver.creationTimeout | int | `120` | Timeout to wait for Kubernetes to create file servers, in seconds | | controller.config.fileserver.deleteTimeout | int | 60 (1 minute) | Timeout for deleting a user's file server from Kubernetes, in seconds | -| controller.config.fileserver.enabled | bool | `false` | Enable fileserver management | +| controller.config.fileserver.enabled | bool | `false` | Enable user file servers | | controller.config.fileserver.idleTimeout | int | `3600` | Timeout for idle user fileservers, in seconds | | 
controller.config.fileserver.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for file server image | | controller.config.fileserver.image.repository | string | `"ghcr.io/lsst-sqre/worblehat"` | File server image to use | | controller.config.fileserver.image.tag | string | `"0.1.0"` | Tag of file server image to use | -| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user fileservers | +| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user file servers | +| controller.config.fileserver.nodeSelector | object | `{}` | Node selector rules for user file server pods | | controller.config.fileserver.pathPrefix | string | `"/files"` | Path prefix for user file servers | | controller.config.fileserver.resources | object | See `values.yaml` | Resource requests and limits for user file servers | +| controller.config.fileserver.tolerations | list | `[]` | Tolerations for user file server pods | | controller.config.images.aliasTags | list | `[]` | Additional tags besides `recommendedTag` that should be recognized as aliases. | | controller.config.images.cycle | string | `nil` | Restrict images to this SAL cycle, if given. | | controller.config.images.numDailies | int | `3` | Number of most-recent dailies to prepull. | @@ -43,38 +46,40 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.images.pin | list | `[]` | List of additional image tags to prepull. Listing the image tagged as recommended here is recommended when using a Docker image source to ensure its name can be expanded properly in the menu. | | controller.config.images.recommendedTag | string | `"recommended"` | Tag marking the recommended image (shown first in the menu) | | controller.config.images.source | object | None, must be specified | Source for prepulled images. For Docker, set `type` to `docker`, `registry` to the hostname and `repository` to the name of the repository. 
For Google Artifact Repository, set `type` to `google`, `location` to the region, `projectId` to the Google project, `repository` to the name of the repository, and `image` to the name of the image. | -| controller.config.lab.application | string | `"nublado-users"` | ArgoCD application in which to collect user lab objects | +| controller.config.lab.affinity | object | `{}` | Affinity rules for user lab pods | +| controller.config.lab.application | string | `"nublado-users"` | Argo CD application in which to collect user lab objects | | controller.config.lab.deleteTimeout | int | 60 (1 minute) | Timeout for deleting a user's lab resources from Kubernetes in seconds | | controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab | | controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | | controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumeMounts` (similar to the main `volumeMountss` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | | controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. 
| +| controller.config.lab.nodeSelector | object | `{}` | Node selector rules for user lab pods | | controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | | controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | | controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | | controller.config.lab.spawnTimeout | int | `600` | How long to wait for Kubernetes to spawn a lab in seconds. This should generally be shorter than the spawn timeout set in JupyterHub. | +| controller.config.lab.tolerations | list | `[]` | Tolerations for user lab pods | | controller.config.lab.volumeMounts | list | `[]` | Volumes that should be mounted in lab pods. | | controller.config.lab.volumes | list | `[]` | Volumes that will be in lab pods or init containers. This supports NFS, HostPath, and PVC volume types (differentiated in source.type). 
| | controller.config.logLevel | string | `"INFO"` | Level of Python logging | | controller.config.pathPrefix | string | `"/nublado"` | Path prefix that will be routed to the controller | | controller.googleServiceAccount | string | None, must be set when using Google Artifact Registry | If Google Artifact Registry is used as the image source, the Google service account that has an IAM binding to the `nublado-controller` Kubernetes service account and has the Artifact Registry reader role | -| controller.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the nublado image | -| controller.image.repository | string | `"ghcr.io/lsst-sqre/nublado-controller"` | nublado image to use | -| controller.image.tag | string | The appVersion of the chart | Tag of nublado image to use | -| controller.ingress.annotations | object | `{}` | Additional annotations to add for the lab controller pod ingress | -| controller.nodeSelector | object | `{}` | Node selector rules for the lab controller pod | -| controller.podAnnotations | object | `{}` | Annotations for the lab controller pod | -| controller.resources | object | `{}` | Resource limits and requests for the lab controller pod | +| controller.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the controller image | +| controller.image.repository | string | `"ghcr.io/lsst-sqre/nublado-controller"` | Nublado controller image to use | +| controller.image.tag | string | The appVersion of the chart | Tag of Nublado controller image to use | +| controller.ingress.annotations | object | `{}` | Additional annotations to add for the Nublado controller ingress | +| controller.nodeSelector | object | `{}` | Node selector rules for the Nublado controller | +| controller.podAnnotations | object | `{}` | Annotations for the Nublado controller | +| controller.resources | object | See `values.yaml` | Resource limits and requests for the Nublado controller | | controller.slackAlerts | bool | `false` | Whether to enable 
Slack alerts. If set to true, `slack_webhook` must be set in the corresponding Nublado Vault secret. | -| controller.tolerations | list | `[]` | Tolerations for the lab controller pod | +| controller.tolerations | list | `[]` | Tolerations for the Nublado controller | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | hub.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | -| hub.timeout.spawn | int | `600` | Timeout for the Kubernetes spawn process in seconds. (Allow long enough to pull uncached images if needed.) | | hub.timeout.startup | int | `90` | Timeout for JupyterLab to start. Currently this sometimes takes over 60 seconds for reasons we don't understand. | | jupyterhub.cull.enabled | bool | `true` | Enable the lab culler. | | jupyterhub.cull.every | int | 600 (10 minutes) | How frequently to check for idle labs in seconds | @@ -104,8 +109,5 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.proxy.service.type | string | `"ClusterIP"` | Only expose the proxy to the cluster, overriding the default of exposing the proxy directly to the Internet | | jupyterhub.scheduling.userPlaceholder.enabled | bool | `false` | Whether to spawn placeholder pods representing fake users to force autoscaling in advance of running out of resources | | jupyterhub.scheduling.userScheduler.enabled | bool | `false` | Whether the user scheduler should be enabled | -| jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | Whether to configure iptables to block cloud metadata endpoints. 
This is unnecessary in our environments (they are blocked by cluster configuration) and thus is disabled to reduce complexity. | -| jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | Start command for labs | -| jupyterhub.singleuser.defaultUrl | string | `"/lab"` | Default URL prefix for lab endpoints | | proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | | secrets.templateSecrets | bool | `false` | Whether to use the new secrets management mechanism. If enabled, the Vault nublado secret will be split into a nublado secret for JupyterHub and a nublado-lab-secret secret used as a source for secret values for the user's lab. | diff --git a/applications/nublado/templates/hub-configmap.yaml b/applications/nublado/templates/hub-configmap.yaml index c191e92b1b..2381cae6e1 100644 --- a/applications/nublado/templates/hub-configmap.yaml +++ b/applications/nublado/templates/hub-configmap.yaml @@ -6,6 +6,9 @@ metadata: {{- include "nublado.labels" . | nindent 4 }} data: 00_nublado.py: | + # It's not clear whether explicitly importing the spawner and + # authenticator modules is required or if JupyterHub will do this for us + # anyway, but it can't hurt. import rubin.nublado.authenticator import rubin.nublado.spawner @@ -31,12 +34,12 @@ data: # Use JupyterLab by default. c.Spawner.default_url = "/lab" - # Allow ten minutes for the lab to spawn in case it needs to be pulled. - c.Spawner.start_timeout = {{ .Values.hub.timeout.spawn }} + # How long to wait for Kubernetes to start the lab. This must match the + # corresponding setting in the Nublado controller. + c.Spawner.start_timeout = {{ .Values.controller.config.lab.spawnTimeout }} - # Allow 90 seconds for JupyterLab to start. For reasons we do not yet - # understand, it is often glacially slow and sometimes takes over 60 - # seconds. 
+ # How long to wait for the JupyterLab process to respond to network + # connections after the pod has started running. c.Spawner.http_timeout = {{ .Values.hub.timeout.startup }} # Configure the URL to the lab controller. diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 759a941eb3..d89086859d 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -2,33 +2,40 @@ controller: image: - # -- nublado image to use - repository: ghcr.io/lsst-sqre/nublado-controller + # -- Nublado controller image to use + repository: "ghcr.io/lsst-sqre/nublado-controller" - # -- Pull policy for the nublado image - pullPolicy: IfNotPresent + # -- Pull policy for the controller image + pullPolicy: "IfNotPresent" - # -- Tag of nublado image to use + # -- Tag of Nublado controller image to use # @default -- The appVersion of the chart tag: "" - # -- Affinity rules for the lab controller pod + # -- Affinity rules for the Nublado controller affinity: {} - # -- Node selector rules for the lab controller pod + # -- Node selector rules for the Nublado controller nodeSelector: {} - # -- Annotations for the lab controller pod + # -- Annotations for the Nublado controller podAnnotations: {} - # -- Resource limits and requests for the lab controller pod - resources: {} + # -- Resource limits and requests for the Nublado controller + # @default -- See `values.yaml` + resources: + limits: + cpu: "0.25" + memory: "200Mi" + requests: + cpu: "0.05" + memory: "120Mi" - # -- Tolerations for the lab controller pod + # -- Tolerations for the Nublado controller tolerations: [] ingress: - # -- Additional annotations to add for the lab controller pod ingress + # -- Additional annotations to add for the Nublado controller ingress annotations: {} # -- If Google Artifact Registry is used as the image source, the Google @@ -50,10 +57,13 @@ controller: pathPrefix: "/nublado" fileserver: - # -- Enable fileserver management + # -- Enable user file 
servers enabled: false - # -- ArgcoCD application in which to collect user file servers + # -- Affinity rules for user file server pods + affinity: {} + + # -- Argo CD application in which to collect user file servers application: "fileservers" # -- Timeout to wait for Kubernetes to create file servers, in seconds @@ -69,16 +79,19 @@ controller: image: # -- File server image to use - repository: ghcr.io/lsst-sqre/worblehat + repository: "ghcr.io/lsst-sqre/worblehat" # -- Pull policy for file server image - pullPolicy: IfNotPresent + pullPolicy: "IfNotPresent" # -- Tag of file server image to use - tag: 0.1.0 + tag: "0.1.0" - # -- Namespace for user fileservers - namespace: fileservers + # -- Namespace for user file servers + namespace: "fileservers" + + # -- Node selector rules for user file server pods + nodeSelector: {} # -- Path prefix for user file servers pathPrefix: "/files" @@ -88,10 +101,13 @@ controller: resources: requests: cpu: 0.1 - memory: 1Gi + memory: "1Gi" limits: cpu: 1 - memory: 10Gi + memory: "10Gi" + + # -- Tolerations for user file server pods + tolerations: [] images: # -- Source for prepulled images. For Docker, set `type` to `docker`, @@ -128,7 +144,10 @@ controller: aliasTags: [] lab: - # -- ArgoCD application in which to collect user lab objects + # -- Affinity rules for user lab pods + affinity: {} + + # -- Argo CD application in which to collect user lab objects application: "nublado-users" # -- Timeout for deleting a user's lab resources from Kubernetes in @@ -194,6 +213,9 @@ controller: # (`-`) and the user's username. 
namespacePrefix: "nublado" + # -- Node selector rules for user lab pods + nodeSelector: {} + nss: # -- Base `/etc/passwd` file for lab containers # @default -- See `values.yaml` @@ -274,20 +296,23 @@ controller: # @default -- See `values.yaml` (specifies `small`, `medium`, and # `large` with `small` as the default) sizes: - - size: small + - size: "small" cpu: 1.0 - memory: 4Gi - - size: medium + memory: "4Gi" + - size: "medium" cpu: 2.0 - memory: 8Gi - - size: large + memory: "8Gi" + - size: "large" cpu: 4.0 - memory: 16Gi + memory: "16Gi" # -- How long to wait for Kubernetes to spawn a lab in seconds. This # should generally be shorter than the spawn timeout set in JupyterHub. spawnTimeout: 600 + # -- Tolerations for user lab pods + tolerations: [] + # -- Volumes that will be in lab pods or init containers. This supports # NFS, HostPath, and PVC volume types (differentiated in source.type). volumes: [] @@ -315,10 +340,6 @@ hub: internalDatabase: true timeout: - # -- Timeout for the Kubernetes spawn process in seconds. (Allow long - # enough to pull uncached images if needed.) - spawn: 600 - # -- Timeout for JupyterLab to start. Currently this sometimes takes over # 60 seconds for reasons we don't understand. startup: 90 @@ -358,8 +379,8 @@ jupyterhub: # -- Resource limits and requests resources: limits: - cpu: 900m - memory: 1Gi # Should support about 200 users + cpu: "900m" + memory: "1Gi" # Should support about 200 users db: # -- Type of database to use @@ -434,30 +455,17 @@ jupyterhub: # controller does its own prepulling) enabled: false - singleuser: - cloudMetadata: - # -- Whether to configure iptables to block cloud metadata endpoints. - # This is unnecessary in our environments (they are blocked by cluster - # configuration) and thus is disabled to reduce complexity. 
- blockWithIptables: false - - # -- Start command for labs - cmd: "/opt/lsst/software/jupyterlab/runlab.sh" - - # -- Default URL prefix for lab endpoints - defaultUrl: "/lab" - proxy: service: # -- Only expose the proxy to the cluster, overriding the default of # exposing the proxy directly to the Internet - type: ClusterIP + type: "ClusterIP" chp: networkPolicy: # -- Enable access to the proxy from other namespaces, since we put # each user's lab environment in its own namespace - interNamespaceAccessLabels: accept + interNamespaceAccessLabels: "accept" # This currently causes Minikube deployment in GH-actions to fail. # We want it sometime but it's not critical; it will help with From 7ac25d6c311f68b62764fd2dca129ce3c44e5ff8 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 1 Dec 2023 14:38:14 -0800 Subject: [PATCH 401/588] Use entry points for JupyterHub plugins Rather than import the modules explicitly in our local configuration, rely on JupyterHub entry point support and refer to our authenticator and spawner classes by their defined entry points. --- applications/nublado/templates/hub-configmap.yaml | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/applications/nublado/templates/hub-configmap.yaml b/applications/nublado/templates/hub-configmap.yaml index 2381cae6e1..f76116475e 100644 --- a/applications/nublado/templates/hub-configmap.yaml +++ b/applications/nublado/templates/hub-configmap.yaml @@ -6,15 +6,10 @@ metadata: {{- include "nublado.labels" . | nindent 4 }} data: 00_nublado.py: | - # It's not clear whether explicitly importing the spawner and - # authenticator modules is required or if JupyterHub will do this for us - # anyway, but it can't hurt. - import rubin.nublado.authenticator - import rubin.nublado.spawner - - # Use our authenticator and spawner. 
- c.JupyterHub.authenticator_class = "rubin.nublado.authenticator.GafaelfawrAuthenticator" - c.JupyterHub.spawner_class = "rubin.nublado.spawner.RSPRestSpawner" + # Use our authenticator and spawner. Both register custom entry points, + # so the full module and class name is not required. + c.JupyterHub.authenticator_class = "gafaelfawr" + c.JupyterHub.spawner_class = "nublado" # Set internal Hub API URL. c.JupyterHub.hub_connect_url = ( @@ -43,4 +38,4 @@ data: c.Spawner.http_timeout = {{ .Values.hub.timeout.startup }} # Configure the URL to the lab controller. - c.RSPRestSpawner.controller_url = "{{ .Values.global.baseUrl }}{{ .Values.controller.config.pathPrefix }}" + c.NubladoSpawner.controller_url = "{{ .Values.global.baseUrl }}{{ .Values.controller.config.pathPrefix }}" From b945d3348723a542442084d789897bda29ce8e8d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 1 Dec 2023 14:38:56 -0800 Subject: [PATCH 402/588] Sort and improve descriptions of nublado values Try to be a bit more consistent with ordering and fix a few values descriptions to be more accurate. --- applications/nublado/README.md | 16 ++--- applications/nublado/values.yaml | 65 ++++++++++--------- .../add-new-environment.rst | 2 +- 3 files changed, 42 insertions(+), 41 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 37f39c4f31..ebae172774 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -12,17 +12,17 @@ JupyterHub and custom spawner for the Rubin Science Platform | Key | Type | Default | Description | |-----|------|---------|-------------| -| cloudsql.affinity | object | `{}` | Affinity rules for the Cloud SQL Proxy pod | -| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. 
| +| cloudsql.affinity | object | `{}` | Affinity rules for the Cloud SQL Auth Proxy pod | +| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with Cloud SQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | -| cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | -| cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | -| cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | +| cloudsql.image.tag | string | `"1.33.14"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a Cloud SQL PostgreSQL instance | +| cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Auth Proxy pod | +| cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Auth Proxy pod | | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy pod | -| cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | -| cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | +| cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `cloud-sql-proxy` Kubernetes service account and has the `cloudsql.client` role | +| cloudsql.tolerations | list | `[]` | Tolerations for 
the Cloud SQL Auth Proxy pod | | controller.affinity | object | `{}` | Affinity rules for the Nublado controller | | controller.config.fileserver.affinity | object | `{}` | Affinity rules for user file server pods | | controller.config.fileserver.application | string | `"fileservers"` | Argo CD application in which to collect user file servers | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index d89086859d..2091ca159b 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -1,6 +1,15 @@ # Default values for Nublado. controller: + # -- Affinity rules for the Nublado controller + affinity: {} + + # -- If Google Artifact Registry is used as the image source, the Google + # service account that has an IAM binding to the `nublado-controller` + # Kubernetes service account and has the Artifact Registry reader role + # @default -- None, must be set when using Google Artifact Registry + googleServiceAccount: "" + image: # -- Nublado controller image to use repository: "ghcr.io/lsst-sqre/nublado-controller" @@ -12,8 +21,9 @@ controller: # @default -- The appVersion of the chart tag: "" - # -- Affinity rules for the Nublado controller - affinity: {} + ingress: + # -- Additional annotations to add for the Nublado controller ingress + annotations: {} # -- Node selector rules for the Nublado controller nodeSelector: {} @@ -31,23 +41,13 @@ controller: cpu: "0.05" memory: "120Mi" - # -- Tolerations for the Nublado controller - tolerations: [] - - ingress: - # -- Additional annotations to add for the Nublado controller ingress - annotations: {} - - # -- If Google Artifact Registry is used as the image source, the Google - # service account that has an IAM binding to the `nublado-controller` - # Kubernetes service account and has the Artifact Registry reader role - # @default -- None, must be set when using Google Artifact Registry - googleServiceAccount: "" - # -- Whether to enable Slack alerts. 
If set to true, `slack_webhook` must be # set in the corresponding Nublado Vault secret. slackAlerts: false + # -- Tolerations for the Nublado controller + tolerations: [] + # Passed as YAML to the lab controller. config: # -- Level of Python logging @@ -516,28 +516,26 @@ jupyterhub: enabled: false cloudsql: - # -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google - # Cloud. + # -- Enable the Cloud SQL Auth Proxy, used with Cloud SQL databases on + # Google Cloud enabled: false + # -- Affinity rules for the Cloud SQL Auth Proxy pod + affinity: {} + image: # -- Cloud SQL Auth Proxy image to use repository: "gcr.io/cloudsql-docker/gce-proxy" - # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.15" - # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" - # -- Instance connection name for a CloudSQL PostgreSQL instance - # @default -- None, must be set if Cloud SQL Auth Proxy is enabled - instanceConnectionName: "" + # -- Cloud SQL Auth Proxy tag to use + tag: "1.33.14" - # -- The Google service account that has an IAM binding to the `gafaelfawr` - # Kubernetes service account and has the `cloudsql.client` role + # -- Instance connection name for a Cloud SQL PostgreSQL instance # @default -- None, must be set if Cloud SQL Auth Proxy is enabled - serviceAccount: "" + instanceConnectionName: "" # -- Resource limits and requests for the Cloud SQL Proxy pod # @default -- See `values.yaml` @@ -549,17 +547,20 @@ cloudsql: cpu: "5m" memory: "7Mi" - # -- Annotations for the Cloud SQL Proxy pod + # -- Annotations for the Cloud SQL Auth Proxy pod podAnnotations: {} - # -- Node selection rules for the Cloud SQL Proxy pod + # -- Node selection rules for the Cloud SQL Auth Proxy pod nodeSelector: {} - # -- Tolerations for the Cloud SQL Proxy pod - tolerations: [] + # -- The Google service account that has an IAM binding to the + # `cloud-sql-proxy` Kubernetes service account and has the `cloudsql.client` + # role + # @default -- None, 
must be set if Cloud SQL Auth Proxy is enabled + serviceAccount: "" - # -- Affinity rules for the Cloud SQL Proxy pod - affinity: {} + # -- Tolerations for the Cloud SQL Auth Proxy pod + tolerations: [] # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. diff --git a/docs/applications/onepassword-connect/add-new-environment.rst b/docs/applications/onepassword-connect/add-new-environment.rst index 393de81951..6547bcfb97 100644 --- a/docs/applications/onepassword-connect/add-new-environment.rst +++ b/docs/applications/onepassword-connect/add-new-environment.rst @@ -28,7 +28,7 @@ When following these instructions, you will be modifying a `Secrets Automation w You will need to have permissions to modify the workflow for the 1Password Connet server that will be serving your environment. Process -======= +======== In the following steps, you'll change the permissions of the 1Password Connect server to add the new 1Password vault for your environment and create a new token with access to that vault. From 3d3d6a0bd99aa96bb962b6d07fea20016b194ee2 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 1 Dec 2023 16:02:08 -0800 Subject: [PATCH 403/588] Add explicit volume mounts for file servers The Nublado file servers should not expose every volume mounted in lab containers. Instead, expose only explicitly configured mounts, similar to init containers, and expose only the home directory mount on idfdev and idfint.
--- applications/nublado/README.md | 1 + applications/nublado/values-idfdev.yaml | 4 +++- applications/nublado/values-idfint.yaml | 3 +++ applications/nublado/values.yaml | 7 +++++++ 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index ebae172774..894e6fc532 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -38,6 +38,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.fileserver.pathPrefix | string | `"/files"` | Path prefix for user file servers | | controller.config.fileserver.resources | object | See `values.yaml` | Resource requests and limits for user file servers | | controller.config.fileserver.tolerations | list | `[]` | Tolerations for user file server pods | +| controller.config.fileserver.volumeMounts | list | `[]` | Volumes that should be made available via WebDAV | | controller.config.images.aliasTags | list | `[]` | Additional tags besides `recommendedTag` that should be recognized as aliases. | | controller.config.images.cycle | string | `nil` | Restrict images to this SAL cycle, if given. | | controller.config.images.numDailies | int | `3` | Number of most-recent dailies to prepull. 
| diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 970cb6bc89..a92d9f1cf8 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -5,7 +5,9 @@ controller: logLevel: "DEBUG" fileserver: enabled: true - idleTimeout: 43200 # 12 hours + volumeMounts: + - containerPath: "/home" + volumeName: "home" images: source: type: "google" diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index ce4c1c732b..305ad78759 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -4,6 +4,9 @@ controller: config: fileserver: enabled: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" images: source: type: "google" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 2091ca159b..2f131c6226 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -109,6 +109,13 @@ controller: # -- Tolerations for user file server pods tolerations: [] + # -- Volumes that should be made available via WebDAV + volumeMounts: [] + # volumeMounts: + # - containerPath: "/project" + # readOnly: true + # volumeName: "project" + images: # -- Source for prepulled images. For Docker, set `type` to `docker`, # `registry` to the hostname and `repository` to the name of the From 4d561491e62aa74114753dbaedaeb0ccfa37eb41 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 13 Dec 2023 11:57:28 -0800 Subject: [PATCH 404/588] Switch to the new nublado-inithome container Use the new Python-based container that's built as part of Nublado instead of the older initdir init container. 
--- applications/nublado/values-base.yaml | 6 +++--- applications/nublado/values-idfdev.yaml | 6 +++--- applications/nublado/values-idfint.yaml | 6 +++--- applications/nublado/values-idfprod.yaml | 6 +++--- applications/nublado/values-summit.yaml | 6 +++--- applications/nublado/values-tucson-teststand.yaml | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index c1fda37419..cec07cc809 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -21,10 +21,10 @@ controller: PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - - name: "initdir" + - name: "inithome" image: - repository: "ghcr.io/lsst-sqre/initdir" - tag: "0.0.4" + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index a92d9f1cf8..912be7e9ba 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -24,10 +24,10 @@ controller: GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" S3_ENDPOINT_URL: "https://storage.googleapis.com" initContainers: - - name: "initdir" + - name: "inithome" image: - repository: "ghcr.io/lsst-sqre/initdir" - tag: "0.0.4" + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 305ad78759..30acc816ec 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -46,10 +46,10 @@ controller: cpu: 8.0 memory: 32Gi initContainers: - - name: "initdir" + - name: "inithome" image: - repository: "ghcr.io/lsst-sqre/initdir" - tag: "0.0.4" + 
repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 76aeaedea3..2971992ab2 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -31,10 +31,10 @@ controller: cpu: 4.0 memory: 16Gi initContainers: - - name: "initdir" + - name: "inithome" image: - repository: "ghcr.io/lsst-sqre/initdir" - tag: "0.0.4" + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 2fbb0e3911..5de07ebc7f 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -21,10 +21,10 @@ controller: PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - - name: "initdir" + - name: "inithome" image: - repository: "ghcr.io/lsst-sqre/initdir" - tag: "0.0.4" + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 0652ade139..87f165d915 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -21,10 +21,10 @@ controller: PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "oods" initContainers: - - name: "initdir" + - name: "inithome" image: - repository: "ghcr.io/lsst-sqre/initdir" - tag: "0.0.4" + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" privileged: true volumeMounts: - containerPath: "/home" From c01f94a493601d6db813cd41079d286823d20d0e Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 3 Jan 2024 09:32:18 -0700 Subject: [PATCH 
405/588] use upstream giftless again --- applications/giftless/values-roundtable-dev.yaml | 2 +- applications/giftless/values-roundtable-prod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index b7bf3c778f..14c7681165 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "tickets-dm-42009" + tag: "upstream" server: debug: true ingress: diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index cac6b243db..0410c2d015 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -1,7 +1,7 @@ image: pullPolicy: "Always" repository: "docker.io/lsstsqre/giftless" - tag: "tickets-dm-42009" + tag: "upstream" server: debug: true readonly: From 030a8da0ce75d0e1db2a1a3bdf01e21fe64fecd3 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 2 Jan 2024 16:52:59 -0700 Subject: [PATCH 406/588] Add ingress configuration for InfluxDB enterprise data service - URL path is /influxdb-enterprise-data --- applications/sasquatch/README.md | 7 ++++ .../charts/influxdb-enterprise/README.md | 7 ++++ .../templates/data-ingress.yaml | 32 +++++++++++++++++++ .../charts/influxdb-enterprise/values.yaml | 11 ++++++- applications/sasquatch/values-usdfprod.yaml | 3 ++ 5 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/data-ingress.yaml diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 9d361ea09c..9eaaba55cf 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -101,6 +101,13 @@ Rubin Observatory's telemetry service. 
| influxdb-enterprise.data.https.secret.name | string | `"influxdb-tls"` | | | influxdb-enterprise.data.https.useCertManager | bool | `false` | | | influxdb-enterprise.data.image | object | `{}` | | +| influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | +| influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | +| influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | +| influxdb-enterprise.data.ingress.className | string | `"nginx"` | | +| influxdb-enterprise.data.ingress.enabled | bool | `false` | | +| influxdb-enterprise.data.ingress.hostname | string | `""` | | +| influxdb-enterprise.data.ingress.path | string | `"/influxdb-enterprise-data(/|$)(.*)"` | | | influxdb-enterprise.data.persistence.enabled | bool | `false` | | | influxdb-enterprise.data.replicas | int | `1` | | | influxdb-enterprise.data.resources | object | `{}` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index 060e6b19ea..79a71796cb 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -24,6 +24,13 @@ Run InfluxDB Enterprise on Kubernetes | data.https.secret.name | string | `"influxdb-tls"` | | | data.https.useCertManager | bool | `false` | | | data.image | object | `{}` | | +| data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | +| data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | +| data.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | +| data.ingress.className | string | `"nginx"` | | +| data.ingress.enabled | bool | `false` | | +| data.ingress.hostname | string | `""` | | +| data.ingress.path | string | 
`"/influxdb-enterprise-data(/|$)(.*)"` | | | data.persistence.enabled | bool | `false` | | | data.replicas | int | `1` | | | data.resources | object | `{}` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-ingress.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-ingress.yaml new file mode 100644 index 0000000000..e3dd191841 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-ingress.yaml @@ -0,0 +1,32 @@ +{{- if .Values.data.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-data + labels: + influxdb.influxdata.com/component: data +{{- include "influxdb-enterprise.labels" . | nindent 4 }} +{{- if .Values.data.ingress.annotations }} + annotations: +{{ toYaml .Values.data.ingress.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.data.ingress.className }} + ingressClassName: {{ .Values.data.ingress.className }} +{{- end }} + rules: +{{- if .Values.data.ingress.hostname }} + - host: {{ .Values.data.ingress.hostname | quote }} + http: +{{- else }} + - http: +{{- end }} + paths: + - path: {{ .Values.data.ingress.path }} + pathType: Prefix + backend: + service: + name: {{ template "influxdb-enterprise.fullname" . }}-data + port: + number: 8086 +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index 6b591fbbbc..449fa43f55 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -227,7 +227,16 @@ data: # nodePort: 30091 ## Add annotations to service # annotations: {} - # + # InfluxDB ingress configuration. 
+ ingress: + enabled: false + hostname: "" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + className: "nginx" + path: /influxdb-enterprise-data(/|$)(.*) ## Persist data to a persistent volume persistence: enabled: false diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index ba6b4a1459..768fe9e7ca 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -66,6 +66,9 @@ influxdb-enterprise: cpu: 4 data: replicas: 2 + ingress: + enabled: true + hostname: usdf-rsp.slac.stanford.edu # -- Enable InfluxDB Enterprise data pod persistence persistence: enabled: true From 2a704c009822d19e0f6b50b3b00ca57fcdc26eba Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Wed, 3 Jan 2024 10:27:28 -0700 Subject: [PATCH 407/588] Move comment outside of loop --- applications/butler/templates/ingress-anonymous.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/butler/templates/ingress-anonymous.yaml b/applications/butler/templates/ingress-anonymous.yaml index 3c58b89c78..ebfcf7264a 100644 --- a/applications/butler/templates/ingress-anonymous.yaml +++ b/applications/butler/templates/ingress-anonymous.yaml @@ -20,7 +20,6 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - {{- range $repositoryLabel := .Values.config.repositoryLabels }} # For direct end-user use of the Butler client library, the # Butler() convenience constructor must be able to load a # configuration file via unauthenticated HTTP. This exists for @@ -28,6 +27,7 @@ template: # to the existence of the Butler server -- they are passed the URI # for a repository root on the filesystem or HTTP, from which a # configuration file is loaded. 
+ {{- range $repositoryLabel := .Values.config.repositoryLabels }} - path: "{{ $.Values.config.pathPrefix }}/repo/{{ $repositoryLabel }}/butler.yaml" pathType: "Exact" backend: From c1a0e225725035326e0121cc97aa6148845b843d Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 3 Jan 2024 09:00:19 -0700 Subject: [PATCH 408/588] Add ingress configuration for InfluxDB enterprise meta service - URL path is /influxdb-enterprise-meta --- applications/sasquatch/README.md | 7 ++++ .../charts/influxdb-enterprise/README.md | 7 ++++ .../templates/meta-ingress.yaml | 32 +++++++++++++++++++ .../charts/influxdb-enterprise/values.yaml | 11 ++++++- applications/sasquatch/values-usdfprod.yaml | 3 ++ 5 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/meta-ingress.yaml diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 9eaaba55cf..605a3e2e5a 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -127,6 +127,13 @@ Rubin Observatory's telemetry service. 
| influxdb-enterprise.meta.https.secret.name | string | `"influxdb-tls"` | | | influxdb-enterprise.meta.https.useCertManager | bool | `false` | | | influxdb-enterprise.meta.image | object | `{}` | | +| influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | +| influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | +| influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | +| influxdb-enterprise.meta.ingress.className | string | `"nginx"` | | +| influxdb-enterprise.meta.ingress.enabled | bool | `false` | | +| influxdb-enterprise.meta.ingress.hostname | string | `""` | | +| influxdb-enterprise.meta.ingress.path | string | `"/influxdb-enterprise-meta(/|$)(.*)"` | | | influxdb-enterprise.meta.persistence.enabled | bool | `false` | | | influxdb-enterprise.meta.podDisruptionBudget.minAvailable | int | `2` | | | influxdb-enterprise.meta.replicas | int | `3` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index 79a71796cb..105b97867c 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -50,6 +50,13 @@ Run InfluxDB Enterprise on Kubernetes | meta.https.secret.name | string | `"influxdb-tls"` | | | meta.https.useCertManager | bool | `false` | | | meta.image | object | `{}` | | +| meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | +| meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | +| meta.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | +| meta.ingress.className | string | `"nginx"` | | +| meta.ingress.enabled | bool | `false` | | +| meta.ingress.hostname | string | `""` | | +| 
meta.ingress.path | string | `"/influxdb-enterprise-meta(/|$)(.*)"` | | | meta.persistence.enabled | bool | `false` | | | meta.podDisruptionBudget.minAvailable | int | `2` | | | meta.replicas | int | `3` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-ingress.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-ingress.yaml new file mode 100644 index 0000000000..bc7ecc42d1 --- /dev/null +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-ingress.yaml @@ -0,0 +1,32 @@ +{{- if .Values.meta.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ include "influxdb-enterprise.fullname" . }}-meta + labels: + influxdb.influxdata.com/component: meta +{{- include "influxdb-enterprise.labels" . | nindent 4 }} +{{- if .Values.meta.ingress.annotations }} + annotations: +{{ toYaml .Values.meta.ingress.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.meta.ingress.className }} + ingressClassName: {{ .Values.meta.ingress.className }} +{{- end }} + rules: +{{- if .Values.meta.ingress.hostname }} + - host: {{ .Values.meta.ingress.hostname | quote }} + http: +{{- else }} + - http: +{{- end }} + paths: + - path: {{ .Values.meta.ingress.path }} + pathType: Prefix + backend: + service: + name: {{ template "influxdb-enterprise.fullname" . }}-meta + port: + number: 8091 +{{- end }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index 449fa43f55..0284bb8051 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -125,7 +125,16 @@ meta: # nodePort: 30086 ## Add annotations to service # annotations: {} - # + # InfluxDB ingress configuration. 
+ ingress: + enabled: false + hostname: "" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + className: "nginx" + path: /influxdb-enterprise-meta(/|$)(.*) ## Persist data to a persistent volume persistence: enabled: false diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 768fe9e7ca..b974d48118 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -51,6 +51,9 @@ influxdb-enterprise: type: LoadBalancer annotations: metallb.universe.tf/address-pool: sdf-services + ingress: + enabled: true + hostname: usdf-rsp.slac.stanford.edu persistence: # -- Enable InfluxDB Enterprise meta pod persistence enabled: true From 3cc1126f30ef2db66c3f32ebd3c73fead1862a0d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 3 Jan 2024 10:24:46 -0800 Subject: [PATCH 409/588] More granular permissions for Nublado controller Ensure that JupyterHub only has access to the routes that it needs to use, and block access to the prepuller and fileserver admin routes to anyone other than exec:admin. 
--- .../templates/controller-ingress-admin.yaml | 2 +- .../templates/controller-ingress-hub.yaml | 28 +++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) create mode 100644 applications/nublado/templates/controller-ingress-hub.yaml diff --git a/applications/nublado/templates/controller-ingress-admin.yaml b/applications/nublado/templates/controller-ingress-admin.yaml index 8e85999384..43043b1cc4 100644 --- a/applications/nublado/templates/controller-ingress-admin.yaml +++ b/applications/nublado/templates/controller-ingress-admin.yaml @@ -8,7 +8,7 @@ config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: all: - - "admin:jupyterlab" + - "exec:admin" template: metadata: name: "controller-admin" diff --git a/applications/nublado/templates/controller-ingress-hub.yaml b/applications/nublado/templates/controller-ingress-hub.yaml new file mode 100644 index 0000000000..2c1c00c611 --- /dev/null +++ b/applications/nublado/templates/controller-ingress-hub.yaml @@ -0,0 +1,28 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "nublado-controller-hub" + labels: + {{- include "nublado.labels" . 
| nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "admin:jupyterlab" +template: + metadata: + name: "controller-hub" + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + spec: + rules: + - host: {{ .Values.global.host | quote }} + http: + paths: + - path: "{{ .Values.controller.config.pathPrefix }}/spawner/v1/labs" + pathType: "Prefix" + backend: + service: + name: "nublado-controller" + port: + number: 80 From 1495b5ae8adeef18749f348507b1cb56d21550a0 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 3 Jan 2024 15:28:48 -0700 Subject: [PATCH 410/588] Turn off giftless prod debug logging --- applications/giftless/values-roundtable-prod.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/giftless/values-roundtable-prod.yaml b/applications/giftless/values-roundtable-prod.yaml index 0410c2d015..9fcb5a17c2 100644 --- a/applications/giftless/values-roundtable-prod.yaml +++ b/applications/giftless/values-roundtable-prod.yaml @@ -3,7 +3,6 @@ image: repository: "docker.io/lsstsqre/giftless" tag: "upstream" server: - debug: true readonly: replicas: 3 ingress: From 7dbb334ccb985ac39e533cb261f41d7e7982b255 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 3 Jan 2024 15:29:21 -0500 Subject: [PATCH 411/588] Update to noteburst 0.8.0 Related release: https://github.com/lsst-sqre/noteburst/releases/tag/0.8.0 --- applications/noteburst/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index 0ae38cb825..ffee5233bb 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: noteburst version: 1.0.0 -appVersion: "0.7.1" +appVersion: "0.8.0" description: Noteburst is a notebook execution service for the Rubin Science Platform. 
type: application home: https://noteburst.lsst.io/ From 7c9a82e517059e455eb95e33bc4e0643c392077a Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 4 Jan 2024 17:06:04 -0500 Subject: [PATCH 412/588] Switch noteburst to use recommended images We're having difficulty grabbing the latest weekly; the API may have changed. --- applications/noteburst/README.md | 2 +- applications/noteburst/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/noteburst/README.md b/applications/noteburst/README.md index 2d56c582c5..4e7cf96373 100644 --- a/applications/noteburst/README.md +++ b/applications/noteburst/README.md @@ -22,7 +22,7 @@ Noteburst is a notebook execution service for the Rubin Science Platform. | config.nubladoControllerPathPrefix | string | `"/nublado"` | URL path prefix for the Nublado JupyterLab Controller service | | config.worker.identities | list | `[]` | Science Platform user identities that workers can acquire. Each item is an object with username and uuid keys | | config.worker.imageReference | string | `""` | Nublado image reference, applicable when imageSelector is "reference" | -| config.worker.imageSelector | string | `"weekly"` | Nublado image stream to select: "recommended", "weekly" or "reference" | +| config.worker.imageSelector | string | `"recommended"` | Nublado image stream to select: "recommended", "weekly" or "reference" | | config.worker.jobTimeout | int | `300` | The default notebook execution timeout, in seconds. | | config.worker.keepAlive | string | `"normal"` | Worker keep alive mode: "normal", "fast", "disabled" | | config.worker.tokenLifetime | string | `"2419200"` | Worker token lifetime, in seconds. 
| diff --git a/applications/noteburst/values.yaml b/applications/noteburst/values.yaml index 34ecd9bc3f..37c2119e75 100644 --- a/applications/noteburst/values.yaml +++ b/applications/noteburst/values.yaml @@ -118,7 +118,7 @@ config: tokenScopes: "exec:notebook,read:image,read:tap,read:alertdb" # -- Nublado image stream to select: "recommended", "weekly" or "reference" - imageSelector: "weekly" + imageSelector: "recommended" # -- Nublado image reference, applicable when imageSelector is "reference" imageReference: "" From b25c308a80067347bfae2f785940437f7c94e629 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 4 Jan 2024 16:22:45 -0700 Subject: [PATCH 413/588] Explicitly disable influxdb-enterprise by default --- applications/sasquatch/README.md | 2 +- applications/sasquatch/values.yaml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 605a3e2e5a..7f64f4f87f 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -22,7 +22,7 @@ Rubin Observatory's telemetry service. | chronograf.resources.limits.memory | string | `"64Gi"` | | | chronograf.resources.requests.cpu | int | `1` | | | chronograf.resources.requests.memory | string | `"4Gi"` | | -| influxdb-enterprise | object | `{}` | Override influxdb-enterprise configuration. | +| influxdb-enterprise | object | `{"enabled":false}` | Override influxdb-enterprise configuration. | | influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"60s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. 
See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb-staging.enabled | bool | `false` | Enable InfluxDB staging deployment. | | influxdb-staging.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index c5666261ba..f36b822007 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -200,7 +200,8 @@ source-influxdb: cpu: 8 # -- Override influxdb-enterprise configuration. -influxdb-enterprise: {} +influxdb-enterprise: + enabled: false # -- Override kafka-connect-manager configuration. kafka-connect-manager: {} From 008659b6234a1ee797335845403501415be66942 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 09:45:48 +0000 Subject: [PATCH 414/588] Update Helm release argo-cd to v5.52.1 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index ff30957329..bef85d071f 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.52.0 + version: 5.52.1 repository: https://argoproj.github.io/argo-helm From 196b79ae226a8ecdeb2e7e941ee96254cd727a9d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 09:45:52 +0000 Subject: [PATCH 415/588] Update Helm release argo-workflows to v0.40.4 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 9f0756acb4..92ba83971e 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm 
dependencies: - name: argo-workflows - version: 0.40.3 + version: 0.40.4 repository: https://argoproj.github.io/argo-helm From 3043d9e935a1e8a440afeaf2d9771d28557f377e Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 12:31:43 +0000 Subject: [PATCH 416/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 54 +++++++++++++++++++++---------------------- requirements/main.txt | 6 ++--- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index bf2879087f..82e4c61a58 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/dev.txt requirements/dev.in # -alabaster==0.7.13 \ - --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \ - --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2 +alabaster==0.7.15 \ + --hash=sha256:0127f4b1db0afc914883f930e3d40763131aebac295522fc4a04d9e77c703705 \ + --hash=sha256:d99c6fd0f7a86fca68ecc5231c9de45227991c10ee6facfb894cf6afb953b142 # via sphinx annotated-types==0.6.0 \ --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ @@ -207,9 +207,9 @@ distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -documenteer[guide]==1.0.0 \ - --hash=sha256:6d6f1f97fa7636591c8cb885fadce6055fe5e57a0b694182ac9980d9fd2f69a6 \ - --hash=sha256:76b0e074833ac3941e4479fa79982f32b09e60fc1c6993e8749b99c9c4f8e1de +documenteer[guide]==1.0.1 \ + --hash=sha256:0d6bf2947456fd3456d86790874d7aab24c8f33d5b69a1f4d40bfb88a714f900 \ + --hash=sha256:d581fb54b6205daec69b4515b5a71e15781dfff1c2dd1f59fa28de1d2b2d4eb9 # via # -r requirements/dev.in # documenteer @@ -653,9 +653,9 @@ 
pyyaml==6.0.1 \ # pre-commit # pybtex # sphinxcontrib-redoc -referencing==0.32.0 \ - --hash=sha256:689e64fe121843dcfd57b71933318ef1f91188ffb45367332700a86ac8fd6161 \ - --hash=sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99 +referencing==0.32.1 \ + --hash=sha256:3c57da0513e9563eb7e203ebe9bb3a1b509b042016433bd1e45a2853466c3dd3 \ + --hash=sha256:7e4dc12271d8e15612bfe35792f5ea1c40970dadf8624602e33db2758f7ee554 # via # jsonschema # jsonschema-specifications @@ -773,24 +773,24 @@ rpds-py==0.16.2 \ # via # jsonschema # referencing -ruff==0.1.9 \ - --hash=sha256:0e17f53bcbb4fff8292dfd84cf72d767b5e146f009cccd40c2fad27641f8a7a9 \ - --hash=sha256:104aa9b5e12cb755d9dce698ab1b97726b83012487af415a4512fedd38b1459e \ - --hash=sha256:1e63bf5a4a91971082a4768a0aba9383c12392d0d6f1e2be2248c1f9054a20da \ - --hash=sha256:28d920e319783d5303333630dae46ecc80b7ba294aeffedf946a02ac0b7cc3db \ - --hash=sha256:2aec598fb65084e41a9c5d4b95726173768a62055aafb07b4eff976bac72a592 \ - --hash=sha256:331aae2cd4a0554667ac683243b151c74bd60e78fb08c3c2a4ac05ee1e606a39 \ - --hash=sha256:479ca4250cab30f9218b2e563adc362bd6ae6343df7c7b5a7865300a5156d5a6 \ - --hash=sha256:4d0738917c203246f3e275b37006faa3aa96c828b284ebfe3e99a8cb413c8c4b \ - --hash=sha256:69dac82d63a50df2ab0906d97a01549f814b16bc806deeac4f064ff95c47ddf5 \ - --hash=sha256:744dfe4b35470fa3820d5fe45758aace6269c578f7ddc43d447868cfe5078bcb \ - --hash=sha256:8151425a60878e66f23ad47da39265fc2fad42aed06fb0a01130e967a7a064f4 \ - --hash=sha256:837c739729394df98f342319f5136f33c65286b28b6b70a87c28f59354ec939b \ - --hash=sha256:aa8344310f1ae79af9ccd6e4b32749e93cddc078f9b5ccd0e45bd76a6d2e8bb6 \ - --hash=sha256:b041dee2734719ddbb4518f762c982f2e912e7f28b8ee4fe1dee0b15d1b6e800 \ - --hash=sha256:c497d769164df522fdaf54c6eba93f397342fe4ca2123a2e014a5b8fc7df81c7 \ - --hash=sha256:e6837202c2859b9f22e43cb01992373c2dbfeae5c0c91ad691a4a2e725392464 \ - --hash=sha256:e6a212f436122ac73df851f0cf006e0c6612fe6f9c864ed17ebefce0eff6a5fd +ruff==0.1.11 \ + 
--hash=sha256:09c415716884950080921dd6237767e52e227e397e2008e2bed410117679975b \ + --hash=sha256:0f58948c6d212a6b8d41cd59e349751018797ce1727f961c2fa755ad6208ba45 \ + --hash=sha256:190a566c8f766c37074d99640cd9ca3da11d8deae2deae7c9505e68a4a30f740 \ + --hash=sha256:231d8fb11b2cc7c0366a326a66dafc6ad449d7fcdbc268497ee47e1334f66f77 \ + --hash=sha256:4b077ce83f47dd6bea1991af08b140e8b8339f0ba8cb9b7a484c30ebab18a23f \ + --hash=sha256:5b25093dad3b055667730a9b491129c42d45e11cdb7043b702e97125bcec48a1 \ + --hash=sha256:6464289bd67b2344d2a5d9158d5eb81025258f169e69a46b741b396ffb0cda95 \ + --hash=sha256:934832f6ed9b34a7d5feea58972635c2039c7a3b434fe5ba2ce015064cb6e955 \ + --hash=sha256:97ce4d752f964ba559c7023a86e5f8e97f026d511e48013987623915431c7ea9 \ + --hash=sha256:9b8f397902f92bc2e70fb6bebfa2139008dc72ae5177e66c383fa5426cb0bf2c \ + --hash=sha256:9bd4025b9c5b429a48280785a2b71d479798a69f5c2919e7d274c5f4b32c3607 \ + --hash=sha256:a7f772696b4cdc0a3b2e527fc3c7ccc41cdcb98f5c80fdd4f2b8c50eb1458196 \ + --hash=sha256:c4a88efecec23c37b11076fe676e15c6cdb1271a38f2b415e381e87fe4517f18 \ + --hash=sha256:e1ad00662305dcb1e987f5ec214d31f7d6a062cae3e74c1cbccef15afd96611d \ + --hash=sha256:ea0d3e950e394c4b332bcdd112aa566010a9f9c95814844a7468325290aabfd9 \ + --hash=sha256:eb85ee287b11f901037a6683b2374bb0ec82928c5cbc984f575d0437979c521a \ + --hash=sha256:f9d4d88cb6eeb4dfe20f9f0519bd2eaba8119bde87c3d5065c541dbae2b5a2cb # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ diff --git a/requirements/main.txt b/requirements/main.txt index 3034ee4dd8..80169040a3 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -260,9 +260,9 @@ httpx==0.23.3 \ # via # onepasswordconnectsdk # safir -hvac==2.0.0 \ - --hash=sha256:3b14d0979b98ea993eca73b7dac7161b5547ede369a9b28f4fa40f18e74ec3f3 \ - --hash=sha256:6a51cb9a0d22fe13e824cb0b0a1ce2eeacb9ce6af68b7d1b6689e25ec1becaf5 +hvac==2.1.0 \ + 
--hash=sha256:73bc91e58c3fc7c6b8107cdaca9cb71fa0a893dfd80ffbc1c14e20f24c0c29d7 \ + --hash=sha256:b48bcda11a4ab0a7b6c47232c7ba7c87fda318ae2d4a7662800c465a78742894 # via -r requirements/main.in idna==3.6 \ --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ From 326ba90049e8cf4c781f25b2c4ecb7a6d871d294 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 12:32:02 +0000 Subject: [PATCH 417/588] Update gcr.io/cloudsql-docker/gce-proxy Docker tag to v1.33.15 --- applications/nublado/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 2f131c6226..01e71f86e4 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -538,7 +538,7 @@ cloudsql: pullPolicy: "IfNotPresent" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.14" + tag: "1.33.15" # -- Instance connection name for a Cloud SQL PostgreSQL instance # @default -- None, must be set if Cloud SQL Auth Proxy is enabled From 77380f828d9c4c915f580b08d717fa51b8eae863 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 8 Jan 2024 08:19:29 -0800 Subject: [PATCH 418/588] Update Helm docs --- applications/nublado/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 894e6fc532..c218dd696b 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -16,7 +16,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with Cloud SQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | 
`"1.33.14"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Auth Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Auth Proxy pod | From 7e7b2ca7687017b6812eebf99eef61034c1e1671 Mon Sep 17 00:00:00 2001 From: dspeck1 Date: Mon, 8 Jan 2024 17:15:25 +0000 Subject: [PATCH 419/588] Update kafka IPs after sasquatch maintenance --- .../next-visit-fan-out/values-usdfdev-prompt-processing.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml b/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml index 0f7e1444fc..7cabb3eb3b 100644 --- a/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml +++ b/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml @@ -5,8 +5,8 @@ knative: lsstcamUrl: http://prompt-proto-service-lsstcam.prompt-proto-service-lsstcam/next-visit kafka: - schemaRegistryUrl: http://10.99.65.182:8081 - sasquatchAddress: 10.96.224.141:9094 + schemaRegistryUrl: http://10.96.181.159:8081 + sasquatchAddress: 10.100.226.209:9094 consumerGroup: test-group-3 nextVisitTopic: test.next-visit From 354b3f0a0454aa807518a178cc7782e6cbff0e28 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 8 Jan 2024 18:03:00 -0800 Subject: [PATCH 420/588] Remove nublado2 Retire the nublado2, cachemachine, and moneypenny applications in favor of the nublado application. Move some documentation from those applications to elsewhere in the Phalanx documentation. CC-IN2P3 and RoE had not yet moved to Nublado v3, so write initial configurations for them for Nublado v4. 
These are probably not fully correct, but hopefully they can serve as a basis for future work. --- applications/cachemachine/Chart.yaml | 7 - applications/cachemachine/README.md | 31 -- .../cachemachine/templates/_helpers.tpl | 60 --- .../cachemachine/templates/configmap.yaml | 8 - .../cachemachine/templates/deployment.yaml | 96 ---- .../templates/ingress-anonymous.yaml | 30 -- .../cachemachine/templates/ingress.yaml | 31 -- .../templates/networkpolicy-pull.yaml | 15 - .../cachemachine/templates/networkpolicy.yaml | 21 - .../cachemachine/templates/service.yaml | 14 - .../templates/serviceaccount.yaml | 65 --- .../templates/tests/test-connection.yaml | 16 - .../cachemachine/templates/vault-secrets.yaml | 9 - applications/cachemachine/values-ccin2p3.yaml | 17 - applications/cachemachine/values-roe.yaml | 17 - applications/cachemachine/values-summit.yaml | 23 - applications/cachemachine/values.yaml | 70 --- applications/moneypenny/Chart.yaml | 15 - applications/moneypenny/README.md | 34 -- .../moneypenny/templates/_helpers.tpl | 60 --- .../moneypenny/templates/cm-m-config.yaml | 9 - .../moneypenny/templates/cm-quips.yaml | 9 - .../moneypenny/templates/configmap.yaml | 12 - .../moneypenny/templates/deployment.yaml | 96 ---- .../moneypenny/templates/ingress.yaml | 31 -- .../moneypenny/templates/networkpolicy.yaml | 23 - applications/moneypenny/templates/role.yaml | 21 - .../moneypenny/templates/rolebinding.yaml | 13 - .../moneypenny/templates/service.yaml | 15 - .../moneypenny/templates/serviceaccount.yaml | 6 - .../moneypenny/templates/vault-secrets.yaml | 9 - applications/moneypenny/values-ccin2p3.yaml | 15 - applications/moneypenny/values-roe.yaml | 15 - applications/moneypenny/values-summit.yaml | 15 - applications/moneypenny/values.yaml | 118 ----- applications/nublado/values-ccin2p3.yaml | 55 +++ applications/nublado/values-roe.yaml | 45 ++ applications/nublado/values.yaml | 2 +- applications/nublado2/Chart.yaml | 24 - applications/nublado2/README.md | 119 ----- 
applications/nublado2/secrets.yaml | 17 - applications/nublado2/templates/_helpers.tpl | 56 --- .../nublado2/templates/clusterrole.yaml | 28 -- .../templates/clusterrolebinding.yaml | 13 - .../nublado2/templates/gafaelfawr-token.yaml | 10 - applications/nublado2/templates/netpol.yaml | 27 -- .../nublado2/templates/nublado-config.yaml | 9 - .../nublado2/templates/vault-secrets.yaml | 33 -- applications/nublado2/values-ccin2p3.yaml | 209 --------- applications/nublado2/values-roe.yaml | 44 -- applications/nublado2/values-summit.yaml | 106 ----- applications/nublado2/values.yaml | 420 ------------------ applications/postgres/secrets.yaml | 6 - applications/telegraf/README.md | 2 +- applications/telegraf/values.yaml | 4 +- .../kubernetes-node-status-max-images.rst | 14 + docs/admin/installation.rst | 2 +- docs/admin/troubleshooting.rst | 25 +- docs/applications/cachemachine/bootstrap.rst | 38 -- docs/applications/cachemachine/gar.rst | 62 --- docs/applications/cachemachine/index.rst | 24 - docs/applications/cachemachine/pruning.rst | 18 - docs/applications/cachemachine/values.md | 12 - .../gafaelfawr/recreate-token.rst | 9 +- docs/applications/index.rst | 3 - docs/applications/moneypenny/index.rst | 22 - docs/applications/moneypenny/values.md | 12 - docs/applications/nublado/bootstrap.rst | 25 +- docs/applications/nublado/index.rst | 12 +- docs/applications/nublado/troubleshoot.rst | 15 +- docs/applications/nublado2/bootstrap.rst | 32 -- docs/applications/nublado2/index.rst | 22 - docs/applications/nublado2/troubleshoot.rst | 34 -- docs/applications/nublado2/upgrade.rst | 23 - docs/applications/nublado2/values.md | 12 - docs/applications/portal/index.rst | 2 +- environments/README.md | 3 - .../templates/cachemachine-application.yaml | 34 -- .../templates/moneypenny-application.yaml | 34 -- .../templates/nublado-users-application.yaml | 2 +- .../templates/nublado2-application.yaml | 41 -- environments/values-ccin2p3.yaml | 4 +- environments/values-roe.yaml | 4 +- 
environments/values.yaml | 10 - 84 files changed, 170 insertions(+), 2655 deletions(-) delete mode 100644 applications/cachemachine/Chart.yaml delete mode 100644 applications/cachemachine/README.md delete mode 100644 applications/cachemachine/templates/_helpers.tpl delete mode 100644 applications/cachemachine/templates/configmap.yaml delete mode 100644 applications/cachemachine/templates/deployment.yaml delete mode 100644 applications/cachemachine/templates/ingress-anonymous.yaml delete mode 100644 applications/cachemachine/templates/ingress.yaml delete mode 100644 applications/cachemachine/templates/networkpolicy-pull.yaml delete mode 100644 applications/cachemachine/templates/networkpolicy.yaml delete mode 100644 applications/cachemachine/templates/service.yaml delete mode 100644 applications/cachemachine/templates/serviceaccount.yaml delete mode 100644 applications/cachemachine/templates/tests/test-connection.yaml delete mode 100644 applications/cachemachine/templates/vault-secrets.yaml delete mode 100644 applications/cachemachine/values-ccin2p3.yaml delete mode 100644 applications/cachemachine/values-roe.yaml delete mode 100644 applications/cachemachine/values-summit.yaml delete mode 100644 applications/cachemachine/values.yaml delete mode 100644 applications/moneypenny/Chart.yaml delete mode 100644 applications/moneypenny/README.md delete mode 100644 applications/moneypenny/templates/_helpers.tpl delete mode 100644 applications/moneypenny/templates/cm-m-config.yaml delete mode 100644 applications/moneypenny/templates/cm-quips.yaml delete mode 100644 applications/moneypenny/templates/configmap.yaml delete mode 100644 applications/moneypenny/templates/deployment.yaml delete mode 100644 applications/moneypenny/templates/ingress.yaml delete mode 100644 applications/moneypenny/templates/networkpolicy.yaml delete mode 100644 applications/moneypenny/templates/role.yaml delete mode 100644 applications/moneypenny/templates/rolebinding.yaml delete mode 100644 
applications/moneypenny/templates/service.yaml delete mode 100644 applications/moneypenny/templates/serviceaccount.yaml delete mode 100644 applications/moneypenny/templates/vault-secrets.yaml delete mode 100644 applications/moneypenny/values-ccin2p3.yaml delete mode 100644 applications/moneypenny/values-roe.yaml delete mode 100644 applications/moneypenny/values-summit.yaml delete mode 100644 applications/moneypenny/values.yaml create mode 100644 applications/nublado/values-ccin2p3.yaml create mode 100644 applications/nublado/values-roe.yaml delete mode 100644 applications/nublado2/Chart.yaml delete mode 100644 applications/nublado2/README.md delete mode 100644 applications/nublado2/secrets.yaml delete mode 100644 applications/nublado2/templates/_helpers.tpl delete mode 100644 applications/nublado2/templates/clusterrole.yaml delete mode 100644 applications/nublado2/templates/clusterrolebinding.yaml delete mode 100644 applications/nublado2/templates/gafaelfawr-token.yaml delete mode 100644 applications/nublado2/templates/netpol.yaml delete mode 100644 applications/nublado2/templates/nublado-config.yaml delete mode 100644 applications/nublado2/templates/vault-secrets.yaml delete mode 100644 applications/nublado2/values-ccin2p3.yaml delete mode 100644 applications/nublado2/values-roe.yaml delete mode 100644 applications/nublado2/values-summit.yaml delete mode 100644 applications/nublado2/values.yaml delete mode 100644 docs/applications/cachemachine/bootstrap.rst delete mode 100644 docs/applications/cachemachine/gar.rst delete mode 100644 docs/applications/cachemachine/index.rst delete mode 100644 docs/applications/cachemachine/pruning.rst delete mode 100644 docs/applications/cachemachine/values.md delete mode 100644 docs/applications/moneypenny/index.rst delete mode 100644 docs/applications/moneypenny/values.md delete mode 100644 docs/applications/nublado2/bootstrap.rst delete mode 100644 docs/applications/nublado2/index.rst delete mode 100644 
docs/applications/nublado2/troubleshoot.rst delete mode 100644 docs/applications/nublado2/upgrade.rst delete mode 100644 docs/applications/nublado2/values.md delete mode 100644 environments/templates/cachemachine-application.yaml delete mode 100644 environments/templates/moneypenny-application.yaml delete mode 100644 environments/templates/nublado2-application.yaml diff --git a/applications/cachemachine/Chart.yaml b/applications/cachemachine/Chart.yaml deleted file mode 100644 index fd8100af9e..0000000000 --- a/applications/cachemachine/Chart.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v2 -name: cachemachine -version: 1.0.0 -description: JupyterLab image prepuller -sources: - - https://github.com/lsst-sqre/cachemachine -appVersion: 1.2.2 diff --git a/applications/cachemachine/README.md b/applications/cachemachine/README.md deleted file mode 100644 index 1ed392e993..0000000000 --- a/applications/cachemachine/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# cachemachine - -JupyterLab image prepuller - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the cachemachine frontend pod | -| autostart | object | `{}` | Autostart configuration. Each key is the name of a class of images to pull, and the value is the JSON specification for which and how many images to pull. 
| -| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the cachemachine image | -| image.repository | string | `"lsstsqre/cachemachine"` | cachemachine image to use | -| image.tag | string | The appVersion of the chart | Tag of cachemachine image to use | -| ingress.annotations | object | `{}` | Additional annotations to add for endpoints that are authenticated | -| ingress.anonymousAnnotations | object | `{}` | Additional annotations to add for endpoints that allow anonymous access, such as `/*/available` | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the cachemachine frontend pod | -| podAnnotations | object | `{}` | Annotations for the cachemachine frontend pod | -| resources | object | `{}` | Resource limits and requests for the cachemachine frontend pod | -| serviceAccount | object | `{"annotations":{},"name":""}` | Secret names to use for all Docker pulls | -| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | -| serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod | diff --git a/applications/cachemachine/templates/_helpers.tpl b/applications/cachemachine/templates/_helpers.tpl deleted file mode 100644 index 6599ed07b6..0000000000 --- a/applications/cachemachine/templates/_helpers.tpl +++ /dev/null @@ -1,60 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. 
-*/}} -{{- define "cachemachine.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "cachemachine.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "cachemachine.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "cachemachine.labels" -}} -app.kubernetes.io/name: {{ include "cachemachine.name" . }} -helm.sh/chart: {{ include "cachemachine.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "cachemachine.selectorLabels" -}} -app.kubernetes.io/name: {{ include "cachemachine.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "cachemachine.serviceAccountName" -}} -{{ default (include "cachemachine.fullname" .) 
.Values.serviceAccount.name }} -{{- end -}} diff --git a/applications/cachemachine/templates/configmap.yaml b/applications/cachemachine/templates/configmap.yaml deleted file mode 100644 index 013ff04860..0000000000 --- a/applications/cachemachine/templates/configmap.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "cachemachine.fullname" . }}-autostart - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -data: - {{- toYaml .Values.autostart | nindent 2 }} diff --git a/applications/cachemachine/templates/deployment.yaml b/applications/cachemachine/templates/deployment.yaml deleted file mode 100644 index b8105098c2..0000000000 --- a/applications/cachemachine/templates/deployment.yaml +++ /dev/null @@ -1,96 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "cachemachine.fullname" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - replicas: 1 - selector: - matchLabels: - {{- include "cachemachine.selectorLabels" . | nindent 6 }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "cachemachine.selectorLabels" . | nindent 8 }} - spec: - imagePullSecrets: - - name: "pull-secret" - serviceAccountName: {{ template "cachemachine.serviceAccountName" . 
}} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 - containers: - - name: {{ .Chart.Name }} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "all" - readOnlyRootFilesystem: true - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - env: - - name: DOCKER_SECRET_NAME - value: "pull-secret" - ports: - - name: "http" - containerPort: 8080 - protocol: "TCP" - readinessProbe: - httpGet: - path: "/" - port: "http" - {{- with .Values.resources }} - resources: - {{- toYaml .Values.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: "docker-creds" - mountPath: "/etc/secrets" - readOnly: true - - name: autostart - mountPath: "/etc/cachemachine" - readOnly: true - - name: podinfo - mountPath: /etc/podinfo - volumes: - - name: docker-creds - secret: - secretName: pull-secret - - name: autostart - configMap: - name: {{ include "cachemachine.fullname" . }}-autostart - - name: podinfo - downwardAPI: - items: - - path: "annotations" - fieldRef: - fieldPath: metadata.annotations - - path: "labels" - fieldRef: - fieldPath: metadata.labels - - path: "name" - fieldRef: - fieldPath: metadata.name - - path: "uid" - fieldRef: - fieldPath: metadata.uid - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/applications/cachemachine/templates/ingress-anonymous.yaml b/applications/cachemachine/templates/ingress-anonymous.yaml deleted file mode 100644 index 4ac68ad654..0000000000 --- a/applications/cachemachine/templates/ingress-anonymous.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: gafaelfawr.lsst.io/v1alpha1 -kind: GafaelfawrIngress -metadata: - name: {{ template "cachemachine.fullname" . 
}}-anonymous - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -config: - baseUrl: {{ .Values.global.baseUrl | quote }} - scopes: - anonymous: true -template: - metadata: - name: {{ template "cachemachine.fullname" . }}-anonymous - annotations: - nginx.ingress.kubernetes.io/use-regex: "true" - {{- with .Values.ingress.anonymousAnnotations }} - {{- toYaml . | nindent 6 }} - {{- end }} - spec: - rules: - - host: {{ required "global.host must be set" .Values.global.host | quote }} - http: - paths: - - path: "/cachemachine/.*/(available|desired)" - pathType: "ImplementationSpecific" - backend: - service: - name: {{ template "cachemachine.fullname" . }} - port: - number: 80 diff --git a/applications/cachemachine/templates/ingress.yaml b/applications/cachemachine/templates/ingress.yaml deleted file mode 100644 index 0fe53f9cee..0000000000 --- a/applications/cachemachine/templates/ingress.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: gafaelfawr.lsst.io/v1alpha1 -kind: GafaelfawrIngress -metadata: - name: {{ template "cachemachine.fullname" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -config: - baseUrl: {{ .Values.global.baseUrl | quote }} - scopes: - all: - - "exec:admin" - loginRedirect: true -template: - metadata: - name: {{ template "cachemachine.fullname" . }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 6 }} - {{- end }} - spec: - rules: - - host: {{ required "global.host must be set" .Values.global.host | quote }} - http: - paths: - - path: "/cachemachine" - pathType: "Prefix" - backend: - service: - name: {{ template "cachemachine.fullname" . 
}} - port: - number: 80 diff --git a/applications/cachemachine/templates/networkpolicy-pull.yaml b/applications/cachemachine/templates/networkpolicy-pull.yaml deleted file mode 100644 index de3104385d..0000000000 --- a/applications/cachemachine/templates/networkpolicy-pull.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ template "cachemachine.fullname" . }}-pull - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - # Reject all inbound and outbound connections to the pods that exist solely - # to pull Docker images. - podSelector: - matchLabels: - cachemachine: "pull" - policyTypes: - - Ingress - - Egress diff --git a/applications/cachemachine/templates/networkpolicy.yaml b/applications/cachemachine/templates/networkpolicy.yaml deleted file mode 100644 index 2741f62d58..0000000000 --- a/applications/cachemachine/templates/networkpolicy.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "cachemachine.fullname" . }} -spec: - podSelector: - matchLabels: - {{- include "cachemachine.selectorLabels" . | nindent 6 }} - policyTypes: - - Ingress - ingress: - # Allow inbound access from pods (in any namespace) labeled - # gafaelfawr.lsst.io/ingress: true. - - from: - - namespaceSelector: {} - podSelector: - matchLabels: - gafaelfawr.lsst.io/ingress: "true" - ports: - - protocol: "TCP" - port: 8080 diff --git a/applications/cachemachine/templates/service.yaml b/applications/cachemachine/templates/service.yaml deleted file mode 100644 index 63ccbc2ed1..0000000000 --- a/applications/cachemachine/templates/service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "cachemachine.fullname" . }} - labels: - {{- include "cachemachine.labels" . 
| nindent 4 }} -spec: - type: ClusterIP - ports: - - port: 80 - targetPort: "http" - protocol: "TCP" - selector: - {{- include "cachemachine.selectorLabels" . | nindent 4 }} diff --git a/applications/cachemachine/templates/serviceaccount.yaml b/applications/cachemachine/templates/serviceaccount.yaml deleted file mode 100644 index 81a80ff760..0000000000 --- a/applications/cachemachine/templates/serviceaccount.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["list"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ template "cachemachine.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ template "cachemachine.serviceAccountName" . }} - apiGroup: rbac.authorization.k8s.io ---- -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . }} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -rules: - - apiGroups: ["apps"] - resources: ["daemonsets"] - verbs: ["create", "delete"] - - apiGroups: ["apps"] - resources: ["daemonsets/status"] - verbs: ["get"] ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "cachemachine.serviceAccountName" . 
}} - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ template "cachemachine.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: Role - name: {{ template "cachemachine.serviceAccountName" . }} - apiGroup: rbac.authorization.k8s.io diff --git a/applications/cachemachine/templates/tests/test-connection.yaml b/applications/cachemachine/templates/tests/test-connection.yaml deleted file mode 100644 index 35c987cdcc..0000000000 --- a/applications/cachemachine/templates/tests/test-connection.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: {{ include "cachemachine.fullname" . }}-test-connection - annotations: - "helm.sh/hook": "test-success" - labels: - {{- include "cachemachine.labels" . | nindent 4 }} -spec: - containers: - - name: "wget" - image: "busybox" - command: ['wget'] - args: - - '{{ include "cachemachine.fullname" . }}:8080' - restartPolicy: Never diff --git a/applications/cachemachine/templates/vault-secrets.yaml b/applications/cachemachine/templates/vault-secrets.yaml deleted file mode 100644 index 6f813c9b7d..0000000000 --- a/applications/cachemachine/templates/vault-secrets.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: pull-secret - labels: - {{- include "cachemachine.labels" . 
| nindent 4 }} -spec: - path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" - type: kubernetes.io/dockerconfigjson diff --git a/applications/cachemachine/values-ccin2p3.yaml b/applications/cachemachine/values-ccin2p3.yaml deleted file mode 100644 index a5b8e8aef5..0000000000 --- a/applications/cachemachine/values-ccin2p3.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/cachemachine/values-roe.yaml b/applications/cachemachine/values-roe.yaml deleted file mode 100644 index a5b8e8aef5..0000000000 --- a/applications/cachemachine/values-roe.yaml +++ /dev/null @@ -1,17 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } diff --git a/applications/cachemachine/values-summit.yaml b/applications/cachemachine/values-summit.yaml deleted file mode 100644 index 30816c3254..0000000000 --- a/applications/cachemachine/values-summit.yaml +++ /dev/null @@ -1,23 +0,0 @@ -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "ts-dockerhub.lsst.org", - "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0034", - "num_releases": 0, - "num_weeklies": 3, - "num_dailies": 2, - "cycle": 34, - "alias_tags": [ - "latest", - "latest_daily", - "latest_weekly" - ] - } - ] - } diff --git a/applications/cachemachine/values.yaml b/applications/cachemachine/values.yaml deleted file mode 100644 index f6c7d38961..0000000000 --- 
a/applications/cachemachine/values.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Default values for cachemachine. - -# -- Override the base name for resources -nameOverride: "" - -# -- Override the full name for resources (includes the release name) -fullnameOverride: "" - -image: - # -- cachemachine image to use - repository: lsstsqre/cachemachine - - # -- Pull policy for the cachemachine image - pullPolicy: IfNotPresent - - # -- Tag of cachemachine image to use - # @default -- The appVersion of the chart - tag: "" - -# -- Secret names to use for all Docker pulls -serviceAccount: - # -- Name of the service account to use - # @default -- Name based on the fullname template - name: "" - - # -- Annotations to add to the service account - annotations: {} - -ingress: - # -- Additional annotations to add for endpoints that are authenticated - annotations: {} - - # -- Additional annotations to add for endpoints that allow anonymous - # access, such as `/*/available` - anonymousAnnotations: {} - -# -- Resource limits and requests for the cachemachine frontend pod -resources: {} - -# -- Annotations for the cachemachine frontend pod -podAnnotations: {} - -# -- Node selector rules for the cachemachine frontend pod -nodeSelector: {} - -# -- Tolerations for the cachemachine frontend pod -tolerations: [] - -# -- Affinity rules for the cachemachine frontend pod -affinity: {} - -# -- Autostart configuration. Each key is the name of a class of images to -# pull, and the value is the JSON specification for which and how many images -# to pull. -autostart: {} - -# The following will be set by parameters injected by Argo CD and should not -# be set in the individual environment values files. 
-global: - # -- Base URL for the environment - # @default -- Set by Argo CD - baseUrl: "" - - # -- Host name for ingress - # @default -- Set by Argo CD - host: "" - - # -- Base path for Vault secrets - # @default -- Set by Argo CD - vaultSecretsPath: "" diff --git a/applications/moneypenny/Chart.yaml b/applications/moneypenny/Chart.yaml deleted file mode 100644 index 8197816bf4..0000000000 --- a/applications/moneypenny/Chart.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v2 -appVersion: "1.0.0" -name: moneypenny -description: User provisioning actions -sources: - - https://github.com/lsst-sqre/moneypenny - - https://github.com/lsst-sqre/farthing - - https://github.com/lsst-sqre/inituserhome -version: 1.0.0 -annotations: - phalanx.lsst.io/docs: | - - id: "SQR-052" - title: >- - Proposal for privilege separation in RSP Notebook Aspect containers - url: "https://sqr-052.lsst.io/" diff --git a/applications/moneypenny/README.md b/applications/moneypenny/README.md deleted file mode 100644 index 1cfedae207..0000000000 --- a/applications/moneypenny/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# moneypenny - -User provisioning actions - -## Source Code - -* -* -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the vo-cutouts frontend pod | -| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the moneypenny image | -| image.repository | string | `"lsstsqre/moneypenny"` | moneypenny image to use | -| image.tag | string | The appVersion of the chart | Tag of moneypenny image to use | -| ingress.annotations | object | `{}` | Additional 
annotations to add to the ingress | -| nameOverride | string | `""` | Override the base name for resources | -| nodeSelector | object | `{}` | Node selector rules for the vo-cutouts frontend pod | -| orders.commission | list | `[{"image":"lsstsqre/farthing","name":"farthing","securityContext":{"allowPrivilegeEscalation":false,"runAsNonRootUser":true,"runAsUser":1000}}]` | List of specifications for containers to run to commission a new user. Each member of the list should set a container `name`, `image`, and `securityContext` and may contain `volumeMounts`. | -| orders.retire | list | `[{"image":"lsstsqre/farthing","name":"farthing","securityContext":{"allowPrivilegeEscalation":false,"runAsNonRootUser":true,"runAsUser":1000}}]` | List of specifications for containers to run to retire a user. Each member of the list should set a container `name`, `image`, and `securityContext` and may contain `volumeMounts`. | -| orders.volumes | list | `[]` | Additional volumes to mount when commissioning or retiring users. | -| podAnnotations | object | `{}` | Annotations for the vo-cutouts frontend pod | -| quips | string | A small selection | Moneypenny quotes | -| replicaCount | int | `1` | Number of pods to start | -| resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod | -| serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | diff --git a/applications/moneypenny/templates/_helpers.tpl b/applications/moneypenny/templates/_helpers.tpl deleted file mode 100644 index ff1f0f98a7..0000000000 --- a/applications/moneypenny/templates/_helpers.tpl +++ /dev/null @@ -1,60 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. 
-*/}} -{{- define "moneypenny.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "moneypenny.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "moneypenny.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "moneypenny.labels" -}} -app.kubernetes.io/name: {{ include "moneypenny.name" . }} -helm.sh/chart: {{ include "moneypenny.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Selector labels -*/}} -{{- define "moneypenny.selectorLabels" -}} -app.kubernetes.io/name: {{ include "moneypenny.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "moneypenny.serviceAccountName" -}} -{{ default (include "moneypenny.fullname" .) 
.Values.serviceAccount.name }} -{{- end -}} diff --git a/applications/moneypenny/templates/cm-m-config.yaml b/applications/moneypenny/templates/cm-m-config.yaml deleted file mode 100644 index 5dedc2a46d..0000000000 --- a/applications/moneypenny/templates/cm-m-config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "moneypenny.fullname" . }}-m-config - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -data: - m.yaml: | - {{- toYaml .Values.orders | nindent 4 }} diff --git a/applications/moneypenny/templates/cm-quips.yaml b/applications/moneypenny/templates/cm-quips.yaml deleted file mode 100644 index a0e9f928ba..0000000000 --- a/applications/moneypenny/templates/cm-quips.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "moneypenny.fullname" . }}-quips - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -data: - quips.txt: | - {{- .Values.quips | nindent 4 }} diff --git a/applications/moneypenny/templates/configmap.yaml b/applications/moneypenny/templates/configmap.yaml deleted file mode 100644 index 646d1c8042..0000000000 --- a/applications/moneypenny/templates/configmap.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "moneypenny.fullname" .}} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -data: - SAFIR_NAME: "moneypenny" - SAFIR_PROFILE: "production" - SAFIR_LOGGER: "moneypenny" - SAFIR_LOG_LEVEL: "INFO" - DOCKER_SECRET_NAME: "pull-secret" diff --git a/applications/moneypenny/templates/deployment.yaml b/applications/moneypenny/templates/deployment.yaml deleted file mode 100644 index 2684cf8eea..0000000000 --- a/applications/moneypenny/templates/deployment.yaml +++ /dev/null @@ -1,96 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "moneypenny.fullname" . }} - labels: - {{- include "moneypenny.labels" . 
| nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "moneypenny.selectorLabels" . | nindent 6 }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/config-m: {{ include (print $.Template.BasePath "/cm-m-config.yaml") . | sha256sum }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "moneypenny.selectorLabels" . | nindent 8 }} - spec: - imagePullSecrets: - - name: "pull-secret" - serviceAccountName: {{ include "moneypenny.serviceAccountName" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 - containers: - - name: "moneypenny" - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "all" - readOnlyRootFilesystem: true - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - envFrom: - - configMapRef: - name: {{ template "moneypenny.fullname" . }} - ports: - - name: "http" - containerPort: 8080 - protocol: "TCP" - livenessProbe: - httpGet: - path: "/" - port: "http" - readinessProbe: - httpGet: - path: "/" - port: "http" - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - name: "m-config" - mountPath: "/opt/lsst/software/moneypenny/config/M" - readOnly: true - - name: "quips" - mountPath: "/opt/lsst/software/moneypenny/config/quips" - readOnly: true - - name: "podinfo" - mountPath: "/etc/podinfo" - readOnly: true - volumes: - - name: "m-config" - configMap: - name: {{ template "moneypenny.fullname" . }}-m-config - - name: "quips" - configMap: - name: {{ template "moneypenny.fullname" . 
}}-quips - - name: "podinfo" - downwardAPI: - items: - - path: "name" - fieldRef: - fieldPath: "metadata.name" - - path: "uid" - fieldRef: - fieldPath: "metadata.uid" - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} diff --git a/applications/moneypenny/templates/ingress.yaml b/applications/moneypenny/templates/ingress.yaml deleted file mode 100644 index 566f195cd8..0000000000 --- a/applications/moneypenny/templates/ingress.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: gafaelfawr.lsst.io/v1alpha1 -kind: GafaelfawrIngress -metadata: - name: {{ template "moneypenny.fullname" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -config: - baseUrl: {{ .Values.global.baseUrl | quote }} - scopes: - all: - - "admin:provision" -template: - metadata: - name: {{ template "moneypenny.fullname" . }} - annotations: - nginx.ingress.kubernetes.io/proxy-read-timeout: "310" - {{- with .Values.ingress.annotations }} - {{- toYaml . | nindent 6 }} - {{- end }} - spec: - rules: - - host: {{ required "global.host must be set" .Values.global.host | quote }} - http: - paths: - - path: "/moneypenny" - pathType: Prefix - backend: - service: - name: {{ include "moneypenny.fullname" . }} - port: - number: 8080 diff --git a/applications/moneypenny/templates/networkpolicy.yaml b/applications/moneypenny/templates/networkpolicy.yaml deleted file mode 100644 index 850f72ad2a..0000000000 --- a/applications/moneypenny/templates/networkpolicy.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ template "moneypenny.fullname" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "moneypenny.selectorLabels" . 
| nindent 6 }} - policyTypes: - - Ingress - ingress: - # Allow inbound access from pods (in any namespace) labeled - # gafaelfawr.lsst.io/ingress: true. - - from: - - namespaceSelector: {} - podSelector: - matchLabels: - gafaelfawr.lsst.io/ingress: "true" - ports: - - protocol: "TCP" - port: 8080 diff --git a/applications/moneypenny/templates/role.yaml b/applications/moneypenny/templates/role.yaml deleted file mode 100644 index 0e730dd5fa..0000000000 --- a/applications/moneypenny/templates/role.yaml +++ /dev/null @@ -1,21 +0,0 @@ -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "moneypenny.serviceAccountName" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -rules: - - apiGroups: [""] - resources: - - "pods" - verbs: - - "create" - - "delete" - - "get" - - "list" - - "watch" - - apiGroups: [""] - resources: ["configmaps"] - verbs: - - "create" - - "delete" diff --git a/applications/moneypenny/templates/rolebinding.yaml b/applications/moneypenny/templates/rolebinding.yaml deleted file mode 100644 index 169978eeaf..0000000000 --- a/applications/moneypenny/templates/rolebinding.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "moneypenny.serviceAccountName" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} -subjects: - - kind: ServiceAccount - name: {{ include "moneypenny.serviceAccountName" . }} -roleRef: - kind: Role - name: {{ include "moneypenny.serviceAccountName" . }} - apiGroup: rbac.authorization.k8s.io diff --git a/applications/moneypenny/templates/service.yaml b/applications/moneypenny/templates/service.yaml deleted file mode 100644 index 2b7d9b8da7..0000000000 --- a/applications/moneypenny/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "moneypenny.fullname" . }} - labels: - {{- include "moneypenny.labels" . 
| nindent 4 }} -spec: - type: "ClusterIP" - ports: - - name: "http" - protocol: "TCP" - port: 8080 - targetPort: "http" - selector: - {{- include "moneypenny.selectorLabels" . | nindent 4 }} diff --git a/applications/moneypenny/templates/serviceaccount.yaml b/applications/moneypenny/templates/serviceaccount.yaml deleted file mode 100644 index 963cbe100d..0000000000 --- a/applications/moneypenny/templates/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "moneypenny.serviceAccountName" . }} - labels: - {{- include "moneypenny.labels" . | nindent 4 }} diff --git a/applications/moneypenny/templates/vault-secrets.yaml b/applications/moneypenny/templates/vault-secrets.yaml deleted file mode 100644 index 3be6ea057e..0000000000 --- a/applications/moneypenny/templates/vault-secrets.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: pull-secret - labels: - {{- include "moneypenny.labels" . 
| nindent 4 }} -spec: - path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" - type: kubernetes.io/dockerconfigjson diff --git a/applications/moneypenny/values-ccin2p3.yaml b/applications/moneypenny/values-ccin2p3.yaml deleted file mode 100644 index e653e165c2..0000000000 --- a/applications/moneypenny/values-ccin2p3.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - hostPath: - path: /data/rsp/home - type: Directory diff --git a/applications/moneypenny/values-roe.yaml b/applications/moneypenny/values-roe.yaml deleted file mode 100644 index 0dbe21c7f7..0000000000 --- a/applications/moneypenny/values-roe.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: 192.41.122.33 - path: /jhome diff --git a/applications/moneypenny/values-summit.yaml b/applications/moneypenny/values-summit.yaml deleted file mode 100644 index 1436234dbd..0000000000 --- a/applications/moneypenny/values-summit.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: nfs1.cp.lsst.org - path: /jhome diff --git a/applications/moneypenny/values.yaml b/applications/moneypenny/values.yaml deleted file mode 100644 index 743e2bc0e9..0000000000 --- a/applications/moneypenny/values.yaml +++ /dev/null @@ -1,118 +0,0 @@ -# Default values for moneypenny. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- -# -- Override the base name for resources -nameOverride: "" - -# -- Override the full name for resources (includes the release name) -fullnameOverride: "" - -# -- Number of pods to start -replicaCount: 1 - -image: - # -- moneypenny image to use - repository: "lsstsqre/moneypenny" - - # -- Pull policy for the moneypenny image - pullPolicy: "IfNotPresent" - - # -- Tag of moneypenny image to use - # @default -- The appVersion of the chart - tag: "" - -serviceAccount: - # -- Name of the service account to use - # @default -- Name based on the fullname template - name: "" - -ingress: - # -- Additional annotations to add to the ingress - annotations: {} - -orders: - # -- List of specifications for containers to run to commission a new user. - # Each member of the list should set a container `name`, `image`, and - # `securityContext` and may contain `volumeMounts`. - commission: - - name: farthing - image: lsstsqre/farthing - securityContext: - runAsUser: 1000 - runAsNonRootUser: true - allowPrivilegeEscalation: false - - # -- List of specifications for containers to run to retire a user. Each - # member of the list should set a container `name`, `image`, and - # `securityContext` and may contain `volumeMounts`. - retire: - - name: farthing - image: lsstsqre/farthing - securityContext: - runAsUser: 1000 - runAsNonRootUser: true - allowPrivilegeEscalation: false - - # -- Additional volumes to mount when commissioning or retiring users. - volumes: [] - -# -- Resource limits and requests for the vo-cutouts frontend pod -resources: {} - -# -- Annotations for the vo-cutouts frontend pod -podAnnotations: {} - -# -- Node selector rules for the vo-cutouts frontend pod -nodeSelector: {} - -# -- Tolerations for the vo-cutouts frontend pod -tolerations: [] - -# -- Affinity rules for the vo-cutouts frontend pod -affinity: {} - -# -- Moneypenny quotes -# @default -- A small selection -quips: | - Flattery will get you nowhere... but don't stop trying. 
- % - You never take me to dinner looking like this, James. You never take me to dinner, period. - % - M: (on intercom) Miss Moneypenny, give 007 the password we've agreed - with Japanese SIS. - Moneypenny: Yes, Sir. We tried to think of something that you wouldn't - forget. - Bond: Yes? - Moneypenny: I... love... you. Repeat it please, to make sure you get it. - Bond: Don't worry, I get it. Sayonara. - % - My problem is, James, you never do anything with me. - % - I didn't know you were a music lover. Any time you want to come over and hear my Barry Manilow collection... - % - Someday you'll have to make good on your innuendos. - % - You always were a cunning linguist, James. - % - Bond: (about getting shot) In your defense, a moving target is harder to hit. - Moneypenny: Then you'd better keep moving. - % - Moneypenny: Cut-throat razor. How very traditional. - Bond: Well, I like to do some things the old-fashioned way. - Moneypenny: Sometimes the old ways are best. - -# The following will be set by parameters injected by Argo CD and should not -# be set in the individual environment values files. 
-global: - # -- Base URL for the environment - # @default -- Set by Argo CD - baseUrl: "" - - # -- Host name for ingress - # @default -- Set by Argo CD - host: "" - - # -- Base path for Vault secrets - # @default -- Set by Argo CD - vaultSecretsPath: "" diff --git a/applications/nublado/values-ccin2p3.yaml b/applications/nublado/values-ccin2p3.yaml new file mode 100644 index 0000000000..26eff07b94 --- /dev/null +++ b/applications/nublado/values-ccin2p3.yaml @@ -0,0 +1,55 @@ +controller: + config: + images: + source: + type: "docker" + registry: "registry.hub.docker.com" + repository: "lsstsqre/sciplat-lab" + lab: + env: + AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" + AUTO_REPO_BRANCH: "prod" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" + CULL_KERNEL_IDLE_TIMEOUT: "432000" + CULL_KERNEL_CONNECTED: "True" + CULL_KERNEL_INTERVAL: "300" + CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" + CULL_TERMINAL_INTERVAL: "300" + NO_ACTIVITY_TIMEOUT: "432000" + homedirPrefix: "/homedirs" + homedirSchema: "initialThenUsername" + homedirSuffix: "rsp_home" + initContainers: + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" + privileged: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" + pullSecret: "pull-secret" + volumes: + - name: "home" + source: + type: "hostPath" + path: "/pbs/home" + volumeMounts: + - containerMount: "/home" + volumeName: "home" + +proxy: + ingress: + annotations: + nginx.ingress.kubernetes.io/proxy-connect-timeout: "50s" + nginx.ingress.kubernetes.io/proxy-read-timeout: "50s" + nginx.ingress.kubernetes.io/client-max-body-size: "50m" + +jupyterhub: + hub: + db: + upgrade: true + cull: + timeout: 432000 + every: 300 + maxAge: 2160000 diff --git a/applications/nublado/values-roe.yaml b/applications/nublado/values-roe.yaml new file mode 100644 index 0000000000..038a90ba3b --- /dev/null +++ b/applications/nublado/values-roe.yaml @@ -0,0 +1,45 @@ +controller: + config: + 
images: + source: + type: "docker" + registry: "registry.hub.docker.com" + repository: "lsstsqre/sciplat-lab" + lab: + env: + AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" + AUTO_REPO_BRANCH: "prod" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" + initContainers: + - name: "inithome" + image: + repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.0" + privileged: true + volumeMounts: + - containerPath: "/home" + volumeName: "home" + pullSecret: "pull-secret" + volumes: + - name: "data" + source: + serverPath: "/data" + server: "192.41.122.33" + type: "nfs" + - name: "home" + source: + serverPath: "/jhome" + server: "192.41.122.33" + type: "nfs" + - name: "datasets" + source: + serverPath: "/datasets" + server: "192.41.122.33" + type: "nfs" + volumeMounts: + - containerPath: "/data" + volumeName: "data" + - containerPath: "/home" + volumeName: "home" + - containerPath: "/datasets" + volumeName: "datasets" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 01e71f86e4..b9d5163ba0 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -399,7 +399,7 @@ jupyterhub: # -- URL of PostgreSQL server # @default -- Use the in-cluster PostgreSQL installed by Phalanx - url: "postgresql://jovyan@postgres.postgres/jupyterhub" + url: "postgresql://nublado3@postgres.postgres/jupyterhub" # -- Security context for JupyterHub container containerSecurityContext: diff --git a/applications/nublado2/Chart.yaml b/applications/nublado2/Chart.yaml deleted file mode 100644 index d758b2bf68..0000000000 --- a/applications/nublado2/Chart.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v2 -name: nublado2 -version: 1.0.0 -description: JupyterHub for the Rubin Science Platform -home: https://github.com/lsst-sqre/nublado2 -sources: - - https://github.com/lsst-sqre/nublado2 -# This version is not used directly. Also update the tag in values.yaml. 
-appVersion: "2.6.1" - -# Match the jupyterhub Helm chart for kubeVersion -kubeVersion: ">=1.20.0-0" -dependencies: - - name: jupyterhub - # This is the Zero To Jupyterhub version, *not* the version of the - # Jupyterhub package itself. - version: "2.0.0" - repository: https://jupyterhub.github.io/helm-chart/ - -annotations: - phalanx.lsst.io/docs: | - - id: "DMTN-164" - title: "Nublado v2 Architecture" - url: "https://dmtn-164.lsst.io/" diff --git a/applications/nublado2/README.md b/applications/nublado2/README.md deleted file mode 100644 index 8f4eb07907..0000000000 --- a/applications/nublado2/README.md +++ /dev/null @@ -1,119 +0,0 @@ -# nublado2 - -JupyterHub for the Rubin Science Platform - -**Homepage:** - -## Source Code - -* - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| config.base_url | string | `""` | base_url must be set in each instantiation of this chart to the URL of the primary ingress. It's used to construct API requests to the authentication service (which should go through the ingress). | -| config.butler_secret_path | string | `""` | butler_secret_path must be set here, because it's passed through to the lab rather than being part of the Hub configuration. | -| config.cachemachine_image_policy | string | `"available"` | Cachemachine image policy: "available" or "desired". Use "desired" at instances with streaming image support. | -| config.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | -| config.lab_environment | object | See `values.yaml` | Environment variables to set in spawned lab containers. Each value will be expanded using Jinja 2 templating. 
| -| config.pinned_images | list | `[]` | images to pin to spawner menu | -| config.pull_secret_path | string | `""` | pull_secret_path must also be set here; it specifies resources in the lab namespace | -| config.shutdown_on_logout | bool | `true` | shut down user pods on logout. Superfluous, because our LogoutHandler enforces this in any event, but nice to make explicit. | -| config.sizes | list | `[{"cpu":1,"name":"Small","ram":"4096M"},{"cpu":2,"name":"Medium","ram":"8192M"},{"cpu":4,"name":"Large","ram":"16384M"}]` | definitions of Lab sizes available in a given instance | -| config.user_resources_template | string | See `values.yaml` | Templates for the user resources to create for each lab spawn. This is a string that can be templated and then loaded as YAML to generate a list of Kubernetes objects to create. | -| config.volume_mounts | list | `[]` | Where to mount volumes for a particular instance | -| config.volumes | list | `[]` | Volumes to use for a particular instance | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| jupyterhub.cull.enabled | bool | `true` | | -| jupyterhub.cull.every | int | `600` | | -| jupyterhub.cull.maxAge | int | `5184000` | | -| jupyterhub.cull.removeNamedServers | bool | `true` | | -| jupyterhub.cull.timeout | int | `2592000` | | -| jupyterhub.cull.users | bool | `true` | | -| jupyterhub.hub.authenticatePrometheus | bool | `false` | | -| jupyterhub.hub.baseUrl | string | `"/nb"` | | -| jupyterhub.hub.config.Authenticator.enable_auth_state | bool | `true` | | -| jupyterhub.hub.config.JupyterHub.authenticator_class | string | `"nublado2.auth.GafaelfawrAuthenticator"` | | -| jupyterhub.hub.config.ServerApp.shutdown_no_activity_timeout | int | `604800` | | -| jupyterhub.hub.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | -| jupyterhub.hub.containerSecurityContext.runAsGroup | int | `768` | | -| jupyterhub.hub.containerSecurityContext.runAsUser | int | `768` | | -| 
jupyterhub.hub.db.password | string | `"true"` | | -| jupyterhub.hub.db.type | string | `"postgres"` | | -| jupyterhub.hub.db.url | string | `"postgresql://jovyan@postgres.postgres/jupyterhub"` | | -| jupyterhub.hub.existingSecret | string | `"nublado2-secret"` | | -| jupyterhub.hub.extraConfig."nublado.py" | string | `"import nublado2.hub_config\nnublado2.hub_config.HubConfig().configure(c)\n"` | | -| jupyterhub.hub.extraVolumeMounts[0].mountPath | string | `"/etc/jupyterhub/nublado_config.yaml"` | | -| jupyterhub.hub.extraVolumeMounts[0].name | string | `"nublado-config"` | | -| jupyterhub.hub.extraVolumeMounts[0].subPath | string | `"nublado_config.yaml"` | | -| jupyterhub.hub.extraVolumeMounts[1].mountPath | string | `"/etc/keys/gafaelfawr-token"` | | -| jupyterhub.hub.extraVolumeMounts[1].name | string | `"nublado-gafaelfawr"` | | -| jupyterhub.hub.extraVolumeMounts[1].subPath | string | `"token"` | | -| jupyterhub.hub.extraVolumes[0].configMap.name | string | `"nublado-config"` | | -| jupyterhub.hub.extraVolumes[0].name | string | `"nublado-config"` | | -| jupyterhub.hub.extraVolumes[1].name | string | `"nublado-gafaelfawr"` | | -| jupyterhub.hub.extraVolumes[1].secret.secretName | string | `"gafaelfawr-token"` | | -| jupyterhub.hub.image.name | string | `"lsstsqre/nublado2"` | | -| jupyterhub.hub.image.tag | string | `"2.6.1"` | | -| jupyterhub.hub.loadRoles.self.scopes[0] | string | `"admin:servers!user"` | | -| jupyterhub.hub.loadRoles.self.scopes[1] | string | `"read:metrics"` | | -| jupyterhub.hub.loadRoles.server.scopes[0] | string | `"inherit"` | | -| jupyterhub.hub.networkPolicy.enabled | bool | `false` | | -| jupyterhub.hub.resources.limits.cpu | string | `"900m"` | | -| jupyterhub.hub.resources.limits.memory | string | `"1Gi"` | | -| jupyterhub.imagePullSecrets[0].name | string | `"pull-secret"` | | -| jupyterhub.ingress.annotations | object | See `values.yaml` | Extra annotations to add to the ingress | -| jupyterhub.ingress.enabled | bool | `true` 
| | -| jupyterhub.ingress.ingressClassName | string | `"nginx"` | | -| jupyterhub.ingress.pathSuffix | string | `"*"` | | -| jupyterhub.prePuller.continuous.enabled | bool | `false` | | -| jupyterhub.prePuller.hook.enabled | bool | `false` | | -| jupyterhub.proxy.chp.networkPolicy.interNamespaceAccessLabels | string | `"accept"` | | -| jupyterhub.proxy.service.type | string | `"ClusterIP"` | | -| jupyterhub.scheduling.userPlaceholder.enabled | bool | `false` | | -| jupyterhub.scheduling.userScheduler.enabled | bool | `false` | | -| jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | | -| jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | | -| jupyterhub.singleuser.defaultUrl | string | `"/lab"` | | -| jupyterhub.singleuser.extraAnnotations."argocd.argoproj.io/compare-options" | string | `"IgnoreExtraneous"` | | -| jupyterhub.singleuser.extraAnnotations."argocd.argoproj.io/sync-options" | string | `"Prune=false"` | | -| jupyterhub.singleuser.extraLabels."argocd.argoproj.io/instance" | string | `"nublado-users"` | | -| jupyterhub.singleuser.extraLabels."hub.jupyter.org/network-access-hub" | string | `"true"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[0].mountPath | string | `"/etc/dask"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[0].name | string | `"dask"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[1].mountPath | string | `"/opt/lsst/software/jupyterlab/panda"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[1].name | string | `"idds-config"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[2].mountPath | string | `"/tmp"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[2].name | string | `"tmp"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[3].mountPath | string | `"/opt/lsst/software/jupyterlab/butler-secret"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[3].name | string | `"butler-secret"` | | -| 
jupyterhub.singleuser.storage.extraVolumeMounts[4].mountPath | string | `"/opt/lsst/software/jupyterlab/environment"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[4].name | string | `"lab-environment"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].mountPath | string | `"/etc/passwd"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].name | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].readOnly | bool | `true` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[5].subPath | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[6].mountPath | string | `"/etc/group"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[6].name | string | `"group"` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[6].readOnly | bool | `true` | | -| jupyterhub.singleuser.storage.extraVolumeMounts[6].subPath | string | `"group"` | | -| jupyterhub.singleuser.storage.extraVolumes[0].configMap.name | string | `"dask"` | | -| jupyterhub.singleuser.storage.extraVolumes[0].name | string | `"dask"` | | -| jupyterhub.singleuser.storage.extraVolumes[1].configMap.name | string | `"idds-config"` | | -| jupyterhub.singleuser.storage.extraVolumes[1].name | string | `"idds-config"` | | -| jupyterhub.singleuser.storage.extraVolumes[2].emptyDir | object | `{}` | | -| jupyterhub.singleuser.storage.extraVolumes[2].name | string | `"tmp"` | | -| jupyterhub.singleuser.storage.extraVolumes[3].name | string | `"butler-secret"` | | -| jupyterhub.singleuser.storage.extraVolumes[3].secret.secretName | string | `"butler-secret"` | | -| jupyterhub.singleuser.storage.extraVolumes[4].configMap.defaultMode | int | `420` | | -| jupyterhub.singleuser.storage.extraVolumes[4].configMap.name | string | `"lab-environment"` | | -| jupyterhub.singleuser.storage.extraVolumes[4].name | string | `"lab-environment"` | | -| jupyterhub.singleuser.storage.extraVolumes[5].configMap.defaultMode | int | `420` | | -| 
jupyterhub.singleuser.storage.extraVolumes[5].configMap.name | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumes[5].name | string | `"passwd"` | | -| jupyterhub.singleuser.storage.extraVolumes[6].configMap.defaultMode | int | `420` | | -| jupyterhub.singleuser.storage.extraVolumes[6].configMap.name | string | `"group"` | | -| jupyterhub.singleuser.storage.extraVolumes[6].name | string | `"group"` | | -| jupyterhub.singleuser.storage.type | string | `"none"` | | -| network_policy.enabled | bool | `true` | | diff --git a/applications/nublado2/secrets.yaml b/applications/nublado2/secrets.yaml deleted file mode 100644 index 15d1b5eeba..0000000000 --- a/applications/nublado2/secrets.yaml +++ /dev/null @@ -1,17 +0,0 @@ -cryptkeeper_key: - description: "Encryption key for internal key management." - generate: - type: password -crypto_key: - description: "Encryption key for JupyterHub stored state." - generate: - type: password -hub_db_password: - description: "Password to authenticate to the JupyterHub session database." - generate: - type: password - if: config.internalDatabase -proxy_token: - description: "Token authenticating JupyterHub to the proxy server." - generate: - type: password diff --git a/applications/nublado2/templates/_helpers.tpl b/applications/nublado2/templates/_helpers.tpl deleted file mode 100644 index 7b318e97f0..0000000000 --- a/applications/nublado2/templates/_helpers.tpl +++ /dev/null @@ -1,56 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "nublado2.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "nublado2.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "nublado2.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Common labels -*/}} -{{- define "nublado2.labels" -}} -app.kubernetes.io/name: {{ include "nublado2.name" . }} -helm.sh/chart: {{ include "nublado2.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "nublado2.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "nublado2.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/applications/nublado2/templates/clusterrole.yaml b/applications/nublado2/templates/clusterrole.yaml deleted file mode 100644 index cc8a8b5e99..0000000000 --- a/applications/nublado2/templates/clusterrole.yaml +++ /dev/null @@ -1,28 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "nublado2.fullname" . 
}}-hub -rules: -- apiGroups: [""] - resources: ["pods","events", "namespaces", "serviceaccounts", "services", - "persistentvolumeclaims", "persistentvolumes", "resourcequotas", - "configmaps", "pods/log", "pods/exec"] - verbs: ["get", "list", "create", "watch", "delete", "update", "patch"] -- apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "create", "delete"] -- apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] -- apiGroups: ["rbac.authorization.k8s.io"] - resources: ["roles", "rolebindings"] - verbs: ["get", "list", "create", "delete"] -- apiGroups: ["argoproj.io"] - resources: ["workflows", "workflows/finalizers"] - verbs: ["get", "list", "create", "watch", "delete", "update", "patch"] -- apiGroups: ["argoproj.io"] - resources: ["workflowtemplates", "workflowtemplates/finalizers"] - verbs: ["get", "list", "watch"] -- apiGroups: ["ricoberger.de"] - resources: ["vaultsecrets"] - verbs: ["get", "create", "delete", "list"] diff --git a/applications/nublado2/templates/clusterrolebinding.yaml b/applications/nublado2/templates/clusterrolebinding.yaml deleted file mode 100644 index cdb0c5fd53..0000000000 --- a/applications/nublado2/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ template "nublado2.fullname" . }}-hub -subjects: - # Note: this service account is created by the jupyterhub subchart - - kind: ServiceAccount - name: hub - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ template "nublado2.fullname" . 
}}-hub - apiGroup: rbac.authorization.k8s.io diff --git a/applications/nublado2/templates/gafaelfawr-token.yaml b/applications/nublado2/templates/gafaelfawr-token.yaml deleted file mode 100644 index 06a9822b82..0000000000 --- a/applications/nublado2/templates/gafaelfawr-token.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: gafaelfawr.lsst.io/v1alpha1 -kind: GafaelfawrServiceToken -metadata: - name: "gafaelfawr-token" - labels: - {{- include "nublado2.labels" . | nindent 4 }} -spec: - service: "bot-nublado2" - scopes: - - "admin:provision" diff --git a/applications/nublado2/templates/netpol.yaml b/applications/nublado2/templates/netpol.yaml deleted file mode 100644 index 91da074252..0000000000 --- a/applications/nublado2/templates/netpol.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if .Values.network_policy.enabled }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: hub - labels: - {{- include "nublado2.labels" . | nindent 4 }} -spec: - podSelector: - matchLabels: - app: jupyterhub - component: hub - release: {{ .Release.Name }} - policyTypes: - - Ingress - - ingress: - # allowed pods (hub.jupyter.org/network-access-hub) --> hub - - ports: - - port: http - - port: 8081 - from: - - podSelector: - matchLabels: - hub.jupyter.org/network-access-hub: "true" - namespaceSelector: {} -{{- end }} diff --git a/applications/nublado2/templates/nublado-config.yaml b/applications/nublado2/templates/nublado-config.yaml deleted file mode 100644 index fbc234d394..0000000000 --- a/applications/nublado2/templates/nublado-config.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: nublado-config - labels: - {{- include "nublado2.labels" . 
| nindent 4 }} -data: - nublado_config.yaml: | - {{- toYaml .Values.config | nindent 4 }} diff --git a/applications/nublado2/templates/vault-secrets.yaml b/applications/nublado2/templates/vault-secrets.yaml deleted file mode 100644 index 962d6c1896..0000000000 --- a/applications/nublado2/templates/vault-secrets.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: "nublado2-secret" -spec: - path: "{{- .Values.global.vaultSecretsPath }}/nublado2" - type: Opaque - - templates: - {{- /* dump in values.yaml for jupyterhub, without changing it */}} - {{- /* this is copied from the zero-to-jupyterhub chart where it does this */}} - {{- $values := merge dict .Values.jupyterhub }} - {{- /* passthrough subset of Chart / Release */}} - {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }} - {{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }} - values.yaml: {{ $values | toYaml | quote }} - - {{- /* dump in the rest of the keys in this path and their values */}} - {{- /* this uses the templating provided by vault-secrets-operator */}} - hub.db.password: "{% .Secrets.hub_db_password %}" - hub.config.JupyterHub.cookie_secret: "{% .Secrets.crypto_key %}" - hub.config.CryptKeeper.keys: "{% .Secrets.cryptkeeper_key %}" - hub.config.ConfigurableHTTPProxy.auth_token: "{% .Secrets.proxy_token %}" ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: pull-secret - labels: - {{- include "nublado2.labels" . 
| nindent 4 }} -spec: - path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" - type: kubernetes.io/dockerconfigjson diff --git a/applications/nublado2/values-ccin2p3.yaml b/applications/nublado2/values-ccin2p3.yaml deleted file mode 100644 index 33e2c594ba..0000000000 --- a/applications/nublado2/values-ccin2p3.yaml +++ /dev/null @@ -1,209 +0,0 @@ -jupyterhub: - debug: - enabled: true - hub: - db: - upgrade: true - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["data-dev.lsst.eu"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://data-dev.lsst.eu/login" - nginx.ingress.kubernetes.io/auth-url: "https://data-dev.lsst.eu/auth?scope=exec:notebook&notebook=true" - nginx.ingress.kubernetes.io/proxy-connect-timeout: "50s" - nginx.ingress.kubernetes.io/proxy-read-timeout: "50s" - nginx.ingress.kubernetes.io/client-max-body-size: "50m" - nginx.ingress.kubernetes.io/proxy-body-size: "50m" - -config: - base_url: "https://data-dev.lsst.eu" - butler_secret_path: "secret/k8s_operator/rsp-cc/butler-secret" - pull_secret_path: "secret/k8s_operator/rsp-cc/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_KERNEL_CONNECTED: "True" - CULL_KERNEL_INTERVAL: "300" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - CULL_TERMINAL_INTERVAL: "300" - pinned_images: - - image_url: registry.hub.docker.com/lsstsqre/sciplat-lab:recommended - name: Recommended - volumes: - - name: home - hostPath: - path: /pbs/home - - volume_mounts: - - name: home - mountPath: /home - - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - 
group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - lsst_lcl:x:1000:{{ user }} - tss:x:59: - cgred:x:997: - screen:x:84: - jovyan:x:768:{{ user }}{% for g in groups %} - {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user[0] }}/{{ user }}/rsp_home:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. 
- - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: 
Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson diff --git a/applications/nublado2/values-roe.yaml b/applications/nublado2/values-roe.yaml deleted file mode 100644 index 7ff9ae4f8f..0000000000 --- a/applications/nublado2/values-roe.yaml +++ /dev/null @@ -1,44 +0,0 @@ -jupyterhub: - ingress: - hosts: ["rsp.lsst.ac.uk"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://rsp.lsst.ac.uk/login" - nginx.ingress.kubernetes.io/auth-url: "https://rsp.lsst.ac.uk/auth?scope=exec:notebook&notebook=true" - -config: - base_url: "https://rsp.lsst.ac.uk" - butler_secret_path: "secret/k8s_operator/roe/butler-secret" - pull_secret_path: "secret/k8s_operator/roe/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - pinned_images: - - image_url: registry.hub.docker.com/lsstsqre/sciplat-lab:recommended - name: Recommended - volumes: - - name: data - nfs: - path: /data - server: 192.41.122.33 - - name: home - nfs: - path: /jhome - server: 192.41.122.33 - - name: datasets - nfs: - path: /datasets - server: 192.41.122.33 - volume_mounts: - - name: data - mountPath: /data - - name: home - mountPath: /home - - name: datasets - mountPath: /datasets - -vault_secret_path: "secret/k8s_operator/roe/nublado2" - -pull-secret: - enabled: true - path: "secret/k8s_operator/roe/pull-secret" diff --git a/applications/nublado2/values-summit.yaml b/applications/nublado2/values-summit.yaml deleted file mode 100644 index acbac59fe8..0000000000 --- a/applications/nublado2/values-summit.yaml +++ /dev/null @@ -1,106 +0,0 @@ -jupyterhub: - ingress: - hosts: ["summit-lsp.lsst.codes"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://summit-lsp.lsst.codes/login" - hub: - baseUrl: /n2 - 
db: - upgrade: true - url: "postgresql://jovyan@postgresdb01.cp.lsst.org/jupyterhub" - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/dds" - -config: - base_url: "https://summit-lsp.lsst.codes" - butler_secret_path: "secret/k8s_operator/summit-lsp.lsst.codes/butler-secret" - pull_secret_path: "secret/k8s_operator/summit-lsp.lsst.codes/pull-secret" - lab_environment: - AUTO_REPO_URLS: "https://github.com/lsst-sqre/system-test" - AUTO_REPO_BRANCH: "prod" - AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" - DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - LSST_DDS_INTERFACE: net1 - LSST_DDS_PARTITION_PREFIX: summit - LSST_SITE: summit - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - PGUSER: "oods" - volumes: - - name: home - nfs: - path: /jhome - server: nfs1.cp.lsst.org - - name: project - nfs: - path: /project - server: nfs1.cp.lsst.org - - name: scratch - nfs: - path: /scratch - server: nfs1.cp.lsst.org - - name: auxtel - nfs: - path: /auxtel/lsstdata - server: nfs-auxtel.cp.lsst.org - readOnly: true - - name: comcam - nfs: - path: /lsstdata - server: comcam-archiver.cp.lsst.org - readOnly: true - - name: other - nfs: - path: /lsstdata - server: nfs1.cp.lsst.org - readOnly: true - - name: latiss - nfs: - path: /auxtel/repo/LATISS - server: nfs-auxtel.cp.lsst.org - - name: base-auxtel - nfs: - path: /auxtel/lsstdata/base/auxtel - server: nfs-auxtel.cp.lsst.org - readOnly: true - - name: lsstcomcam - nfs: - path: /repo/LSSTComCam - server: comcam-archiver.cp.lsst.org - - name: base-comcam - nfs: - path: /lsstdata/base/comcam - server: comcam-archiver.cp.lsst.org - readOnly: true - - name: obs-env - nfs: - path: /obs-env - server: nfs-obsenv.cp.lsst.org - volume_mounts: - - name: home - mountPath: /home - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - - name: auxtel - mountPath: /readonly/lsstdata/auxtel - readOnly: true - - name: comcam - 
mountPath: /readonly/lsstdata/comcam - readOnly: true - - name: other - mountPath: /readonly/lsstdata/other - readOnly: true - - name: latiss - mountPath: /repo/LATISS - - name: base-auxtel - mountPath: /data/lsstdata/base/auxtel - readOnly: true - - name: lsstcomcam - mountPath: /repo/LSSTComCam - - name: base-comcam - mountPath: /data/lsstdata/base/comcam - readOnly: true - - name: obs-env - mountPath: /net/obs-env diff --git a/applications/nublado2/values.yaml b/applications/nublado2/values.yaml deleted file mode 100644 index 8585f00d47..0000000000 --- a/applications/nublado2/values.yaml +++ /dev/null @@ -1,420 +0,0 @@ -# Default values for nublado2. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -jupyterhub: - hub: - authenticatePrometheus: false - image: - name: lsstsqre/nublado2 - tag: "2.6.1" - resources: - limits: - cpu: 900m - memory: 1Gi # Should support about 200 users - config: - Authenticator: - enable_auth_state: true - JupyterHub: - authenticator_class: nublado2.auth.GafaelfawrAuthenticator - ServerApp: - shutdown_no_activity_timeout: 604800 # one week - db: - # Password comes from the nublado2-secret. - type: "postgres" - password: "true" - url: "postgresql://jovyan@postgres.postgres/jupyterhub" - containerSecurityContext: - runAsUser: 768 - runAsGroup: 768 - allowPrivilegeEscalation: false - baseUrl: "/nb" - # Note: this has to match up with the kubernetes secret created by the - # vault secret, and since you can't put templating in a values file, I'm - # just setting the name here. 
- existingSecret: "nublado2-secret" - extraConfig: - nublado.py: | - import nublado2.hub_config - nublado2.hub_config.HubConfig().configure(c) - extraVolumes: - - name: nublado-config - configMap: - name: nublado-config - - name: nublado-gafaelfawr - secret: - secretName: gafaelfawr-token - extraVolumeMounts: - - name: nublado-config - mountPath: /etc/jupyterhub/nublado_config.yaml - subPath: nublado_config.yaml - - name: nublado-gafaelfawr - mountPath: /etc/keys/gafaelfawr-token - subPath: token - # We still have to use our own, enabled at the top level, which is - # similar but not identical. This one still doesn't work, even if - # you explicitly enable port 8081 so the labs can talk to the Hub. - networkPolicy: - enabled: false - loadRoles: - self: - scopes: ['admin:servers!user', 'read:metrics'] - server: - scopes: ['inherit'] # Let server use API like user - - prePuller: - continuous: - enabled: false - hook: - enabled: false - - singleuser: - cloudMetadata: - blockWithIptables: false - cmd: "/opt/lsst/software/jupyterlab/runlab.sh" - defaultUrl: "/lab" - extraAnnotations: - argocd.argoproj.io/compare-options: 'IgnoreExtraneous' - argocd.argoproj.io/sync-options: 'Prune=false' - extraLabels: - hub.jupyter.org/network-access-hub: 'true' - argocd.argoproj.io/instance: 'nublado-users' - storage: - extraVolumes: - - name: dask - configMap: - name: dask - - name: idds-config - configMap: - name: idds-config - - name: tmp - emptyDir: {} - - name: butler-secret - secret: - secretName: butler-secret - - name: lab-environment - configMap: - defaultMode: 420 - name: lab-environment - - name: passwd - configMap: - defaultMode: 420 - name: passwd - - name: group - configMap: - defaultMode: 420 - name: group - extraVolumeMounts: - - name: dask - mountPath: /etc/dask - - name: idds-config - mountPath: /opt/lsst/software/jupyterlab/panda - - name: tmp - mountPath: /tmp - - name: butler-secret - mountPath: /opt/lsst/software/jupyterlab/butler-secret - - name: lab-environment 
- mountPath: /opt/lsst/software/jupyterlab/environment - - name: passwd - mountPath: /etc/passwd - readOnly: true - subPath: passwd - - name: group - mountPath: /etc/group - readOnly: true - subPath: group - type: none - - proxy: - service: - type: ClusterIP - chp: - networkPolicy: - interNamespaceAccessLabels: accept - # This currently causes Minikube deployment in GH-actions to fail. - # We want it sometime but it's not critical; it will help with - # scale-down - # pdb: - # enabled: true - # minAvailable: 1 - - # Any instantiation of this chart must also set ingress.hosts and add - # the nginx.ingress.kubernetes.io/auth-signin annotation pointing to the - # appropriate fully-qualified URLs for the Gafaelfawr /login route. - ingress: - enabled: true - - # -- Extra annotations to add to the ingress - # @default -- See `values.yaml` - annotations: - nginx.ingress.kubernetes.io/auth-method: "GET" - nginx.ingress.kubernetes.io/auth-response-headers: "Authorization,Cookie,X-Auth-Request-Email,X-Auth-Request-User,X-Auth-Request-Token" - nginx.ingress.kubernetes.io/auth-url: "http://gafaelfawr.gafaelfawr.svc.cluster.local:8080/auth?scope=exec:notebook&notebook=true&minimum_lifetime=2160000" - nginx.ingress.kubernetes.io/configuration-snippet: | - auth_request_set $auth_www_authenticate $upstream_http_www_authenticate; - auth_request_set $auth_status $upstream_http_x_error_status; - auth_request_set $auth_error_body $upstream_http_x_error_body; - error_page 403 = @autherror; - nginx.ingress.kubernetes.io/proxy-send-timeout: "300" - nginx.ingress.kubernetes.io/proxy-read-timeout: "300" - ingressClassName: "nginx" - pathSuffix: "*" - - cull: - enabled: true - timeout: 2592000 # 30 days -- shorten later - every: 600 # Check every ten minutes - users: true # log out user when we cull - removeNamedServers: true # Post-stop hook may already do this - maxAge: 5184000 # 60 days -- shorten later - - imagePullSecrets: - - name: pull-secret - - scheduling: - userScheduler: - enabled: 
false - userPlaceholder: - enabled: false - -config: - # -- Whether to use the cluster-internal PostgreSQL server instead of an - # external server. This is not used directly by the Nublado chart, but - # controls how the database password is managed. - internalDatabase: true - # -- base_url must be set in each instantiation of this chart to the URL of - # the primary ingress. It's used to construct API requests to the - # authentication service (which should go through the ingress). - base_url: "" - # -- butler_secret_path must be set here, because it's passed through to - # the lab rather than being part of the Hub configuration. - butler_secret_path: "" - # -- pull_secret_path must also be set here; it specifies resources in - # the lab namespace - pull_secret_path: "" - # -- images to pin to spawner menu - pinned_images: [] - # -- Cachemachine image policy: "available" or "desired". Use - # "desired" at instances with streaming image support. - cachemachine_image_policy: "available" - # -- shut down user pods on logout. Superfluous, because our - # LogoutHandler enforces this in any event, but nice to make explicit. - shutdown_on_logout: true - # -- definitions of Lab sizes available in a given instance - sizes: - - name: Small - cpu: 1 - ram: 4096M - - name: Medium - cpu: 2 - ram: 8192M - - name: Large - cpu: 4 - ram: 16384M - # -- Volumes to use for a particular instance - volumes: [] - # -- Where to mount volumes for a particular instance - volume_mounts: [] - - # -- Environment variables to set in spawned lab containers. Each value will - # be expanded using Jinja 2 templating. 
- # @default -- See `values.yaml` - lab_environment: - EXTERNAL_INSTANCE_URL: "{{ base_url }}" - FIREFLY_ROUTE: /portal/app - HUB_ROUTE: "{{ nublado_base_url }}" - JS9_ROUTE: /js9 - API_ROUTE: /api - TAP_ROUTE: /api/tap - SODA_ROUTE: /api/image/soda - WORKFLOW_ROUTE: /wf - AUTO_REPO_URLS: https://github.com/lsst-sqre/notebook-demo - NO_SUDO: "TRUE" - EXTERNAL_GID: "{{ gid if gid else uid }}" - EXTERNAL_GROUPS: "{{ external_groups }}" - EXTERNAL_UID: "{{ uid }}" - ACCESS_TOKEN: "{{ token }}" - IMAGE_DIGEST: "{{ options.image_info.digest }}" - IMAGE_DESCRIPTION: "{{ options.image_info.display_name }}" - RESET_USER_ENV: "{{ options.reset_user_env }}" - # We need to set CLEAR_DOTLOCAL until all images that didn't know - # about RESET_USER_ENV have aged out (late 2022) - CLEAR_DOTLOCAL: "{{ options.reset_user_env }}" - DEBUG: "{{ options.debug }}" - - # -- Templates for the user resources to create for each lab spawn. This is - # a string that can be templated and then loaded as YAML to generate a list - # of Kubernetes objects to create. 
- # @default -- See `values.yaml` - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - lsst_lcl:x:1000:{{ user }} - tss:x:59: - cgred:x:997: - screen:x:84: - jovyan:x:768:{{ user }}{% for g in groups %} - {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash - tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin - {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - 
data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds_cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 
- kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - -# Built-in network policy doesn't quite work (Labs can't talk to Hub, -# even with port 8081 explicitly enabled), so let's use our own for now. -network_policy: - enabled: true - -# The following will be set by parameters injected by Argo CD and should not -# be set in the individual environment values files. -global: - # -- Base path for Vault secrets - # @default -- Set by Argo CD - vaultSecretsPath: "" diff --git a/applications/postgres/secrets.yaml b/applications/postgres/secrets.yaml index 5e03d36d1a..ed33a80e1d 100644 --- a/applications/postgres/secrets.yaml +++ b/applications/postgres/secrets.yaml @@ -10,12 +10,6 @@ gafaelfawr_password: copy: application: gafaelfawr key: database-password -jupyterhub_password: - description: "Password for the Nublado v2 JupyterHub session database." - if: jupyterhub_db - copy: - application: nublado2 - key: hub_db_password lovelog_password: description: "Password for the lovelog database." 
if: lovelog_db diff --git a/applications/telegraf/README.md b/applications/telegraf/README.md index d54785fc49..dc680e46ce 100644 --- a/applications/telegraf/README.md +++ b/applications/telegraf/README.md @@ -16,7 +16,7 @@ Application telemetry collection service | global.enabledServices | string | Set by Argo CD | services enabled in this RSP instance | | global.host | string | Set by Argo CD | Host name for instance identification | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| prometheus_config | object | `{"argocd":{"application_controller":"http://argocd-application-controller-metrics.argocd.svc:8082/metrics","notifications_controller":"http://argocd-notifications-controller-metrics.argocd.svc:9001/metrics","redis":"http://argocd-redis-metrics.argocd.svc:9121/metrics","repo_server":"http://argocd-repo-server-metrics.argocd.svc:8084/metrics","server":"http://argocd-server-metrics.argocd.svc:8083/metrics"},"ingress-nginx":{"controller":"http://ingress-nginx-controller-metrics.ingress-nginx:10254/metrics"},"nublado2":{"hub":"http://hub.nublado2:8081/metrics"}}` | Use prometheus_config to specify all the services in the RSP that expose prometheus endpoints. A better option, eventually, will be to use telegraf-operator and capture these as pod annotations. 
| +| prometheus_config | object | `{"argocd":{"application_controller":"http://argocd-application-controller-metrics.argocd.svc:8082/metrics","notifications_controller":"http://argocd-notifications-controller-metrics.argocd.svc:9001/metrics","redis":"http://argocd-redis-metrics.argocd.svc:9121/metrics","repo_server":"http://argocd-repo-server-metrics.argocd.svc:8084/metrics","server":"http://argocd-server-metrics.argocd.svc:8083/metrics"},"ingress-nginx":{"controller":"http://ingress-nginx-controller-metrics.ingress-nginx:10254/metrics"},"nublado":{"hub":"http://hub.nublado:8081/metrics"}}` | Use prometheus_config to specify all the services in the RSP that expose prometheus endpoints. A better option, eventually, will be to use telegraf-operator and capture these as pod annotations. | | telegraf.args[0] | string | `"--config"` | | | telegraf.args[1] | string | `"/etc/telegraf-generated/telegraf-generated.conf"` | | | telegraf.config.inputs | list | `[]` | | diff --git a/applications/telegraf/values.yaml b/applications/telegraf/values.yaml index 4d0a61b491..b394abc387 100644 --- a/applications/telegraf/values.yaml +++ b/applications/telegraf/values.yaml @@ -43,8 +43,8 @@ prometheus_config: redis: "http://argocd-redis-metrics.argocd.svc:9121/metrics" repo_server: "http://argocd-repo-server-metrics.argocd.svc:8084/metrics" server: "http://argocd-server-metrics.argocd.svc:8083/metrics" - nublado2: - hub: "http://hub.nublado2:8081/metrics" + nublado: + hub: "http://hub.nublado:8081/metrics" ingress-nginx: controller: "http://ingress-nginx-controller-metrics.ingress-nginx:10254/metrics" diff --git a/docs/admin/infrastructure/kubernetes-node-status-max-images.rst b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst index 0136a376dc..c6e77fbbd8 100644 --- a/docs/admin/infrastructure/kubernetes-node-status-max-images.rst +++ b/docs/admin/infrastructure/kubernetes-node-status-max-images.rst @@ -34,3 +34,17 @@ Second, these images, because the prepuller 
incorrectly believes they are not re Fortunately there is a simple fix: increase the kubelet ``nodeStatusMaxImages`` setting. The default value of 50 should either be increased to something large enough that it's implausible that that many images would fit into ephemeral storage, or set to ``-1`` to remove the cap entirely. While disabling the cap could, in theory, make node status extremely large (which is the reason the cap exists in the first place), in practice it has never proven problematic in a Phalanx deployment. Those deployments have had at most hundreds, rather than thousands or millions, of container images on any given node, so the size of the status document has always remained modest. Should you go the route of choosing a larger positive value for ``nodeStatusMaxImages`` a reasonable rule of thumb is to pick a number one-third of the size of each node's ephemeral storage in gigabytes. Thus if you had a terabyte of ephemeral storage, a ``nodeStatusMaxImages`` of ``350`` would be a good starting guess. This value is also dependent on how broadly mixed your workload is, and how large the images for the other aspects of your workload are, which is why disabling the cap entirely is the initial recommendation. + +Pruning cached images +===================== + +If you cannot change the behavior of the Kubernetes node API, you may need to trim the node image cache so that the total number of images is under 50. +If you have direct administrative access to the Kubernetes node, you can do that with the following steps: + +#. Download `purge `__. + +#. Run it on each node, using an account allowed to use the Docker socket (thus, probably in group ``docker``). + You may want to run it with ``-x`` first to see what it's going to do. + If you want output during the actual run, run it with ``-v``. 
+ +Unfortunately, this will only temporarily solve the problem, so you will either need to do this repeatedly or find a way to change the API configuration to return more cached images. diff --git a/docs/admin/installation.rst b/docs/admin/installation.rst index 38e129333a..c84a256e21 100644 --- a/docs/admin/installation.rst +++ b/docs/admin/installation.rst @@ -46,8 +46,8 @@ To create a new Phalanx environment, take the following steps: The following applications have special bootstrapping considerations: - :px-app-bootstrap:`argocd` - - :px-app-bootstrap:`cachemachine` - :px-app-bootstrap:`gafaelfawr` + - :px-app-bootstrap:`nublado` - :px-app-bootstrap:`portal` - :px-app-bootstrap:`squareone` diff --git a/docs/admin/troubleshooting.rst b/docs/admin/troubleshooting.rst index 377a094527..5383f06074 100644 --- a/docs/admin/troubleshooting.rst +++ b/docs/admin/troubleshooting.rst @@ -18,26 +18,15 @@ When this happens, you may need to recreate the persistent volume. **Solution:** :ref:`recreate-postgres-pvc` -Spawner menu missing images, nublado stuck pulling the same image +Spawner menu missing images, Nublado stuck pulling the same image ================================================================= **Symptoms:** When a user goes to the spawner page for the Notebook Aspect, the expected menu of images is not available. Instead, the menu is missing one or more images. -The same image or set of images is pulled again each on each prepuller loop the nublado lab controller attempts. +The same image or set of images is pulled again each on each prepuller loop the Nublado controller attempts. **Solution:** :doc:`infrastructure/kubernetes-node-status-max-images` -Spawning a notebook fails with a pending error -============================================== - -**Symptoms:** When a user tries to spawn a new notebook, the spawn fails with an error saying that the user's lab is already pending spawn or is pending deletion. 
- -**Cause:** If the spawning of the lab fails or if the deletion of a lab fails, sometimes JupyterHub can give up on making further progress but still remember that the lab is supposedly still running. -In this case, JupyterHub may not recover without assistance. -You may need to delete the record for the affected user, and also make sure the user's lab namespace (visible in Argo CD under the ``nublado-users`` application) has been deleted. - -**Solution:** :ref:`nublado2-clear-session-database` - User gets permission denied from applications ============================================= @@ -63,16 +52,6 @@ If you need to do something that spans users or should create root-owned files, **Solution:** :doc:`infrastructure/filestore/privileged-access` -User pods don't spawn, reporting "permission denied" from Moneypenny -==================================================================== - -**Symptoms:** A user pod fails to spawn, and the error message says that Moneypenny did not have permission to execute. - -**Cause:** The ``gafaelfawr-token`` VaultSecret in the ``nublado2`` namespace is out of date. -This happened because the ``gafaelfawr-redis`` pod restarted and either it lacked persistent storage (at the T&S sites, as of July 2022), or because that storage had been lost. - -**Solution:** :doc:`/applications/gafaelfawr/recreate-token` - Login fails with "bad verification code" error ============================================== diff --git a/docs/applications/cachemachine/bootstrap.rst b/docs/applications/cachemachine/bootstrap.rst deleted file mode 100644 index 1807462ebd..0000000000 --- a/docs/applications/cachemachine/bootstrap.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. px-app-bootstrap:: cachemachine - -########################## -Bootstrapping cachemachine -########################## - -By default, cachemachine doesn't do any prepulling and doesn't provide a useful menu for Notebook Aspect spawning. 
-As part of bootstrapping a new environment, you will want to configure it to prepull appropriate images. - -For deployments on Google Kubernetes Engine, you will want to use Google Artifact Repository (GAR) as the source of images. -See :doc:`gar` for basic information and instructions on how to configure workload identity. - -For Telescope and Site deployments that need special images and image cycle configuration, start from the `summit configuration `__. -Consult with Telescope and Site to determine the correct recommended tag and cycle number. - -For other deployments that use the normal Rubin Notebook Aspect images, a reasonable starting configuration for cachemachine is: - -.. code-block:: yaml - - autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoMan", - "registry_url": "registry.hub.docker.com", - "repo": "lsstsqre/sciplat-lab", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - } - ] - } - -This prepulls the latest release, the latest two weeklies, and the latest three dailies, as well as the image tagged ``recommended``. diff --git a/docs/applications/cachemachine/gar.rst b/docs/applications/cachemachine/gar.rst deleted file mode 100644 index 5ba3ad269c..0000000000 --- a/docs/applications/cachemachine/gar.rst +++ /dev/null @@ -1,62 +0,0 @@ -################################################ -Google Cloud Artifact Registry (GAR) integration -################################################ - -Cachemachine optionally supports using the Google Cloud Artifact Registry (GAR) API to list images rather than the Docker API. - -This allows workload identity credentials to be used instead of Docker credentials when the images are stored in GAR. -Docker client authentication with GAR is cumbersome because a JSON token is used for authentication, and that token contains special characters that make it difficult to pass between multiple secret engine layers. 
- -Using the GAR API directly also avoids the need to build a cache of hashes to resolve tags to images. -The Docker API returns a list of images with a single tag, which requires constructing a cache of known hashes to determine which tags are alternate names for images that have already been seen. -The GAR API returns a list of images with all tags for that image, avoiding this problem. - -Container Image Streaming -========================= - -`Container Image Streaming `__ is used by cachemachine to decrease the time for the image pull time. -It's also used when an image isn't cached, which makes it practical to use uncached images. -With normal Docker image retrieval, using an uncached image can result in a five-minute wait and an almost-certain timeout. - -The ``sciplatlab`` images are 4GB. -Image pull time for those images decreased from 4 minutes to 30 seconds using image streaming. - -Image streaming is per project by enabling the ``containerfilesystem.googleapis.com`` API. -This was enabled via Terraform for the Interim Data Facility environments. - -Workload Identity -================= - -`Workload Identity `__ is used by Cachemachine to authenticate to the GAR API. -Workload Identity allows Kubernetes service accounts to impersonate Google Cloud Platform (GCP) Service Accounts to authenticate to GCP services. -Workload Identity is enabled on all of the Rubin Science Platform (RSP) Google Kuberentes Engine (GKE) Clusters. - -The binding between the Kubernetes and the GCP service account is done through IAM permissions deployed via Terraform. -The following Kubernetes annotation must be added to the Kubernetes ``ServiceAccount`` object as deployed via Phalanx to bind that service account to the GCP service account. - -.. 
code-block:: yaml - - serviceAccount: - annotations: { - iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-dev-7696.iam.gserviceaccount.com - } - -To troubleshoot or validate Workload Identity, a test pod can be provisioned using `these instructions `__. - -Validating operations -===================== - -To validate cachemachine is running, check the status page at ``https://data-dev.lsst.cloud/cachemachine/jupyter``. -(Replace ``data-dev`` with the appropriate environment.) -Check the ``common_cache`` key for cached images, and see if ``images_to_cache`` is blank or only showing new images that are in the process of being downloaded. - -Future work -=========== - -- Cachemachine and Nublado both default to configuring an image pull secret when spawning pods. - This value is not used by GAR. - In GKE, the nodes default to using the built-in service account to pull images. - This means we can drop the ``pull-secret`` secret and its configuration when GAR is in use. - -- Image streaming is currently a per-region setting. - If GKE clustes are deployed outside of ``us-central1`` in the future, a GAR repository should be created for that region to stream images. diff --git a/docs/applications/cachemachine/index.rst b/docs/applications/cachemachine/index.rst deleted file mode 100644 index 72c228b3f4..0000000000 --- a/docs/applications/cachemachine/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. px-app:: cachemachine - -######################################### -cachemachine — JupyterLab image prepuller -######################################### - -The Docker images used for lab pods run by the Notebook Aspect are quite large, since they contain the full Rubin Observatory software stack. -If the image is not already cached on a Kubernetes node, starting a lab pod can take as long as five minutes and may exceed the timeout allowed by JupyterHub. 
- -Cachemachine is an image prepulling service designed to avoid this problem by ensuring every node in the Science Platform Kubernetes cluster has the most frequently used lab images cached. -It is also responsible for reporting the available images to :doc:`Nublado <../nublado2/index>`, used to generate the menu of images when the user creates a new lab pod. - -.. jinja:: cachemachine - :file: applications/_summary.rst.jinja - -Guides -====== - -.. toctree:: - - bootstrap - pruning - gar - values diff --git a/docs/applications/cachemachine/pruning.rst b/docs/applications/cachemachine/pruning.rst deleted file mode 100644 index b7f2829f81..0000000000 --- a/docs/applications/cachemachine/pruning.rst +++ /dev/null @@ -1,18 +0,0 @@ -############# -Image pruning -############# - -If the list of cached images on nodes gets excessively long, Kubernetes may stop updating its list of cached images. -The usual symptom is that the Notebook Aspect spawner menu of available images will be empty or missing expected images. - -This is a limitation of the Kubernetes node API. -By default, `only 50 images on a node will be shown `__. -You can work around this, if you control the Kubernetes installation, by adding ``--node-status-max-images=-1`` on the kubelet command line, or by setting ``nodeStatusMaxImages`` to ``-1`` in the kubelet configuration file. - -If you cannot change that setting, you will need to trim the node image cache so that the total number of images is under 50. - -#. Download `purge `__. - -#. Run it on each node, using an account allowed to use the Docker socket (thus, probably in group ``docker``). - You may want to run it with ``-x`` first to see what it's going to do. - If you want output during the actual run, run it with ``-v``. 
diff --git a/docs/applications/cachemachine/values.md b/docs/applications/cachemachine/values.md deleted file mode 100644 index f15bba3c17..0000000000 --- a/docs/applications/cachemachine/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} cachemachine -``` - -# Cachemachine Helm values reference - -Helm values reference table for the {px-app}`cachemachine` application. - -```{include} ../../../applications/cachemachine/README.md ---- -start-after: "## Values" ---- -``` diff --git a/docs/applications/gafaelfawr/recreate-token.rst b/docs/applications/gafaelfawr/recreate-token.rst index 21da258661..bc7a5de2e1 100644 --- a/docs/applications/gafaelfawr/recreate-token.rst +++ b/docs/applications/gafaelfawr/recreate-token.rst @@ -6,18 +6,17 @@ Where possible, we use persistent storage for Gafaelfawr's Redis database so tha However, if that persistent storage is deleted for some reason, or if Gafaelfawr is not configured to use persistent storage, all tokens will be invalidated. When this happens, depending on the order of restart, the ``gafaelfawr-tokens`` pod that is responsible for maintaining service tokens in the cluster may take up to 30 minutes to realize those tokens are no longer valid. -This will primarily affect the Notebook Aspect, which will be unable to authenticate to moneypenny and thus will not be able to spawn pods. -The result will be a "permission denied" error from moneypenny. +This will primarily affect the Notebook Aspect, which will be unable to authenticate to the Nublado controller and thus will not be able to spawn pods. -Gafaelfawr will automatically fix this problem after 30 minutes, but unfortunately the JupyterHub component of ``nublado2`` currently loads its token on startup and doesn't pick up changes. +Gafaelfawr will automatically fix this problem after 30 minutes, but unfortunately the JupyterHub component of ``nublado`` currently loads its token on startup and doesn't pick up changes. 
The easiest way to fix this problem is to force revalidation of all of the Gafaelfawr service tokens. To do that: #. Force a restart of the ``gafaelfawr-tokens`` deployment in the ``gafaelfawr`` namespace. - This will recreate the secret in ``nublado2``. + This will recreate any token secrets that are not valid. -#. Force a restart of the ``hub`` deployment in ``nublado2``. +#. Force a restart of the ``hub`` deployment in ``nublado``. This will restart the hub with the new, correct token. Be aware that when the Redis storage is wipoed, all user tokens will also be invalidated. diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 05dbd070ef..3acb272e61 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -24,16 +24,13 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde :caption: Rubin Science Platform butler/index - cachemachine/index datalinker/index hips/index linters/index livetap/index mobu/index - moneypenny/index noteburst/index nublado/index - nublado2/index portal/index semaphore/index sherlock/index diff --git a/docs/applications/moneypenny/index.rst b/docs/applications/moneypenny/index.rst deleted file mode 100644 index fcc7b01682..0000000000 --- a/docs/applications/moneypenny/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. px-app:: moneypenny - -############################## -moneypenny — User provisioning -############################## - -Moneypenny is responsible for provisioning new users of the Notebook Aspect of a Science Platform installation. -It is invoked by :px-app:`nublado2` whenever a user pod is spawned and decides whether provisioning is required. -If so, it does so before the lab spawn, usually by spawning a privileged pod. - -A typical example of the type of provisioning it does is creating the user's home directory, with appropriate ownership and permissions, in an NFS file store. - -.. 
jinja:: moneypenny - :file: applications/_summary.rst.jinja - -Guides -====== - -.. toctree:: - :maxdepth: 1 - - values diff --git a/docs/applications/moneypenny/values.md b/docs/applications/moneypenny/values.md deleted file mode 100644 index 91608bc501..0000000000 --- a/docs/applications/moneypenny/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} moneypenny -``` - -# moneypenny Helm values reference - -Helm values reference table for the {px-app}`moneypenny` application. - -```{include} ../../../applications/moneypenny/README.md ---- -start-after: "## Values" ---- -``` diff --git a/docs/applications/nublado/bootstrap.rst b/docs/applications/nublado/bootstrap.rst index f5fb068164..6d3406d548 100644 --- a/docs/applications/nublado/bootstrap.rst +++ b/docs/applications/nublado/bootstrap.rst @@ -4,12 +4,33 @@ Bootstrapping Nublado ##################### -The JupyterLab Controller needs to know where the NFS server that provides persistent space (e.g. home directories, scratch, datasets) can be found. Ensure the correct definitions are in place in the configuration. +For details on how to write the Nublado configuration, see the `Nublado administrator documentation `__. + +GKE deployments +=============== + +When deploying Nublado on Google Kubernetes Engine, using Google Artifact Registry as the image source is strongly recommended. +This will result in better image selection menus, allow use of container streaming for faster start-up times, and avoid the need to maintain a pull secret. + +For setup instructions for using GAR with Nublado, see `Set up Google Artifact Registry in the Nublado documentation `__. +For more details about the benefits of using GAR, see the `relevant Nublado documentation page `__. Telescope and Site deployments ============================== -For Telescope and Site deployments that require instrument control, make sure you have any Multus network definitions you need in the ``values-.yaml``. 
+Image cycles +------------ + +Telescope and Site deployments have to limit the available images to only images that implement the current XML API. +This is done with a cycle restriction on which images are eligible for spawning. +Failing to set the cycle correctly can cause serious issues with the instrument control plane. + +For details on how to configure the cycle, see `image cycles in the Nublado documentation `__. + +Networking +---------- + +For Telescope and Site deployments that require instrument control, make sure you have any Multus network definitions you need in the :file:`values-{environment}.yaml`. This will look something like: .. code-block:: yaml diff --git a/docs/applications/nublado/index.rst b/docs/applications/nublado/index.rst index 580649eb8e..3c80a30934 100644 --- a/docs/applications/nublado/index.rst +++ b/docs/applications/nublado/index.rst @@ -1,12 +1,14 @@ .. px-app:: nublado -############################################ +####################################### nublado — JupyterHub/JupyterLab for RSP -############################################ +####################################### -The ``nublado`` service is an installation of a Rubin Observatory flavor of `Zero to JupyterHub `__ with some additional resources. This is currently the third version of ``nublado``. -The JupyterHub component provides the Notebook Aspect of the Rubin Science Platform, but replaces the KubeSpawner with a REST client to the JupyterLab Controller. -The JupyterLab Controller component not only provides user lab pod management, but also subsumes the functions formerly provided by the ``cachemachine`` and ``moneypenny`` applications. That is, in addition to creating and destroying user pods and namespaces, it handles filesystem provisioning for users, and manages prepulls of cached images to local nodes. +The ``nublado`` application provides a JupyterHub and JupyterLab interface for Rubin Science Platform users. 
+It also deploys a Kubernetes controller that, besides creating user lab pods, prepulls lab images and can provide per-user WebDAV file servers. + +The JupyterHub component and its proxy are deployed via `Zero to JupyterHub `__ with a custom configuration. +Alongside it, the Nublado controller is deployed by the same application as a separate FastAPI service. .. jinja:: nublado :file: applications/_summary.rst.jinja diff --git a/docs/applications/nublado/troubleshoot.rst b/docs/applications/nublado/troubleshoot.rst index 36438f8add..b569c4a52b 100644 --- a/docs/applications/nublado/troubleshoot.rst +++ b/docs/applications/nublado/troubleshoot.rst @@ -1,9 +1,17 @@ .. px-app-troubleshooting:: nublado ####################### -Troubleshooting nublado +Troubleshooting Nublado ####################### +Check image prepulling status +============================= + +Nublado will attempt to prepull all configured images to each node that it believes is allowed to run Nublado lab images. +To see the status of that prepulling, go to the ``/nublado/spawner/v1/prepulls`` route of the relevant environment. + +In the resulting JSON document, ``config`` shows the current operative configuration, ``images`` shows the prepull status of the various images, and ``nodes`` shows the prepull status by node. + .. _nublado-clear-session-database: Clear session database entry @@ -33,9 +41,8 @@ Recovery may require manually clearing the user's entry in the session database In some cases you may also need to remove the user from the spawner table. To do this, run ``select * from spawners`` and find the pod with the user's name in it, and then delete that row. -..
_nublado_node_status_max_images: - Prepuller is running continuously and/or expected menu items are missing ======================================================================== -``nodeStatusMaxImages`` should be increased or disabled: :doc:`/admin/infrastructure/kubernetes-node-status-max-images` +The Kubernetes control plane configuration variable ``nodeStatusMaxImages`` should be increased or disabled. +See :doc:`/admin/infrastructure/kubernetes-node-status-max-images`. diff --git a/docs/applications/nublado2/bootstrap.rst b/docs/applications/nublado2/bootstrap.rst deleted file mode 100644 index 0401e7d390..0000000000 --- a/docs/applications/nublado2/bootstrap.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. px-app-bootstrap:: nublado2 - -##################### -Bootstrapping Nublado -##################### - -Nublado and :px-app:`moneypenny` need to know where the NFS server that provides user home space is. -Nublado also requires other persistent storage space. -Ensure the correct definitions are in place in their configuration. - -Telescope and Site deployments -============================== - -For Telescope and Site deployments that require instrument control, make sure you have any Multus network definitions you need in the ``values-.yaml``. -This will look something like: - -.. code-block:: yaml - - singleuser: - extraAnnotations: - k8s.v1.cni.cncf.io/networks: "kube-system/macvlan-conf" - initContainers: - - name: "multus-init" - image: "lsstit/ddsnet4u:latest" - securityContext: - privileged: true - -It's possible to list multiple Multus network names separated by commas in the annotation string. -Experimentally, it appears that the interfaces will appear in the order specified. - -The ``initContainers`` entry should be inserted verbatim. -It creates a privileged container that bridges user pods to the specified networks before releasing control to the user's lab. 
diff --git a/docs/applications/nublado2/index.rst b/docs/applications/nublado2/index.rst deleted file mode 100644 index b08ecb8e17..0000000000 --- a/docs/applications/nublado2/index.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. px-app:: nublado2 - -############################# -nublado2 — JupyterHub for RSP -############################# - -The ``nublado2`` service is an installation of a Rubin Observatory flavor of `Zero to JupyterHub `__ with some additional resources. -It provides the Notebook Aspect of the Rubin Science Platform. - -.. jinja:: nublado2 - :file: applications/_summary.rst.jinja - -Guides -====== - -.. toctree:: - :maxdepth: 2 - - bootstrap - upgrade - troubleshoot - values diff --git a/docs/applications/nublado2/troubleshoot.rst b/docs/applications/nublado2/troubleshoot.rst deleted file mode 100644 index c5223be9c1..0000000000 --- a/docs/applications/nublado2/troubleshoot.rst +++ /dev/null @@ -1,34 +0,0 @@ -.. px-app-troubleshooting:: nublado2 - -######################## -Troubleshooting nublado2 -######################## - -.. _nublado2-clear-session-database: - -Clear session database entry -============================ - -Sometimes JupyterHub and its session database will get into an inconsistent state where it thinks a pod is already running but cannot shut it down. -The typical symptom of this is that spawns for that user fail with an error saying that the user's lab is already pending spawn or pending deletion, but the user cannot connect to their pod. - -Recovery may require manually clearing the user's entry in the session database as follows: - -#. Remove the user's lab namespace, if it exists. - -#. Remove the user from the session database. - First, connect to the database: - - .. code-block:: shell - - pod=$(kubectl get pods -n postgres | grep postgres | awk '{print $1}') - kubectl exec -it -n postgres ${pod} -- psql -U jovyan jupyterhub - - Then, at the PostgreSQL prompt: - - .. 
code-block:: sql - - delete from users where name='' - -In some cases you may also need to remove the user from the spawner table. -To do this, run ``select * from spawners`` and find the pod with the user's name in it, and then delete that row. diff --git a/docs/applications/nublado2/upgrade.rst b/docs/applications/nublado2/upgrade.rst deleted file mode 100644 index 9d24ba3bce..0000000000 --- a/docs/applications/nublado2/upgrade.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. px-app-upgrade:: nublado2 - -################# -Upgrading Nublado -################# - -Most of the time, upgrading Nublado can be done simply by syncing the application in Argo CD. -There will be a brief outage for spawning new pods, but users with existing pods should be able to continue working. - -Occasionally, new versions of JupyterHub will require a schema update. -We do not routinely enable automatic schema updates currently, so JupyterHub will refuse to start if a database schema update is required. -To enable schema updates, add: - -.. code-block:: yaml - - jupyterhub: - hub: - db: - upgrade: true - -(The ``jupyterhub`` and ``hub`` keys probably already exist in the ``values-.yaml`` file, so just add the ``db.upgrade`` setting in the correct spot.) -Then, JupyterHub will automatically upgrade its database when the new version starts. -You can then remove this configuration again if you're worried about automatic updates misbehaving later. diff --git a/docs/applications/nublado2/values.md b/docs/applications/nublado2/values.md deleted file mode 100644 index 5a1b65d74a..0000000000 --- a/docs/applications/nublado2/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} nublado2 -``` - -# nublado2 Helm values reference - -Helm values reference table for the {px-app}`nublado2` application. 
- -```{include} ../../../applications/nublado2/README.md ---- -start-after: "## Values" ---- -``` diff --git a/docs/applications/portal/index.rst b/docs/applications/portal/index.rst index 31e2c7214b..b2f588decf 100644 --- a/docs/applications/portal/index.rst +++ b/docs/applications/portal/index.rst @@ -5,7 +5,7 @@ portal — Firefly-based RSP Portal ################################# The Portal Aspect of the Rubin Science Platform, powered by Firefly. -This provides a graphical user interface for astronomical data exploration and also provides a data viewer that can be used within the Notebook Aspect (:px-app:`nublado2`). +This provides a graphical user interface for astronomical data exploration and also provides a data viewer that can be used within the Notebook Aspect (:px-app:`nublado`). .. jinja:: portal :file: applications/_summary.rst.jinja diff --git a/environments/README.md b/environments/README.md index 889fee61d9..52afa9fe6f 100644 --- a/environments/README.md +++ b/environments/README.md @@ -8,7 +8,6 @@ | applications.argo-workflows | bool | `false` | Enable the argo-workflows application | | applications.argocd | bool | `true` | Enable the Argo CD application. 
This must be enabled for all environments and is present here only because it makes parsing easier | | applications.butler | bool | `false` | Enable the butler application | -| applications.cachemachine | bool | `false` | Enable the cachemachine application (required by nublado2) | | applications.cert-manager | bool | `true` | Enable the cert-manager application, required unless the environment makes separate arrangements to inject a current TLS certificate | | applications.datalinker | bool | `false` | Eanble the datalinker application | | applications.exposurelog | bool | `false` | Enable the exposurelog application | @@ -20,13 +19,11 @@ | applications.linters | bool | `false` | Enable the linters application | | applications.livetap | bool | `false` | Enable the livetap application | | applications.mobu | bool | `false` | Enable the mobu application | -| applications.moneypenny | bool | `false` | Enable the moneypenny application (required by nublado2) | | applications.monitoring | bool | `false` | Enable the monitoring application | | applications.narrativelog | bool | `false` | Enable the narrativelog application | | applications.next-visit-fan-out | bool | `false` | Enable the next-visit-fan-out application | | applications.noteburst | bool | `false` | Enable the noteburst application (required by times-square) | | applications.nublado | bool | `false` | Enable the nublado application (v3 of the Notebook Aspect) | -| applications.nublado2 | bool | `false` | Enable the nublado2 application (v2 of the Notebook Aspect, now deprecated). This should not be used for new environments. 
| | applications.obsloctap | bool | `false` | Enable the obsloctap application | | applications.onepassword-connect | bool | `false` | Enable the onepassword-connect application | | applications.ook | bool | `false` | Enable the ook application | diff --git a/environments/templates/cachemachine-application.yaml b/environments/templates/cachemachine-application.yaml deleted file mode 100644 index e07f2d2111..0000000000 --- a/environments/templates/cachemachine-application.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.applications.cachemachine -}} -apiVersion: v1 -kind: Namespace -metadata: - name: "cachemachine" ---- -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - name: "cachemachine" - namespace: "argocd" - finalizers: - - "resources-finalizer.argocd.argoproj.io" -spec: - destination: - namespace: "cachemachine" - server: "https://kubernetes.default.svc" - project: "default" - source: - path: "applications/cachemachine" - repoURL: {{ .Values.repoUrl | quote }} - targetRevision: {{ .Values.targetRevision | quote }} - helm: - parameters: - - name: "global.host" - value: {{ .Values.fqdn | quote }} - - name: "global.baseUrl" - value: "https://{{ .Values.fqdn }}" - - name: "global.vaultSecretsPath" - value: {{ .Values.vaultPathPrefix | quote }} - valueFiles: - - "values.yaml" - - "values-{{ .Values.name }}.yaml" -{{- end -}} diff --git a/environments/templates/moneypenny-application.yaml b/environments/templates/moneypenny-application.yaml deleted file mode 100644 index 539a2a0c55..0000000000 --- a/environments/templates/moneypenny-application.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.applications.moneypenny -}} -apiVersion: v1 -kind: Namespace -metadata: - name: "moneypenny" ---- -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - name: "moneypenny" - namespace: "argocd" - finalizers: - - "resources-finalizer.argocd.argoproj.io" -spec: - destination: - namespace: "moneypenny" - server: "https://kubernetes.default.svc" - 
project: "default" - source: - path: "applications/moneypenny" - repoURL: {{ .Values.repoUrl | quote }} - targetRevision: {{ .Values.targetRevision | quote }} - helm: - parameters: - - name: "global.host" - value: {{ .Values.fqdn | quote }} - - name: "global.baseUrl" - value: "https://{{ .Values.fqdn }}" - - name: "global.vaultSecretsPath" - value: {{ .Values.vaultPathPrefix | quote }} - valueFiles: - - "values.yaml" - - "values-{{ .Values.name }}.yaml" -{{- end -}} diff --git a/environments/templates/nublado-users-application.yaml b/environments/templates/nublado-users-application.yaml index 4efc7793f7..c5c48ec6bc 100644 --- a/environments/templates/nublado-users-application.yaml +++ b/environments/templates/nublado-users-application.yaml @@ -1,4 +1,4 @@ -{{- if (or .Values.applications.nublado .Values.applications.nublado2) -}} +{{- if .Values.applications.nublado -}} apiVersion: argoproj.io/v1alpha1 kind: Application metadata: diff --git a/environments/templates/nublado2-application.yaml b/environments/templates/nublado2-application.yaml deleted file mode 100644 index fbfeea6e1c..0000000000 --- a/environments/templates/nublado2-application.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.applications.nublado2 -}} -apiVersion: v1 -kind: Namespace -metadata: - name: "nublado2" ---- -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - name: "nublado2" - namespace: "argocd" - finalizers: - - "resources-finalizer.argocd.argoproj.io" -spec: - destination: - namespace: "nublado2" - server: "https://kubernetes.default.svc" - project: "default" - source: - path: "applications/nublado2" - repoURL: {{ .Values.repoUrl | quote }} - targetRevision: {{ .Values.targetRevision | quote }} - helm: - valueFiles: - - "values.yaml" - - "values-{{ .Values.name }}.yaml" - parameters: - - name: "global.vaultSecretsPath" - value: {{ .Values.vaultPathPrefix | quote }} - ignoreDifferences: - - kind: "Secret" - jsonPointers: - - 
"/data/hub.config.ConfigurableHTTPProxy.auth_token" - - "/data/hub.config.CryptKeeper.keys" - - "/data/hub.config.JupyterHub.cookie_secret" - - group: "apps" - kind: "Deployment" - jsonPointers: - - "/spec/template/metadata/annotations/checksum~1secret" - - "/spec/template/metadata/annotations/checksum~1auth-token" -{{- end -}} diff --git a/environments/values-ccin2p3.yaml b/environments/values-ccin2p3.yaml index d9a90d13e5..04419d41ee 100644 --- a/environments/values-ccin2p3.yaml +++ b/environments/values-ccin2p3.yaml @@ -3,10 +3,8 @@ fqdn: data-dev.lsst.eu vaultPathPrefix: secret/k8s_operator/rsp-cc applications: - cachemachine: true datalinker: true - moneypenny: true - nublado2: true + nublado: true portal: true postgres: true squareone: true diff --git a/environments/values-roe.yaml b/environments/values-roe.yaml index 0e20da2923..601cf9c3b6 100644 --- a/environments/values-roe.yaml +++ b/environments/values-roe.yaml @@ -4,10 +4,8 @@ vaultUrl: "https://vault.lsst.ac.uk" vaultPathPrefix: secret/k8s_operator/roe applications: - cachemachine: true mobu: true - moneypenny: true - nublado2: true + nublado: true portal: true postgres: true squareone: true diff --git a/environments/values.yaml b/environments/values.yaml index 97b5c66376..db8444c3bc 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -39,9 +39,6 @@ applications: # -- Enable the butler application butler: false - # -- Enable the cachemachine application (required by nublado2) - cachemachine: false - # -- Enable the cert-manager application, required unless the environment # makes separate arrangements to inject a current TLS certificate cert-manager: true @@ -80,9 +77,6 @@ applications: # -- Enable the mobu application mobu: false - # -- Enable the moneypenny application (required by nublado2) - moneypenny: false - # -- Enable the monitoring application monitoring: false @@ -98,10 +92,6 @@ applications: # -- Enable the nublado application (v3 of the Notebook Aspect) nublado: false - 
# -- Enable the nublado2 application (v2 of the Notebook Aspect, now - # deprecated). This should not be used for new environments. - nublado2: false - # -- Enable the onepassword-connect application onepassword-connect: false From 1ae0e49dc28629711a016815805647ee13f9ac95 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 8 Jan 2024 18:17:35 -0800 Subject: [PATCH 421/588] Fix linting when an application is deleted If an application is deleted, we still attempted some linting because it showed up as changed (but with no environments). Catch that state and skip linting the application in that case. --- src/phalanx/storage/config.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 67c03d81f6..d09c8875eb 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -313,6 +313,10 @@ def get_environment_chart_path(self) -> Path: def get_modified_applications(self, branch: str) -> dict[str, list[str]]: """Get all modified application and environment pairs. + Application and environment pairs that have been deleted do not count + as modified, since we don't want to attempt to lint deleted + configurations. 
+ Parameters ---------- branch @@ -336,8 +340,9 @@ def get_modified_applications(self, branch: str) -> dict[str, list[str]]: continue if change.affects_all_envs: envs = self.get_application_environments(change.application) - result[change.application] = envs - if not change.is_delete: + if envs: + result[change.application] = envs + elif not change.is_delete: if m := re.match("values-([^.]+).yaml$", change.path): result[change.application].append(m.group(1)) return result From 597f9cb3d91440f15ed715c5bb62c14657606216 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 13 Nov 2023 12:30:18 -0700 Subject: [PATCH 422/588] Fix up telegraf repository --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/telegraf-kafka-consumer/README.md | 2 +- .../sasquatch/charts/telegraf-kafka-consumer/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 7f64f4f87f..803047653b 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -382,7 +382,7 @@ Rubin Observatory's telemetry service. | telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | telegraf-kafka-consumer.env[2].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | telegraf-kafka-consumer.image.pullPolicy | string | IfNotPresent | Image pull policy. | -| telegraf-kafka-consumer.image.repo | string | `"quay.io/influxdb/telegraf-nightly:latest"` | Telegraf image repository. | +| telegraf-kafka-consumer.image.repo | string | `"quay.io/influxdb/telegraf-nightly"` | Telegraf image repository. | | telegraf-kafka-consumer.image.tag | string | `"latest"` | Telegraf image tag. | | telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. 
| diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index a399f84152..2f8f981d08 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -20,7 +20,7 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | env[2].valueFrom.secretKeyRef.key | string | `"influxdb-password"` | InfluxDB v1 password | | env[2].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | image.pullPolicy | string | IfNotPresent | Image pull policy. | -| image.repo | string | `"quay.io/influxdb/telegraf-nightly:latest"` | Telegraf image repository. | +| image.repo | string | `"quay.io/influxdb/telegraf-nightly"` | Telegraf image repository. | | image.tag | string | `"latest"` | Telegraf image tag. | | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index 82095b397b..fafa0c8741 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -4,7 +4,7 @@ enabled: false image: # -- Telegraf image repository. - repo: "quay.io/influxdb/telegraf-nightly:latest" + repo: "quay.io/influxdb/telegraf-nightly" # -- Telegraf image tag. tag: "latest" # -- Image pull policy. 
From 938d9cb29ca376d1429776b5a8d496db3621172c Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 13 Nov 2023 12:32:47 -0700 Subject: [PATCH 423/588] Use default tag on idfdev --- applications/sasquatch/values-idfdev.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 49da473bbb..88486af5c4 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -38,8 +38,6 @@ influxdb: telegraf-kafka-consumer: enabled: true - image: - tag: "avrounions" kafkaConsumers: test: enabled: true From 77ea223c63c6e218aaf2ec20826939218a7e9ff1 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 9 Jan 2024 08:52:25 -0800 Subject: [PATCH 424/588] Make telegraf-password unconditional in sasquatch This appears to always be referenced by the strimzi-kafka subchart since it is referenced in the values.yaml file. For the time being, make it unconditional, although I think the logic for deciding what secrets are required may still not be correct. --- applications/sasquatch/secrets.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index ac7f00b61f..d7e02a586f 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -60,7 +60,6 @@ sasquatch-test-password: telegraf-password: description: >- ? - if: strimzi-kafka.users.telegraf.enabled ts-salkafka-password: description: >- ?
From 6a92cb28dc4a94615b35c3e6729aeb3d182b7283 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 9 Jan 2024 20:04:46 +0000 Subject: [PATCH 425/588] Update Helm release jupyterhub to v3.2.1 --- applications/nublado/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 8c38ef0df6..63214c7930 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -11,7 +11,7 @@ dependencies: - name: jupyterhub # This is the Zero To Jupyterhub version, *not* the version of the # Jupyterhub package itself. - version: "3.1.0" + version: "3.2.1" repository: https://jupyterhub.github.io/helm-chart/ annotations: From db64b4b4d6a6dd70140c3121a88831598d6282a7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 10 Jan 2024 02:23:10 +0000 Subject: [PATCH 426/588] Update Helm release telegraf to v1.8.40 --- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 8f5ac9e5a6..2a62ef164f 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.39 + version: 1.8.40 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 901901bf41e45071c2c4802bd288b5569d37e84f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 10 Jan 2024 02:23:13 +0000 Subject: [PATCH 427/588] Update Helm release telegraf-ds to v1.1.22 --- applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 3483a73918..f9fc08d575 100644 --- 
a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.21 + version: 1.1.22 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 6560ed556b3dbfa1e3ac93faafcf45672cd8c012 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 10 Jan 2024 03:02:07 +0000 Subject: [PATCH 428/588] Update Helm release redis to v1.0.11 --- applications/gafaelfawr/Chart.yaml | 2 +- applications/noteburst/Chart.yaml | 2 +- applications/portal/Chart.yaml | 2 +- applications/rubintv/Chart.yaml | 2 +- applications/times-square/Chart.yaml | 2 +- applications/vo-cutouts/Chart.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index b501a8635a..2bf9e7af96 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -9,7 +9,7 @@ appVersion: 9.6.1 dependencies: - name: redis - version: 1.0.10 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index ffee5233bb..b6f0c92aed 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -13,7 +13,7 @@ maintainers: dependencies: - name: redis - version: 1.0.10 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index 5e6b5575ac..632e887269 100644 --- a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -9,7 +9,7 @@ appVersion: "suit-2023.2.3" dependencies: - name: redis - version: 1.0.10 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/rubintv/Chart.yaml b/applications/rubintv/Chart.yaml index 
d9580e1818..28e84ca86a 100644 --- a/applications/rubintv/Chart.yaml +++ b/applications/rubintv/Chart.yaml @@ -7,5 +7,5 @@ sources: appVersion: 0.1.0 dependencies: - name: redis - version: 1.0.10 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 8b74abf857..e42cd84c0b 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -12,7 +12,7 @@ appVersion: "0.9.2" dependencies: - name: redis - version: 1.0.10 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index 966bdb6814..0b5d2f7802 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -8,7 +8,7 @@ appVersion: 1.0.0 dependencies: - name: redis - version: 1.0.10 + version: 1.0.11 repository: https://lsst-sqre.github.io/charts/ annotations: From b3ce63e05c8c2c48d2f4f111d0d233a410de52c3 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 10 Jan 2024 16:02:24 -0800 Subject: [PATCH 429/588] Update Python dependencies Picks up a security fix in gitpython. 
--- requirements/dev.txt | 26 +++++++++++++------------- requirements/main.txt | 20 ++++++++++---------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 82e4c61a58..47d97038ae 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,12 +1,12 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/dev.txt requirements/dev.in # -alabaster==0.7.15 \ - --hash=sha256:0127f4b1db0afc914883f930e3d40763131aebac295522fc4a04d9e77c703705 \ - --hash=sha256:d99c6fd0f7a86fca68ecc5231c9de45227991c10ee6facfb894cf6afb953b142 +alabaster==0.7.16 \ + --hash=sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65 \ + --hash=sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92 # via sphinx annotated-types==0.6.0 \ --hash=sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43 \ @@ -236,9 +236,9 @@ gitdb==4.0.11 \ # via # -c requirements/main.txt # gitpython -gitpython==3.1.40 \ - --hash=sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4 \ - --hash=sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a +gitpython==3.1.41 \ + --hash=sha256:c36b6634d069b3f719610175020a9aed919421c87552185b085e04fbbdb10b7c \ + --hash=sha256:ed66e624884f76df22c8e16066d567aaa5a37d5b5fa19db2c6df6f7156db9048 # via # -c requirements/main.txt # documenteer @@ -264,9 +264,9 @@ iniconfig==2.0.0 \ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 # via pytest -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +jinja2==3.1.3 \ + 
--hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ + --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 # via # -c requirements/main.txt # diagrams @@ -877,9 +877,9 @@ sphinxcontrib-applehelp==1.0.7 \ --hash=sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d \ --hash=sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa # via sphinx -sphinxcontrib-bibtex==2.6.1 \ - --hash=sha256:046b49f070ae5276af34c1b8ddb9bc9562ef6de2f7a52d37a91cb8e53f54b863 \ - --hash=sha256:094c772098fe6b030cda8618c45722b2957cad0c04f328ba2b154aa08dfe720a +sphinxcontrib-bibtex==2.6.2 \ + --hash=sha256:10d45ebbb19207c5665396c9446f8012a79b8a538cb729f895b5910ab2d0b2da \ + --hash=sha256:f487af694336f28bfb7d6a17070953a7d264bec43000a2379724274f5f8d70ae # via documenteer sphinxcontrib-devhelp==1.0.5 \ --hash=sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212 \ diff --git a/requirements/main.txt b/requirements/main.txt index 80169040a3..80627b30b4 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/main.txt requirements/main.in @@ -242,9 +242,9 @@ gitdb==4.0.11 \ --hash=sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4 \ --hash=sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b # via gitpython -gitpython==3.1.40 \ - --hash=sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4 \ - --hash=sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a +gitpython==3.1.41 \ + --hash=sha256:c36b6634d069b3f719610175020a9aed919421c87552185b085e04fbbdb10b7c \ + --hash=sha256:ed66e624884f76df22c8e16066d567aaa5a37d5b5fa19db2c6df6f7156db9048 # via -r requirements/main.in 
h11==0.14.0 \ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ @@ -271,9 +271,9 @@ idna==3.6 \ # anyio # requests # rfc3986 -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +jinja2==3.1.3 \ + --hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ + --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 # via -r requirements/main.in markupsafe==2.1.3 \ --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ @@ -337,9 +337,9 @@ markupsafe==2.1.3 \ --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 # via jinja2 -onepasswordconnectsdk==1.4.0 \ - --hash=sha256:a8dd3aa1750ef0d5b095368287e043bc30cc90281169d6aaaebca57d6b4e6c5c \ - --hash=sha256:c01b4e5d6faf2e985654d19f34e84efacffcc3ba487bcbcec386d7f8d3e8d88e +onepasswordconnectsdk==1.4.1 \ + --hash=sha256:133defedbc4a4658f68e32865330c2d6844b132763037b984cb74aa21dd1e7f5 \ + --hash=sha256:8402b893e007d1a339bb5658b7600b32505c88234d74bfdb307e74c14e586e42 # via -r requirements/main.in pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ From 3ba08b5f7fef37873bfb82c007fbb20e75288040 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 10 Jan 2024 17:28:15 -0800 Subject: [PATCH 430/588] Improve Nublado documentation Rework the Nublado upgrading documentation and reference new pages in the Nublado manual. Make clear that deleting user sessions from the database should no longer be necessary, and link to the Nublado manual for how to do it instead of reproducing those instructions in Phalanx. 
--- docs/applications/nublado/troubleshoot.rst | 26 +++++----------------- docs/applications/nublado/upgrade.rst | 16 ++++++++----- 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/docs/applications/nublado/troubleshoot.rst b/docs/applications/nublado/troubleshoot.rst index b569c4a52b..88b940740d 100644 --- a/docs/applications/nublado/troubleshoot.rst +++ b/docs/applications/nublado/troubleshoot.rst @@ -17,29 +17,13 @@ In the resulting JSON document, ``config`` shows the current operative configura Clear session database entry ============================ -Sometimes JupyterHub and its session database will get into an inconsistent state where it thinks a pod is already running but cannot shut it down. -The typical symptom of this is that spawns for that user fail with an error saying that the user's lab is already pending spawn or pending deletion, but the user cannot connect to their pod. +Historically, we sometimes saw JupyterHub get into an inconsistent state where it thought a pod was already running and couldn't be shut down. +We haven't seen this problem since switching to the Nublado controller, but it may still be possible for the JupyterHub session database to get out of sync. -Recovery may require manually clearing the user's entry in the session database as follows: +If JupyterHub keeps telling a user that their lab is already spawning or shutting down, but doesn't allow them to connect to the lab or shut it down, following the instructions on `deleting a user session `__ may fix the problem. -#. Remove the user's lab namespace, if it exists. - -#. Remove the user from the session database. - First, connect to the database: - - .. code-block:: shell - - pod=$(kubectl get pods -n postgres | grep postgres | awk '{print $1}') - kubectl exec -it -n postgres ${pod} -- psql -U jovyan jupyterhub - - Then, at the PostgreSQL prompt: - - .. 
code-block:: sql - - delete from users where name='' - -In some cases you may also need to remove the user from the spawner table. -To do this, run ``select * from spawners`` and find the pod with the user's name in it, and then delete that row. +If it does, investigate how JupyterHub was able to get stuck. +This indicates some sort of bug in Nublado. Prepuller is running continuously and/or expected menu items are missing ======================================================================== diff --git a/docs/applications/nublado/upgrade.rst b/docs/applications/nublado/upgrade.rst index 504325c706..10c5d6c666 100644 --- a/docs/applications/nublado/upgrade.rst +++ b/docs/applications/nublado/upgrade.rst @@ -8,8 +8,9 @@ Most of the time, upgrading Nublado can be done simply by syncing the applicatio There will be a brief outage for spawning new pods, but users with existing pods should be able to continue working. Occasionally, new versions of JupyterHub will require a schema update. -We do not routinely enable automatic schema updates currently, so JupyterHub will refuse to start if a database schema update is required. -To enable schema updates, add: +Automatic schema updates are off by default, so JupyterHub will refuse to start if a database schema update is required. + +To enable schema updates, add the following to :file:`values-{environment}.yaml` for the ``nublado`` application: .. code-block:: yaml @@ -18,7 +19,10 @@ To enable schema updates, add: db: upgrade: true -(The ``jupyterhub`` and ``hub`` keys probably already exist in the ``values-.yaml`` file, so just add the ``db.upgrade`` setting in the correct spot.) -Then, JupyterHub will automatically upgrade its database when the new version starts. -You can then remove this configuration again if you're worried about automatic updates misbehaving later. 
-Alternatively, if there's a schema update, it's probably a pretty major upgrade to JupyterHub, and it may be a better idea to shut down the Hub, remove all user namespaces, and then connect to the database and drop all tables; when the Hub is restarted, the correct schema will be generated. Obviously this will boot all users from the running system, but that may be appropriate for major upgrades. +(The ``jupyterhub`` and ``hub`` keys probably already exist, so just add the ``db.upgrade`` setting in the correct spot.) +JupyterHub will then automatically upgrade its database when it is restarted running the new version. + +You can then remove this configuration afterwards if you're worried about applying a schema update without being aware that you're doing so. + +Alternately, for major upgrades to JupyterHub, you can choose to start from an empty database. +To do this, follow the instructions in the `Nublado documentation on wiping the database `__. From 6298742322e4e78297ac15ad207162dc51be6ec5 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 13 Nov 2023 10:03:11 -0700 Subject: [PATCH 431/588] Add initial config for exposurelog/narrativelog at USDF-dev/prod --- applications/exposurelog/values-usdfdev.yaml | 6 ++++++ applications/exposurelog/values-usdfprod.yaml | 6 ++++++ applications/narrativelog/values-usdfdev.yaml | 5 +++++ applications/narrativelog/values-usdfprod.yaml | 5 +++++ environments/values-usdfdev.yaml | 2 ++ environments/values-usdfprod.yaml | 2 ++ 6 files changed, 26 insertions(+) create mode 100644 applications/exposurelog/values-usdfdev.yaml create mode 100644 applications/exposurelog/values-usdfprod.yaml create mode 100644 applications/narrativelog/values-usdfdev.yaml create mode 100644 applications/narrativelog/values-usdfprod.yaml diff --git a/applications/exposurelog/values-usdfdev.yaml b/applications/exposurelog/values-usdfdev.yaml new file mode 100644 index 0000000000..5153d2fde7 --- /dev/null +++ b/applications/exposurelog/values-usdfdev.yaml @@ -0,0
+1,6 @@ +config: + site_id: usdfdev + butler_uri_1: s3://rubin-summit-users/butler.yaml +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/exposurelog/values-usdfprod.yaml b/applications/exposurelog/values-usdfprod.yaml new file mode 100644 index 0000000000..8f4f585d48 --- /dev/null +++ b/applications/exposurelog/values-usdfprod.yaml @@ -0,0 +1,6 @@ +config: + site_id: usdfprod + butler_uri_1: s3://rubin-summit-users/butler.yaml +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/narrativelog/values-usdfdev.yaml b/applications/narrativelog/values-usdfdev.yaml new file mode 100644 index 0000000000..bf9b05e6b1 --- /dev/null +++ b/applications/narrativelog/values-usdfdev.yaml @@ -0,0 +1,5 @@ +config: + site_id: usdfprod +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/applications/narrativelog/values-usdfprod.yaml b/applications/narrativelog/values-usdfprod.yaml new file mode 100644 index 0000000000..bf9b05e6b1 --- /dev/null +++ b/applications/narrativelog/values-usdfprod.yaml @@ -0,0 +1,5 @@ +config: + site_id: usdfprod +db: + host: usdf-summitdb.slac.stanford.edu + user: usdf diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 0670dd6e84..c2709fb3e3 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -12,8 +12,10 @@ applications: alert-stream-broker: true datalinker: true + exposurelog: true livetap: true mobu: true + narrativelog: true noteburst: true nublado: true obsloctap: true diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index 6859d536c4..324d4682ce 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -11,8 +11,10 @@ applications: ingress-nginx: false datalinker: true + exposurelog: true livetap: true mobu: true + narrativelog: true nublado: true plot-navigator: true portal: true From fec6363fc769456395f97a25ebfdb8ec3916268a Mon Sep 
17 00:00:00 2001 From: adam Date: Thu, 11 Jan 2024 14:52:32 -0700 Subject: [PATCH 432/588] Sorry! Finishing up PR#2719 --- applications/narrativelog/values-usdfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/narrativelog/values-usdfdev.yaml b/applications/narrativelog/values-usdfdev.yaml index bf9b05e6b1..c7c8760ec3 100644 --- a/applications/narrativelog/values-usdfdev.yaml +++ b/applications/narrativelog/values-usdfdev.yaml @@ -1,5 +1,5 @@ config: - site_id: usdfprod + site_id: usdfdev db: host: usdf-summitdb.slac.stanford.edu user: usdf From 7c778f4633c61a97b202ce327c44d1a7a3720d3f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 11 Jan 2024 14:05:03 -0800 Subject: [PATCH 433/588] Fix link to Nublado setup-gar documentation --- docs/applications/nublado/bootstrap.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/applications/nublado/bootstrap.rst b/docs/applications/nublado/bootstrap.rst index 6d3406d548..f09b520353 100644 --- a/docs/applications/nublado/bootstrap.rst +++ b/docs/applications/nublado/bootstrap.rst @@ -12,7 +12,7 @@ GKE deployments When deploying Nublado on Google Kubernetes Engine, using Google Artifact Registry as the image source is strongly recommended. This will result in better image selection menus, allow use of container streaming for faster start-up times, and avoid the need to maintain a pull secret. -For setup instructions for using GAR with Nublado, see `Set up Google Artifact Registry in the Nublado documentation `__. +For setup instructions for using GAR with Nublado, see `Set up Google Artifact Registry in the Nublado documentation `__. For more details about the benefits of using GAR, see the `relevant Nublado documentation page `__. 
Telescope and Site deployments From 2cfd641e8a710ded3382ad37242b31fa12cd6b6d Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 21 Nov 2023 11:08:42 -0700 Subject: [PATCH 434/588] Add write:git-lfs to roundtable prod GF known scopes --- applications/gafaelfawr/values-roundtable-prod.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/gafaelfawr/values-roundtable-prod.yaml b/applications/gafaelfawr/values-roundtable-prod.yaml index 22ea82132a..32dcfb324e 100644 --- a/applications/gafaelfawr/values-roundtable-prod.yaml +++ b/applications/gafaelfawr/values-roundtable-prod.yaml @@ -18,6 +18,10 @@ config: oidcServer: enabled: false + knownScopes: + "write:git-lfs": >- + Can write objects to Git LFS storage bucket + groupMapping: "exec:admin": - github: From 75f43f988f8fb31fd8eb236cbf7df587efdde24c Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 21 Nov 2023 11:53:46 -0700 Subject: [PATCH 435/588] Add additional hosts to Gafaelfawr roundtable prod --- applications/gafaelfawr/values-roundtable-prod.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/applications/gafaelfawr/values-roundtable-prod.yaml b/applications/gafaelfawr/values-roundtable-prod.yaml index 32dcfb324e..b91f96b362 100644 --- a/applications/gafaelfawr/values-roundtable-prod.yaml +++ b/applications/gafaelfawr/values-roundtable-prod.yaml @@ -45,3 +45,8 @@ config: errorFooter: | To report problems or ask for help, contact #dm-square on the LSSTC Slack. 
+ +ingress: + additionalHosts: + - "git-lfs.lsst.cloud" + - "git-lfs-rw.lsst.cloud" From b5fa1c315f38f09e5c231d9b0c8c244a93b45c78 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 21 Nov 2023 12:00:09 -0700 Subject: [PATCH 436/588] Add correct hostname to anon Giftless TLS cert --- applications/giftless/templates/ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index f06085fdbc..77e97b5227 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -67,7 +67,7 @@ template: tls: - hosts: - {{ .Values.ingress.hostname.readwrite | quote }} - secretName: tls + secretName: tls-rw rules: - host: {{ .Values.ingress.hostname.readwrite | quote }} http: From 2ba08700912b8c471086831d0d0dc2afa519e7be Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 22:21:11 +0000 Subject: [PATCH 437/588] Update ghcr.io/lsst-sqre/dal-siav2 Docker tag to v0.0.4 --- applications/siav2/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index f511e9ef2f..7ac091e9c8 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -13,7 +13,7 @@ image: pullPolicy: "IfNotPresent" # -- Overrides the image tag whose default is the chart appVersion. 
- tag: "0.0.3" + tag: "0.0.4" ingress: # -- Additional annotations for the ingress rule From 61fa7e330fdbaade0e468daf33edba1c0ed02b27 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 11 Jan 2024 14:46:54 -0800 Subject: [PATCH 438/588] Update Helm docs --- applications/siav2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/siav2/README.md b/applications/siav2/README.md index 0046ff2b84..1afe3efeeb 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -16,7 +16,7 @@ Simple Image Access v2 service | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the siav2 image | | image.repository | string | `"ghcr.io/lsst-sqre/dal-siav2"` | Image to use in the siav2 deployment | -| image.tag | string | `"0.0.3"` | Overrides the image tag whose default is the chart appVersion. | +| image.tag | string | `"0.0.4"` | Overrides the image tag whose default is the chart appVersion. | | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | | nodeSelector | object | `{}` | Node selection rules for the siav2 deployment pod | | obsCoreTable | string | `"ivoa.ObsCore"` | ObsCore table on the TAP service to query | From 5f57affb976799039cd932512b9c84dc4c1ff354 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 11 Jan 2024 16:48:09 -0800 Subject: [PATCH 439/588] Remove alert-stream-broker from data-int The application has been broken there for a while and appears to no longer be used. 
--- .../alert-stream-broker/values-idfint.yaml | 134 ------------------ environments/values-idfint.yaml | 1 - 2 files changed, 135 deletions(-) delete mode 100644 applications/alert-stream-broker/values-idfint.yaml diff --git a/applications/alert-stream-broker/values-idfint.yaml b/applications/alert-stream-broker/values-idfint.yaml deleted file mode 100644 index 11c1c2a2a7..0000000000 --- a/applications/alert-stream-broker/values-idfint.yaml +++ /dev/null @@ -1,134 +0,0 @@ -alert-stream-broker: - cluster: - name: "alert-broker" - - kafka: - # Addresses based on the state as of 2021-12-02; these were assigned by - # Google and now we're pinning them. - externalListener: - tls: - enabled: true - bootstrap: - ip: "35.224.176.103" - host: alert-stream-int.lsst.cloud - brokers: - - ip: "34.28.80.188" - host: alert-stream-int-broker-0.lsst.cloud - - ip: "35.188.136.140" - host: alert-stream-int-broker-1.lsst.cloud - - ip: "35.238.84.221" - host: alert-stream-int-broker-2.lsst.cloud - # - ip: "35.184.182.182" - # host: alert-stream-int-broker-3.lsst.cloud - # - ip: "35.232.191.72" - # host: alert-stream-int-broker-4.lsst.cloud - # - ip: "34.27.122.46" - # host: alert-stream-int-broker-5.lsst.cloud - - replicas: 3 - - storage: - size: 1500Gi - - nodePool: - affinities: - - key: kafka - value: ok - - tolerations: - - key: kafka - value: ok - effect: NoSchedule - vaultSecretsPath: "secret/phalanx/idfint/alert-stream-broker" - - users: - # A user for development purposes by the Rubin team, with access to all - # topics in readonly mode. - - username: "rubin-devel-idfint" - readonlyTopics: ["*"] - groups: ["rubin-devel-idfint"] - - # A user used by the Rubin team but with similar access to the community - # broker users. 
- - username: "rubin-communitybroker-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["rubin-communitybroker-idfint"] - - # The actual community broker users - - username: "alerce-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["alerce-idfint"] - - - username: "ampel-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["ampel-idfint"] - - - username: "antares-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["antares-idfint"] - - - username: "babamul-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["babamul-idfint"] - - - username: "fink-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["fink-idfint"] - - - username: "lasair-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["lasair-idfint"] - - - username: "pittgoogle-idfint" - readonlyTopics: ["alerts-simulated"] - groups: ["pittgoogle-idfint"] - -alert-stream-schema-registry: - hostname: "alert-schemas-int.lsst.cloud" - schemaTopic: "registry-schemas" - -alert-stream-simulator: - clusterPort: 9092 # internal TLS listener - replayTopicName: "alerts-simulated" - replayTopicPartitions: 300 - staticTopicName: "alerts-static" - image: - tag: v1.2.1 - -alert-database: - ingester: - image: - tag: v2.0.2 - - logLevel: verbose - - schemaRegistryURL: https://alert-schemas-int.lsst.cloud - - serviceAccountName: alert-database-writer - - kafka: - cluster: alert-broker - port: 9092 - topic: alerts-simulated - - gcp: - serviceAccountName: alertdb-writer - projectID: science-platform-int-dc5d - - server: - serviceAccountName: alert-database-reader - - gcp: - serviceAccountName: alertdb-reader - projectID: science-platform-int-dc5d - - ingress: - enabled: true - host: "data-int.lsst.cloud" - gafaelfawrAuthQuery: "scope=read:alertdb" - - storage: - gcp: - project: science-platform-int-dc5d - alertBucket: rubin-alertdb-int-us-central1-packets - schemaBucket: rubin-alertdb-int-us-central1-schemas diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml 
index b7f6751c54..7d376d4133 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -11,7 +11,6 @@ onepassword: vaultPathPrefix: "secret/phalanx/idfint" applications: - alert-stream-broker: true butler: true datalinker: true hips: true From 70a1ef5ab1f77ebdf43ae7313be0c3b7d01162aa Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 11 Jan 2024 17:00:18 -0800 Subject: [PATCH 440/588] Stop watching alert-stream-broker on idfint Tell strimzi to stop watching alert-stream-broker on idfint, since we no longer deploy it there. --- applications/strimzi/values-idfint.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/strimzi/values-idfint.yaml b/applications/strimzi/values-idfint.yaml index 01bf88743b..1abe0d7c86 100644 --- a/applications/strimzi/values-idfint.yaml +++ b/applications/strimzi/values-idfint.yaml @@ -6,5 +6,4 @@ strimzi-kafka-operator: memory: "512Mi" watchNamespaces: - "sasquatch" - - "alert-stream-broker" logLevel: "INFO" From 12e5f26cb17bbb9868dcb17cc3887eee41527f19 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 12 Jan 2024 08:06:55 -0800 Subject: [PATCH 441/588] Rename fileservers to nublado-fileservers For consistency with nublado-users, and to ensure that both of the user application buckets sort together, rename fileservers to nublado-fileservers. This is the application and namespace in which Nublado creates user file servers. 
--- .../{fileservers => nublado-fileservers}/Chart.yaml | 2 +- applications/nublado/README.md | 4 ++-- applications/nublado/values.yaml | 4 ++-- ...lication.yaml => nublado-fileservers-application.yaml} | 8 ++++---- tests/docs/applications_test.py | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) rename applications/{fileservers => nublado-fileservers}/Chart.yaml (53%) rename environments/templates/{fileservers-application.yaml => nublado-fileservers-application.yaml} (75%) diff --git a/applications/fileservers/Chart.yaml b/applications/nublado-fileservers/Chart.yaml similarity index 53% rename from applications/fileservers/Chart.yaml rename to applications/nublado-fileservers/Chart.yaml index 1190f73e60..7f44131510 100644 --- a/applications/fileservers/Chart.yaml +++ b/applications/nublado-fileservers/Chart.yaml @@ -1,3 +1,3 @@ apiVersion: v2 -name: fileservers +name: nublado-fileservers version: 1.0.0 diff --git a/applications/nublado/README.md b/applications/nublado/README.md index c218dd696b..3f5991dfc8 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -25,7 +25,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Auth Proxy pod | | controller.affinity | object | `{}` | Affinity rules for the Nublado controller | | controller.config.fileserver.affinity | object | `{}` | Affinity rules for user file server pods | -| controller.config.fileserver.application | string | `"fileservers"` | Argo CD application in which to collect user file servers | +| controller.config.fileserver.application | string | `"nublado-fileservers"` | Argo CD application in which to collect user file servers | | controller.config.fileserver.creationTimeout | int | `120` | Timeout to wait for Kubernetes to create file servers, in seconds | | controller.config.fileserver.deleteTimeout | int | 60 (1 minute) | Timeout for deleting a user's file server from Kubernetes, in 
seconds | | controller.config.fileserver.enabled | bool | `false` | Enable user file servers | @@ -33,7 +33,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.fileserver.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for file server image | | controller.config.fileserver.image.repository | string | `"ghcr.io/lsst-sqre/worblehat"` | File server image to use | | controller.config.fileserver.image.tag | string | `"0.1.0"` | Tag of file server image to use | -| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user file servers | +| controller.config.fileserver.namespace | string | `"nublado-fileservers"` | Namespace for user file servers | | controller.config.fileserver.nodeSelector | object | `{}` | Node selector rules for user file server pods | | controller.config.fileserver.pathPrefix | string | `"/files"` | Path prefix for user file servers | | controller.config.fileserver.resources | object | See `values.yaml` | Resource requests and limits for user file servers | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index b9d5163ba0..66d9ab3dd6 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -64,7 +64,7 @@ controller: affinity: {} # -- Argo CD application in which to collect user file servers - application: "fileservers" + application: "nublado-fileservers" # -- Timeout to wait for Kubernetes to create file servers, in seconds creationTimeout: 120 @@ -88,7 +88,7 @@ controller: tag: "0.1.0" # -- Namespace for user file servers - namespace: "fileservers" + namespace: "nublado-fileservers" # -- Node selector rules for user file server pods nodeSelector: {} diff --git a/environments/templates/fileservers-application.yaml b/environments/templates/nublado-fileservers-application.yaml similarity index 75% rename from environments/templates/fileservers-application.yaml rename to 
environments/templates/nublado-fileservers-application.yaml index 0217017542..aa469712c5 100644 --- a/environments/templates/fileservers-application.yaml +++ b/environments/templates/nublado-fileservers-application.yaml @@ -2,22 +2,22 @@ apiVersion: v1 kind: Namespace metadata: - name: "fileservers" + name: "nublado-fileservers" --- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: "fileservers" + name: "nublado-fileservers" namespace: "argocd" finalizers: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "fileservers" + namespace: "nublado-fileservers" server: "https://kubernetes.default.svc" project: "default" source: - path: "applications/fileservers" + path: "applications/nublado-fileservers" repoURL: {{ .Values.repoUrl | quote }} targetRevision: {{ .Values.targetRevision | quote }} {{- end -}} diff --git a/tests/docs/applications_test.py b/tests/docs/applications_test.py index de80c446de..d5821e837c 100644 --- a/tests/docs/applications_test.py +++ b/tests/docs/applications_test.py @@ -37,7 +37,7 @@ def test_applications_index() -> None: for application in root_path.iterdir(): if not application.is_dir(): continue - if application.name in ("fileservers", "nublado-users"): + if application.name in ("nublado-fileservers", "nublado-users"): continue assert ( application.name in seen From 27032632d9a250356177bbbd30bc31aaac06e629 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 12 Jan 2024 08:44:45 -0800 Subject: [PATCH 442/588] Add NetworkPolicy for Nublado file servers Add a NetworkPolicy to the nublado-fileservers namespace that affects all file servers and restricts access to Gafaelfawr. Otherwise, users inside the cluster can connect directly to anyone's file server as them. 
--- .../templates/_helpers.tpl | 26 +++++++++++++++++++ .../templates/networkpolicy.yaml | 23 ++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 applications/nublado-fileservers/templates/_helpers.tpl create mode 100644 applications/nublado-fileservers/templates/networkpolicy.yaml diff --git a/applications/nublado-fileservers/templates/_helpers.tpl b/applications/nublado-fileservers/templates/_helpers.tpl new file mode 100644 index 0000000000..b8c17a8560 --- /dev/null +++ b/applications/nublado-fileservers/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nublado-fileservers.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "nublado-fileservers.labels" -}} +helm.sh/chart: {{ include "nublado-fileservers.chart" . }} +{{ include "nublado-fileservers.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "nublado-fileservers.selectorLabels" -}} +app.kubernetes.io/name: "nublado-fileservers" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/nublado-fileservers/templates/networkpolicy.yaml b/applications/nublado-fileservers/templates/networkpolicy.yaml new file mode 100644 index 0000000000..da7ec8c714 --- /dev/null +++ b/applications/nublado-fileservers/templates/networkpolicy.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "nublado-fileservers" + labels: + {{- include "nublado-fileservers.labels" . 
| nindent 4 }} +spec: + podSelector: + matchLabels: + nublado.lsst.io/category: "fileserver" + policyTypes: + - Ingress + ingress: + - from: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8000 From 7e976c6b19a013ca70c688e5c81c7c913f21a2a9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 12 Jan 2024 09:08:23 -0800 Subject: [PATCH 443/588] Fix typo in platform requirements --- docs/admin/requirements.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin/requirements.rst b/docs/admin/requirements.rst index ac8dbb9325..4324e9cade 100644 --- a/docs/admin/requirements.rst +++ b/docs/admin/requirements.rst @@ -7,7 +7,7 @@ In order to install a Phalanx environment, the following prerequisites must be i Deployment environment ====================== -Phalanx can only be installed in environments that meet the following reuqirements: +Phalanx can only be installed in environments that meet the following requirements: - Phalanx is a Kubernetes deployment platform that installs within a Kubernetes cluster. The oldest version of Kubernetes known to work is 1.23. From 93674cdf2b99d0e12cfcf5e0078ec76b1b00029d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 12 Jan 2024 09:50:59 -0800 Subject: [PATCH 444/588] Switch back to fileservers for user file servers Unfortunately, we cannot use nublado-fileservers as the namespace for user file servers since it conflicts with the namespace pattern used by Nublado for labs. Keep the namespace of fileservers but still use nublado-fileservers as the Argo CD application for appropriate sorting in the Argo CD application display. 
--- applications/nublado/README.md | 2 +- applications/nublado/values.yaml | 2 +- .../templates/nublado-fileservers-application.yaml | 9 +++++++-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 3f5991dfc8..229aa84f73 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -33,7 +33,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.fileserver.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for file server image | | controller.config.fileserver.image.repository | string | `"ghcr.io/lsst-sqre/worblehat"` | File server image to use | | controller.config.fileserver.image.tag | string | `"0.1.0"` | Tag of file server image to use | -| controller.config.fileserver.namespace | string | `"nublado-fileservers"` | Namespace for user file servers | +| controller.config.fileserver.namespace | string | `"fileservers"` | Namespace for user file servers | | controller.config.fileserver.nodeSelector | object | `{}` | Node selector rules for user file server pods | | controller.config.fileserver.pathPrefix | string | `"/files"` | Path prefix for user file servers | | controller.config.fileserver.resources | object | See `values.yaml` | Resource requests and limits for user file servers | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 66d9ab3dd6..01de1b9ec6 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -88,7 +88,7 @@ controller: tag: "0.1.0" # -- Namespace for user file servers - namespace: "nublado-fileservers" + namespace: "fileservers" # -- Node selector rules for user file server pods nodeSelector: {} diff --git a/environments/templates/nublado-fileservers-application.yaml b/environments/templates/nublado-fileservers-application.yaml index aa469712c5..534327cbfb 100644 --- a/environments/templates/nublado-fileservers-application.yaml +++ 
b/environments/templates/nublado-fileservers-application.yaml @@ -1,8 +1,13 @@ +{{/* + The namespace is fileservers even though the Argo CD application is + nublado-fileservers, since otherwise we have a conflict with the + lab namespace for a user with the username fileservers. +*/}} {{- if .Values.applications.nublado -}} apiVersion: v1 kind: Namespace metadata: - name: "nublado-fileservers" + name: "fileservers" --- apiVersion: argoproj.io/v1alpha1 kind: Application @@ -13,7 +18,7 @@ metadata: - "resources-finalizer.argocd.argoproj.io" spec: destination: - namespace: "nublado-fileservers" + namespace: "fileservers" server: "https://kubernetes.default.svc" project: "default" source: From de875b23ae07dd254d6757c1e3a99ee7d0f6d683 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 12 Jan 2024 13:36:25 -0800 Subject: [PATCH 445/588] Deploy Nublado version 4.0.1 Fixes some security issues for user file servers. --- applications/nublado/Chart.yaml | 2 +- applications/nublado/README.md | 2 +- applications/nublado/values-base.yaml | 2 +- applications/nublado/values-ccin2p3.yaml | 2 +- applications/nublado/values-idfdev.yaml | 2 +- applications/nublado/values-idfint.yaml | 28 +++++++++---------- applications/nublado/values-idfprod.yaml | 22 +++++++-------- applications/nublado/values-roe.yaml | 2 +- applications/nublado/values-summit.yaml | 2 +- .../nublado/values-tucson-teststand.yaml | 2 +- applications/nublado/values.yaml | 2 +- 11 files changed, 34 insertions(+), 34 deletions(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 63214c7930..269be6d27a 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -5,7 +5,7 @@ description: JupyterHub and custom spawner for the Rubin Science Platform sources: - https://github.com/lsst-sqre/nublado home: https://nublado.lsst.io/ -appVersion: 4.0.0 +appVersion: 4.0.1 dependencies: - name: jupyterhub diff --git a/applications/nublado/README.md 
b/applications/nublado/README.md index 229aa84f73..ac8259d5fd 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -99,7 +99,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | | jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | | jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/nublado-jupyterhub"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"4.0.0"` | Tag of image to use for JupyterHub | +| jupyterhub.hub.image.tag | string | `"4.0.1"` | Tag of image to use for JupyterHub | | jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | | jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | | jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index cec07cc809..31d68aef1a 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -24,7 +24,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-ccin2p3.yaml b/applications/nublado/values-ccin2p3.yaml index 26eff07b94..b4346fe3df 100644 --- a/applications/nublado/values-ccin2p3.yaml +++ b/applications/nublado/values-ccin2p3.yaml @@ -23,7 +23,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + 
tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 912be7e9ba..2d811a22c8 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -27,7 +27,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 30acc816ec..d181716684 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -32,24 +32,11 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - sizes: - - size: small - cpu: 1.0 - memory: 4Gi - - size: medium - cpu: 2.0 - memory: 8Gi - - size: large - cpu: 4.0 - memory: 16Gi - - size: huge - cpu: 8.0 - memory: 32Gi initContainers: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" @@ -63,6 +50,19 @@ controller: secretKey: "butler-hmac-idf-creds.json" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" + sizes: + - size: small + cpu: 1.0 + memory: 4Gi + - size: medium + cpu: 2.0 + memory: 8Gi + - size: large + cpu: 4.0 + memory: 16Gi + - size: huge + cpu: 8.0 + memory: 32Gi volumes: - name: "home" source: diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 2971992ab2..6960cb560f 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -20,21 +20,11 @@ controller: NO_ACTIVITY_TIMEOUT: "432000" CULL_KERNEL_IDLE_TIMEOUT: "432000" CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - sizes: - - size: small - cpu: 1.0 - memory: 4Gi - - size: medium - cpu: 2.0 - memory: 
8Gi - - size: large - cpu: 4.0 - memory: 16Gi initContainers: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" @@ -48,6 +38,16 @@ controller: secretKey: "butler-hmac-idf-creds.json" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" + sizes: + - size: small + cpu: 1.0 + memory: 4Gi + - size: medium + cpu: 2.0 + memory: 8Gi + - size: large + cpu: 4.0 + memory: 16Gi volumes: - name: "home" source: diff --git a/applications/nublado/values-roe.yaml b/applications/nublado/values-roe.yaml index 038a90ba3b..656504f0c5 100644 --- a/applications/nublado/values-roe.yaml +++ b/applications/nublado/values-roe.yaml @@ -14,7 +14,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 5de07ebc7f..3db89c8105 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -24,7 +24,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 87f165d915..7338e82020 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -24,7 +24,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.0" + tag: "4.0.1" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 01de1b9ec6..c3e21a764c 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml 
@@ -381,7 +381,7 @@ jupyterhub: name: "ghcr.io/lsst-sqre/nublado-jupyterhub" # -- Tag of image to use for JupyterHub - tag: "4.0.0" + tag: "4.0.1" # -- Resource limits and requests resources: From 2575d16004cad7f8ef89d0e1f3b0274a3a9fc095 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 08:03:12 +0000 Subject: [PATCH 446/588] Update Helm release argo-workflows to v0.40.6 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 92ba83971e..4676dc65f7 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.40.4 + version: 0.40.6 repository: https://argoproj.github.io/argo-helm From bccd6a42e48c6e1e3f11e08a0032faf0181181a3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 11:31:10 +0000 Subject: [PATCH 447/588] Update Helm release argo-cd to v5.52.2 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index bef85d071f..76801d9e57 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.52.1 + version: 5.52.2 repository: https://argoproj.github.io/argo-helm From 1bf4ea2261d066c5ff9735b538b66bf0f34987cb Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 12:31:56 +0000 Subject: [PATCH 448/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 273 
+++++++++++++++++++++--------------------- requirements/main.txt | 14 +-- 2 files changed, 141 insertions(+), 146 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 47d97038ae..c55c823a3e 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.12 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/dev.txt requirements/dev.in @@ -670,127 +670,127 @@ rich==13.7.0 \ --hash=sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa \ --hash=sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235 # via pytest-pretty -rpds-py==0.16.2 \ - --hash=sha256:0474df4ade9a3b4af96c3d36eb81856cb9462e4c6657d4caecfd840d2a13f3c9 \ - --hash=sha256:071980663c273bf3d388fe5c794c547e6f35ba3335477072c713a3176bf14a60 \ - --hash=sha256:07aab64e2808c3ebac2a44f67e9dc0543812b715126dfd6fe4264df527556cb6 \ - --hash=sha256:088396c7c70e59872f67462fcac3ecbded5233385797021976a09ebd55961dfe \ - --hash=sha256:162d7cd9cd311c1b0ff1c55a024b8f38bd8aad1876b648821da08adc40e95734 \ - --hash=sha256:19f00f57fdd38db4bb5ad09f9ead1b535332dbf624200e9029a45f1f35527ebb \ - --hash=sha256:1bdbc5fcb04a7309074de6b67fa9bc4b418ab3fc435fec1f2779a0eced688d04 \ - --hash=sha256:1be2f033df1b8be8c3167ba3c29d5dca425592ee31e35eac52050623afba5772 \ - --hash=sha256:24f7a2eb3866a9e91f4599851e0c8d39878a470044875c49bd528d2b9b88361c \ - --hash=sha256:290a81cfbe4673285cdf140ec5cd1658ffbf63ab359f2b352ebe172e7cfa5bf0 \ - --hash=sha256:2946b120718eba9af2b4dd103affc1164a87b9e9ebff8c3e4c05d7b7a7e274e2 \ - --hash=sha256:2bd82db36cd70b3628c0c57d81d2438e8dd4b7b32a6a9f25f24ab0e657cb6c4e \ - --hash=sha256:2ddef620e70eaffebed5932ce754d539c0930f676aae6212f8e16cd9743dd365 \ - --hash=sha256:2e53b9b25cac9065328901713a7e9e3b12e4f57ef4280b370fbbf6fef2052eef \ - 
--hash=sha256:302bd4983bbd47063e452c38be66153760112f6d3635c7eeefc094299fa400a9 \ - --hash=sha256:349cb40897fd529ca15317c22c0eab67f5ac5178b5bd2c6adc86172045210acc \ - --hash=sha256:358dafc89ce3894c7f486c615ba914609f38277ef67f566abc4c854d23b997fa \ - --hash=sha256:35953f4f2b3216421af86fd236b7c0c65935936a94ea83ddbd4904ba60757773 \ - --hash=sha256:35ae5ece284cf36464eb160880018cf6088a9ac5ddc72292a6092b6ef3f4da53 \ - --hash=sha256:3b811d182ad17ea294f2ec63c0621e7be92a1141e1012383461872cead87468f \ - --hash=sha256:3da5a4c56953bdbf6d04447c3410309616c54433146ccdb4a277b9cb499bc10e \ - --hash=sha256:3dc6a7620ba7639a3db6213da61312cb4aa9ac0ca6e00dc1cbbdc21c2aa6eb57 \ - --hash=sha256:3f91df8e6dbb7360e176d1affd5fb0246d2b88d16aa5ebc7db94fd66b68b61da \ - --hash=sha256:4022b9dc620e14f30201a8a73898a873c8e910cb642bcd2f3411123bc527f6ac \ - --hash=sha256:413b9c17388bbd0d87a329d8e30c1a4c6e44e2bb25457f43725a8e6fe4161e9e \ - --hash=sha256:43d4dd5fb16eb3825742bad8339d454054261ab59fed2fbac84e1d84d5aae7ba \ - --hash=sha256:44627b6ca7308680a70766454db5249105fa6344853af6762eaad4158a2feebe \ - --hash=sha256:44a54e99a2b9693a37ebf245937fd6e9228b4cbd64b9cc961e1f3391ec6c7391 \ - --hash=sha256:47713dc4fce213f5c74ca8a1f6a59b622fc1b90868deb8e8e4d993e421b4b39d \ - --hash=sha256:495a14b72bbe217f2695dcd9b5ab14d4f8066a00f5d209ed94f0aca307f85f6e \ - --hash=sha256:4c46ad6356e1561f2a54f08367d1d2e70a0a1bb2db2282d2c1972c1d38eafc3b \ - --hash=sha256:4d6a9f052e72d493efd92a77f861e45bab2f6be63e37fa8ecf0c6fd1a58fedb0 \ - --hash=sha256:509b617ac787cd1149600e731db9274ebbef094503ca25158e6f23edaba1ca8f \ - --hash=sha256:5552f328eaef1a75ff129d4d0c437bf44e43f9436d3996e8eab623ea0f5fcf73 \ - --hash=sha256:5a80e2f83391ad0808b4646732af2a7b67550b98f0cae056cb3b40622a83dbb3 \ - --hash=sha256:5cf6af100ffb5c195beec11ffaa8cf8523057f123afa2944e6571d54da84cdc9 \ - --hash=sha256:5e6caa3809e50690bd92fa490f5c38caa86082c8c3315aa438bce43786d5e90d \ - --hash=sha256:5ef00873303d678aaf8b0627e111fd434925ca01c657dbb2641410f1cdaef261 \ - 
--hash=sha256:69ac7ea9897ec201ce68b48582f3eb34a3f9924488a5432a93f177bf76a82a7e \ - --hash=sha256:6a61226465bda9283686db8f17d02569a98e4b13c637be5a26d44aa1f1e361c2 \ - --hash=sha256:6d904c5693e08bad240f16d79305edba78276be87061c872a4a15e2c301fa2c0 \ - --hash=sha256:6dace7b26a13353e24613417ce2239491b40a6ad44e5776a18eaff7733488b44 \ - --hash=sha256:6df15846ee3fb2e6397fe25d7ca6624af9f89587f3f259d177b556fed6bebe2c \ - --hash=sha256:703d95c75a72e902544fda08e965885525e297578317989fd15a6ce58414b41d \ - --hash=sha256:726ac36e8a3bb8daef2fd482534cabc5e17334052447008405daca7ca04a3108 \ - --hash=sha256:781ef8bfc091b19960fc0142a23aedadafa826bc32b433fdfe6fd7f964d7ef44 \ - --hash=sha256:80443fe2f7b3ea3934c5d75fb0e04a5dbb4a8e943e5ff2de0dec059202b70a8b \ - --hash=sha256:83640a5d7cd3bff694747d50436b8b541b5b9b9782b0c8c1688931d6ee1a1f2d \ - --hash=sha256:84c5a4d1f9dd7e2d2c44097fb09fffe728629bad31eb56caf97719e55575aa82 \ - --hash=sha256:882ce6e25e585949c3d9f9abd29202367175e0aab3aba0c58c9abbb37d4982ff \ - --hash=sha256:888a97002e986eca10d8546e3c8b97da1d47ad8b69726dcfeb3e56348ebb28a3 \ - --hash=sha256:8aad80645a011abae487d356e0ceb359f4938dfb6f7bcc410027ed7ae4f7bb8b \ - --hash=sha256:8cb6fe8ecdfffa0e711a75c931fb39f4ba382b4b3ccedeca43f18693864fe850 \ - --hash=sha256:8d6b6937ae9eac6d6c0ca3c42774d89fa311f55adff3970fb364b34abde6ed3d \ - --hash=sha256:90123853fc8b1747f80b0d354be3d122b4365a93e50fc3aacc9fb4c2488845d6 \ - --hash=sha256:96f957d6ab25a78b9e7fc9749d754b98eac825a112b4e666525ce89afcbd9ed5 \ - --hash=sha256:981d135c7cdaf6cd8eadae1c950de43b976de8f09d8e800feed307140d3d6d00 \ - --hash=sha256:9b32f742ce5b57201305f19c2ef7a184b52f6f9ba6871cc042c2a61f0d6b49b8 \ - --hash=sha256:9f0350ef2fba5f34eb0c9000ea328e51b9572b403d2f7f3b19f24085f6f598e8 \ - --hash=sha256:a297a4d08cc67c7466c873c78039d87840fb50d05473db0ec1b7b03d179bf322 \ - --hash=sha256:a3d7e2ea25d3517c6d7e5a1cc3702cffa6bd18d9ef8d08d9af6717fc1c700eed \ - --hash=sha256:a4b682c5775d6a3d21e314c10124599976809455ee67020e8e72df1769b87bc3 \ - 
--hash=sha256:a4ebb8b20bd09c5ce7884c8f0388801100f5e75e7f733b1b6613c713371feefc \ - --hash=sha256:a61f659665a39a4d17d699ab3593d7116d66e1e2e3f03ef3fb8f484e91908808 \ - --hash=sha256:a9880b4656efe36ccad41edc66789e191e5ee19a1ea8811e0aed6f69851a82f4 \ - --hash=sha256:ac08472f41ea77cd6a5dae36ae7d4ed3951d6602833af87532b556c1b4601d63 \ - --hash=sha256:adc0c3d6fc6ae35fee3e4917628983f6ce630d513cbaad575b4517d47e81b4bb \ - --hash=sha256:af27423662f32d7501a00c5e7342f7dbd1e4a718aea7a239781357d15d437133 \ - --hash=sha256:b2e75e17bd0bb66ee34a707da677e47c14ee51ccef78ed6a263a4cc965a072a1 \ - --hash=sha256:b634c5ec0103c5cbebc24ebac4872b045cccb9456fc59efdcf6fe39775365bd2 \ - --hash=sha256:b6f5549d6ed1da9bfe3631ca9483ae906f21410be2445b73443fa9f017601c6f \ - --hash=sha256:bd4b677d929cf1f6bac07ad76e0f2d5de367e6373351c01a9c0a39f6b21b4a8b \ - --hash=sha256:bf721ede3eb7b829e4a9b8142bd55db0bdc82902720548a703f7e601ee13bdc3 \ - --hash=sha256:c647ca87fc0ebe808a41de912e9a1bfef9acb85257e5d63691364ac16b81c1f0 \ - --hash=sha256:ca57468da2d9a660bcf8961637c85f2fbb2aa64d9bc3f9484e30c3f9f67b1dd7 \ - --hash=sha256:cad0f59ee3dc35526039f4bc23642d52d5f6616b5f687d846bfc6d0d6d486db0 \ - --hash=sha256:cc97f0640e91d7776530f06e6836c546c1c752a52de158720c4224c9e8053cad \ - --hash=sha256:ccd4e400309e1f34a5095bf9249d371f0fd60f8a3a5c4a791cad7b99ce1fd38d \ - --hash=sha256:cffa76b385dfe1e38527662a302b19ffb0e7f5cf7dd5e89186d2c94a22dd9d0c \ - --hash=sha256:d0dd7ed2f16df2e129496e7fbe59a34bc2d7fc8db443a606644d069eb69cbd45 \ - --hash=sha256:d452817e0d9c749c431a1121d56a777bd7099b720b3d1c820f1725cb40928f58 \ - --hash=sha256:d8dda2a806dfa4a9b795950c4f5cc56d6d6159f7d68080aedaff3bdc9b5032f5 \ - --hash=sha256:dcbe1f8dd179e4d69b70b1f1d9bb6fd1e7e1bdc9c9aad345cdeb332e29d40748 \ - --hash=sha256:e0441fb4fdd39a230477b2ca9be90868af64425bfe7b122b57e61e45737a653b \ - --hash=sha256:e04e56b4ca7a770593633556e8e9e46579d66ec2ada846b401252a2bdcf70a6d \ - --hash=sha256:e061de3b745fe611e23cd7318aec2c8b0e4153939c25c9202a5811ca911fd733 \ - 
--hash=sha256:e93ec1b300acf89730cf27975ef574396bc04edecc358e9bd116fb387a123239 \ - --hash=sha256:e9e557db6a177470316c82f023e5d571811c9a4422b5ea084c85da9aa3c035fc \ - --hash=sha256:eab36eae3f3e8e24b05748ec9acc66286662f5d25c52ad70cadab544e034536b \ - --hash=sha256:ec23fcad480e77ede06cf4127a25fc440f7489922e17fc058f426b5256ee0edb \ - --hash=sha256:ec2e1cf025b2c0f48ec17ff3e642661da7ee332d326f2e6619366ce8e221f018 \ - --hash=sha256:ed99b4f7179d2111702020fd7d156e88acd533f5a7d3971353e568b6051d5c97 \ - --hash=sha256:ee94cb58c0ba2c62ee108c2b7c9131b2c66a29e82746e8fa3aa1a1effbd3dcf1 \ - --hash=sha256:f19afcfc0dd0dca35694df441e9b0f95bc231b512f51bded3c3d8ca32153ec19 \ - --hash=sha256:f1b9d9260e06ea017feb7172976ab261e011c1dc2f8883c7c274f6b2aabfe01a \ - --hash=sha256:f28ac0e8e7242d140f99402a903a2c596ab71550272ae9247ad78f9a932b5698 \ - --hash=sha256:f42e25c016927e2a6b1ce748112c3ab134261fc2ddc867e92d02006103e1b1b7 \ - --hash=sha256:f4bd4578e44f26997e9e56c96dedc5f1af43cc9d16c4daa29c771a00b2a26851 \ - --hash=sha256:f811771019f063bbd0aa7bb72c8a934bc13ebacb4672d712fc1639cfd314cccc +rpds-py==0.17.1 \ + --hash=sha256:01f58a7306b64e0a4fe042047dd2b7d411ee82e54240284bab63e325762c1147 \ + --hash=sha256:0210b2668f24c078307260bf88bdac9d6f1093635df5123789bfee4d8d7fc8e7 \ + --hash=sha256:02866e060219514940342a1f84303a1ef7a1dad0ac311792fbbe19b521b489d2 \ + --hash=sha256:0387ce69ba06e43df54e43968090f3626e231e4bc9150e4c3246947567695f68 \ + --hash=sha256:060f412230d5f19fc8c8b75f315931b408d8ebf56aec33ef4168d1b9e54200b1 \ + --hash=sha256:071bc28c589b86bc6351a339114fb7a029f5cddbaca34103aa573eba7b482382 \ + --hash=sha256:0bfb09bf41fe7c51413f563373e5f537eaa653d7adc4830399d4e9bdc199959d \ + --hash=sha256:10162fe3f5f47c37ebf6d8ff5a2368508fe22007e3077bf25b9c7d803454d921 \ + --hash=sha256:149c5cd24f729e3567b56e1795f74577aa3126c14c11e457bec1b1c90d212e38 \ + --hash=sha256:1701fc54460ae2e5efc1dd6350eafd7a760f516df8dbe51d4a1c79d69472fbd4 \ + 
--hash=sha256:1957a2ab607f9added64478a6982742eb29f109d89d065fa44e01691a20fc20a \ + --hash=sha256:1a746a6d49665058a5896000e8d9d2f1a6acba8a03b389c1e4c06e11e0b7f40d \ + --hash=sha256:1bfcad3109c1e5ba3cbe2f421614e70439f72897515a96c462ea657261b96518 \ + --hash=sha256:1d36b2b59e8cc6e576f8f7b671e32f2ff43153f0ad6d0201250a7c07f25d570e \ + --hash=sha256:1db228102ab9d1ff4c64148c96320d0be7044fa28bd865a9ce628ce98da5973d \ + --hash=sha256:1dc29db3900cb1bb40353772417800f29c3d078dbc8024fd64655a04ee3c4bdf \ + --hash=sha256:1e626b365293a2142a62b9a614e1f8e331b28f3ca57b9f05ebbf4cf2a0f0bdc5 \ + --hash=sha256:1f3c3461ebb4c4f1bbc70b15d20b565759f97a5aaf13af811fcefc892e9197ba \ + --hash=sha256:20de7b7179e2031a04042e85dc463a93a82bc177eeba5ddd13ff746325558aa6 \ + --hash=sha256:24e4900a6643f87058a27320f81336d527ccfe503984528edde4bb660c8c8d59 \ + --hash=sha256:2528ff96d09f12e638695f3a2e0c609c7b84c6df7c5ae9bfeb9252b6fa686253 \ + --hash=sha256:25f071737dae674ca8937a73d0f43f5a52e92c2d178330b4c0bb6ab05586ffa6 \ + --hash=sha256:270987bc22e7e5a962b1094953ae901395e8c1e1e83ad016c5cfcfff75a15a3f \ + --hash=sha256:292f7344a3301802e7c25c53792fae7d1593cb0e50964e7bcdcc5cf533d634e3 \ + --hash=sha256:2953937f83820376b5979318840f3ee47477d94c17b940fe31d9458d79ae7eea \ + --hash=sha256:2a792b2e1d3038daa83fa474d559acfd6dc1e3650ee93b2662ddc17dbff20ad1 \ + --hash=sha256:2a7b2f2f56a16a6d62e55354dd329d929560442bd92e87397b7a9586a32e3e76 \ + --hash=sha256:2f4eb548daf4836e3b2c662033bfbfc551db58d30fd8fe660314f86bf8510b93 \ + --hash=sha256:3664d126d3388a887db44c2e293f87d500c4184ec43d5d14d2d2babdb4c64cad \ + --hash=sha256:3677fcca7fb728c86a78660c7fb1b07b69b281964673f486ae72860e13f512ad \ + --hash=sha256:380e0df2e9d5d5d339803cfc6d183a5442ad7ab3c63c2a0982e8c824566c5ccc \ + --hash=sha256:3ac732390d529d8469b831949c78085b034bff67f584559340008d0f6041a049 \ + --hash=sha256:4128980a14ed805e1b91a7ed551250282a8ddf8201a4e9f8f5b7e6225f54170d \ + --hash=sha256:4341bd7579611cf50e7b20bb8c2e23512a3dc79de987a1f411cb458ab670eb90 \ + 
--hash=sha256:436474f17733c7dca0fbf096d36ae65277e8645039df12a0fa52445ca494729d \ + --hash=sha256:4dc889a9d8a34758d0fcc9ac86adb97bab3fb7f0c4d29794357eb147536483fd \ + --hash=sha256:4e21b76075c01d65d0f0f34302b5a7457d95721d5e0667aea65e5bb3ab415c25 \ + --hash=sha256:516fb8c77805159e97a689e2f1c80655c7658f5af601c34ffdb916605598cda2 \ + --hash=sha256:5576ee2f3a309d2bb403ec292d5958ce03953b0e57a11d224c1f134feaf8c40f \ + --hash=sha256:5a024fa96d541fd7edaa0e9d904601c6445e95a729a2900c5aec6555fe921ed6 \ + --hash=sha256:5d0e8a6434a3fbf77d11448c9c25b2f25244226cfbec1a5159947cac5b8c5fa4 \ + --hash=sha256:5e7d63ec01fe7c76c2dbb7e972fece45acbb8836e72682bde138e7e039906e2c \ + --hash=sha256:60e820ee1004327609b28db8307acc27f5f2e9a0b185b2064c5f23e815f248f8 \ + --hash=sha256:637b802f3f069a64436d432117a7e58fab414b4e27a7e81049817ae94de45d8d \ + --hash=sha256:65dcf105c1943cba45d19207ef51b8bc46d232a381e94dd38719d52d3980015b \ + --hash=sha256:698ea95a60c8b16b58be9d854c9f993c639f5c214cf9ba782eca53a8789d6b19 \ + --hash=sha256:70fcc6c2906cfa5c6a552ba7ae2ce64b6c32f437d8f3f8eea49925b278a61453 \ + --hash=sha256:720215373a280f78a1814becb1312d4e4d1077b1202a56d2b0815e95ccb99ce9 \ + --hash=sha256:7450dbd659fed6dd41d1a7d47ed767e893ba402af8ae664c157c255ec6067fde \ + --hash=sha256:7b7d9ca34542099b4e185b3c2a2b2eda2e318a7dbde0b0d83357a6d4421b5296 \ + --hash=sha256:7fbd70cb8b54fe745301921b0816c08b6d917593429dfc437fd024b5ba713c58 \ + --hash=sha256:81038ff87a4e04c22e1d81f947c6ac46f122e0c80460b9006e6517c4d842a6ec \ + --hash=sha256:810685321f4a304b2b55577c915bece4c4a06dfe38f6e62d9cc1d6ca8ee86b99 \ + --hash=sha256:82ada4a8ed9e82e443fcef87e22a3eed3654dd3adf6e3b3a0deb70f03e86142a \ + --hash=sha256:841320e1841bb53fada91c9725e766bb25009cfd4144e92298db296fb6c894fb \ + --hash=sha256:8587fd64c2a91c33cdc39d0cebdaf30e79491cc029a37fcd458ba863f8815383 \ + --hash=sha256:8ffe53e1d8ef2520ebcf0c9fec15bb721da59e8ef283b6ff3079613b1e30513d \ + --hash=sha256:9051e3d2af8f55b42061603e29e744724cb5f65b128a491446cc029b3e2ea896 \ + 
--hash=sha256:91e5a8200e65aaac342a791272c564dffcf1281abd635d304d6c4e6b495f29dc \ + --hash=sha256:93432e747fb07fa567ad9cc7aaadd6e29710e515aabf939dfbed8046041346c6 \ + --hash=sha256:938eab7323a736533f015e6069a7d53ef2dcc841e4e533b782c2bfb9fb12d84b \ + --hash=sha256:9584f8f52010295a4a417221861df9bea4c72d9632562b6e59b3c7b87a1522b7 \ + --hash=sha256:9737bdaa0ad33d34c0efc718741abaafce62fadae72c8b251df9b0c823c63b22 \ + --hash=sha256:99da0a4686ada4ed0f778120a0ea8d066de1a0a92ab0d13ae68492a437db78bf \ + --hash=sha256:99f567dae93e10be2daaa896e07513dd4bf9c2ecf0576e0533ac36ba3b1d5394 \ + --hash=sha256:9bdf1303df671179eaf2cb41e8515a07fc78d9d00f111eadbe3e14262f59c3d0 \ + --hash=sha256:9f0e4dc0f17dcea4ab9d13ac5c666b6b5337042b4d8f27e01b70fae41dd65c57 \ + --hash=sha256:a000133a90eea274a6f28adc3084643263b1e7c1a5a66eb0a0a7a36aa757ed74 \ + --hash=sha256:a3264e3e858de4fc601741498215835ff324ff2482fd4e4af61b46512dd7fc83 \ + --hash=sha256:a71169d505af63bb4d20d23a8fbd4c6ce272e7bce6cc31f617152aa784436f29 \ + --hash=sha256:a967dd6afda7715d911c25a6ba1517975acd8d1092b2f326718725461a3d33f9 \ + --hash=sha256:aa5bfb13f1e89151ade0eb812f7b0d7a4d643406caaad65ce1cbabe0a66d695f \ + --hash=sha256:ae35e8e6801c5ab071b992cb2da958eee76340e6926ec693b5ff7d6381441745 \ + --hash=sha256:b686f25377f9c006acbac63f61614416a6317133ab7fafe5de5f7dc8a06d42eb \ + --hash=sha256:b760a56e080a826c2e5af09002c1a037382ed21d03134eb6294812dda268c811 \ + --hash=sha256:b86b21b348f7e5485fae740d845c65a880f5d1eda1e063bc59bef92d1f7d0c55 \ + --hash=sha256:b9412abdf0ba70faa6e2ee6c0cc62a8defb772e78860cef419865917d86c7342 \ + --hash=sha256:bd345a13ce06e94c753dab52f8e71e5252aec1e4f8022d24d56decd31e1b9b23 \ + --hash=sha256:be22ae34d68544df293152b7e50895ba70d2a833ad9566932d750d3625918b82 \ + --hash=sha256:bf046179d011e6114daf12a534d874958b039342b347348a78b7cdf0dd9d6041 \ + --hash=sha256:c3d2010656999b63e628a3c694f23020322b4178c450dc478558a2b6ef3cb9bb \ + --hash=sha256:c64602e8be701c6cfe42064b71c84ce62ce66ddc6422c15463fd8127db3d8066 \ + 
--hash=sha256:d65e6b4f1443048eb7e833c2accb4fa7ee67cc7d54f31b4f0555b474758bee55 \ + --hash=sha256:d8bbd8e56f3ba25a7d0cf980fc42b34028848a53a0e36c9918550e0280b9d0b6 \ + --hash=sha256:da1ead63368c04a9bded7904757dfcae01eba0e0f9bc41d3d7f57ebf1c04015a \ + --hash=sha256:dbbb95e6fc91ea3102505d111b327004d1c4ce98d56a4a02e82cd451f9f57140 \ + --hash=sha256:dbc56680ecf585a384fbd93cd42bc82668b77cb525343170a2d86dafaed2a84b \ + --hash=sha256:df3b6f45ba4515632c5064e35ca7f31d51d13d1479673185ba8f9fefbbed58b9 \ + --hash=sha256:dfe07308b311a8293a0d5ef4e61411c5c20f682db6b5e73de6c7c8824272c256 \ + --hash=sha256:e796051f2070f47230c745d0a77a91088fbee2cc0502e9b796b9c6471983718c \ + --hash=sha256:efa767c220d94aa4ac3a6dd3aeb986e9f229eaf5bce92d8b1b3018d06bed3772 \ + --hash=sha256:f0b8bf5b8db49d8fd40f54772a1dcf262e8be0ad2ab0206b5a2ec109c176c0a4 \ + --hash=sha256:f175e95a197f6a4059b50757a3dca33b32b61691bdbd22c29e8a8d21d3914cae \ + --hash=sha256:f2f3b28b40fddcb6c1f1f6c88c6f3769cd933fa493ceb79da45968a21dccc920 \ + --hash=sha256:f6c43b6f97209e370124baf2bf40bb1e8edc25311a158867eb1c3a5d449ebc7a \ + --hash=sha256:f7f4cb1f173385e8a39c29510dd11a78bf44e360fb75610594973f5ea141028b \ + --hash=sha256:fad059a4bd14c45776600d223ec194e77db6c20255578bb5bcdd7c18fd169361 \ + --hash=sha256:ff1dcb8e8bc2261a088821b2595ef031c91d499a0c1b031c152d43fe0a6ecec8 \ + --hash=sha256:ffee088ea9b593cc6160518ba9bd319b5475e5f3e578e4552d63818773c6f56a # via # jsonschema # referencing -ruff==0.1.11 \ - --hash=sha256:09c415716884950080921dd6237767e52e227e397e2008e2bed410117679975b \ - --hash=sha256:0f58948c6d212a6b8d41cd59e349751018797ce1727f961c2fa755ad6208ba45 \ - --hash=sha256:190a566c8f766c37074d99640cd9ca3da11d8deae2deae7c9505e68a4a30f740 \ - --hash=sha256:231d8fb11b2cc7c0366a326a66dafc6ad449d7fcdbc268497ee47e1334f66f77 \ - --hash=sha256:4b077ce83f47dd6bea1991af08b140e8b8339f0ba8cb9b7a484c30ebab18a23f \ - --hash=sha256:5b25093dad3b055667730a9b491129c42d45e11cdb7043b702e97125bcec48a1 \ - 
--hash=sha256:6464289bd67b2344d2a5d9158d5eb81025258f169e69a46b741b396ffb0cda95 \ - --hash=sha256:934832f6ed9b34a7d5feea58972635c2039c7a3b434fe5ba2ce015064cb6e955 \ - --hash=sha256:97ce4d752f964ba559c7023a86e5f8e97f026d511e48013987623915431c7ea9 \ - --hash=sha256:9b8f397902f92bc2e70fb6bebfa2139008dc72ae5177e66c383fa5426cb0bf2c \ - --hash=sha256:9bd4025b9c5b429a48280785a2b71d479798a69f5c2919e7d274c5f4b32c3607 \ - --hash=sha256:a7f772696b4cdc0a3b2e527fc3c7ccc41cdcb98f5c80fdd4f2b8c50eb1458196 \ - --hash=sha256:c4a88efecec23c37b11076fe676e15c6cdb1271a38f2b415e381e87fe4517f18 \ - --hash=sha256:e1ad00662305dcb1e987f5ec214d31f7d6a062cae3e74c1cbccef15afd96611d \ - --hash=sha256:ea0d3e950e394c4b332bcdd112aa566010a9f9c95814844a7468325290aabfd9 \ - --hash=sha256:eb85ee287b11f901037a6683b2374bb0ec82928c5cbc984f575d0437979c521a \ - --hash=sha256:f9d4d88cb6eeb4dfe20f9f0519bd2eaba8119bde87c3d5065c541dbae2b5a2cb +ruff==0.1.13 \ + --hash=sha256:226b517f42d59a543d6383cfe03cccf0091e3e0ed1b856c6824be03d2a75d3b6 \ + --hash=sha256:2f59bcf5217c661254bd6bc42d65a6fd1a8b80c48763cb5c2293295babd945dd \ + --hash=sha256:5f0312ba1061e9b8c724e9a702d3c8621e3c6e6c2c9bd862550ab2951ac75c16 \ + --hash=sha256:6bbbc3042075871ec17f28864808540a26f0f79a4478c357d3e3d2284e832998 \ + --hash=sha256:7a36fa90eb12208272a858475ec43ac811ac37e91ef868759770b71bdabe27b6 \ + --hash=sha256:9a1600942485c6e66119da294c6294856b5c86fd6df591ce293e4a4cc8e72989 \ + --hash=sha256:9ebb40442f7b531e136d334ef0851412410061e65d61ca8ce90d894a094feb22 \ + --hash=sha256:9fb6b3b86450d4ec6a6732f9f60c4406061b6851c4b29f944f8c9d91c3611c7a \ + --hash=sha256:a623349a505ff768dad6bd57087e2461be8db58305ebd5577bd0e98631f9ae69 \ + --hash=sha256:b13ba5d7156daaf3fd08b6b993360a96060500aca7e307d95ecbc5bb47a69296 \ + --hash=sha256:dcaab50e278ff497ee4d1fe69b29ca0a9a47cd954bb17963628fa417933c6eb1 \ + --hash=sha256:e261f1baed6291f434ffb1d5c6bd8051d1c2a26958072d38dfbec39b3dda7352 \ + 
--hash=sha256:e3fd36e0d48aeac672aa850045e784673449ce619afc12823ea7868fcc41d8ba \ + --hash=sha256:e6894b00495e00c27b6ba61af1fc666f17de6140345e5ef27dd6e08fb987259d \ + --hash=sha256:ee3febce7863e231a467f90e681d3d89210b900d49ce88723ce052c8761be8c7 \ + --hash=sha256:f57de973de4edef3ad3044d6a50c02ad9fc2dff0d88587f25f1a48e3f72edf5e \ + --hash=sha256:f988746e3c3982bea7f824c8fa318ce7f538c4dfefec99cd09c8770bd33e6539 # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -829,14 +829,9 @@ sphinx==7.2.6 \ # sphinx-design # sphinx-jinja # sphinx-prompt - # sphinxcontrib-applehelp # sphinxcontrib-bibtex - # sphinxcontrib-devhelp - # sphinxcontrib-htmlhelp # sphinxcontrib-jquery - # sphinxcontrib-qthelp # sphinxcontrib-redoc - # sphinxcontrib-serializinghtml # sphinxext-opengraph # sphinxext-rediraffe sphinx-autodoc-typehints==1.25.2 \ @@ -873,21 +868,21 @@ sphinx-prompt==1.8.0 \ --hash=sha256:369ecc633f0711886f9b3a078c83264245be1adf46abeeb9b88b5519e4b51007 \ --hash=sha256:47482f86fcec29662fdfd23e7c04ef03582714195d01f5d565403320084372ed # via documenteer -sphinxcontrib-applehelp==1.0.7 \ - --hash=sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d \ - --hash=sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa +sphinxcontrib-applehelp==1.0.8 \ + --hash=sha256:c40a4f96f3776c4393d933412053962fac2b84f4c99a7982ba42e09576a70619 \ + --hash=sha256:cb61eb0ec1b61f349e5cc36b2028e9e7ca765be05e49641c97241274753067b4 # via sphinx sphinxcontrib-bibtex==2.6.2 \ --hash=sha256:10d45ebbb19207c5665396c9446f8012a79b8a538cb729f895b5910ab2d0b2da \ --hash=sha256:f487af694336f28bfb7d6a17070953a7d264bec43000a2379724274f5f8d70ae # via documenteer -sphinxcontrib-devhelp==1.0.5 \ - --hash=sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212 \ - --hash=sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f +sphinxcontrib-devhelp==1.0.6 \ + 
--hash=sha256:6485d09629944511c893fa11355bda18b742b83a2b181f9a009f7e500595c90f \ + --hash=sha256:9893fd3f90506bc4b97bdb977ceb8fbd823989f4316b28c3841ec128544372d3 # via sphinx -sphinxcontrib-htmlhelp==2.0.4 \ - --hash=sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a \ - --hash=sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9 +sphinxcontrib-htmlhelp==2.0.5 \ + --hash=sha256:0dc87637d5de53dd5eec3a6a01753b1ccf99494bd756aafecd74b4fa9e729015 \ + --hash=sha256:393f04f112b4d2f53d93448d4bce35842f62b307ccdc549ec1585e950bc35e04 # via sphinx sphinxcontrib-jquery==4.1 \ --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \ @@ -901,16 +896,16 @@ sphinxcontrib-mermaid==0.9.2 \ --hash=sha256:252ef13dd23164b28f16d8b0205cf184b9d8e2b714a302274d9f59eb708e77af \ --hash=sha256:6795a72037ca55e65663d2a2c1a043d636dc3d30d418e56dd6087d1459d98a5d # via documenteer -sphinxcontrib-qthelp==1.0.6 \ - --hash=sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d \ - --hash=sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4 +sphinxcontrib-qthelp==1.0.7 \ + --hash=sha256:053dedc38823a80a7209a80860b16b722e9e0209e32fea98c90e4e6624588ed6 \ + --hash=sha256:e2ae3b5c492d58fcbd73281fbd27e34b8393ec34a073c792642cd8e529288182 # via sphinx sphinxcontrib-redoc==1.6.0 \ --hash=sha256:e358edbe23927d36432dde748e978cf897283a331a03e93d3ef02e348dee4561 # via documenteer -sphinxcontrib-serializinghtml==1.1.9 \ - --hash=sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54 \ - --hash=sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1 +sphinxcontrib-serializinghtml==1.1.10 \ + --hash=sha256:326369b8df80a7d2d8d7f99aa5ac577f51ea51556ed974e7716cfd4fca3f6cb7 \ + --hash=sha256:93f3f5dc458b91b192fe10c397e324f262cf163d79f3282c158e8436a2c4511f # via sphinx sphinxext-opengraph==0.9.1 \ --hash=sha256:b3b230cc6a5b5189139df937f0d9c7b23c7c204493b22646273687969dcb760e \ diff --git 
a/requirements/main.txt b/requirements/main.txt index 80627b30b4..545e6d3ae6 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.11 +# This file is autogenerated by pip-compile with Python 3.12 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/main.txt requirements/main.in @@ -230,9 +230,9 @@ cryptography==41.0.7 \ # -r requirements/main.in # pyjwt # safir -fastapi==0.108.0 \ - --hash=sha256:5056e504ac6395bf68493d71fcfc5352fdbd5fda6f88c21f6420d80d81163296 \ - --hash=sha256:8c7bc6d315da963ee4cdb605557827071a9a7f95aeb8fcdd3bde48cdc8764dd7 +fastapi==0.109.0 \ + --hash=sha256:8c77515984cd8e8cfeb58364f8cc7a28f0692088475e2614f7bf03275eba9093 \ + --hash=sha256:b978095b9ee01a5cf49b19f4bc1ac9b8ca83aa076e770ef8fd9af09a2b88d191 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -550,9 +550,9 @@ sniffio==1.3.0 \ # anyio # httpcore # httpx -starlette==0.32.0.post1 \ - --hash=sha256:cd0cb10ddb49313f609cedfac62c8c12e56c7314b66d89bb077ba228bada1b09 \ - --hash=sha256:e54e2b7e2fb06dff9eac40133583f10dfa05913f5a85bf26f427c7a40a9a3d02 +starlette==0.35.1 \ + --hash=sha256:3e2639dac3520e4f58734ed22553f950d3f3cb1001cd2eaac4d57e8cdc5f66bc \ + --hash=sha256:50bbbda9baa098e361f398fda0928062abbaf1f54f4fadcbe17c092a01eb9a25 # via # fastapi # safir From 77ffa9f531a93552455d19363e9f864ee8744014 Mon Sep 17 00:00:00 2001 From: Sebastian Aranda Date: Mon, 15 Jan 2024 10:37:03 -0300 Subject: [PATCH 449/588] narrativelog: update appVersion to 0.6.1 --- applications/narrativelog/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/narrativelog/Chart.yaml b/applications/narrativelog/Chart.yaml index cd72dcae0e..089e6748c7 100644 --- a/applications/narrativelog/Chart.yaml +++ b/applications/narrativelog/Chart.yaml @@ -12,4 +12,4 @@ version: 1.0.0 # number should be 
incremented each time you make changes to the # application. Versions are not expected to follow Semantic Versioning. They # should reflect the version the application is using. -appVersion: 0.6.0 +appVersion: 0.6.1 From e5a40989c3b1944f3608b0a013fb8a669e1990e1 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 15 Jan 2024 14:15:55 -0700 Subject: [PATCH 450/588] Refactor InfluxDB Enterprise configuration - Keep all configuration settings in the configuration file - Extend configmap templates to support more configuration settings - Drop support for enabling https on the InfluxDB API as this is managed by our ingress --- applications/sasquatch/README.md | 18 ++-- .../charts/influxdb-enterprise/README.md | 18 ++-- .../templates/bootstrap-job.yaml | 18 ---- .../templates/certmanager-issuer.yaml | 10 -- .../templates/data-certmanager.yaml | 28 ------ .../templates/data-configmap.yaml | 98 +++++++++++-------- .../templates/data-statefulset.yaml | 47 --------- .../templates/meta-certmanager.yaml | 28 ------ .../templates/meta-configmap.yaml | 24 +---- .../templates/meta-statefulset.yaml | 47 --------- .../charts/influxdb-enterprise/values.yaml | 53 ++++------ 11 files changed, 96 insertions(+), 293 deletions(-) delete mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml delete mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml delete mode 100644 applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 803047653b..9b2a96f0c3 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -94,12 +94,16 @@ Rubin Observatory's telemetry service. 
| influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| influxdb-enterprise.data.config.anti_entropy.enabled | bool | `false` | | +| influxdb-enterprise.data.config.cluster.log-queries-after | string | `"15s"` | | +| influxdb-enterprise.data.config.cluster.max-concurrent-queries | int | `1000` | | +| influxdb-enterprise.data.config.cluster.query-timeout | string | `"300s"` | | +| influxdb-enterprise.data.config.continuous_queries.enabled | bool | `false` | | +| influxdb-enterprise.data.config.data.trace-logging-enabled | bool | `true` | | +| influxdb-enterprise.data.config.http.auth-enabled | bool | `true` | | +| influxdb-enterprise.data.config.http.flux-enabled | bool | `true` | | +| influxdb-enterprise.data.config.logging.level | string | `"debug"` | | | influxdb-enterprise.data.env | object | `{}` | | -| influxdb-enterprise.data.flux.enabled | bool | `true` | | -| influxdb-enterprise.data.https.enabled | bool | `false` | | -| influxdb-enterprise.data.https.insecure | bool | `true` | | -| influxdb-enterprise.data.https.secret.name | string | `"influxdb-tls"` | | -| influxdb-enterprise.data.https.useCertManager | bool | `false` | | | influxdb-enterprise.data.image | object | `{}` | | | influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | | influxdb-enterprise.data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | @@ -122,10 +126,6 @@ Rubin Observatory's telemetry service. 
| influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | influxdb-enterprise.meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | | influxdb-enterprise.meta.env | object | `{}` | | -| influxdb-enterprise.meta.https.enabled | bool | `false` | | -| influxdb-enterprise.meta.https.insecure | bool | `true` | | -| influxdb-enterprise.meta.https.secret.name | string | `"influxdb-tls"` | | -| influxdb-enterprise.meta.https.useCertManager | bool | `false` | | | influxdb-enterprise.meta.image | object | `{}` | | | influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | | influxdb-enterprise.meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index 105b97867c..40c22ce19d 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -17,12 +17,16 @@ Run InfluxDB Enterprise on Kubernetes | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | +| data.config.anti_entropy.enabled | bool | `false` | | +| data.config.cluster.log-queries-after | string | `"15s"` | | +| data.config.cluster.max-concurrent-queries | int | `1000` | | +| data.config.cluster.query-timeout | string | `"300s"` | | +| data.config.continuous_queries.enabled | bool | 
`false` | | +| data.config.data.trace-logging-enabled | bool | `true` | | +| data.config.http.auth-enabled | bool | `true` | | +| data.config.http.flux-enabled | bool | `true` | | +| data.config.logging.level | string | `"debug"` | | | data.env | object | `{}` | | -| data.flux.enabled | bool | `true` | | -| data.https.enabled | bool | `false` | | -| data.https.insecure | bool | `true` | | -| data.https.secret.name | string | `"influxdb-tls"` | | -| data.https.useCertManager | bool | `false` | | | data.image | object | `{}` | | | data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | | data.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | @@ -45,10 +49,6 @@ Run InfluxDB Enterprise on Kubernetes | meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | meta.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | | meta.env | object | `{}` | | -| meta.https.enabled | bool | `false` | | -| meta.https.insecure | bool | `true` | | -| meta.https.secret.name | string | `"influxdb-tls"` | | -| meta.https.useCertManager | bool | `false` | | | meta.image | object | `{}` | | | meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"300"` | | | meta.ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"300"` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml index 384bfc49a1..46c3381491 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/bootstrap-job.yaml @@ -49,12 +49,6 @@ spec: command: - influx args: - {{- if .Values.data.https.enabled }} - - -ssl - {{- if 
.Values.data.https.insecure }} - - -unsafeSsl - {{ end }} - {{ end }} - -host - {{ include "influxdb-enterprise.fullname" . }}-data - -execute @@ -90,12 +84,6 @@ spec: command: - influx args: - {{- if .Values.data.https.enabled }} - - -ssl - {{- if .Values.data.https.insecure }} - - -unsafeSsl - {{ end }} - {{ end }} - -host - {{ include "influxdb-enterprise.fullname" . }}-data - -import @@ -127,12 +115,6 @@ spec: command: - influx args: - {{- if .Values.data.https.enabled }} - - -ssl - {{- if .Values.data.https.insecure }} - - -unsafeSsl - {{ end }} - {{ end }} - -host - {{ include "influxdb-enterprise.fullname" . }}-data - -import diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml deleted file mode 100644 index 72cbe9a68c..0000000000 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/certmanager-issuer.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if or .Values.data.https.useCertManager .Values.meta.https.useCertManager -}} -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: {{ include "influxdb-enterprise.fullname" . }} - labels: - {{- include "influxdb-enterprise.labels" . | nindent 4 }} -spec: - selfSigned: {} -{{- end -}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml deleted file mode 100644 index ea53ddd991..0000000000 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/data-certmanager.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if and .Values.data.https.enabled .Values.data.https.useCertManager -}} -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: {{ include "influxdb-enterprise.fullname" . }}-data - labels: - {{- include "influxdb-enterprise.labels" . 
| nindent 4 }} -spec: - {{- $replicas := (int $.Values.data.replicas) }} - {{- $fullname := include "influxdb-enterprise.fullname" . }} - {{- $namespace := .Release.Namespace }} - - dnsNames: - - {{ $fullname }}-data - - {{ $fullname }}-data.{{ .Release.Namespace }}.svc - - {{- range $i := until $replicas }} - - {{ $fullname }}-data-{{ $i | toString }}.{{ $fullname }}-data - - {{ $fullname }}-data-{{ $i | toString }}.{{ $fullname }}-data.{{ $namespace }} - - {{ $fullname }}-data-{{ $i | toString }}.{{ $fullname }}-data.{{ $namespace }}.svc - {{ end }} - - isCA: true - issuerRef: - kind: Issuer - name: {{ include "influxdb-enterprise.fullname" . }} - secretName: {{ include "influxdb-enterprise.fullname" . }}-data-tls -{{- end -}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml index b8fb74a5df..e054d5f716 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml @@ -10,17 +10,15 @@ data: bind-address = ":8088" reporting-disabled = false - {{ if .Values.data.https.enabled }} - https-enabled = true - - https-certificate = "/var/run/secrets/tls/tls.crt" - https-private-key = "/var/run/secrets/tls/tls.key" - - {{ end }} - - {{ if .Values.data.flux.enabled }} - flux-enabled = true - {{ end }} + [http] + {{- range $key, $value := index .Values.data.config.http }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} [enterprise] {{ if .Values.license.key }} @@ -31,36 +29,64 @@ data: license-path = "/var/run/secrets/influxdb/license.json" {{ end }} - [cluster] - {{ if .Values.data.https.enabled }} - https-enabled = true - - https-certificate = "/var/run/secrets/tls/tls.crt" - https-private-key = 
"/var/run/secrets/tls/tls.key" - - {{ if .Values.data.https.insecure }} - https-insecure-tls = true - {{ end }} - {{ end }} - [meta] dir = "/var/lib/influxdb/meta" - {{ if and .Values.meta.https.enabled }} - meta-tls-enabled = true - - {{ if .Values.meta.https.insecure }} - meta-insecure-tls = true - {{ end }} - - {{ end }} - [hinted-handoff] dir = "/var/lib/influxdb/hh" [data] dir = "/var/lib/influxdb/data" wal-dir = "/var/lib/influxdb/wal" + {{- range $key, $value := index .Values.data.config.data }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [anti-entropy] + {{- range $key, $value := index .Values.data.config.anti_entropy }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [cluster] + {{- range $key, $value := index .Values.data.config.cluster }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [continuous_queries] + {{- range $key, $value := index .Values.data.config.continuous_queries }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + + [logging] + {{- range $key, $value := index .Values.data.config.logging }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} + entrypoint.pl: |+ #!/usr/bin/env perl @@ -90,11 +116,7 @@ data: $SIG{KILL} = sub { kill 'KILL', $pid }; # Register data node with meta leader - {{ if .Values.meta.https.enabled }} - my $protocol = "https"; - {{ else }} my $protocol = "http"; - {{ end }} my $meta_service = $ENV{RELEASE_NAME} . 
"-meta"; # We're not going to define an exit strategy for failure here. @@ -102,9 +124,7 @@ data: while (true) { # There's no LWP/Simple available in our images, so forking out to curl 😥 print "\n\n\nREGISTER WITH META SERVICE\n\n\n"; - $exit_code = system('curl', {{ if .Values.meta.https.insecure }}'-k',{{ end }} '-XPOST', '--silent', '--fail', '--retry', '5', '--retry-delay', '0', "-Faddr=$ENV{INFLUXDB_HOSTNAME}:8088", "$protocol://$meta_service:8091/add-data"); - # $exit_code = system('curl', {{ if .Values.meta.https.insecure }}'-k',{{ end }} '-XPOST', '-v', '--silent', '--fail', '--retry', '5', '--retry-delay', '0', "-Faddr=$ENV{INFLUXDB_HOSTNAME}:8088", "$protocol://$meta_service:8091/add-data"); - + $exit_code = system('curl', '-XPOST', '--silent', '--fail', '--retry', '5', '--retry-delay', '0', "-Faddr=$ENV{INFLUXDB_HOSTNAME}:8088", "$protocol://$meta_service:8091/add-data"); if ($exit_code == 0) { $| = 1; diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml index 7239b4e7cd..fa28e08cf4 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml @@ -45,36 +45,6 @@ spec: - key: {{ .Values.license.secret.key }} path: license.json {{- end }} - {{- if .Values.data.https.enabled }} - - name: tls - secret: - {{- if .Values.data.https.useCertManager }} - secretName: {{ include "influxdb-enterprise.fullname" . 
}}-data-tls - {{ else }} - secretName: {{ .Values.data.https.secret.name }} - {{ if or .Values.data.https.secret.crt .Values.data.https.secret.key }} - items: - - key: {{ .Values.data.https.secret.crt }} - path: tls.crt - - key: {{ .Values.data.https.secret.key }} - path: tls.key - {{ end }} - {{ end }} - {{ end }} - {{- if and .Values.data.https.enabled .Values.data.https.secret }} - {{- if .Values.data.https.secret.ca -}} - - name: tls-ca - secret: - {{ if .Values.data.https.secret.caSecret -}} - secretName: {{ .Values.data.https.secret.caSecret }} - {{ else }} - secretName: {{ .Values.data.https.secret.name }} - {{ end }} - items: - - key: {{ .Values.data.https.secret.ca }} - path: ca.crt - {{ end }} - {{ end }} containers: - name: {{ .Chart.Name }} command: @@ -119,17 +89,11 @@ spec: httpGet: path: /ping port: http - {{- if .Values.data.https.enabled }} - scheme: HTTPS - {{- end }} readinessProbe: initialDelaySeconds: 30 httpGet: path: /ping port: http - {{- if .Values.data.https.enabled }} - scheme: HTTPS - {{- end }} volumeMounts: - name: config mountPath: /etc/influxdb @@ -139,17 +103,6 @@ spec: - name: license mountPath: /var/run/secrets/influxdb/ {{- end }} - {{- if .Values.data.https.enabled }} - - name: tls - mountPath: /var/run/secrets/tls/ - {{ end }} - {{- if and .Values.data.https.enabled .Values.data.https.secret }} - {{- if .Values.data.https.secret.ca -}} - - name: tls-ca - mountPath: /usr/share/ca-certificates/selfsigned/ca.crt - subPath: ca.crt - {{ end }} - {{ end }} resources: {{- toYaml .Values.data.resources | nindent 12 }} {{- with .Values.data.nodeSelector }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml deleted file mode 100644 index 92330b7de9..0000000000 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-certmanager.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if and 
.Values.meta.https.enabled .Values.meta.https.useCertManager -}} -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: {{ include "influxdb-enterprise.fullname" . }}-meta - labels: - {{- include "influxdb-enterprise.labels" . | nindent 4 }} -spec: - {{- $replicas := (int $.Values.meta.replicas) }} - {{- $fullname := include "influxdb-enterprise.fullname" . }} - {{- $namespace := .Release.Namespace }} - - dnsNames: - - {{ $fullname }}-meta - - {{ $fullname }}-meta.{{ .Release.Namespace }}.svc - - {{- range $i := until $replicas }} - - {{ $fullname }}-meta-{{ $i | toString }}.{{ $fullname }}-meta - - {{ $fullname }}-meta-{{ $i | toString }}.{{ $fullname }}-meta.{{ $namespace }} - - {{ $fullname }}-meta-{{ $i | toString }}.{{ $fullname }}-meta.{{ $namespace }}.svc - {{ end }} - - isCA: true - issuerRef: - kind: Issuer - name: {{ include "influxdb-enterprise.fullname" . }} - secretName: {{ include "influxdb-enterprise.fullname" . }}-meta-tls -{{- end -}} diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml index 14275ab774..6b9d8b5ef9 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-configmap.yaml @@ -22,26 +22,6 @@ data: [meta] dir = "/var/lib/influxdb/meta" - {{ if .Values.meta.https.enabled }} - https-enabled = true - - https-certificate = "/var/run/secrets/tls/tls.crt" - https-private-key = "/var/run/secrets/tls/tls.key" - - {{ if .Values.meta.https.insecure }} - https-insecure-tls = true - {{ end }} - - {{ end }} - - {{ if and .Values.data.https.enabled }} - data-use-tls = true - - {{ if .Values.data.https.insecure }} - data-insecure-tls = true - {{ end }} - - {{ end }} entrypoint.pl: |+ #!/usr/bin/env perl @@ -78,9 +58,9 @@ data: # This should be handled by the probes on the pods while (true) { if($meta_leader eq 
$ENV{INFLUXDB_HOSTNAME}) { - system('influxd-ctl', {{ if .Values.meta.https.enabled }}'-bind-tls',{{ end }}{{ if .Values.meta.https.insecure }}'-k',{{ end }} 'add-meta', "$ENV{INFLUXDB_HOSTNAME}:8091"); + system('influxd-ctl', 'add-meta', "$ENV{INFLUXDB_HOSTNAME}:8091"); } else { - system('influxd-ctl', {{ if .Values.meta.https.enabled }}'-bind-tls',{{ end }}{{ if .Values.meta.https.insecure }}'-k',{{ end }} 'join', "$meta_leader:8091"); + system('influxd-ctl', 'join', "$meta_leader:8091"); } if ($? == 0) { diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml index 39995c7b35..beff940f34 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/meta-statefulset.yaml @@ -45,36 +45,6 @@ spec: - key: {{ .Values.license.secret.key }} path: license.json {{- end }} - {{- if .Values.meta.https.enabled }} - - name: tls - secret: - {{- if .Values.meta.https.useCertManager }} - secretName: {{ include "influxdb-enterprise.fullname" . 
}}-meta-tls - {{ else }} - secretName: {{ .Values.meta.https.secret.name }} - {{ if or .Values.meta.https.secret.crt .Values.meta.https.secret.key }} - items: - - key: {{ .Values.meta.https.secret.crt }} - path: tls.crt - - key: {{ .Values.meta.https.secret.key }} - path: tls.key - {{ end }} - {{ end }} - {{ end }} - {{- if and .Values.meta.https.enabled .Values.meta.https.secret }} - {{- if .Values.meta.https.secret.ca -}} - - name: tls-ca - secret: - {{ if .Values.meta.https.secret.caSecret -}} - secretName: {{ .Values.meta.https.secret.caSecret }} - {{ else }} - secretName: {{ .Values.meta.https.secret.name }} - {{ end }} - items: - - key: {{ .Values.meta.https.secret.ca }} - path: ca.crt - {{ end }} - {{ end }} containers: - name: {{ .Chart.Name }} command: @@ -110,16 +80,10 @@ spec: httpGet: path: /ping port: http - {{- if .Values.meta.https.enabled }} - scheme: HTTPS - {{- end }} readinessProbe: httpGet: path: /ping port: http - {{- if .Values.meta.https.enabled }} - scheme: HTTPS - {{- end }} volumeMounts: - name: config mountPath: /etc/influxdb @@ -129,17 +93,6 @@ spec: - name: license mountPath: /var/run/secrets/influxdb/ {{- end }} - {{- if .Values.meta.https.enabled }} - - name: tls - mountPath: /var/run/secrets/tls/ - {{ end }} - {{- if and .Values.meta.https.enabled .Values.meta.https.secret }} - {{- if .Values.meta.https.secret.ca -}} - - name: tls-ca - mountPath: /usr/share/ca-certificates/selfsigned/ca.crt - subPath: ca.crt - {{ end }} - {{ end }} resources: {{- toYaml .Values.meta.resources | nindent 12 }} {{- with .Values.meta.nodeSelector }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index 0284bb8051..faff4311ac 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -157,24 +157,7 @@ meta: podDisruptionBudget: # maxUnavailable: 2 minAvailable: 2 - https: - # If 
you need to debug the data nodes registration with the meta nodes, we recommend - # that you comment out the active curl command in the data-configmap and uncomment the following - # line, which has -v / debugging enabled. - enabled: false - # The `useCertManager` option, when set to true, will - # automatically create the certificate resources for you. - # You do not need to set the secret.name when using this flag. - useCertManager: false - secret: - name: influxdb-tls - # crt: tls.crt - # key: tls.key - # ca: ca.crt - # caSecret: secret-name # only use if different from the above - insecure: true - ## Additional data container environment variables e.g.: - ## INFLUXDB_HTTP_FLUX_ENABLED: "true" + ## Additional data container environment variables. env: {} resources: {} @@ -264,24 +247,22 @@ data: # annotations: # accessMode: ReadWriteOnce # size: 8Gi - https: - # If you need to debug the data nodes registration with the meta nodes, we recommend - # that you comment out the active curl command in the data-configmap and uncomment the following - # line, which has -v / debugging enabled. - enabled: false - # The `useCertManager` option, when set to true, will - # automatically create the certificate resources for you. - # You do not need to set the secret.name when using this flag. 
- useCertManager: false - secret: - name: influxdb-tls - # crt: tls.crt - # key: tls.key - # ca: ca.crt - # caSecret: secret-name # only use if different from the above - insecure: true - flux: - enabled: true + config: + data: + trace-logging-enabled: true + anti_entropy: + enabled: false + http: + flux-enabled: true + auth-enabled: true + cluster: + max-concurrent-queries: 1000 + query-timeout: "300s" + log-queries-after: "15s" + continuous_queries: + enabled: false + logging: + level: "debug" ## Additional data container environment variables e.g.: ## INFLUXDB_HTTP_FLUX_ENABLED: "true" env: {} From e640384e6512ecce2d05cf0fccff0128d895ea9c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 16 Jan 2024 17:42:25 +0000 Subject: [PATCH 451/588] chore(deps): update helm release vault-secrets-operator to v2.5.5 --- applications/vault-secrets-operator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml index 18b5b79169..427c4c2139 100644 --- a/applications/vault-secrets-operator/Chart.yaml +++ b/applications/vault-secrets-operator/Chart.yaml @@ -5,7 +5,7 @@ sources: - https://github.com/ricoberger/vault-secrets-operator dependencies: - name: vault-secrets-operator - version: 2.5.4 + version: 2.5.5 repository: https://ricoberger.github.io/helm-charts/ annotations: phalanx.lsst.io/docs: | From ec8d4af95747b4b31c460cb54a73b59ec38b9ebc Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Fri, 12 Jan 2024 11:03:20 -0800 Subject: [PATCH 452/588] Parameterize the postgres user for each prompt processing service Previously a shared "rubin" postgres user is set by default, and the butler config yaml of the dev central repo sets alternatives. Instead of setting the postgres user in the butler config yaml, let each prompt service set its own PGUSER. 
--- applications/prompt-proto-service-hsc/README.md | 1 + .../values-usdfdev-prompt-processing.yaml | 1 + applications/prompt-proto-service-hsc/values.yaml | 4 ++++ applications/prompt-proto-service-latiss/README.md | 1 + .../values-usdfdev-prompt-processing.yaml | 1 + .../values-usdfprod-prompt-processing.yaml | 1 + applications/prompt-proto-service-latiss/values.yaml | 4 ++++ applications/prompt-proto-service-lsstcam/README.md | 1 + .../values-usdfdev-prompt-processing.yaml | 1 + applications/prompt-proto-service-lsstcam/values.yaml | 4 ++++ applications/prompt-proto-service-lsstcomcam/README.md | 1 + .../values-usdfdev-prompt-processing.yaml | 1 + applications/prompt-proto-service-lsstcomcam/values.yaml | 4 ++++ .../prompt-proto-service/templates/prompt-proto-service.yaml | 2 +- 14 files changed, 26 insertions(+), 1 deletion(-) diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index 145a6d9cc3..0c5921e80d 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -22,6 +22,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. 
If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. | | prompt-proto-service.instrument.name | string | `"HSC"` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index 736e870385..a5bb1093c6 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -13,6 +13,7 @@ prompt-proto-service: instrument: pipelines: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] calibRepo: s3://rubin:rubin-pp-users/central_repo/ + calibRepoPguser: hsc_prompt s3: imageBucket: rubin:rubin-pp diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index 8bc1a6f9e6..9208930e56 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -32,6 +32,10 @@ prompt-proto-service: # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. + # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. 
+ # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 82a0056ddc..5a8a15fbdf 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -22,6 +22,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. | | prompt-proto-service.instrument.name | string | `"LATISS"` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. 
| | prompt-proto-service.instrument.skymap | string | `"latiss_v1"` | Skymap to use with the instrument | diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index 3d736c7bb2..367e905e3e 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -15,6 +15,7 @@ prompt-proto-service: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] calibRepo: s3://rubin-pp-users/central_repo/ + calibRepoPguser: latiss_prompt s3: imageBucket: rubin-pp diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index 196d694efc..6ad883454d 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -26,6 +26,7 @@ prompt-proto-service: (survey="spec_pole_with_rotation")=[] (survey="")=[] calibRepo: s3://rubin-summit-users + calibRepoPguser: rubin s3: imageBucket: rubin-summit diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index a62ed6ce5b..34ae28ceee 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -33,6 +33,10 @@ prompt-proto-service: # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. 
+ # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. + # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index ca3bd5b987..e07d659977 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -22,6 +22,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. | | prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. 
| | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | diff --git a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml index 5b2fdab778..3dc7d35210 100644 --- a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml @@ -12,6 +12,7 @@ prompt-proto-service: instrument: calibRepo: s3://rubin-summit-users/ + calibRepoPguser: rubin s3: imageBucket: rubin:rubin-pp diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index 9e41e9a8b4..4d70040810 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -32,6 +32,10 @@ prompt-proto-service: # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. + # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. + # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 7b067af8a9..426556533e 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -21,6 +21,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | +| prompt-proto-service.instrument.calibRepoPguser | string | None, must be set | Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. | | prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. 
| | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml index ab8962cb4e..2b9f48237e 100644 --- a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml @@ -12,6 +12,7 @@ prompt-proto-service: instrument: calibRepo: s3://rubin-summit-users/ + calibRepoPguser: rubin s3: imageBucket: rubin:rubin-pp diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index 6fa3d7c6ba..7fa43f4768 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -32,6 +32,10 @@ prompt-proto-service: # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set calibRepo: "" + # -- Postgres username to access the shared butler repo for calibrations, templates, and pipeline outputs. + # If `registry.centralRepoFile` is set, a local redirect is used and its config may override this config. 
+ # @default -- None, must be set + calibRepoPguser: "" s3: # -- Bucket containing the incoming raw images diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index f3f8379241..bd6e141981 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -54,7 +54,7 @@ spec: - name: IMAGE_TIMEOUT value: {{ .Values.imageNotifications.imageTimeout | quote }} - name: PGUSER - value: rubin + value: {{ .Values.instrument.calibRepoPguser }} - name: CALIB_REPO value: {{ .Values.instrument.calibRepo }} - name: LSST_DISABLE_BUCKET_VALIDATION From fe17d515c8dde6f66a8eae0c5e07de8c839668e0 Mon Sep 17 00:00:00 2001 From: Hsin-Fang Chiang Date: Fri, 12 Jan 2024 12:48:01 -0800 Subject: [PATCH 453/588] Add tenant prefix to repo in prompt processing dev LATISS service --- .../values-usdfdev-prompt-processing.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index 367e905e3e..5d7718ff1c 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -14,12 +14,13 @@ prompt-proto-service: pipelines: >- (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] - calibRepo: s3://rubin-pp-users/central_repo/ + calibRepo: s3://rubin:rubin-pp-users/central_repo/ calibRepoPguser: latiss_prompt s3: - imageBucket: rubin-pp + imageBucket: rubin:rubin-pp endpointUrl: https://s3dfrgw.slac.stanford.edu + disableBucketValidation: '1' imageNotifications: kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 From b47445eb7343f1c96a4d2370ab8d6ac0e1951a8c 
Mon Sep 17 00:00:00 2001 From: Brianna Smart Date: Tue, 16 Jan 2024 10:45:29 -0800 Subject: [PATCH 454/588] Update Kafka Version --- applications/alert-stream-broker/README.md | 2 +- .../alert-stream-broker/values-usdfdev-alert-stream-broker.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index 5bfa1d8968..6eb9a7dd29 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -71,7 +71,7 @@ Alert transmission to community brokers | alert-stream-broker.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | -| alert-stream-broker.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | +| alert-stream-broker.kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. | | alert-stream-broker.kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. | | alert-stream-broker.kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging | | alert-stream-broker.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. 
| diff --git a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml index 282680ec8d..a178351831 100644 --- a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml +++ b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml @@ -9,7 +9,7 @@ alert-stream-broker: kafka: - version: 3.4.0 + version: 3.5.1 # -- Encoding version for messages, see # https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. logMessageFormatVersion: 3.4 From fd0dc67328da86b091ab91c2c591d6472603ea7c Mon Sep 17 00:00:00 2001 From: Brianna Smart Date: Tue, 16 Jan 2024 11:33:07 -0800 Subject: [PATCH 455/588] Change readme --- applications/alert-stream-broker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index 6eb9a7dd29..5bfa1d8968 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -71,7 +71,7 @@ Alert transmission to community brokers | alert-stream-broker.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | -| alert-stream-broker.kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. | +| alert-stream-broker.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | | alert-stream-broker.kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. 
| | alert-stream-broker.kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging | | alert-stream-broker.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. | From a826434b7c3bf069faf5bb85fc8020cbd89fee87 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 11 Jan 2024 13:54:11 -0700 Subject: [PATCH 456/588] Fix conditionals in Sasquatch secrets --- applications/sasquatch/secrets.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index d7e02a586f..49859d38b3 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -32,7 +32,7 @@ kafdrop-password: kafka-connect-manager-password: description: >- ? - if: strimzi-kafka.connect.enabled + if: strimzi-kafka.users.kafkaConnectManager.enabled prompt-processing-password: description: >- ? @@ -52,15 +52,16 @@ rest-proxy-sasl-jass-config: sasquatch-test-kafka-properties: description: >- ? - if: kafka.listeners.plain.enabled + if: strimzi-kafka.kafka.listeners.plain.enabled sasquatch-test-password: description: >- ? - if: kafka.listeners.plain.enabled + if: strimzi-kafka.kafka.listeners.plain.enabled telegraf-password: description: >- ? + if: telegraf-kafka-consumer.enabled ts-salkafka-password: description: >- ? - if: strimzi-kafka.users.telegraf.enabled + if: strimzi-kafka.users.ts-salkafka.enabled From 63b622ddd79798e32a19f68d683c32972f7d2a00 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 11 Jan 2024 17:06:18 -0700 Subject: [PATCH 457/588] Enable Sasquatch components at the parent chart level Enabling Sasquatch components at the subchart level doesn't work with secret conditionals. 
--- applications/sasquatch/README.md | 22 +++++++++---------- .../sasquatch/charts/strimzi-kafka/README.md | 20 ++++++++--------- .../charts/strimzi-kafka/values.yaml | 22 +++++++++---------- applications/sasquatch/values-base.yaml | 8 +++++++ applications/sasquatch/values-idfdev.yaml | 7 ++++++ applications/sasquatch/values-idfint.yaml | 6 +++++ applications/sasquatch/values-summit.yaml | 6 +++++ .../sasquatch/values-tucson-teststand.yaml | 9 ++++++++ applications/sasquatch/values-usdfdev.yaml | 8 +++++++ applications/sasquatch/values-usdfint.yaml | 6 ++++- applications/sasquatch/values-usdfprod.yaml | 16 ++++++++++++++ applications/sasquatch/values.yaml | 14 ++++++++++-- 12 files changed, 109 insertions(+), 35 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 9b2a96f0c3..091525e172 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -84,7 +84,7 @@ Rubin Observatory's telemetry service. | source-kapacitor.resources.requests.cpu | int | `1` | | | source-kapacitor.resources.requests.memory | string | `"1Gi"` | | | squareEvents.enabled | bool | `false` | Enable the Square Events subchart with topic and user configurations. | -| strimzi-kafka | object | `{}` | Override strimzi-kafka configuration. | +| strimzi-kafka | object | `{"connect":{"enabled":true},"kafka":{"listeners":{"external":{"enabled":true},"plain":{"enabled":true},"tls":{"enabled":true}}}}` | Override strimzi-kafka subchart configuration. | | strimzi-registry-operator | object | `{"clusterName":"sasquatch","clusterNamespace":"sasquatch","operatorNamespace":"sasquatch"}` | strimzi-registry-operator configuration. | | telegraf-kafka-consumer | object | `{}` | Override telegraf-kafka-consumer configuration. | | influxdb-enterprise.bootstrap.auth.secretName | string | `"sasquatch"` | | @@ -320,7 +320,7 @@ Rubin Observatory's telemetry service. 
| source-kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | | square-events.cluster.name | string | `"sasquatch"` | | | strimzi-kafka.cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | -| strimzi-kafka.connect.enabled | bool | `true` | Enable Kafka Connect. | +| strimzi-kafka.connect.enabled | bool | `false` | Enable Kafka Connect. | | strimzi-kafka.connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | strimzi-kafka.kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | @@ -336,9 +336,9 @@ Rubin Observatory's telemetry service. | strimzi-kafka.kafka.externalListener.brokers | list | `[]` | Borkers configuration. host is used in the brokers' advertised.brokers configuration and for TLS hostname verification. The format is a list of maps. | | strimzi-kafka.kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | strimzi-kafka.kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. | -| strimzi-kafka.kafka.listeners.external.enabled | bool | `true` | Whether external listener is enabled. | -| strimzi-kafka.kafka.listeners.plain.enabled | bool | `true` | Whether internal plaintext listener is enabled. | -| strimzi-kafka.kafka.listeners.tls.enabled | bool | `true` | Whether internal TLS listener is enabled. 
| +| strimzi-kafka.kafka.listeners.external.enabled | bool | `false` | Whether external listener is enabled. | +| strimzi-kafka.kafka.listeners.plain.enabled | bool | `false` | Whether internal plaintext listener is enabled. | +| strimzi-kafka.kafka.listeners.tls.enabled | bool | `false` | Whether internal TLS listener is enabled. | | strimzi-kafka.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | @@ -357,12 +357,12 @@ Rubin Observatory's telemetry service. | strimzi-kafka.registry.ingress.hostname | string | `""` | Hostname for the Schema Registry. | | strimzi-kafka.registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | strimzi-kafka.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | -| strimzi-kafka.users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). | -| strimzi-kafka.users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager | -| strimzi-kafka.users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing | -| strimzi-kafka.users.replicator.enabled | bool | `false` | Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) | -| strimzi-kafka.users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) | -| strimzi-kafka.users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. | +| strimzi-kafka.users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). 
| +| strimzi-kafka.users.kafkaConnectManager.enabled | bool | `false` | Enable user kafka-connect-manager | +| strimzi-kafka.users.promptProcessing.enabled | bool | `false` | Enable user prompt-processing | +| strimzi-kafka.users.replicator.enabled | bool | `false` | Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) | +| strimzi-kafka.users.telegraf.enabled | bool | `false` | Enable user telegraf (deployed by parent Sasquatch chart) | +| strimzi-kafka.users.tsSalKafka.enabled | bool | `false` | Enable user ts-salkafka, used at the telescope environments | | strimzi-kafka.zookeeper.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["zookeeper"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Zookeeper pod assignment. | | strimzi-kafka.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | strimzi-kafka.zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 93c4b9855f..83d73ae9bf 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -7,7 +7,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | Key | Type | Default | Description | |-----|------|---------|-------------| | cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | -| connect.enabled | bool | `true` | Enable Kafka Connect. | +| connect.enabled | bool | `false` | Enable Kafka Connect. | | connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. 
| | connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | @@ -23,9 +23,9 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.externalListener.brokers | list | `[]` | Borkers configuration. host is used in the brokers' advertised.brokers configuration and for TLS hostname verification. The format is a list of maps. | | kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | | kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. | -| kafka.listeners.external.enabled | bool | `true` | Whether external listener is enabled. | -| kafka.listeners.plain.enabled | bool | `true` | Whether internal plaintext listener is enabled. | -| kafka.listeners.tls.enabled | bool | `true` | Whether internal TLS listener is enabled. | +| kafka.listeners.external.enabled | bool | `false` | Whether external listener is enabled. | +| kafka.listeners.plain.enabled | bool | `false` | Whether internal plaintext listener is enabled. | +| kafka.listeners.tls.enabled | bool | `false` | Whether internal TLS listener is enabled. | | kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | @@ -44,12 +44,12 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | registry.ingress.hostname | string | `""` | Hostname for the Schema Registry. 
| | registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | -| users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). | -| users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager | -| users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing | -| users.replicator.enabled | bool | `false` | Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) | -| users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) | -| users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. | +| users.kafdrop.enabled | bool | `false` | Enable user Kafdrop (deployed by parent Sasquatch chart). | +| users.kafkaConnectManager.enabled | bool | `false` | Enable user kafka-connect-manager | +| users.promptProcessing.enabled | bool | `false` | Enable user prompt-processing | +| users.replicator.enabled | bool | `false` | Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) | +| users.telegraf.enabled | bool | `false` | Enable user telegraf (deployed by parent Sasquatch chart) | +| users.tsSalKafka.enabled | bool | `false` | Enable user ts-salkafka, used at the telescope environments | | zookeeper.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["zookeeper"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Zookeeper pod assignment. | | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. 
| | zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index e819687976..d10b1468e9 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -30,15 +30,15 @@ kafka: listeners: plain: # -- Whether internal plaintext listener is enabled. - enabled: true + enabled: false tls: # -- Whether internal TLS listener is enabled. - enabled: true + enabled: false external: # -- Whether external listener is enabled. - enabled: true + enabled: false externalListener: tls: @@ -115,7 +115,7 @@ zookeeper: connect: # -- Enable Kafka Connect. - enabled: true + enabled: false # -- Custom strimzi-kafka image with connector plugins used by sasquatch. image: ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655 # -- Number of Kafka Connect replicas to run. @@ -139,28 +139,28 @@ superusers: users: replicator: - # -- Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) + # -- Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) enabled: false tsSalKafka: - # -- Enable user ts-salkafka. - enabled: true + # -- Enable user ts-salkafka, used at the telescope environments + enabled: false kafdrop: # -- Enable user Kafdrop (deployed by parent Sasquatch chart). - enabled: true + enabled: false telegraf: # -- Enable user telegraf (deployed by parent Sasquatch chart) - enabled: true + enabled: false promptProcessing: # -- Enable user prompt-processing - enabled: true + enabled: false kafkaConnectManager: # -- Enable user kafka-connect-manager - enabled: true + enabled: false mirrormaker2: # -- Enable replication in the target (passive) cluster. 
diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 9fb57217c7..878a150ba8 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -43,6 +43,14 @@ strimzi-kafka: users: replicator: enabled: true + tsSalKafka: + enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true influxdb: persistence: diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 88486af5c4..af03f201f4 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -16,6 +16,13 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true + registry: ingress: enabled: true diff --git a/applications/sasquatch/values-idfint.yaml b/applications/sasquatch/values-idfint.yaml index d76b58e184..3b8389c218 100644 --- a/applications/sasquatch/values-idfint.yaml +++ b/applications/sasquatch/values-idfint.yaml @@ -29,6 +29,12 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true influxdb: ingress: diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index ed40f707b0..440218f24d 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -23,6 +23,12 @@ strimzi-kafka: enabled: true replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true influxdb: persistence: diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 92b28c6441..98618642fb 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -18,6 +18,15 @@ 
strimzi-kafka: zookeeper: storage: storageClassName: rook-ceph-block + users: + tsSalKafka: + enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true registry: ingress: enabled: true diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 0b04006e61..6d7f8a3fcb 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -14,6 +14,14 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true + promptProcessing: + enabled: true influxdb: ingress: diff --git a/applications/sasquatch/values-usdfint.yaml b/applications/sasquatch/values-usdfint.yaml index f710f33562..7c874fd79e 100644 --- a/applications/sasquatch/values-usdfint.yaml +++ b/applications/sasquatch/values-usdfint.yaml @@ -12,7 +12,11 @@ strimzi-kafka: cpu: 4 memory: 8Gi users: - replicator: + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: enabled: true influxdb: diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index b974d48118..476936fc55 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -1,4 +1,12 @@ strimzi-kafka: + kafka: + listeners: + tls: + enabled: true + plain: + enabled: true + external: + enabled: true mirrormaker2: enabled: true source: @@ -14,6 +22,14 @@ strimzi-kafka: users: replicator: enabled: true + kafdrop: + enabled: true + telegraf: + enabled: true + kafkaConnectManager: + enabled: true + promptProcessing: + enabled: true influxdb: ingress: diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index f36b822007..99457a9b03 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -1,7 +1,17 @@ # Default values for Sasquatch. 
-# -- Override strimzi-kafka configuration. -strimzi-kafka: {} +# -- Override strimzi-kafka subchart configuration. +strimzi-kafka: + kafka: + listeners: + tls: + enabled: true + plain: + enabled: true + external: + enabled: true + connect: + enabled: true # -- strimzi-registry-operator configuration. strimzi-registry-operator: From 68aa33ebece5c1a59b90bf485aa4bf94b3cffe4b Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 13 Jan 2024 08:30:44 -0700 Subject: [PATCH 458/588] Add description to Sasquatch secrets --- applications/sasquatch/secrets-idfint.yaml | 16 +++++------ applications/sasquatch/secrets.yaml | 32 +++++++++++----------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/applications/sasquatch/secrets-idfint.yaml b/applications/sasquatch/secrets-idfint.yaml index 1ab5fdb169..08fc85c129 100644 --- a/applications/sasquatch/secrets-idfint.yaml +++ b/applications/sasquatch/secrets-idfint.yaml @@ -1,16 +1,16 @@ "kafka-connect-manager-password": - description: "?" + description: "kafka-connect-manager KafkaUser password." "prompt-processing-password": - description: "?" + description: "prompt-processing KafkaUser password." "rest-proxy-password": - description: "?" + description: "rest-proxy-password KafkaUser password." "rest-proxy-sasl-jass-config": - description: "?" + description: "rest-proxy-sasl-jass-config for connection with the Kafka broker." "sasquatch-test-kafka-properties": - description: "?" + description: "sasquatch-test properties file for connection with the Kafka broker." "sasquatch-test-password": - description: "?" + description: "sasquatch-test KafkaUser password." "telegraf-password": - description: "?" + description: "Telegraf KafkaUser password." "ts-salkafka-password": - description: "?" + description: "ts-salkafka KafkaUser password." 
diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index 49859d38b3..e073d7b959 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -1,67 +1,67 @@ GENERIC_CLIENT_ID: description: >- - ? + Chronograf client ID for OIDC authentication with Gafaelfawr. value: chronograf-client-id GENERIC_CLIENT_SECRET: description: >- - ? + Chronograf client secret for OIDC authentication with Gafaelfawr. generate: type: password TOKEN_SECRET: description: >- - ? + Chronograf token secret for OIDC authentication with Gafaelfawr. generate: type: password influxdb-password: description: >- - ? + InfluxDB admin password. generate: type: password influxdb-user: description: >- - ? + InfluxDB admin user. value: admin kafdrop-kafka-properties: description: >- - ? + Kafdrop properties file for connection with the Kafka broker. if: kafdrop.enabled kafdrop-password: description: >- - ? + Kafdrop KafkaUser password. if: kafdrop.enabled kafka-connect-manager-password: description: >- - ? + kafka-connect-manager Kafka user password. if: strimzi-kafka.users.kafkaConnectManager.enabled prompt-processing-password: description: >- - ? + prompt-processing KafkaUser password. if: strimzi-kafka.users.promptProcessing.enabled replicator-password: description: >- - ? + replicator KafkaUser password. if: strimzi-kafka.users.replicator.enabled rest-proxy-password: description: >- - ? + rest-proxy-password KafkaUser password. if: rest-proxy.enabled rest-proxy-sasl-jass-config: description: >- - ? + rest-proxy-sasl-jass-config for connection with the Kafka broker. if: rest-proxy.enabled sasquatch-test-kafka-properties: description: >- - ? + sasquatch-test properties file for connection with the Kafka broker. if: strimzi-kafka.kafka.listeners.plain.enabled sasquatch-test-password: description: >- - ? + sasquatch-test KafkaUser password. if: strimzi-kafka.kafka.listeners.plain.enabled telegraf-password: description: >- - ? 
+ Telegraf KafkaUser password. if: telegraf-kafka-consumer.enabled ts-salkafka-password: description: >- - ? + ts-salkafka KafkaUser password. if: strimzi-kafka.users.ts-salkafka.enabled From 587c9a1b774e981fc3e5585d178e13a61d8f7bc1 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 15 Jan 2024 11:03:10 -0700 Subject: [PATCH 459/588] Add connect-push-secret to Sasquatch secrets - Add the GitHub Container Registry write token to Sasquatch secrets. That's used by Strimzi to build custom Kafka Connect images. --- applications/sasquatch/secrets.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index e073d7b959..d60cda7df3 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -65,3 +65,7 @@ ts-salkafka-password: description: >- ts-salkafka KafkaUser password. if: strimzi-kafka.users.ts-salkafka.enabled +connect-push-secret: + description: >- + Write token for pushing generated kafka-connect image to GitHub container registry. 
+ if: strimzi-kafka.connect.enabled From 6168ddf330a5783682b6a5e89fba2ecc92a7c561 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 15 Jan 2024 16:24:03 -0700 Subject: [PATCH 460/588] Enable InfluxDB Enterprise connectors at USDF --- applications/sasquatch/Chart.yaml | 4 + applications/sasquatch/README.md | 62 ++++++++++++++++ applications/sasquatch/values-usdfprod.yaml | 81 +++++++++++++++++++++ applications/sasquatch/values.yaml | 4 + 4 files changed, 151 insertions(+) diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index b1d09650e2..5fac64841f 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -36,6 +36,10 @@ dependencies: alias: source-kafka-connect-manager condition: source-kafka-connect-manager.enabled version: 1.0.0 + - name: kafka-connect-manager + alias: kafka-connect-manager-enterprise + condition: kafka-connect-manager-enterprise.enabled + version: 1.0.0 - name: chronograf condition: chronograf.enabled version: 1.2.6 diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 091525e172..462382e94e 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -49,6 +49,7 @@ Rubin Observatory's telemetry service. | influxdb.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. | | kafdrop.enabled | bool | `true` | Enable Kafdrop. | | kafka-connect-manager | object | `{}` | Override kafka-connect-manager configuration. | +| kafka-connect-manager-enterprise | object | `{"enabled":false}` | Override kafka-connect-manager-enterprise configuration. | | kapacitor.enabled | bool | `true` | Enable Kapacitor. | | kapacitor.envVars | object | `{"KAPACITOR_SLACK_ENABLED":true}` | Kapacitor environment variables. 
| | kapacitor.existingSecret | string | `"sasquatch"` | InfluxDB credentials, use influxdb-user and influxdb-password keys from secret. | @@ -232,6 +233,67 @@ Rubin Observatory's telemetry service. | kafka-connect-manager.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. | | kafka-connect-manager.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. | | kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | +| kafka-connect-manager-enterprise.enabled | bool | `true` | Enable Kafka Connect Manager. | +| kafka-connect-manager-enterprise.env.kafkaBrokerUrl | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka broker URL. | +| kafka-connect-manager-enterprise.env.kafkaConnectUrl | string | `"http://sasquatch-connect-api.sasquatch:8083"` | Kafka connnect URL. | +| kafka-connect-manager-enterprise.env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. | +| kafka-connect-manager-enterprise.image.pullPolicy | string | `"IfNotPresent"` | | +| kafka-connect-manager-enterprise.image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | | +| kafka-connect-manager-enterprise.image.tag | string | `"1.3.1"` | | +| kafka-connect-manager-enterprise.influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. | +| kafka-connect-manager-enterprise.influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | +| kafka-connect-manager-enterprise.influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. | +| kafka-connect-manager-enterprise.influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documetation for details. 
| +| kafka-connect-manager-enterprise.influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. | +| kafka-connect-manager-enterprise.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | +| kafka-connect-manager-enterprise.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | +| kafka-connect-manager-enterprise.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | +| kafka-connect-manager-enterprise.influxdbSink.connectors | object | `{"example":{"enabled":false,"removePrefix":"","repairerConnector":false,"tags":"","topicsRegex":"example.topic"}}` | Connector instances to deploy. | +| kafka-connect-manager-enterprise.influxdbSink.connectors.example.enabled | bool | `false` | Whether this connector instance is deployed. | +| kafka-connect-manager-enterprise.influxdbSink.connectors.example.removePrefix | string | `""` | Remove prefix from topic name. | +| kafka-connect-manager-enterprise.influxdbSink.connectors.example.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| kafka-connect-manager-enterprise.influxdbSink.connectors.example.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| kafka-connect-manager-enterprise.influxdbSink.connectors.example.topicsRegex | string | `"example.topic"` | Regex to select topics from Kafka. | +| kafka-connect-manager-enterprise.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | +| kafka-connect-manager-enterprise.influxdbSink.tasksMax | int | `1` | Maxium number of tasks to run the connector. 
| +| kafka-connect-manager-enterprise.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | +| kafka-connect-manager-enterprise.jdbcSink.autoCreate | string | `"true"` | Whether to automatically create the destination table. | +| kafka-connect-manager-enterprise.jdbcSink.autoEvolve | string | `"false"` | Whether to automatically add columns in the table schema. | +| kafka-connect-manager-enterprise.jdbcSink.batchSize | string | `"3000"` | Specifies how many records to attempt to batch together for insertion into the destination table. | +| kafka-connect-manager-enterprise.jdbcSink.connectionUrl | string | `"jdbc:postgresql://localhost:5432/mydb"` | Database connection URL. | +| kafka-connect-manager-enterprise.jdbcSink.dbTimezone | string | `"UTC"` | Name of the JDBC timezone that should be used in the connector when inserting time-based values. | +| kafka-connect-manager-enterprise.jdbcSink.enabled | bool | `false` | Whether the JDBC Sink connector is deployed. | +| kafka-connect-manager-enterprise.jdbcSink.insertMode | string | `"insert"` | The insertion mode to use. Supported modes are: `insert`, `upsert` and `update`. | +| kafka-connect-manager-enterprise.jdbcSink.maxRetries | string | `"10"` | The maximum number of times to retry on errors before failing the task. | +| kafka-connect-manager-enterprise.jdbcSink.name | string | `"postgres-sink"` | Name of the connector to create. | +| kafka-connect-manager-enterprise.jdbcSink.retryBackoffMs | string | `"3000"` | The time in milliseconds to wait following an error before a retry attempt is made. | +| kafka-connect-manager-enterprise.jdbcSink.tableNameFormat | string | `"${topic}"` | A format string for the destination table name. | +| kafka-connect-manager-enterprise.jdbcSink.tasksMax | string | `"10"` | Number of Kafka Connect tasks. 
| +| kafka-connect-manager-enterprise.jdbcSink.topicRegex | string | `".*"` | Regex for selecting topics. | +| kafka-connect-manager-enterprise.s3Sink.behaviorOnNullValues | string | `"fail"` | How to handle records with a null value (for example, Kafka tombstone records). Valid options are ignore and fail. | +| kafka-connect-manager-enterprise.s3Sink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | +| kafka-connect-manager-enterprise.s3Sink.enabled | bool | `false` | Whether the Amazon S3 Sink connector is deployed. | +| kafka-connect-manager-enterprise.s3Sink.excludedTopicRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | +| kafka-connect-manager-enterprise.s3Sink.flushSize | string | `"1000"` | Number of records written to store before invoking file commits. | +| kafka-connect-manager-enterprise.s3Sink.locale | string | `"en-US"` | The locale to use when partitioning with TimeBasedPartitioner. | +| kafka-connect-manager-enterprise.s3Sink.name | string | `"s3-sink"` | Name of the connector to create. | +| kafka-connect-manager-enterprise.s3Sink.partitionDurationMs | string | `"3600000"` | The duration of a partition in milliseconds, used by TimeBasedPartitioner. Default is 1h for an hourly based partitioner. | +| kafka-connect-manager-enterprise.s3Sink.pathFormat | string | `"'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH"` | Pattern used to format the path in the S3 object name. | +| kafka-connect-manager-enterprise.s3Sink.rotateIntervalMs | string | `"600000"` | The time interval in milliseconds to invoke file commits. Set to 10 minutes by default. | +| kafka-connect-manager-enterprise.s3Sink.s3BucketName | string | `""` | s3 bucket name. The bucket must already exist at the s3 provider. | +| kafka-connect-manager-enterprise.s3Sink.s3PartRetries | int | `3` | Maximum number of retry attempts for failed requests. Zero means no retries. 
| +| kafka-connect-manager-enterprise.s3Sink.s3PartSize | int | `5242880` | The Part Size in S3 Multi-part Uploads. Valid Values: [5242880,…,2147483647] | +| kafka-connect-manager-enterprise.s3Sink.s3Region | string | `"us-east-1"` | s3 region | +| kafka-connect-manager-enterprise.s3Sink.s3RetryBackoffMs | int | `200` | How long to wait in milliseconds before attempting the first retry of a failed S3 request. | +| kafka-connect-manager-enterprise.s3Sink.s3SchemaCompatibility | string | `"NONE"` | s3 schema compatibility | +| kafka-connect-manager-enterprise.s3Sink.schemaCacheConfig | int | `5000` | The size of the schema cache used in the Avro converter. | +| kafka-connect-manager-enterprise.s3Sink.storeUrl | string | `""` | The object storage connection URL, for non-AWS s3 providers. | +| kafka-connect-manager-enterprise.s3Sink.tasksMax | int | `1` | Number of Kafka Connect tasks. | +| kafka-connect-manager-enterprise.s3Sink.timestampExtractor | string | `"Record"` | The extractor determines how to obtain a timestamp from each record. | +| kafka-connect-manager-enterprise.s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. | +| kafka-connect-manager-enterprise.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. | +| kafka-connect-manager-enterprise.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. | +| kafka-connect-manager-enterprise.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | | rest-proxy.affinity | object | `{}` | Affinity configuration. 
| | rest-proxy.configurationOverrides | object | `{"access.control.allow.headers":"origin,content-type,accept,authorization","access.control.allow.methods":"GET,POST,PUT,DELETE","client.sasl.mechanism":"SCRAM-SHA-512","client.security.protocol":"SASL_PLAINTEXT"}` | Kafka REST configuration options | | rest-proxy.customEnv | string | `nil` | Kafka REST additional env variables | diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 476936fc55..0e0678e563 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -183,6 +183,87 @@ kafka-connect-manager: repairerConnector: false topicsRegex: ".*LaserTracker" +kafka-connect-manager-enterprise: + enabled: true + influxdbSink: + connectInfluxUrl: "http://sasquatch-influxdb-enterprise-data.sasquatch:8086" + connectInfluxDb: "efd" + connectors: + enterprise-auxtel: + enabled: true + repairerConnector: false + topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" + enterprise-maintel: + enabled: true + repairerConnector: false + topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" + enterprise-mtmount: + enabled: true + repairerConnector: false + topicsRegex: ".*MTMount" + tasksMax: "8" + enterprise-comcam: + enabled: true + repairerConnector: false + topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" + enterprise-eas: + enabled: true + repairerConnector: false + topicsRegex: ".*DIMM|.*DSM|.*ESS|.*HVAC|.*WeatherForecast" + enterprise-latiss: + enabled: true + repairerConnector: false + topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" + enterprise-m1m3: + enabled: true + repairerConnector: false + topicsRegex: ".*MTM1M3" + tasksMax: "8" + enterprise-m2: + enabled: true + repairerConnector: false + topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" + enterprise-obssys: + enabled: true + repairerConnector: false + topicsRegex: 
".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" + enterprise-ocps: + enabled: true + repairerConnector: false + topicsRegex: ".*OCPS" + enterprise-test: + enabled: true + repairerConnector: false + topicsRegex: "lsst.sal.Test" + enterprise-pmd: + enabled: true + repairerConnector: false + topicsRegex: ".*PMD" + enterprise-calsys: + enabled: true + repairerConnector: false + topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" + enterprise-mtaircompressor: + enabled: true + repairerConnector: false + topicsRegex: ".*MTAirCompressor" + enterprise-genericcamera: + enabled: true + repairerConnector: false + topicsRegex: ".*GCHeaderService|.*GenericCamera" + enterprise-gis: + enabled: true + repairerConnector: false + topicsRegex: ".*GIS" + enterprise-mtvms: + enabled: true + repairerConnector: false + topicsRegex: ".*MTVMS" + enterprise-lasertracker: + enabled: true + repairerConnector: false + topicsRegex: ".*LaserTracker" + kafdrop: ingress: enabled: true diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 99457a9b03..d4e11d27cc 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -222,6 +222,10 @@ source-kafka-connect-manager: env: kafkaConnectUrl: "http://sasquatch-source-connect-api.sasquatch:8083" +# -- Override kafka-connect-manager-enterprise configuration. +kafka-connect-manager-enterprise: + enabled: false + # -- Override telegraf-kafka-consumer configuration. 
telegraf-kafka-consumer: {} From 33011410394d5e2a666c0e721af486b03206de2f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 15 Jan 2024 17:16:24 -0700 Subject: [PATCH 461/588] Hinted handoff queue is filling up - Increasing wal-fsync-delay to control write contention - Increase hintedHandoff.max-size to 10G - Disable memory cache max size --- applications/sasquatch/README.md | 7 +++++-- .../sasquatch/charts/influxdb-enterprise/README.md | 7 +++++-- .../templates/data-configmap.yaml | 12 ++++++++++-- .../sasquatch/charts/influxdb-enterprise/values.yaml | 8 ++++++-- 4 files changed, 26 insertions(+), 8 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 462382e94e..ce71366599 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -95,12 +95,15 @@ Rubin Observatory's telemetry service. | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | -| influxdb-enterprise.data.config.anti_entropy.enabled | bool | `false` | | +| influxdb-enterprise.data.config.antiEntropy.enabled | bool | `false` | | | influxdb-enterprise.data.config.cluster.log-queries-after | string | `"15s"` | | | influxdb-enterprise.data.config.cluster.max-concurrent-queries | int | `1000` | | | influxdb-enterprise.data.config.cluster.query-timeout | string | `"300s"` | | -| influxdb-enterprise.data.config.continuous_queries.enabled | bool | `false` | | +| influxdb-enterprise.data.config.continuousQueries.enabled | bool | `false` | | +| influxdb-enterprise.data.config.data.cache-max-memory-size | int 
| `0` | | | influxdb-enterprise.data.config.data.trace-logging-enabled | bool | `true` | | +| influxdb-enterprise.data.config.data.wal-fsync-delay | string | `"100ms"` | | +| influxdb-enterprise.data.config.hintedHandoff.max-size | int | `107374182400` | | | influxdb-enterprise.data.config.http.auth-enabled | bool | `true` | | | influxdb-enterprise.data.config.http.flux-enabled | bool | `true` | | | influxdb-enterprise.data.config.logging.level | string | `"debug"` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index 40c22ce19d..61390abc77 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -17,12 +17,15 @@ Run InfluxDB Enterprise on Kubernetes | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | -| data.config.anti_entropy.enabled | bool | `false` | | +| data.config.antiEntropy.enabled | bool | `false` | | | data.config.cluster.log-queries-after | string | `"15s"` | | | data.config.cluster.max-concurrent-queries | int | `1000` | | | data.config.cluster.query-timeout | string | `"300s"` | | -| data.config.continuous_queries.enabled | bool | `false` | | +| data.config.continuousQueries.enabled | bool | `false` | | +| data.config.data.cache-max-memory-size | int | `0` | | | data.config.data.trace-logging-enabled | bool | `true` | | +| data.config.data.wal-fsync-delay | string | `"100ms"` | | +| data.config.hintedHandoff.max-size | int | `107374182400` | | | data.config.http.auth-enabled | bool | `true` | | | 
data.config.http.flux-enabled | bool | `true` | | | data.config.logging.level | string | `"debug"` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml index e054d5f716..3d6c6bcdcc 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-configmap.yaml @@ -34,6 +34,14 @@ data: [hinted-handoff] dir = "/var/lib/influxdb/hh" + {{- range $key, $value := index .Values.data.config.hintedHandoff }} + {{- $tp := typeOf $value }} + {{- if eq $tp "string" }} + {{ $key }} = {{ $value | quote }} + {{- else }} + {{ $key }} = {{ $value | int }} + {{- end }} + {{- end }} [data] dir = "/var/lib/influxdb/data" @@ -48,7 +56,7 @@ data: {{- end }} [anti-entropy] - {{- range $key, $value := index .Values.data.config.anti_entropy }} + {{- range $key, $value := index .Values.data.config.antiEntropy }} {{- $tp := typeOf $value }} {{- if eq $tp "string" }} {{ $key }} = {{ $value | quote }} @@ -68,7 +76,7 @@ data: {{- end }} [continuous_queries] - {{- range $key, $value := index .Values.data.config.continuous_queries }} + {{- range $key, $value := index .Values.data.config.continuousQueries }} {{- $tp := typeOf $value }} {{- if eq $tp "string" }} {{ $key }} = {{ $value | quote }} diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index faff4311ac..f1e847dc05 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -250,7 +250,9 @@ data: config: data: trace-logging-enabled: true - anti_entropy: + wal-fsync-delay: "100ms" + cache-max-memory-size: 0 + antiEntropy: enabled: false http: flux-enabled: true @@ -259,7 +261,9 @@ data: max-concurrent-queries: 1000 query-timeout: "300s" 
log-queries-after: "15s" - continuous_queries: + hintedHandoff: + max-size: 107374182400 + continuousQueries: enabled: false logging: level: "debug" From c12a8d67d38c6c24296c2954fe3d1d7f0ba2d792 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 17 Jan 2024 11:19:57 -0700 Subject: [PATCH 462/588] Turn on Anti-Entropy service --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/influxdb-enterprise/README.md | 2 +- applications/sasquatch/charts/influxdb-enterprise/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index ce71366599..9d24b6a36c 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -95,7 +95,7 @@ Rubin Observatory's telemetry service. | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | influxdb-enterprise.data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | -| influxdb-enterprise.data.config.antiEntropy.enabled | bool | `false` | | +| influxdb-enterprise.data.config.antiEntropy.enabled | bool | `true` | | | influxdb-enterprise.data.config.cluster.log-queries-after | string | `"15s"` | | | influxdb-enterprise.data.config.cluster.max-concurrent-queries | int | `1000` | | | influxdb-enterprise.data.config.cluster.query-timeout | string | `"300s"` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index 61390abc77..0a2013d54e 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md 
@@ -17,7 +17,7 @@ Run InfluxDB Enterprise on Kubernetes | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.labelSelector.matchExpressions[0].values[0] | string | `"data"` | | | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey | string | `"kubernetes.io/hostname"` | | | data.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].weight | int | `1` | | -| data.config.antiEntropy.enabled | bool | `false` | | +| data.config.antiEntropy.enabled | bool | `true` | | | data.config.cluster.log-queries-after | string | `"15s"` | | | data.config.cluster.max-concurrent-queries | int | `1000` | | | data.config.cluster.query-timeout | string | `"300s"` | | diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index f1e847dc05..9d96fadd26 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -253,7 +253,7 @@ data: wal-fsync-delay: "100ms" cache-max-memory-size: 0 antiEntropy: - enabled: false + enabled: true http: flux-enabled: true auth-enabled: true From cddabc87b791b12a429f9790c33480ec17c22316 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 17 Jan 2024 11:43:38 -0700 Subject: [PATCH 463/588] Disable connectors to InfluxDB OSS at USDF --- applications/sasquatch/values-usdfprod.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 0e0678e563..55af465716 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -104,6 +104,7 @@ influxdb-enterprise: cpu: 8 kafka-connect-manager: + enabled: false influxdbSink: # Based on the kafka producers configuration for the Summit # 
https://github.com/lsst-ts/argocd-csc/blob/main/apps/kafka-producers/values-summit.yaml From d052c6f827e056167223cefb3e505bb53eadaa92 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 17 Jan 2024 12:27:28 -0700 Subject: [PATCH 464/588] Update kapacitor InfluxDB connection --- applications/sasquatch/values-usdfprod.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 55af465716..a97f0c2317 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -274,7 +274,6 @@ chronograf: ingress: enabled: true hostname: usdf-rsp.slac.stanford.edu - env: GENERIC_NAME: "OIDC" GENERIC_AUTH_URL: https://usdf-rsp.slac.stanford.edu/auth/openid/login @@ -286,3 +285,6 @@ chronograf: GENERIC_API_KEY: sub PUBLIC_URL: https://usdf-rsp.slac.stanford.edu/ STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/usdfprod.json + +kapacitor: + influxURL: http://sasquatch-influxdb-enterprise-data.sasquatch:8086 From 4bc3beced8a0b054e973f00398963c0da609fa30 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 17 Jan 2024 14:09:33 -0500 Subject: [PATCH 465/588] Add new jira-data-proxy application Developed at https://github.com/lsst-sqre/jira-data-proxy the jira-data-proxy provides a read-only (only GET methods) proxy to the Jira REST API. A user can access this proxy using their common Gafaelfawr token and send GET requests to the Jira API through the proxy, which uses a built-in LDAP credential. This service is developed for Times Square reports and intended to be deployed for Rubin staff at USDF. Note: I needed to fix the name of vaultSecretsPathPrefix in the Application template; this will have to be fixed in the Phalanx templates going forward. 
--- applications/jira-data-proxy/.helmignore | 23 ++++++ applications/jira-data-proxy/Chart.yaml | 8 ++ applications/jira-data-proxy/README.md | 32 ++++++++ applications/jira-data-proxy/secrets.yaml | 4 + .../jira-data-proxy/templates/_helpers.tpl | 26 +++++++ .../jira-data-proxy/templates/configmap.yaml | 10 +++ .../jira-data-proxy/templates/deployment.yaml | 77 +++++++++++++++++++ .../jira-data-proxy/templates/hpa.yaml | 28 +++++++ .../jira-data-proxy/templates/ingress.yaml | 31 ++++++++ .../templates/networkpolicy.yaml | 21 +++++ .../jira-data-proxy/templates/service.yaml | 15 ++++ .../templates/vault-secret.yaml | 9 +++ .../jira-data-proxy/values-idfdev.yaml | 4 + .../jira-data-proxy/values-usdfdev.yaml | 4 + applications/jira-data-proxy/values.yaml | 74 ++++++++++++++++++ docs/applications/index.rst | 1 + docs/applications/jira-data-proxy/index.rst | 20 +++++ docs/applications/jira-data-proxy/values.md | 12 +++ environments/README.md | 1 + .../jira-data-proxy-application.yaml | 34 ++++++++ environments/values-idfdev.yaml | 1 + environments/values-usdfdev.yaml | 1 + environments/values.yaml | 3 + 23 files changed, 439 insertions(+) create mode 100644 applications/jira-data-proxy/.helmignore create mode 100644 applications/jira-data-proxy/Chart.yaml create mode 100644 applications/jira-data-proxy/README.md create mode 100644 applications/jira-data-proxy/secrets.yaml create mode 100644 applications/jira-data-proxy/templates/_helpers.tpl create mode 100644 applications/jira-data-proxy/templates/configmap.yaml create mode 100644 applications/jira-data-proxy/templates/deployment.yaml create mode 100644 applications/jira-data-proxy/templates/hpa.yaml create mode 100644 applications/jira-data-proxy/templates/ingress.yaml create mode 100644 applications/jira-data-proxy/templates/networkpolicy.yaml create mode 100644 applications/jira-data-proxy/templates/service.yaml create mode 100644 applications/jira-data-proxy/templates/vault-secret.yaml create mode 100644 
applications/jira-data-proxy/values-idfdev.yaml create mode 100644 applications/jira-data-proxy/values-usdfdev.yaml create mode 100644 applications/jira-data-proxy/values.yaml create mode 100644 docs/applications/jira-data-proxy/index.rst create mode 100644 docs/applications/jira-data-proxy/values.md create mode 100644 environments/templates/jira-data-proxy-application.yaml diff --git a/applications/jira-data-proxy/.helmignore b/applications/jira-data-proxy/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/applications/jira-data-proxy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/applications/jira-data-proxy/Chart.yaml b/applications/jira-data-proxy/Chart.yaml new file mode 100644 index 0000000000..90059d55fc --- /dev/null +++ b/applications/jira-data-proxy/Chart.yaml @@ -0,0 +1,8 @@ +apiVersion: v2 +appVersion: "tickets-DM-42460" +description: Jira API read-only proxy for Times Square users. +name: jira-data-proxy +sources: + - https://github.com/lsst-sqre/jira-data-proxy +type: application +version: 1.0.0 diff --git a/applications/jira-data-proxy/README.md b/applications/jira-data-proxy/README.md new file mode 100644 index 0000000000..6d69751f8a --- /dev/null +++ b/applications/jira-data-proxy/README.md @@ -0,0 +1,32 @@ +# jira-data-proxy + +Jira API read-only proxy for Times Square users. 
+ +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the jira-data-proxy deployment pod | +| autoscaling.enabled | bool | `false` | Enable autoscaling of jira-data-proxy deployment | +| autoscaling.maxReplicas | int | `100` | Maximum number of jira-data-proxy deployment pods | +| autoscaling.minReplicas | int | `1` | Minimum number of jira-data-proxy deployment pods | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of jira-data-proxy deployment pods | +| config.jiraUrl | string | `"https://jira.lsstcorp.org/"` | Jira base URL | +| config.logLevel | string | `"info"` | Logging level | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPathPrefix | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the jira-data-proxy image | +| image.repository | string | `"ghcr.io/lsst-sqre/jira-data-proxy"` | Image to use in the jira-data-proxy deployment | +| image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. 
| +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| ingress.path | string | `"/jira-data-proxy"` | Path prefix where jira-data-proxy is served | +| nodeSelector | object | `{}` | Node selection rules for the jira-data-proxy deployment pod | +| podAnnotations | object | `{}` | Annotations for the jira-data-proxy deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | `{}` | Resource limits and requests for the jira-data-proxy deployment pod | +| tolerations | list | `[]` | Tolerations for the jira-data-proxy deployment pod | diff --git a/applications/jira-data-proxy/secrets.yaml b/applications/jira-data-proxy/secrets.yaml new file mode 100644 index 0000000000..de40ddcac1 --- /dev/null +++ b/applications/jira-data-proxy/secrets.yaml @@ -0,0 +1,4 @@ +JIRA_USERNAME: + description: JIRA account username. +JIRA_PASSWORD: + description: JIRA account password. diff --git a/applications/jira-data-proxy/templates/_helpers.tpl b/applications/jira-data-proxy/templates/_helpers.tpl new file mode 100644 index 0000000000..4630659730 --- /dev/null +++ b/applications/jira-data-proxy/templates/_helpers.tpl @@ -0,0 +1,26 @@ +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "jira-data-proxy.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "jira-data-proxy.labels" -}} +helm.sh/chart: {{ include "jira-data-proxy.chart" . }} +{{ include "jira-data-proxy.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "jira-data-proxy.selectorLabels" -}} +app.kubernetes.io/name: "jira-data-proxy" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/jira-data-proxy/templates/configmap.yaml b/applications/jira-data-proxy/templates/configmap.yaml new file mode 100644 index 0000000000..2b7c79a267 --- /dev/null +++ b/applications/jira-data-proxy/templates/configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +data: + SAFIR_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + SAFIR_PATH_PREFIX: {{ .Values.ingress.path | quote }} + JIRA_BASE_URL: {{ .Values.config.jiraUrl | quote }} diff --git a/applications/jira-data-proxy/templates/deployment.yaml b/applications/jira-data-proxy/templates/deployment.yaml new file mode 100644 index 0000000000..a5325f7dcc --- /dev/null +++ b/applications/jira-data-proxy/templates/deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} + app.kubernetes.io/component: "server" + app.kubernetes.io/part-of: "jira-data-proxy" +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "jira-data-proxy.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "jira-data-proxy.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: "server" + app.kubernetes.io/part-of: "jira-data-proxy" + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + readOnlyRootFilesystem: true + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: "/" + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + envFrom: + - configMapRef: + name: "jira-data-proxy" + env: + - name: "JIRA_USERNAME" + valueFrom: + secretKeyRef: + name: "jira-data-proxy" + key: "JIRA_USERNAME" + - name: "JIRA_PASSWORD" + valueFrom: + secretKeyRef: + name: "jira-data-proxy" + key: "JIRA_PASSWORD" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/jira-data-proxy/templates/hpa.yaml b/applications/jira-data-proxy/templates/hpa.yaml new file mode 100644 index 0000000000..1b3370740e --- /dev/null +++ b/applications/jira-data-proxy/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: "jira-data-proxy" + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: "cpu" + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: "memory" + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/jira-data-proxy/templates/ingress.yaml b/applications/jira-data-proxy/templates/ingress.yaml new file mode 100644 index 0000000000..771d96fc5e --- /dev/null +++ b/applications/jira-data-proxy/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "exec:notebook" + loginRedirect: false # endpoint is for API use only +template: + metadata: + name: "jira-data-proxy" + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: {{ .Values.ingress.path | quote }} + pathType: "Prefix" + backend: + service: + name: "jira-data-proxy" + port: + number: 8080 diff --git a/applications/jira-data-proxy/templates/networkpolicy.yaml b/applications/jira-data-proxy/templates/networkpolicy.yaml new file mode 100644 index 0000000000..affb92cc0e --- /dev/null +++ b/applications/jira-data-proxy/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "jira-data-proxy" +spec: + podSelector: + matchLabels: + {{- include "jira-data-proxy.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/jira-data-proxy/templates/service.yaml b/applications/jira-data-proxy/templates/service.yaml new file mode 100644 index 0000000000..93e189e821 --- /dev/null +++ b/applications/jira-data-proxy/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "jira-data-proxy.selectorLabels" . 
| nindent 4 }} diff --git a/applications/jira-data-proxy/templates/vault-secret.yaml b/applications/jira-data-proxy/templates/vault-secret.yaml new file mode 100644 index 0000000000..0a37921097 --- /dev/null +++ b/applications/jira-data-proxy/templates/vault-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: "jira-data-proxy" + labels: + {{- include "jira-data-proxy.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPathPrefix }}/jira-data-proxy" + type: Opaque diff --git a/applications/jira-data-proxy/values-idfdev.yaml b/applications/jira-data-proxy/values-idfdev.yaml new file mode 100644 index 0000000000..d31626eed3 --- /dev/null +++ b/applications/jira-data-proxy/values-idfdev.yaml @@ -0,0 +1,4 @@ +image: + pullPolicy: Always +config: + logLevel: "DEBUG" diff --git a/applications/jira-data-proxy/values-usdfdev.yaml b/applications/jira-data-proxy/values-usdfdev.yaml new file mode 100644 index 0000000000..d31626eed3 --- /dev/null +++ b/applications/jira-data-proxy/values-usdfdev.yaml @@ -0,0 +1,4 @@ +image: + pullPolicy: Always +config: + logLevel: "DEBUG" diff --git a/applications/jira-data-proxy/values.yaml b/applications/jira-data-proxy/values.yaml new file mode 100644 index 0000000000..8b08a3851d --- /dev/null +++ b/applications/jira-data-proxy/values.yaml @@ -0,0 +1,74 @@ +# Default values for jira-data-proxy. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +config: + # -- Logging level + logLevel: "info" + + # -- Jira base URL + jiraUrl: "https://jira.lsstcorp.org/" + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the jira-data-proxy deployment + repository: "ghcr.io/lsst-sqre/jira-data-proxy" + + # -- Pull policy for the jira-data-proxy image + pullPolicy: "IfNotPresent" + + # -- Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + + # -- Path prefix where jira-data-proxy is served + path: "/jira-data-proxy" + +autoscaling: + # -- Enable autoscaling of jira-data-proxy deployment + enabled: false + + # -- Minimum number of jira-data-proxy deployment pods + minReplicas: 1 + + # -- Maximum number of jira-data-proxy deployment pods + maxReplicas: 100 + + # -- Target CPU utilization of jira-data-proxy deployment pods + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Annotations for the jira-data-proxy deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the jira-data-proxy deployment pod +resources: {} + +# -- Node selection rules for the jira-data-proxy deployment pod +nodeSelector: {} + +# -- Tolerations for the jira-data-proxy deployment pod +tolerations: [] + +# -- Affinity rules for the jira-data-proxy deployment pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPathPrefix: "" diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 3acb272e61..d9c5c08f91 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -49,6 +49,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde argo-workflows/index alert-stream-broker/index exposurelog/index + jira-data-proxy/index narrativelog/index obsloctap/index plot-navigator/index diff --git a/docs/applications/jira-data-proxy/index.rst b/docs/applications/jira-data-proxy/index.rst new file mode 100644 index 0000000000..5228342a03 --- /dev/null +++ b/docs/applications/jira-data-proxy/index.rst @@ -0,0 +1,20 @@ +.. px-app:: jira-data-proxy + +########################################################### +jira-data-proxy — Jira API read-only proxy for Times Square +########################################################### + +jira-data-proxy provides read-only access to the Rubin Jira API. +This app is built for Times Square so that notebooks can access the Jira API without external credentials. +This app only implements GET endpoints. + +.. jinja:: jira-data-proxy + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/jira-data-proxy/values.md b/docs/applications/jira-data-proxy/values.md new file mode 100644 index 0000000000..dfedf06ddc --- /dev/null +++ b/docs/applications/jira-data-proxy/values.md @@ -0,0 +1,12 @@ +```{px-app-values} jira-data-proxy +``` + +# jira-data-proxy Helm values reference + +Helm values reference table for the {px-app}`jira-data-proxy` application. 
+ +```{include} ../../../applications/jira-data-proxy/README.md +--- +start-after: "## Values" +--- +``` diff --git a/environments/README.md b/environments/README.md index 52afa9fe6f..71f5b10c8d 100644 --- a/environments/README.md +++ b/environments/README.md @@ -15,6 +15,7 @@ | applications.giftless | bool | `false` | Enable the giftless application | | applications.hips | bool | `false` | Enable the HiPS application | | applications.ingress-nginx | bool | `true` | Enable the ingress-nginx application. This is required for all environments, but is still configurable because currently USDF uses an unsupported configuration with ingress-nginx deployed in a different cluster. | +| applications.jira-data-proxy | bool | `false` | Enable the jira-data-proxy application | | applications.kubernetes-replicator | bool | `false` | Enable the kubernetes-replicator application | | applications.linters | bool | `false` | Enable the linters application | | applications.livetap | bool | `false` | Enable the livetap application | diff --git a/environments/templates/jira-data-proxy-application.yaml b/environments/templates/jira-data-proxy-application.yaml new file mode 100644 index 0000000000..fddc00e132 --- /dev/null +++ b/environments/templates/jira-data-proxy-application.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "jira-data-proxy") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "jira-data-proxy" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "jira-data-proxy" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "jira-data-proxy" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/jira-data-proxy" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ 
.Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} \ No newline at end of file diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index 801bc0196a..d8ce3b6431 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -15,6 +15,7 @@ applications: butler: true datalinker: true hips: true + jira-data-proxy: true mobu: true noteburst: true nublado: true diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index c2709fb3e3..c6598cc449 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -13,6 +13,7 @@ applications: alert-stream-broker: true datalinker: true exposurelog: true + jira-data-proxy: true livetap: true mobu: true narrativelog: true diff --git a/environments/values.yaml b/environments/values.yaml index db8444c3bc..11295fd367 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -65,6 +65,9 @@ applications: # cluster. ingress-nginx: true + # -- Enable the jira-data-proxy application + jira-data-proxy: false + # -- Enable the kubernetes-replicator application kubernetes-replicator: false From ffe47a9ff0f88f53e56831ed03c7dda01b15d03c Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 17 Jan 2024 16:10:10 -0500 Subject: [PATCH 466/588] Change global variable to vaultSecretsPath This matches the global variable set by the Application template. 
--- applications/jira-data-proxy/README.md | 2 +- applications/jira-data-proxy/templates/vault-secret.yaml | 2 +- applications/jira-data-proxy/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/jira-data-proxy/README.md b/applications/jira-data-proxy/README.md index 6d69751f8a..5c7967a639 100644 --- a/applications/jira-data-proxy/README.md +++ b/applications/jira-data-proxy/README.md @@ -19,7 +19,7 @@ Jira API read-only proxy for Times Square users. | config.logLevel | string | `"info"` | Logging level | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPathPrefix | string | Set by Argo CD | Base path for Vault secrets | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the jira-data-proxy image | | image.repository | string | `"ghcr.io/lsst-sqre/jira-data-proxy"` | Image to use in the jira-data-proxy deployment | | image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. | diff --git a/applications/jira-data-proxy/templates/vault-secret.yaml b/applications/jira-data-proxy/templates/vault-secret.yaml index 0a37921097..609a812e36 100644 --- a/applications/jira-data-proxy/templates/vault-secret.yaml +++ b/applications/jira-data-proxy/templates/vault-secret.yaml @@ -5,5 +5,5 @@ metadata: labels: {{- include "jira-data-proxy.labels" . 
| nindent 4 }} spec: - path: "{{ .Values.global.vaultSecretsPathPrefix }}/jira-data-proxy" + path: "{{ .Values.global.vaultSecretsPath }}/jira-data-proxy" type: Opaque diff --git a/applications/jira-data-proxy/values.yaml b/applications/jira-data-proxy/values.yaml index 8b08a3851d..acee883907 100644 --- a/applications/jira-data-proxy/values.yaml +++ b/applications/jira-data-proxy/values.yaml @@ -71,4 +71,4 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD - vaultSecretsPathPrefix: "" + vaultSecretsPath: "" From 3c46b79e9b64d464b2093c790e91f50effed711f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 18 Jan 2024 14:13:21 -0800 Subject: [PATCH 467/588] Avoid helm repo update on application template If an application that is the target of a phalanx application template command has no dependency Helm repositories, avoid running a helm repo update. Since we added no repositories, helm repo update may fail if the user has no Helm repositories defined. --- src/phalanx/services/application.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index eb1b63c684..f07a990063 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -241,8 +241,8 @@ def template(self, app_name: str, env_name: str) -> str: HelmFailedError Raised if Helm fails. 
""" - self.add_helm_repositories([app_name], quiet=True) - self._helm.repo_update(quiet=True) + if self.add_helm_repositories([app_name], quiet=True): + self._helm.repo_update(quiet=True) self._helm.dependency_update(app_name, quiet=True) environment = self._config.load_environment(env_name) values = self._build_injected_values(app_name, environment) From 1c75423b4d2d0d418f6821b979204f6087a9a88b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 19 Jan 2024 11:04:15 -0800 Subject: [PATCH 468/588] Bump limits for gafaelfawr-operator pod The limits were too tight to run gafaelfawr audit --fix. --- applications/gafaelfawr/README.md | 2 +- applications/gafaelfawr/values.yaml | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index f3b1f9ed93..f6b6689e93 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -97,7 +97,7 @@ Authentication and identity system | operator.affinity | object | `{}` | Affinity rules for the token management pod | | operator.nodeSelector | object | `{}` | Node selection rules for the token management pod | | operator.podAnnotations | object | `{}` | Annotations for the token management pod | -| operator.resources | object | See `values.yaml` | Resource limits and requests for the Gafaelfawr Kubernetes operator | +| operator.resources | object | See `values.yaml` | Resource limits and requests for the Gafaelfawr Kubernetes operator. The limits are artificially higher since the operator pod is also where we manually run `gafaelfawr audit --fix`, which requires more CPU and memory. 
| | operator.tolerations | list | `[]` | Tolerations for the token management pod | | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | redis.affinity | object | `{}` | Affinity rules for the Redis pod | diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 5dfa393634..a0cd63df4c 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -390,12 +390,14 @@ maintenance: affinity: {} operator: - # -- Resource limits and requests for the Gafaelfawr Kubernetes operator + # -- Resource limits and requests for the Gafaelfawr Kubernetes operator. + # The limits are artificially higher since the operator pod is also where we + # manually run `gafaelfawr audit --fix`, which requires more CPU and memory. # @default -- See `values.yaml` resources: limits: - cpu: "100m" - memory: "300Mi" + cpu: "500m" + memory: "500Mi" requests: cpu: "10m" memory: "150Mi" From 2fe39ba6a9ed5ea39ee374a63384ab7412ac9390 Mon Sep 17 00:00:00 2001 From: "David H. Irving" Date: Wed, 17 Jan 2024 15:46:34 -0700 Subject: [PATCH 469/588] Add delegated token for datalinker links --- applications/datalinker/templates/ingress-image.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/datalinker/templates/ingress-image.yaml b/applications/datalinker/templates/ingress-image.yaml index 889ba5e5ca..a2aa58f7b7 100644 --- a/applications/datalinker/templates/ingress-image.yaml +++ b/applications/datalinker/templates/ingress-image.yaml @@ -9,6 +9,13 @@ config: scopes: all: - "read:image" + # Request a delegated token to use for making calls to Butler server with the + # end-user's credentials. + delegate: + internal: + service: "datalinker" + scopes: + - "read:image" template: metadata: name: {{ include "datalinker.fullname" . 
}}-image From 24947da42253e07e5fa68cfa892a6f4d142321eb Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 19 Jan 2024 16:15:08 -0800 Subject: [PATCH 470/588] Update to Nublado release 4.0.2 Hopefully improves error reporting by switching to pure ASGI middleware. --- applications/nublado/Chart.yaml | 2 +- applications/nublado/README.md | 2 +- applications/nublado/values-base.yaml | 2 +- applications/nublado/values-ccin2p3.yaml | 2 +- applications/nublado/values-idfdev.yaml | 2 +- applications/nublado/values-idfint.yaml | 2 +- applications/nublado/values-idfprod.yaml | 2 +- applications/nublado/values-roe.yaml | 2 +- applications/nublado/values-summit.yaml | 2 +- applications/nublado/values-tucson-teststand.yaml | 2 +- applications/nublado/values.yaml | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 269be6d27a..8714a9229c 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -5,7 +5,7 @@ description: JupyterHub and custom spawner for the Rubin Science Platform sources: - https://github.com/lsst-sqre/nublado home: https://nublado.lsst.io/ -appVersion: 4.0.1 +appVersion: 4.0.2 dependencies: - name: jupyterhub diff --git a/applications/nublado/README.md b/applications/nublado/README.md index ac8259d5fd..78b2828637 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -99,7 +99,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | | jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | | jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/nublado-jupyterhub"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"4.0.1"` | Tag of image to use for JupyterHub | +| 
jupyterhub.hub.image.tag | string | `"4.0.2"` | Tag of image to use for JupyterHub | | jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | | jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | | jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | diff --git a/applications/nublado/values-base.yaml b/applications/nublado/values-base.yaml index 31d68aef1a..4d7e510c72 100644 --- a/applications/nublado/values-base.yaml +++ b/applications/nublado/values-base.yaml @@ -24,7 +24,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-ccin2p3.yaml b/applications/nublado/values-ccin2p3.yaml index b4346fe3df..58d5ae0294 100644 --- a/applications/nublado/values-ccin2p3.yaml +++ b/applications/nublado/values-ccin2p3.yaml @@ -23,7 +23,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index 2d811a22c8..053f136afe 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -27,7 +27,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index d181716684..1bd223b5b4 100644 --- a/applications/nublado/values-idfint.yaml +++ 
b/applications/nublado/values-idfint.yaml @@ -36,7 +36,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 6960cb560f..764adc9635 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -24,7 +24,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-roe.yaml b/applications/nublado/values-roe.yaml index 656504f0c5..04cb8d0f55 100644 --- a/applications/nublado/values-roe.yaml +++ b/applications/nublado/values-roe.yaml @@ -14,7 +14,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-summit.yaml b/applications/nublado/values-summit.yaml index 3db89c8105..5faa7dbb87 100644 --- a/applications/nublado/values-summit.yaml +++ b/applications/nublado/values-summit.yaml @@ -24,7 +24,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 7338e82020..84d54b6ecf 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -24,7 +24,7 @@ controller: - name: "inithome" image: repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.1" + tag: "4.0.2" privileged: true volumeMounts: - containerPath: "/home" diff --git a/applications/nublado/values.yaml 
b/applications/nublado/values.yaml index c3e21a764c..aaa537185f 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -381,7 +381,7 @@ jupyterhub: name: "ghcr.io/lsst-sqre/nublado-jupyterhub" # -- Tag of image to use for JupyterHub - tag: "4.0.1" + tag: "4.0.2" # -- Resource limits and requests resources: From 928f4533bf9a7fee4871d6d68f80506ff9cf677e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 10:01:54 +0000 Subject: [PATCH 471/588] Update Helm release argo-workflows to v0.40.7 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 4676dc65f7..23d1cb0757 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.40.6 + version: 0.40.7 repository: https://argoproj.github.io/argo-helm From a8a30b48d25aec0d623fd7e187f7efee04b6bf04 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 10:02:00 +0000 Subject: [PATCH 472/588] Update gcr.io/cloudsql-docker/gce-proxy Docker tag to v1.33.16 --- applications/gafaelfawr/values.yaml | 2 +- applications/nublado/values.yaml | 2 +- applications/sqlproxy-cross-project/values.yaml | 2 +- applications/times-square/values.yaml | 2 +- applications/vo-cutouts/values.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index a0cd63df4c..8c97dca5d9 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -316,7 +316,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.15" + tag: "1.33.16" # 
-- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index aaa537185f..3ca8799eed 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -538,7 +538,7 @@ cloudsql: pullPolicy: "IfNotPresent" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.15" + tag: "1.33.16" # -- Instance connection name for a Cloud SQL PostgreSQL instance # @default -- None, must be set if Cloud SQL Auth Proxy is enabled diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index ab096071df..40f43ce31a 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.33.15" + tag: "1.33.16" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index fa3f53e24a..265246f8ee 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -126,7 +126,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.15" + tag: "1.33.16" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index 4b2c230498..3ec5120b43 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.15" + tag: "1.33.16" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" From 80c731ef1e26338653070b666bd305408a90c303 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" 
<136651988+neophile-square[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 12:32:56 +0000 Subject: [PATCH 473/588] [neophile] Update dependencies - Update frozen Python dependencies --- requirements/dev.txt | 171 +++++++++++++++++++++--------------------- requirements/main.txt | 135 ++++++++++++++++----------------- 2 files changed, 154 insertions(+), 152 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index c55c823a3e..a64a554260 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -28,9 +28,9 @@ babel==2.14.0 \ --hash=sha256:6919867db036398ba21eb5c7a0f6b28ab8cbc3ae7a73a44ebe34ae74a4e7d363 \ --hash=sha256:efb1a25b7118e67ce3a259bed20545c29cb68be8ad2c784c83689981b7a57287 # via sphinx -beautifulsoup4==4.12.2 \ - --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ - --hash=sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a +beautifulsoup4==4.12.3 \ + --hash=sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051 \ + --hash=sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed # via pydata-sphinx-theme certifi==2023.11.17 \ --hash=sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1 \ @@ -274,9 +274,9 @@ jinja2==3.1.3 \ # sphinx # sphinx-jinja # sphinxcontrib-redoc -jsonschema==4.20.0 \ - --hash=sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa \ - --hash=sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3 +jsonschema==4.21.1 \ + --hash=sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f \ + --hash=sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5 # via sphinxcontrib-redoc jsonschema-specifications==2023.12.1 \ --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \ @@ -298,67 +298,67 @@ markdown-it-py[linkify]==3.0.0 \ # mdit-py-plugins # myst-parser # rich -markupsafe==2.1.3 \ - 
--hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ - --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ - --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ - --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ - --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ - --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ - --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ - --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ - --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ - --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ - --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ - --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ - --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ - --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ - --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ - --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ - --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ - --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ - --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ - --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ - --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ - --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ - --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ - --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ - 
--hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ - --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ - --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ - --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ - --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ - --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ - --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ - --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ - --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ - --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ - --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ - --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ - --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ - --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ - --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ - --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ - --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ - --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ - --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ - --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ - --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ - --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ - --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ - --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ - 
--hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ - --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ - --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ - --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ - --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ - --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ - --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ - --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ - --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ - --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ - --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ - --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 +markupsafe==2.1.4 \ + --hash=sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69 \ + --hash=sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0 \ + --hash=sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d \ + --hash=sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec \ + --hash=sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5 \ + --hash=sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411 \ + --hash=sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3 \ + --hash=sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74 \ + --hash=sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0 \ + --hash=sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949 \ + --hash=sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d \ + 
--hash=sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279 \ + --hash=sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f \ + --hash=sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6 \ + --hash=sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc \ + --hash=sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e \ + --hash=sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954 \ + --hash=sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656 \ + --hash=sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc \ + --hash=sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518 \ + --hash=sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56 \ + --hash=sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc \ + --hash=sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa \ + --hash=sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565 \ + --hash=sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4 \ + --hash=sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb \ + --hash=sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250 \ + --hash=sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4 \ + --hash=sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959 \ + --hash=sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc \ + --hash=sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474 \ + --hash=sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863 \ + --hash=sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8 \ + --hash=sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f \ + --hash=sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2 \ + 
--hash=sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e \ + --hash=sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e \ + --hash=sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb \ + --hash=sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f \ + --hash=sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a \ + --hash=sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26 \ + --hash=sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d \ + --hash=sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2 \ + --hash=sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131 \ + --hash=sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789 \ + --hash=sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6 \ + --hash=sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a \ + --hash=sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858 \ + --hash=sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e \ + --hash=sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb \ + --hash=sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e \ + --hash=sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84 \ + --hash=sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7 \ + --hash=sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea \ + --hash=sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b \ + --hash=sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6 \ + --hash=sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475 \ + --hash=sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74 \ + --hash=sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a \ + 
--hash=sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00 # via # -c requirements/main.txt # jinja2 @@ -625,6 +625,7 @@ pyyaml==6.0.1 \ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ @@ -773,24 +774,24 @@ rpds-py==0.17.1 \ # via # jsonschema # referencing -ruff==0.1.13 \ - --hash=sha256:226b517f42d59a543d6383cfe03cccf0091e3e0ed1b856c6824be03d2a75d3b6 \ - --hash=sha256:2f59bcf5217c661254bd6bc42d65a6fd1a8b80c48763cb5c2293295babd945dd \ - --hash=sha256:5f0312ba1061e9b8c724e9a702d3c8621e3c6e6c2c9bd862550ab2951ac75c16 \ - --hash=sha256:6bbbc3042075871ec17f28864808540a26f0f79a4478c357d3e3d2284e832998 \ - --hash=sha256:7a36fa90eb12208272a858475ec43ac811ac37e91ef868759770b71bdabe27b6 \ - --hash=sha256:9a1600942485c6e66119da294c6294856b5c86fd6df591ce293e4a4cc8e72989 \ - --hash=sha256:9ebb40442f7b531e136d334ef0851412410061e65d61ca8ce90d894a094feb22 \ - --hash=sha256:9fb6b3b86450d4ec6a6732f9f60c4406061b6851c4b29f944f8c9d91c3611c7a \ - --hash=sha256:a623349a505ff768dad6bd57087e2461be8db58305ebd5577bd0e98631f9ae69 \ - --hash=sha256:b13ba5d7156daaf3fd08b6b993360a96060500aca7e307d95ecbc5bb47a69296 \ - --hash=sha256:dcaab50e278ff497ee4d1fe69b29ca0a9a47cd954bb17963628fa417933c6eb1 \ - --hash=sha256:e261f1baed6291f434ffb1d5c6bd8051d1c2a26958072d38dfbec39b3dda7352 \ - --hash=sha256:e3fd36e0d48aeac672aa850045e784673449ce619afc12823ea7868fcc41d8ba \ - --hash=sha256:e6894b00495e00c27b6ba61af1fc666f17de6140345e5ef27dd6e08fb987259d \ - 
--hash=sha256:ee3febce7863e231a467f90e681d3d89210b900d49ce88723ce052c8761be8c7 \ - --hash=sha256:f57de973de4edef3ad3044d6a50c02ad9fc2dff0d88587f25f1a48e3f72edf5e \ - --hash=sha256:f988746e3c3982bea7f824c8fa318ce7f538c4dfefec99cd09c8770bd33e6539 +ruff==0.1.14 \ + --hash=sha256:1c8eca1a47b4150dc0fbec7fe68fc91c695aed798532a18dbb1424e61e9b721f \ + --hash=sha256:2270504d629a0b064247983cbc495bed277f372fb9eaba41e5cf51f7ba705a6a \ + --hash=sha256:269302b31ade4cde6cf6f9dd58ea593773a37ed3f7b97e793c8594b262466b67 \ + --hash=sha256:62ce2ae46303ee896fc6811f63d6dabf8d9c389da0f3e3f2bce8bc7f15ef5488 \ + --hash=sha256:653230dd00aaf449eb5ff25d10a6e03bc3006813e2cb99799e568f55482e5cae \ + --hash=sha256:6b3dadc9522d0eccc060699a9816e8127b27addbb4697fc0c08611e4e6aeb8b5 \ + --hash=sha256:7060156ecc572b8f984fd20fd8b0fcb692dd5d837b7606e968334ab7ff0090ab \ + --hash=sha256:722bafc299145575a63bbd6b5069cb643eaa62546a5b6398f82b3e4403329cab \ + --hash=sha256:80258bb3b8909b1700610dfabef7876423eed1bc930fe177c71c414921898efa \ + --hash=sha256:87b3acc6c4e6928459ba9eb7459dd4f0c4bf266a053c863d72a44c33246bfdbf \ + --hash=sha256:96f76536df9b26622755c12ed8680f159817be2f725c17ed9305b472a757cdbb \ + --hash=sha256:a53d8e35313d7b67eb3db15a66c08434809107659226a90dcd7acb2afa55faea \ + --hash=sha256:ab3f71f64498c7241123bb5a768544cf42821d2a537f894b22457a543d3ca7a9 \ + --hash=sha256:ad3f8088b2dfd884820289a06ab718cde7d38b94972212cc4ba90d5fbc9955f3 \ + --hash=sha256:b2027dde79d217b211d725fc833e8965dc90a16d0d3213f1298f97465956661b \ + --hash=sha256:bea9be712b8f5b4ebed40e1949379cfb2a7d907f42921cf9ab3aae07e6fba9eb \ + --hash=sha256:e3d241aa61f92b0805a7082bd89a9990826448e4d0398f0e2bc8f05c75c63d99 # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ diff --git a/requirements/main.txt b/requirements/main.txt index 545e6d3ae6..a9bbe97299 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -275,67 +275,67 @@ jinja2==3.1.3 \ 
--hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 # via -r requirements/main.in -markupsafe==2.1.3 \ - --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ - --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ - --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ - --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ - --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ - --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ - --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ - --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ - --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ - --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ - --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ - --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ - --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ - --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ - --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ - --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ - --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ - --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ - --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ - --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ - --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ - 
--hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ - --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ - --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ - --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ - --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ - --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ - --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ - --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ - --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ - --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ - --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ - --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ - --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ - --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ - --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ - --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ - --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ - --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ - --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ - --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ - --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ - --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ - --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ - --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ - 
--hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ - --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ - --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ - --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ - --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ - --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ - --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ - --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ - --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ - --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ - --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ - --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ - --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ - --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ - --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 +markupsafe==2.1.4 \ + --hash=sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69 \ + --hash=sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0 \ + --hash=sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d \ + --hash=sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec \ + --hash=sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5 \ + --hash=sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411 \ + --hash=sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3 \ + --hash=sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74 \ + 
--hash=sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0 \ + --hash=sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949 \ + --hash=sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d \ + --hash=sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279 \ + --hash=sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f \ + --hash=sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6 \ + --hash=sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc \ + --hash=sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e \ + --hash=sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954 \ + --hash=sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656 \ + --hash=sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc \ + --hash=sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518 \ + --hash=sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56 \ + --hash=sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc \ + --hash=sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa \ + --hash=sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565 \ + --hash=sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4 \ + --hash=sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb \ + --hash=sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250 \ + --hash=sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4 \ + --hash=sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959 \ + --hash=sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc \ + --hash=sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474 \ + --hash=sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863 \ + 
--hash=sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8 \ + --hash=sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f \ + --hash=sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2 \ + --hash=sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e \ + --hash=sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e \ + --hash=sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb \ + --hash=sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f \ + --hash=sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a \ + --hash=sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26 \ + --hash=sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d \ + --hash=sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2 \ + --hash=sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131 \ + --hash=sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789 \ + --hash=sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6 \ + --hash=sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a \ + --hash=sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858 \ + --hash=sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e \ + --hash=sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb \ + --hash=sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e \ + --hash=sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84 \ + --hash=sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7 \ + --hash=sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea \ + --hash=sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b \ + --hash=sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6 \ + 
--hash=sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475 \ + --hash=sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74 \ + --hash=sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a \ + --hash=sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00 # via jinja2 onepasswordconnectsdk==1.4.1 \ --hash=sha256:133defedbc4a4658f68e32865330c2d6844b132763037b984cb74aa21dd1e7f5 \ @@ -499,6 +499,7 @@ pyyaml==6.0.1 \ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ @@ -531,9 +532,9 @@ rfc3986[idna2008]==1.5.0 \ # via # httpx # rfc3986 -safir==5.1.0 \ - --hash=sha256:0e4162b3b1fca558b037c06d7221b96996d7a55c92108e2e28e744d224c0076d \ - --hash=sha256:e04019e7e914aefc5ce1a9ca73c227eb3a84255d952bcd4cf3746e11cf7b1a15 +safir==5.2.0 \ + --hash=sha256:93636256dbeea847d63de6d3b434c952f81d6729f6541da09bfc7823d3f61806 \ + --hash=sha256:fe8f51d00449a60544f9eccdb8e603f085fdd4f641f5d5f13cef0f29393aad3f # via -r requirements/main.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -556,9 +557,9 @@ starlette==0.35.1 \ # via # fastapi # safir -structlog==23.3.0 \ - --hash=sha256:24b42b914ac6bc4a4e6f716e82ac70d7fb1e8c3b1035a765591953bfc37101a5 \ - --hash=sha256:d6922a88ceabef5b13b9eda9c4043624924f60edbb00397f4d193bd754cde60a +structlog==24.1.0 \ + --hash=sha256:3f6efe7d25fab6e86f277713c218044669906537bb717c1807a09d46bca0714d \ + 
--hash=sha256:41a09886e4d55df25bdcb9b5c9674bccfab723ff43e0a86a1b7b236be8e57b16 # via safir typing-extensions==4.9.0 \ --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \ From 70ee04a7ef06aec676b3543973e6ec57e5710614 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 22 Jan 2024 15:06:24 +0000 Subject: [PATCH 474/588] Update Helm release argo-cd to v5.53.6 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 76801d9e57..f088aacc9a 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.52.2 + version: 5.53.6 repository: https://argoproj.github.io/argo-helm From ad5b809b139b530f1c3903192b9dbfd888928a76 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 22 Jan 2024 08:53:17 -0800 Subject: [PATCH 475/588] Update Helm docs --- applications/gafaelfawr/README.md | 2 +- applications/nublado/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index f6b6689e93..9facb8b1e0 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,7 +17,7 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. 
| | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 78b2828637..fb4bd65db2 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -16,7 +16,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with Cloud SQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Auth Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Auth Proxy pod | diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 5d53e76e72..1e7664aa30 100644 --- 
a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.33.15"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.33.16"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 7d0e998cb5..3159971dcb 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index 6a3d5959c6..3f919e8c27 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.15"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.16"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and 
ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | From 0157f5cc4eff60507a8aacb88d3aaa241aee48c4 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 31 Mar 2023 16:36:45 -0700 Subject: [PATCH 476/588] Starting work. --- .yamllint.yml | 4 +- applications/auxtel/Chart.yaml | 10 + applications/auxtel/README.md | 15 ++ applications/auxtel/charts/csc | 1 + applications/auxtel/charts/csc_collector | 1 + .../auxtel/values-tucson-teststand.yaml | 27 +++ applications/auxtel/values.yaml | 1 + applications/eas/Chart.yaml | 13 ++ applications/eas/README.md | 15 ++ applications/eas/charts/csc | 1 + applications/eas/charts/csc_collector | 1 + applications/eas/values-tucson-teststand.yaml | 18 ++ applications/eas/values.yaml | 1 + .../templates/auxtel-application.yaml | 37 ++++ environments/templates/eas-application.yaml | 37 ++++ environments/values-tucson-teststand.yaml | 2 + environments/values.yaml | 6 + shared/charts/csc/Chart.yaml | 4 + shared/charts/csc/README.md | 33 ++++ shared/charts/csc/templates/_helpers.tpl | 76 ++++++++ .../csc/templates/configfile-configmap.yaml | 11 ++ .../csc/templates/entrypoint-configmap.yaml | 10 + shared/charts/csc/templates/job.yaml | 181 ++++++++++++++++++ .../charts/csc/templates/mountpoint-pvc.yaml | 26 +++ shared/charts/csc/templates/service.yaml | 15 ++ shared/charts/csc/values.yaml | 93 +++++++++ shared/charts/csc_collector/Chart.yaml | 4 + shared/charts/csc_collector/README.md | 11 ++ .../templates/configmap-env.yaml | 9 + .../csc_collector/templates/vault-secret.yaml | 11 ++ shared/charts/csc_collector/values.yaml | 12 ++ shared/values/values_control_system_apps.yaml | 31 +++ 32 files changed, 716 insertions(+), 1 deletion(-) create mode 100644 applications/auxtel/Chart.yaml create mode 100644 applications/auxtel/README.md create mode 120000 applications/auxtel/charts/csc create mode 120000 applications/auxtel/charts/csc_collector create mode 100644 
applications/auxtel/values-tucson-teststand.yaml create mode 120000 applications/auxtel/values.yaml create mode 100644 applications/eas/Chart.yaml create mode 100644 applications/eas/README.md create mode 120000 applications/eas/charts/csc create mode 120000 applications/eas/charts/csc_collector create mode 100644 applications/eas/values-tucson-teststand.yaml create mode 120000 applications/eas/values.yaml create mode 100644 environments/templates/auxtel-application.yaml create mode 100644 environments/templates/eas-application.yaml create mode 100644 shared/charts/csc/Chart.yaml create mode 100644 shared/charts/csc/README.md create mode 100644 shared/charts/csc/templates/_helpers.tpl create mode 100644 shared/charts/csc/templates/configfile-configmap.yaml create mode 100644 shared/charts/csc/templates/entrypoint-configmap.yaml create mode 100644 shared/charts/csc/templates/job.yaml create mode 100644 shared/charts/csc/templates/mountpoint-pvc.yaml create mode 100644 shared/charts/csc/templates/service.yaml create mode 100644 shared/charts/csc/values.yaml create mode 100644 shared/charts/csc_collector/Chart.yaml create mode 100644 shared/charts/csc_collector/README.md create mode 100644 shared/charts/csc_collector/templates/configmap-env.yaml create mode 100644 shared/charts/csc_collector/templates/vault-secret.yaml create mode 100644 shared/charts/csc_collector/values.yaml create mode 100644 shared/values/values_control_system_apps.yaml diff --git a/.yamllint.yml b/.yamllint.yml index 64842f4b49..3994a4d6b9 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -1,6 +1,8 @@ extends: default -ignore: templates +ignore: + - templates + - shared/charts rules: line-length: disable diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml new file mode 100644 index 0000000000..fff785cbb6 --- /dev/null +++ b/applications/auxtel/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: auxtel +version: 1.0.0 +description: Deployment for the Auxiliary Telescope 
CSCs +dependencies: + - name: csc_collector + version: 1.0.0 + - name: csc + alias: ataos + version: 1.0.0 diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md new file mode 100644 index 0000000000..001c3e2398 --- /dev/null +++ b/applications/auxtel/README.md @@ -0,0 +1,15 @@ +# auxtel + +Deployment for the Auxiliary Telescope CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| imageTag | string | `""` | The default image tag for all of the child applications | +| namespace | string | `""` | | diff --git a/applications/auxtel/charts/csc b/applications/auxtel/charts/csc new file mode 120000 index 0000000000..294046490f --- /dev/null +++ b/applications/auxtel/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc \ No newline at end of file diff --git a/applications/auxtel/charts/csc_collector b/applications/auxtel/charts/csc_collector new file mode 120000 index 0000000000..3ced684acb --- /dev/null +++ b/applications/auxtel/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector \ No newline at end of file diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml new file mode 100644 index 0000000000..e8dae24ec6 --- /dev/null +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -0,0 
+1,27 @@ +namespace: &ns auxtel +imageTag: &imageTag c0029 + +csc_collector: + namespace: *ns + siteTag: tucson + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + - name: butler-secret + key: butler-secret + +ataos: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/ataos + tag: *imageTag + pullPolicy: Always diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/auxtel/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml new file mode 100644 index 0000000000..c755f290c9 --- /dev/null +++ b/applications/eas/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: eas +version: 1.0.0 +description: Deployment for the Environmental Awareness Sytems CSCs +dependencies: + - name: csc_collector + version: 1.0.0 + - name: csc + alias: dimm + version: 1.0.0 + - name: csc + alias: dimm1-sim + version: 1.0.0 diff --git a/applications/eas/README.md b/applications/eas/README.md new file mode 100644 index 0000000000..3fc769c5c6 --- /dev/null +++ b/applications/eas/README.md @@ -0,0 +1,15 @@ +# eas + +Deployment for the Environmental Awareness Sytems CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| imageTag | string | `""` | The default image tag for all of the child applications | +| namespace | string | `""` | | diff --git a/applications/eas/charts/csc b/applications/eas/charts/csc new file mode 120000 index 0000000000..294046490f --- /dev/null +++ b/applications/eas/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc \ No newline at end of file diff --git a/applications/eas/charts/csc_collector b/applications/eas/charts/csc_collector new file mode 120000 index 0000000000..3ced684acb --- /dev/null +++ b/applications/eas/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector \ No newline at end of file diff --git a/applications/eas/values-tucson-teststand.yaml b/applications/eas/values-tucson-teststand.yaml new file mode 100644 index 0000000000..5be425bf5a --- /dev/null +++ b/applications/eas/values-tucson-teststand.yaml @@ -0,0 +1,18 @@ +namespace: &ns eas +imageTag: &imageTag c0029 + +csc_collector: + namespace: *ns + siteTag: tucson + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + +dimm1-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/dimm + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 1 --simulate diff --git a/applications/eas/values.yaml b/applications/eas/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/eas/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml new file mode 
100644 index 0000000000..189232c046 --- /dev/null +++ b/environments/templates/auxtel-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.auxtel.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: auxtel +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: auxtel + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: auxtel + server: https://kubernetes.default.svc + project: default + source: + path: applications/auxtel + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/templates/eas-application.yaml b/environments/templates/eas-application.yaml new file mode 100644 index 0000000000..5666d2170c --- /dev/null +++ b/environments/templates/eas-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.eas.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: eas +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: eas + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: eas + server: https://kubernetes.default.svc + project: default + source: + path: applications/eas + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} 
diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 03e0136f74..8a9ce36fa9 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -4,6 +4,8 @@ vaultPathPrefix: secret/k8s_operator/tucson-teststand.lsst.codes applications: argo-workflows: true + auxtel: true + eas: true exposurelog: true narrativelog: true nublado: true diff --git a/environments/values.yaml b/environments/values.yaml index 11295fd367..f1c81777b1 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -36,6 +36,9 @@ applications: # environments and is present here only because it makes parsing easier argocd: true + # -- Eanble the auxtel control system application + auxtel: false + # -- Enable the butler application butler: false @@ -46,6 +49,9 @@ applications: # -- Eanble the datalinker application datalinker: false + # -- Enable the eas control system application + eas: false + # -- Enable the exposurelog application exposurelog: false diff --git a/shared/charts/csc/Chart.yaml b/shared/charts/csc/Chart.yaml new file mode 100644 index 0000000000..39e973dab7 --- /dev/null +++ b/shared/charts/csc/Chart.yaml @@ -0,0 +1,4 @@ +name: csc +apiVersion: v2 +version: 1.0.0 +description: A Helm chart for deploying the Control System CSCs. \ No newline at end of file diff --git a/shared/charts/csc/README.md b/shared/charts/csc/README.md new file mode 100644 index 0000000000..c9ec6871e0 --- /dev/null +++ b/shared/charts/csc/README.md @@ -0,0 +1,33 @@ +# csc + +A Helm chart for deploying the Control System CSCs. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | This specifies the scheduling constraints of the pod | +| annotations | object | `{}` | This allows the specification of pod annotations | +| butlerSecret | object | `{}` | This key allows for specification of Butler secret information. 
**NOTE**: You must fill out a _secretPermFixer_ entry in addition. If this section is used, it must contain the following attributes: _containerPath_ (The directory location for the Butler secret), _dbUser_ (The username for the Butler backend database) | +| configfile | object | `{}` | This key allows specification of a YAML configuration file If this section is used, it must contain the following attributes defined: _path_ (The container path for the configuration file), _filename_ (The configuration file name), _content_ (The YAML content for the configuration file) | +| enabled | bool | `false` | Flag to enable the given CSC application | +| entrypoint | string | `nil` | This key allows specification of a script to override the entrypoint | +| env | object | `{}` | This is the namespace in which the CSC will be placed | +| envSecrets | list | `[]` | This section holds specifications for secret injection. If this section is used, each object listed must have the following attributes defined: _name_ (The label for the secret), _secretName_ (The name of the vault store reference. Uses the _namespace_ attribute to construct the full name), _secretKey_ (The key in the vault store containing the necessary secret) | +| image.pullPolicy | string | `"IfNotPresent"` | The policy to apply when pulling an image for deployment | +| image.repository | string | `"lsstts/test"` | The Docker registry name of the container image to use for the CSC | +| image.tag | string | `"develop"` | The tag of the container image to use for the CSC | +| imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | +| isPrimary | bool | `true` | This marks the CSC as the primary object to sync upon system starts. 
This is set to false when two CSCs of the same flavor are deployed (one real, one simulator) to mark the simulator so it can be filtered out for automatic syncing. | +| nameOverride | string | `""` | Provide an alternate name for the application | +| namespace | string | `""` | Namespace for the given CSC application | +| nfsMountpoint | list | `[]` | This section holds the information necessary to create a NFS mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _path_ (The path inside the container to mount), _readOnly_ (This sets if the NFS mount is read only or read/write), _server_ (The hostname of the NFS server), _serverPath_ (The path exported by the NFS server) | +| nodeSelector | object | `{}` | This allows the specification of using specific nodes to run the pod | +| pvcMountpoint | list | `[]` | This section holds the information necessary to create a volume mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _path_ (The path inside the container to mount), _accessMode_ (This sets the required access mode for the volume mount), _claimSize_ (The requested physical disk space size for the volume mount), _storageClass_ (The Kubernetes provided storage class), _ids.uid_ (OPTIONAL: An alternative UID for mounting), _ids.gid_ (OPTIONAL: An alternative GID for mounting) | +| resources | object | `{}` | This allows the specification of resources (CPU, memory) requires to run the container | +| secretPermFixer | object | `{}` | This section sets the optional use of an init container for fixing permissions on secret files. 
If this section is used, each object listed can have the necessary attributes specified: _name_ (The label used for the init container) _containerPath_ (The path in the container where the secret files will be stored) _secretName_ (OPTIONAL: The secret name if different from _name_) _specialInstructions_ (OPTIONAL: This allows for optional instructions to be used when fixing permissions) | +| securityContext | object | `{}` | This key allows for the specification of a pod security context for volumes. If this section is used, it must contain the following attributes: _user_ (The user id for the volumes) _group_ (The group id for the volumes) _fsGroup_ (OPTIONAL: A special supplemental group that applies to all containers in a pod) | +| service.port | int | `nil` | The port number to use for the Service. | +| service.type | string | `nil` | The Service type for the application. This is either ClusterIP (internal access) or LoadBalancer (external access) | +| service.use | bool | `false` | This sets the use of a Service API for the application | +| tolerations | list | `[]` | This specifies the tolerations of the pod for any system taints | diff --git a/shared/charts/csc/templates/_helpers.tpl b/shared/charts/csc/templates/_helpers.tpl new file mode 100644 index 0000000000..de91e15465 --- /dev/null +++ b/shared/charts/csc/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "chart.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- include "chart.name" -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chart.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the CSC name by removing sim tag. +*/}} +{{- define "csc.name" -}} +{{- if contains "sim" .Chart.Name -}} +{{- .Chart.Name | splitList "-" | first -}} +{{- else -}} +{{- .Chart.Name -}} +{{- end -}} +{{- end -}} + +{{/* +Create the CSC class name by removing sim tag and index. +*/}} +{{- define "csc.class" -}} +{{- $protectedApps := list "mtm2" "mtm1m3" -}} +{{- $name := .Chart.Name -}} +{{- if contains "sim" .Chart.Name -}} +{{- $name = $name | splitList "-" | first -}} +{{- end -}} +{{- $checkForIndex := list -}} +{{- if not (has $name $protectedApps) -}} +{{- $checkForIndex = regexFindAll "[0-9]+$" $name -1 -}} +{{- end -}} +{{- if $checkForIndex -}} +{{- $index := first $checkForIndex -}} +{{- $name = regexReplaceAll $index $name "" -}} +{{- end -}} +{{- $name -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "csc.labels" -}} +helm.sh/chart: {{ .Chart.Name }} +{{ include "csc.selectorLabels" . }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "csc.selectorLabels" -}} +csc: {{ include "chart.name" . }} +csc-name: {{ include "csc.name" . }} +csc-class: {{ include "csc.class" . }} +csc-is-primary: {{ .Values.isPrimary }} +{{- end -}} diff --git a/shared/charts/csc/templates/configfile-configmap.yaml b/shared/charts/csc/templates/configfile-configmap.yaml new file mode 100644 index 0000000000..63fe6ca33c --- /dev/null +++ b/shared/charts/csc/templates/configfile-configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.configfile }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "chart.name" . 
}}-configfile + namespace: {{ .Values.namespace }} +data: + {{ .Values.configfile.filename }}: +{{ .Values.configfile.content | toYaml | indent 4 }} +{{- end }} + diff --git a/shared/charts/csc/templates/entrypoint-configmap.yaml b/shared/charts/csc/templates/entrypoint-configmap.yaml new file mode 100644 index 0000000000..5a9597536e --- /dev/null +++ b/shared/charts/csc/templates/entrypoint-configmap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.entrypoint }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "chart.name" . }}-entrypoint + namespace: {{ .Values.namespace }} +data: + .startup.sh: +{{ .Values.entrypoint | toYaml | indent 4 }} +{{- end }} diff --git a/shared/charts/csc/templates/job.yaml b/shared/charts/csc/templates/job.yaml new file mode 100644 index 0000000000..125fa24ff2 --- /dev/null +++ b/shared/charts/csc/templates/job.yaml @@ -0,0 +1,181 @@ +{{- if .Values.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "chart.name" . }} + namespace: {{ .Values.namespace }} + labels: + {{- include "csc.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + completions: 1 + template: + metadata: + labels: + {{- include "csc.selectorLabels" . | nindent 8 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - name: {{ include "csc.class" . 
}} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + stdin: true + tty: true + envFrom: + - configMapRef: + name: csc-env-config + - secretRef: + name: ts-salkafka + {{- if or .Values.env .Values.envSecrets }} + env: + {{- range $env_var, $env_value := .Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := .Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if .Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ .Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ .Values.butlerSecret.dbUser | quote }} + {{- end }} + volumeMounts: + {{- if .Values.entrypoint }} + - name: entrypoint + mountPath: /home/saluser/.startup.sh + subPath: .startup.sh + {{- end }} + {{- if .Values.configfile }} + - name: configfile + mountPath: {{ .Values.configfile.path }}/{{ .Values.configfile.filename }} + subPath: {{ .Values.configfile.filename }} + {{- end }} + {{- if .Values.secretPermFixer }} + {{- range $values := .Values.secretPermFixer }} + - name: {{ include "chart.name" . 
}}-{{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- end }} + {{- end }} + {{- if .Values.pvcMountpoint }} + {{- range $values := .Values.pvcMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.path }} + {{- end}} + {{- end}} + {{- if .Values.nfsMountpoint }} + {{- range $values := .Values.nfsMountpoint }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end}} + {{- end}} + {{- with $.Values.resources }} + resources: + {{- toYaml $.Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.secretPermFixer }} + initContainers: + {{- if .Values.secretPermFixer }} + {{- range $values := .Values.secretPermFixer }} + - name: {{ include "chart.name" . }}-{{ $values.name }}-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + {{- if $values.specialInstructions }} + {{- toYaml $values.specialInstructions | nindent 14 }} + {{- end }} + volumeMounts: + - name: {{ include "chart.name" . }}-raw-{{ $values.name }} + mountPath: /secrets-raw + readOnly: true + - name: {{ include "chart.name" . 
}}-{{ $values.name }} + mountPath: /secrets + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.securityContext }} + securityContext: + runAsUser: {{ .Values.securityContext.user }} + runAsGroup: {{ .Values.securityContext.group }} + {{- if .Values.securityContext.fsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end}} + {{- end }} + volumes: + {{- if .Values.entrypoint }} + - name: entrypoint + configMap: + name: {{ .Release.Name }}-entrypoint + defaultMode: 0755 + items: + - key: .startup.sh + path: .startup.sh + {{- end }} + {{- if .Values.configfile }} + - name: configfile + configMap: + name: {{ .Release.Name }}-configfile + items: + - key: {{ .Values.configfile.filename }} + path: {{ .Values.configfile.filename }} + {{- end }} + {{- if .Values.secretPermFixer }} + {{- range $values := .Values.secretPermFixer }} + - name: {{ include "chart.name" . }}-{{ $values.name }} + emptyDir: {} + - name: {{ include "chart.name" . }}-raw-{{ $values.name }} + secret: + secretName: {{ $.Values.namespace }}-{{ or $values.secretName $values.name }} + defaultMode: 0600 + {{- end }} + {{- end }} + {{- if .Values.pvcMountpoint }} + {{- range $values := .Values.pvcMountpoint }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ include "chart.name" . 
}}-{{ $values.name }}-pvc + {{- end }} + {{- end }} + {{- if .Values.nfsMountpoint }} + {{- range $values := .Values.nfsMountpoint }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + restartPolicy: Never + imagePullSecrets: + - name: nexus3-docker + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/shared/charts/csc/templates/mountpoint-pvc.yaml b/shared/charts/csc/templates/mountpoint-pvc.yaml new file mode 100644 index 0000000000..ccc9ca871a --- /dev/null +++ b/shared/charts/csc/templates/mountpoint-pvc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.pvcMountpoint }} +{{- range $values := .Values.pvcMountpoint }} +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "chart.name" . }}-{{ $values.name }}-pvc + namespace: {{ .Values.namespace }} + {{- if $values.ids }} + annotations: + {{- if $values.ids.uid }} + pv.beta.kubernetes.io/uid: "{{ $values.ids.uid }}" + {{- end }} + {{- if $values.ids.gid }} + pv.beta.kubernetes.io/gid: "{{ $values.ids.gid }}" + {{- end }} + {{- end }} +spec: + accessModes: + - {{ $values.accessMode | quote }} + resources: + requests: + storage: {{ $values.claimSize }} + storageClassName: {{ $values.storageClass }} +{{- end }} +{{- end }} diff --git a/shared/charts/csc/templates/service.yaml b/shared/charts/csc/templates/service.yaml new file mode 100644 index 0000000000..298a1f80bf --- /dev/null +++ b/shared/charts/csc/templates/service.yaml @@ -0,0 +1,15 @@ +{{- if .Values.service.use }} +apiVersion: v1 +kind: Service +metadata: + labels: + csc: {{ include "chart.name" . }} + name: {{ include "chart.name" . 
}}-service + namespace: {{ .Values.namespace }} +spec: + ports: + - port: {{ .Values.service.port }} + selector: + csc: {{ include "chart.name" . }} + type: {{ .Values.service.type }} +{{- end }} diff --git a/shared/charts/csc/values.yaml b/shared/charts/csc/values.yaml new file mode 100644 index 0000000000..ed0406f841 --- /dev/null +++ b/shared/charts/csc/values.yaml @@ -0,0 +1,93 @@ +# -- Flag to enable the given CSC application +enabled: false +# -- Namespace for the given CSC application +namespace: "" +# -- Provide an alternate name for the application +nameOverride: "" +# -- This marks the CSC as the primary object to sync upon system starts. +# This is set to false when two CSCs of the same flavor are deployed (one +# real, one simulator) to mark the simulator so it can be filtered out for +# automatic syncing. +isPrimary: true +image: + # -- The Docker registry name of the container image to use for the CSC + repository: lsstts/test + # -- The tag of the container image to use for the CSC + tag: develop + # -- The policy to apply when pulling an image for deployment + pullPolicy: IfNotPresent +# -- The list of pull secrets needed for the images. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (The label identifying the pull-secret to use) +imagePullSecrets: [] +# -- This is the namespace in which the CSC will be placed +env: {} +# -- This section holds specifications for secret injection. +# If this section is used, each object listed must have the following attributes defined: +# _name_ (The label for the secret), +# _secretName_ (The name of the vault store reference. 
Uses the _namespace_ attribute to construct the full name), +# _secretKey_ (The key in the vault store containing the necessary secret) +envSecrets: [] +# -- This key allows specification of a script to override the entrypoint +entrypoint: +# -- This key allows specification of a YAML configuration file +# If this section is used, it must contain the following attributes defined: +# _path_ (The container path for the configuration file), +# _filename_ (The configuration file name), +# _content_ (The YAML content for the configuration file) +configfile: {} +# -- This key allows for specification of Butler secret information. +# **NOTE**: You must fill out a _secretPermFixer_ entry in addition. +# If this section is used, it must contain the following attributes: +# _containerPath_ (The directory location for the Butler secret), +# _dbUser_ (The username for the Butler backend database) +butlerSecret: {} +# -- This key allows for the specification of a pod security context for volumes. +# If this section is used, it must contain the following attributes: +# _user_ (The user id for the volumes) +# _group_ (The group id for the volumes) +# _fsGroup_ (OPTIONAL: A special supplemental group that applies to all containers in a pod) +securityContext: {} +# -- This section holds the information necessary to create a volume mount for the container. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (A label identifier for the mountpoint), +# _path_ (The path inside the container to mount), +# _accessMode_ (This sets the required access mode for the volume mount), +# _claimSize_ (The requested physical disk space size for the volume mount), +# _storageClass_ (The Kubernetes provided storage class), +# _ids.uid_ (OPTIONAL: An alternative UID for mounting), +# _ids.gid_ (OPTIONAL: An alternative GID for mounting) +pvcMountpoint: [] +# -- This section holds the information necessary to create a NFS mount for the container. 
+# If this section is used, each object listed can have the following attributes defined: +# _name_ (A label identifier for the mountpoint), +# _path_ (The path inside the container to mount), +# _readOnly_ (This sets if the NFS mount is read only or read/write), +# _server_ (The hostname of the NFS server), +# _serverPath_ (The path exported by the NFS server) +nfsMountpoint: [] +# -- This allows the specification of pod annotations +annotations: {} +# -- This section sets the optional use of an init container for fixing permissions on secret files. +# If this section is used, each object listed can have the necessary attributes specified: +# _name_ (The label used for the init container) +# _containerPath_ (The path in the container where the secret files will be stored) +# _secretName_ (OPTIONAL: The secret name if different from _name_) +# _specialInstructions_ (OPTIONAL: This allows for optional instructions to be used when fixing permissions) +secretPermFixer: {} +service: + # -- (bool) This sets the use of a Service API for the application + use: false + # -- (int) The port number to use for the Service. + port: + # -- (string) The Service type for the application. + # This is either ClusterIP (internal access) or LoadBalancer (external access) + type: +# -- This allows the specification of resources (CPU, memory) requires to run the container +resources: {} +# -- This allows the specification of using specific nodes to run the pod +nodeSelector: {} +# -- This specifies the tolerations of the pod for any system taints +tolerations: [] +# -- This specifies the scheduling constraints of the pod +affinity: {} diff --git a/shared/charts/csc_collector/Chart.yaml b/shared/charts/csc_collector/Chart.yaml new file mode 100644 index 0000000000..7eb70158e4 --- /dev/null +++ b/shared/charts/csc_collector/Chart.yaml @@ -0,0 +1,4 @@ +name: csc_collector +apiVersion: v2 +version: 1.0.0 +description: A Helm chart provided shared information for Control System CSCs. 
\ No newline at end of file diff --git a/shared/charts/csc_collector/README.md b/shared/charts/csc_collector/README.md new file mode 100644 index 0000000000..c78a46d05a --- /dev/null +++ b/shared/charts/csc_collector/README.md @@ -0,0 +1,11 @@ +# csc_collector + +A Helm chart provided shared information for Control System CSCs. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| namespace | string | `""` | Namespace for shared CSC resources. | +| secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| siteTag | string | `""` | The site-specific name used for handling configurable CSCs | diff --git a/shared/charts/csc_collector/templates/configmap-env.yaml b/shared/charts/csc_collector/templates/configmap-env.yaml new file mode 100644 index 0000000000..01291c42c6 --- /dev/null +++ b/shared/charts/csc_collector/templates/configmap-env.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: csc-env-configfile +data: + LSST_SITE: {{ .Values.siteTag }} + LSST_KAFKA_BROKER_ADDR: {{ .Values.kafkaBrokerAddress }} + LSST_SCHEMA_REGISTRY_URL: {{ .Values.schemaRegistryUrl }} + TS_SALKAFKA_USERNAME: ts-salkafka diff --git a/shared/charts/csc_collector/templates/vault-secret.yaml b/shared/charts/csc_collector/templates/vault-secret.yaml new file mode 100644 index 0000000000..6e9fbe351b --- /dev/null +++ b/shared/charts/csc_collector/templates/vault-secret.yaml @@ -0,0 +1,11 @@ +{{- range $secret := .Values.secrets }} +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ $secret.name }} + namespace: {{ $.Values.namespace }} +spec: + path: {{ $.Values.global.vaultSecretsPath }}/{{ $secret.key }} + type: {{ default "Opaque" $secret.type }} +{{- end }} 
diff --git a/shared/charts/csc_collector/values.yaml b/shared/charts/csc_collector/values.yaml new file mode 100644 index 0000000000..1df70eb976 --- /dev/null +++ b/shared/charts/csc_collector/values.yaml @@ -0,0 +1,12 @@ +# -- Namespace for shared CSC resources. +namespace: "" + +# -- The site-specific name used for handling configurable CSCs +siteTag: "" + +# -- This section holds secret specifications. +# Each object listed can have the following attributes defined: +# _name_ (The name used by pods to access the secret) +# _key_ (The key in the vault store where the secret resides) +# _type_ (OPTIONAL: The secret type. Defaults to Opaque.) +secrets: [] diff --git a/shared/values/values_control_system_apps.yaml b/shared/values/values_control_system_apps.yaml new file mode 100644 index 0000000000..ca2d36e455 --- /dev/null +++ b/shared/values/values_control_system_apps.yaml @@ -0,0 +1,31 @@ +# The namespace for the application +namespace: "" + +# -- The default image tag for all of the child applications +imageTag: "" + +csc_collector: + # -- The site-specific name used for handling configurable CSCs + siteTag: "" + + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" From e6ae307b67365eb0cc2f923b4161f4860126391b Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 18 Apr 2023 17:46:21 -0700 Subject: [PATCH 477/588] Finish up TTS auxtel work. --- applications/auxtel/Chart.yaml | 32 ++++ .../auxtel/charts/hexapod-sim/Chart.yaml | 4 + .../auxtel/charts/hexapod-sim/README.md | 14 ++ .../hexapod-sim/templates/deployment.yaml | 29 ++++ .../charts/hexapod-sim/templates/service.yaml | 16 ++ .../auxtel/charts/hexapod-sim/values.yaml | 12 ++ .../auxtel/values-tucson-teststand.yaml | 155 ++++++++++++++++++ shared/charts/csc/templates/job.yaml | 14 +- 8 files changed, 269 insertions(+), 7 deletions(-) create mode 100644 applications/auxtel/charts/hexapod-sim/Chart.yaml create mode 100644 applications/auxtel/charts/hexapod-sim/README.md create mode 100644 applications/auxtel/charts/hexapod-sim/templates/deployment.yaml create mode 100644 applications/auxtel/charts/hexapod-sim/templates/service.yaml create mode 100644 applications/auxtel/charts/hexapod-sim/values.yaml diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index fff785cbb6..7267774587 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -5,6 +5,38 @@ description: Deployment for the Auxiliary Telescope CSCs dependencies: - name: csc_collector version: 1.0.0 + - name: hexapod-sim + version: 1.0.0 - name: csc alias: ataos version: 1.0.0 + - name: csc + alias: atdome + version: 1.0.0 + - name: csc + alias: atdome-sim + version: 1.0.0 + - name: csc + alias: atdometrajectory + version: 1.0.0 + - name: csc + alias: atheaderservice + version: 1.0.0 + - name: csc + alias: athexapod + version: 1.0.0 + - name: csc + alias: athexapod-sim + version: 1.0.0 + - name: csc + alias: 
atoods + version: 1.0.0 + - name: csc + alias: atptg + version: 1.0.0 + - name: csc + alias: atspectrograph + version: 1.0.0 + - name: csc + alias: atspectrograph-sim + version: 1.0.0 diff --git a/applications/auxtel/charts/hexapod-sim/Chart.yaml b/applications/auxtel/charts/hexapod-sim/Chart.yaml new file mode 100644 index 0000000000..5dd5941e44 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +description: Chart for the hexapod simulator that supports the ATHexapod +name: hexapod-sim +version: 1.0.0 diff --git a/applications/auxtel/charts/hexapod-sim/README.md b/applications/auxtel/charts/hexapod-sim/README.md new file mode 100644 index 0000000000..0fae7a3137 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/README.md @@ -0,0 +1,14 @@ +# hexapod-sim + +Chart for the hexapod simulator that supports the ATHexapod + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| enabled | bool | `false` | Flag to enable the given CSC application | +| image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | +| image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | +| image.repository | string | `"ts-dockerhub.lsst.org/hexapod_simulator"` | The Docker registry name of the container image | +| image.tag | string | `"latest"` | The tag of the container image | +| namespace | string | `"auxtel"` | This is the namespace in which the hexapod controller simulator will be placed | diff --git a/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml b/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml new file mode 100644 index 0000000000..1461b78ba9 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml @@ -0,0 +1,29 @@ +{{- if .Values.enabled -}} +apiVersion: apps/v1 
+kind: Deployment +metadata: + name: {{ .Release.Name }} + namespace: {{ .Values.namespace }} + labels: + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }} + app.kubernetes.io/instance: {{ $.Release.Name }} + template: + metadata: + labels: + app: {{ .Release.Name }} + app.kubernetes.io/instance: {{ $.Release.Name }} + spec: + containers: + - name: {{ .Release.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + stdin: true + tty: true + imagePullSecrets: + - name: nexus3-docker +{{- end -}} diff --git a/applications/auxtel/charts/hexapod-sim/templates/service.yaml b/applications/auxtel/charts/hexapod-sim/templates/service.yaml new file mode 100644 index 0000000000..3c3d375ead --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/templates/service.yaml @@ -0,0 +1,16 @@ +{{- if .Values.enabled -}} +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ .Release.Name }} + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 50000 + targetPort: 50000 + selector: + app.kubernetes.io/instance: {{ .Release.Name }} + type: ClusterIP +{{- end -}} diff --git a/applications/auxtel/charts/hexapod-sim/values.yaml b/applications/auxtel/charts/hexapod-sim/values.yaml new file mode 100644 index 0000000000..699b4dd6b2 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/values.yaml @@ -0,0 +1,12 @@ +# -- Flag to enable the given CSC application +enabled: false +# -- This is the namespace in which the hexapod controller simulator will be placed +namespace: auxtel +# -- This section holds the configuration of the container image +image: + # -- The Docker registry name of the container image + repository: ts-dockerhub.lsst.org/hexapod_simulator + # -- The tag of the container image + tag: latest + # -- The policy to apply when pulling an image for deployment + 
pullPolicy: Always diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index e8dae24ec6..0668248180 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -25,3 +25,158 @@ ataos: repository: ts-dockerhub.lsst.org/ataos tag: *imageTag pullPolicy: Always + +atdome-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/atdome + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate + +atdometrajectory: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/atdometrajectory + tag: *imageTag + pullPolicy: Always + +atheaderservice: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/headerservice + tag: ts-v3.1.11_c0029 + pullPolicy: Always + env: + URL_SPEC: --lfa_mode s3 --s3instance tuc + S3_ENDPOINT_URL: https://s3.tu.lsst.org + TSTAND_HEADERSERVICE: TUCSON + CAMERA: at + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + +athexapod-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/athexapod + tag: *imageTag + pullPolicy: Always + +atoods: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/atoods + tag: *imageTag + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + CTRL_OODS_CONFIG_FILE: /etc/atoods.yaml + butlerSecret: + containerPath: &bS-cP /home/saluser/.lsst + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *bS-cP + nfsMountpoint: + - name: auxtel-gen3-butler + containerPath: /repo/LATISS + readOnly: false + server: auxtel-archiver.tu.lsst.org + serverPath: 
/repo/LATISS + - name: auxtel-oods-data + containerPath: /data + readOnly: false + server: auxtel-archiver.tu.lsst.org + serverPath: /data + configfile: + path: /etc + filename: atoods.yaml + content: | + defaultInterval: &interval + days: 0 + hours: 0 + minutes: 0 + seconds: 0 + + ingester: + imageStagingDirectory: /data/staging/auxtel/oods + butlers: + - butler: + instrument: lsst.obs.lsst.Latiss + class: + import : lsst.ctrl.oods.gen3ButlerIngester + name : Gen3ButlerIngester + stagingDirectory : /data/lsstdata/TTS/auxtel/oods/gen3butler/raw + badFileDirectory: /data/lsstdata/TTS/auxtel/oods/gen3butler/badfiles + repoDirectory : /repo/LATISS + collections: + - LATISS/raw/all + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 30 + batchSize: 20 + scanInterval: + <<: *interval + seconds: 2 + + cacheCleaner: + # ONLY clean out empty directories here, never files + clearEmptyDirectories: + - /data/lsstdata/TTS/auxtel/oods/gen3butler/raw + # clean out empty directories and old files from these directories + clearEmptyDirectoriesAndOldFiles: + - /data/lsstdata/TTS/auxtel/oods/gen3butler/badfiles + - /data/staging/auxtel/oods + - /data/staging/auxtel/forwarder + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 31 + directoriesEmptyForMoreThan: + <<: *interval + days: 2 + +atptg: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/ptkernel + tag: *imageTag + pullPolicy: Always + env: + TELESCOPE: AT + +atspectrograph-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/atspec + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate + +hexapod-sim: + enabled: true diff --git a/shared/charts/csc/templates/job.yaml b/shared/charts/csc/templates/job.yaml index 125fa24ff2..32763b8fc9 100644 --- a/shared/charts/csc/templates/job.yaml +++ b/shared/charts/csc/templates/job.yaml @@ -65,7 +65,7 @@ spec: {{- end }} {{- if .Values.secretPermFixer }} 
{{- range $values := .Values.secretPermFixer }} - - name: {{ include "chart.name" . }}-{{ $values.name }} + - name: {{ include "chart.name" $ }}-{{ $values.name }} mountPath: {{ $values.containerPath }} {{- end }} {{- end }} @@ -90,7 +90,7 @@ spec: initContainers: {{- if .Values.secretPermFixer }} {{- range $values := .Values.secretPermFixer }} - - name: {{ include "chart.name" . }}-{{ $values.name }}-perm-fixer + - name: {{ include "chart.name" $ }}-{{ $values.name }}-perm-fixer image: "alpine:latest" command: - "/bin/ash" @@ -103,10 +103,10 @@ spec: {{- toYaml $values.specialInstructions | nindent 14 }} {{- end }} volumeMounts: - - name: {{ include "chart.name" . }}-raw-{{ $values.name }} + - name: {{ include "chart.name" $ }}-raw-{{ $values.name }} mountPath: /secrets-raw readOnly: true - - name: {{ include "chart.name" . }}-{{ $values.name }} + - name: {{ include "chart.name" $ }}-{{ $values.name }} mountPath: /secrets {{- end }} {{- end }} @@ -139,9 +139,9 @@ spec: {{- end }} {{- if .Values.secretPermFixer }} {{- range $values := .Values.secretPermFixer }} - - name: {{ include "chart.name" . }}-{{ $values.name }} + - name: {{ include "chart.name" $ }}-{{ $values.name }} emptyDir: {} - - name: {{ include "chart.name" . }}-raw-{{ $values.name }} + - name: {{ include "chart.name" $ }}-raw-{{ $values.name }} secret: secretName: {{ $.Values.namespace }}-{{ or $values.secretName $values.name }} defaultMode: 0600 @@ -151,7 +151,7 @@ spec: {{- range $values := .Values.pvcMountpoint }} - name: {{ $values.name }} persistentVolumeClaim: - claimName: {{ include "chart.name" . }}-{{ $values.name }}-pvc + claimName: {{ include "chart.name" $ }}-{{ $values.name }}-pvc {{- end }} {{- end }} {{- if .Values.nfsMountpoint }} From 200bb09acdbaa28dab0d55270e5d71543d0cbd3c Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 10 May 2023 11:00:02 -0700 Subject: [PATCH 478/588] Finish eas work. 
--- .../auxtel/values-tucson-teststand.yaml | 2 +- applications/eas/Chart.yaml | 91 ++++++++- applications/eas/README.md | 2 +- applications/eas/values-tucson-teststand.yaml | 176 ++++++++++++++++++ shared/charts/csc/templates/_helpers.tpl | 11 +- 5 files changed, 273 insertions(+), 9 deletions(-) diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index 0668248180..d21a8ffbd9 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -6,6 +6,7 @@ csc_collector: siteTag: tucson kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + s3EndpointUrl: https://s3.tu.lsst.org secrets: - name: nexus3-docker @@ -53,7 +54,6 @@ atheaderservice: pullPolicy: Always env: URL_SPEC: --lfa_mode s3 --s3instance tuc - S3_ENDPOINT_URL: https://s3.tu.lsst.org TSTAND_HEADERSERVICE: TUCSON CAMERA: at envSecrets: diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml index c755f290c9..b09c1e206e 100644 --- a/applications/eas/Chart.yaml +++ b/applications/eas/Chart.yaml @@ -1,13 +1,100 @@ apiVersion: v2 name: eas version: 1.0.0 -description: Deployment for the Environmental Awareness Sytems CSCs +description: Deployment for the Environmental Awareness Systems CSCs dependencies: - name: csc_collector version: 1.0.0 - name: csc - alias: dimm + alias: auxtel-ess01 + version: 1.0.0 + - name: csc + alias: auxtel-ess01-sim + version: 1.0.0 + - name: csc + alias: auxtel-ess02 + version: 1.0.0 + - name: csc + alias: auxtel-ess02-sim + version: 1.0.0 + - name: csc + alias: auxtel-ess03 + version: 1.0.0 + - name: csc + alias: auxtel-ess03-sim + version: 1.0.0 + - name: csc + alias: auxtel-ess04 + version: 1.0.0 + - name: csc + alias: auxtel-ess04-sim + version: 1.0.0 + - name: csc + alias: calibhill-ess01 + version: 1.0.0 + - name: csc + alias: calibhill-ess01-sim + version: 1.0.0 + - name: 
csc + alias: dimm1 version: 1.0.0 - name: csc alias: dimm1-sim version: 1.0.0 + - name: csc + alias: dimm2 + version: 1.0.0 + - name: csc + alias: dimm2-sim + version: 1.0.0 + - name: csc + alias: dsm1 + version: 1.0.0 + - name: csc + alias: dsm1-sim + version: 1.0.0 + - name: csc + alias: dsm2 + version: 1.0.0 + - name: csc + alias: dsm2-sim + version: 1.0.0 + - name: csc + alias: mtdome-ess01 + version: 1.0.0 + - name: csc + alias: mtdome-ess01-sim + version: 1.0.0 + - name: csc + alias: mtdome-ess02 + version: 1.0.0 + - name: csc + alias: mtdome-ess02-sim + version: 1.0.0 + - name: csc + alias: mtdome-ess03 + version: 1.0.0 + - name: csc + alias: mtdome-ess03-sim + version: 1.0.0 + - name: csc + alias: tma-ess01 + version: 1.0.0 + - name: csc + alias: tma-ess01-sim + version: 1.0.0 + - name: csc + alias: tma-ess104 + version: 1.0.0 + - name: csc + alias: tma-ess104-sim + version: 1.0.0 + - name: csc + alias: tma-ess105 + version: 1.0.0 + - name: csc + alias: tma-ess105-sim + version: 1.0.0 + - name: csc + alias: weatherforecast + version: 1.0.0 diff --git a/applications/eas/README.md b/applications/eas/README.md index 3fc769c5c6..e3ddb09329 100644 --- a/applications/eas/README.md +++ b/applications/eas/README.md @@ -1,6 +1,6 @@ # eas -Deployment for the Environmental Awareness Sytems CSCs +Deployment for the Environmental Awareness Systems CSCs ## Values diff --git a/applications/eas/values-tucson-teststand.yaml b/applications/eas/values-tucson-teststand.yaml index 5be425bf5a..82646ed4f0 100644 --- a/applications/eas/values-tucson-teststand.yaml +++ b/applications/eas/values-tucson-teststand.yaml @@ -7,6 +7,70 @@ csc_collector: kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: meteoblue + key: ts/software/meteoblue + 
+auxtel-ess01-sim: + enabled: true + namespace: *ns + classifier: ess201 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 201 --simulate + +auxtel-ess02-sim: + enabled: true + namespace: *ns + classifier: ess202 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 202 --simulate + +auxtel-ess03-sim: + enabled: true + namespace: *ns + classifier: ess203 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 203 --simulate + +auxtel-ess04-sim: + enabled: true + namespace: *ns + classifier: ess204 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 204 --simulate + +calibhill-ess01-sim: + enabled: true + namespace: *ns + classifier: ess301 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 301 --simulate + dimm1-sim: enabled: true namespace: *ns @@ -16,3 +80,115 @@ dimm1-sim: pullPolicy: Always env: RUN_ARG: 1 --simulate + +dimm2-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/dimm + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 2 --simulate + +dsm1-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/dsm + tag: *imageTag + pullPolicy: Always + env: + CSC_INDEX: 1 + RUN_ARG: --simulate 1 --state enabled + +dsm2-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/dsm + tag: *imageTag + pullPolicy: Always + env: + CSC_INDEX: 2 + RUN_ARG: --simulate 2 --state enabled + +mtdome-ess01-sim: + enabled: true + namespace: *ns + classifier: ess101 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 101 --simulate + +mtdome-ess02-sim: + enabled: true + namespace: *ns + classifier: ess102 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + 
pullPolicy: Always + env: + RUN_ARG: 102 --simulate + +mtdome-ess03-sim: + enabled: true + namespace: *ns + classifier: ess103 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 103 --simulate + +tma-ess01-sim: + enabled: true + namespace: *ns + classifier: ess1 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 1 --simulate + +tma-ess104-sim: + enabled: true + namespace: *ns + classifier: ess104 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 104 --simulate + +tma-ess105-sim: + enabled: true + namespace: *ns + classifier: ess105 + image: + repository: ts-dockerhub.lsst.org/ess + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 105 --simulate + +weatherforecast: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/weatherforecast + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --state enabled + envSecrets: + - name: METEOBLUE_API_KEY + secretName: meteoblue + secretKey: api-key diff --git a/shared/charts/csc/templates/_helpers.tpl b/shared/charts/csc/templates/_helpers.tpl index de91e15465..21668ec926 100644 --- a/shared/charts/csc/templates/_helpers.tpl +++ b/shared/charts/csc/templates/_helpers.tpl @@ -30,10 +30,11 @@ Create chart name and version as used by the chart label. Create the CSC name by removing sim tag. */}} {{- define "csc.name" -}} -{{- if contains "sim" .Chart.Name -}} -{{- .Chart.Name | splitList "-" | first -}} +{{- $name := or .Values.classifier .Chart.Name -}} +{{- if contains "sim" $name -}} +{{- $name | splitList "-" | first -}} {{- else -}} -{{- .Chart.Name -}} +{{- $name -}} {{- end -}} {{- end -}} @@ -42,8 +43,8 @@ Create the CSC class name by removing sim tag and index. 
*/}} {{- define "csc.class" -}} {{- $protectedApps := list "mtm2" "mtm1m3" -}} -{{- $name := .Chart.Name -}} -{{- if contains "sim" .Chart.Name -}} +{{- $name := or .Values.classifier .Chart.Name -}} +{{- if contains "sim" $name -}} {{- $name = $name | splitList "-" | first -}} {{- end -}} {{- $checkForIndex := list -}} From 04d72e8002467bd6c0631b7d4b39c4b8d58c1b0a Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 10 May 2023 11:36:07 -0700 Subject: [PATCH 479/588] Add obssys app. --- applications/obssys/Chart.yaml | 25 +++ applications/obssys/README.md | 15 ++ applications/obssys/charts/csc | 1 + applications/obssys/charts/csc_collector | 1 + .../obssys/values-tucson-teststand.yaml | 207 ++++++++++++++++++ applications/obssys/values.yaml | 1 + .../templates/obssys-application.yaml | 37 ++++ environments/values-tucson-teststand.yaml | 1 + environments/values.yaml | 3 + .../templates/configmap-env.yaml | 1 + 10 files changed, 292 insertions(+) create mode 100644 applications/obssys/Chart.yaml create mode 100644 applications/obssys/README.md create mode 120000 applications/obssys/charts/csc create mode 120000 applications/obssys/charts/csc_collector create mode 100644 applications/obssys/values-tucson-teststand.yaml create mode 120000 applications/obssys/values.yaml create mode 100644 environments/templates/obssys-application.yaml diff --git a/applications/obssys/Chart.yaml b/applications/obssys/Chart.yaml new file mode 100644 index 0000000000..e86d40a82f --- /dev/null +++ b/applications/obssys/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v2 +name: obssys +version: 1.0.0 +description: Deployment for the Observatory System CSCs +dependencies: + - name: csc_collector + version: 1.0.0 + - name: csc + alias: atqueue + version: 1.0.0 + - name: csc + alias: atscheduler + version: 1.0.0 + - name: csc + alias: authorize + version: 1.0.0 + - name: csc + alias: mtqueue + version: 1.0.0 + - name: csc + alias: mtscheduler + version: 1.0.0 + - name: csc + alias: watcher + 
version: 1.0.0 diff --git a/applications/obssys/README.md b/applications/obssys/README.md new file mode 100644 index 0000000000..189eac3b68 --- /dev/null +++ b/applications/obssys/README.md @@ -0,0 +1,15 @@ +# obssys + +Deployment for the Observatory System CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| imageTag | string | `""` | The default image tag for all of the child applications | +| namespace | string | `""` | | diff --git a/applications/obssys/charts/csc b/applications/obssys/charts/csc new file mode 120000 index 0000000000..3a423a6f5f --- /dev/null +++ b/applications/obssys/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/obssys/charts/csc_collector b/applications/obssys/charts/csc_collector new file mode 120000 index 0000000000..38853814a3 --- /dev/null +++ b/applications/obssys/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml new file mode 100644 index 0000000000..0231765cf6 --- /dev/null +++ b/applications/obssys/values-tucson-teststand.yaml @@ -0,0 +1,207 @@ +namespace: &ns obssys +imageTag: &imageTag c0029 + +csc_collector: + namespace: *ns 
+ siteTag: tucson + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + s3EndpointUrl: https://s3.tu.lsst.org + + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + - name: butler-secret + key: butler-secret + - name: love + key: ts/software/love + +atqueue: + enabled: true + namespace: *ns + classifier: scriptqueue2 + image: + repository: ts-dockerhub.lsst.org/scriptqueue + tag: *imageTag + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + RUN_ARG: 2 --state enabled + USER_USERNAME: user + butlerSecret: + containerPath: &bS-cP /home/saluser/.lsst + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *bS-cP + nfsMountpoint: + - name: auxtel-gen3-butler + containerPath: /repo/LATISS + readOnly: false + server: auxtel-archiver.tu.lsst.org + serverPath: /repo/LATISS + - name: auxtel-gen3-oods + containerPath: /data/lsstdata/TTS/auxtel + readOnly: true + server: auxtel-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/auxtel + - name: comcam-gen3-butler + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam + - name: comcam-gen3-oods + containerPath: /data/lsstdata/TTS/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/comcam + - name: project-shared + containerPath: /project + readOnly: false + server: nfs-project.tu.lsst.org + serverPath: /project + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env + +atscheduler: + enabled: true + namespace: *ns + classifier: scheduler2 + image: + repository: ts-dockerhub.lsst.org/scheduler + tag: *imageTag + pullPolicy: Always + env: + INDEX: 2 + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: 
lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + nfsMountpoint: + - name: rubin-sim-data + containerPath: /home/saluser/rubin_sim_data + readOnly: false + server: nfs-scratch.tu.lsst.org + serverPath: /scratch/scheduler + +authorize: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/authorize + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --state enabled + AUTHLIST_USER_NAME: authlist_user + envSecrets: + - name: AUTHLIST_USER_PASS + secretName: love + secretKey: authlist-user-pass + +mtqueue: + enabled: true + namespace: *ns + classifier: scriptqueue1 + image: + repository: ts-dockerhub.lsst.org/scriptqueue + tag: *imageTag + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + RUN_ARG: 1 --state enabled + USER_USERNAME: user + butlerSecret: + containerPath: &bS-cP /home/saluser/.lsst + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *bS-cP + nfsMountpoint: + - name: auxtel-gen3-butler + containerPath: /repo/LATISS + readOnly: false + server: auxtel-archiver.tu.lsst.org + serverPath: /repo/LATISS + - name: auxtel-gen3-oods + containerPath: /data/lsstdata/TTS/auxtel + readOnly: true + server: auxtel-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/auxtel + - name: comcam-gen3-butler + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam + - name: comcam-gen3-oods + containerPath: /data/lsstdata/TTS/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/comcam + - name: project-shared + containerPath: /project + readOnly: false + server: nfs-project.tu.lsst.org + serverPath: /project + - name: obs-env + containerPath: /net/obs-env + readOnly: true + 
server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env + +mtscheduler: + enabled: true + namespace: *ns + classifier: scheduler1 + image: + repository: ts-dockerhub.lsst.org/scheduler + tag: *imageTag + pullPolicy: Always + env: + INDEX: 1 + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + nfsMountpoint: + - name: rubin-sim-data + containerPath: /home/saluser/rubin_sim_data + readOnly: false + server: nfs-scratch.tu.lsst.org + serverPath: /scratch/scheduler + +watcher: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/watcher + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --state enabled diff --git a/applications/obssys/values.yaml b/applications/obssys/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/obssys/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml new file mode 100644 index 0000000000..f849e4fefb --- /dev/null +++ b/environments/templates/obssys-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.obssys.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: obssys +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: obssys + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: obssys + server: https://kubernetes.default.svc + project: default + source: + path: applications/obssys + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ 
.Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 8a9ce36fa9..f3eb187f92 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -9,6 +9,7 @@ applications: exposurelog: true narrativelog: true nublado: true + obssys: true portal: true sasquatch: true squareone: true diff --git a/environments/values.yaml b/environments/values.yaml index f1c81777b1..b15b867824 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -101,6 +101,9 @@ applications: # -- Enable the nublado application (v3 of the Notebook Aspect) nublado: false + # -- Enable the obssys control system application + obssys: false + # -- Enable the onepassword-connect application onepassword-connect: false diff --git a/shared/charts/csc_collector/templates/configmap-env.yaml b/shared/charts/csc_collector/templates/configmap-env.yaml index 01291c42c6..91c90737c6 100644 --- a/shared/charts/csc_collector/templates/configmap-env.yaml +++ b/shared/charts/csc_collector/templates/configmap-env.yaml @@ -6,4 +6,5 @@ data: LSST_SITE: {{ .Values.siteTag }} LSST_KAFKA_BROKER_ADDR: {{ .Values.kafkaBrokerAddress }} LSST_SCHEMA_REGISTRY_URL: {{ .Values.schemaRegistryUrl }} + S3_ENDPOINT_URL: {{ .Values.s3EndpointUrl }} TS_SALKAFKA_USERNAME: ts-salkafka From 65e3a042ef14cd871b22619fc77071872911b092 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 10 May 2023 14:07:54 -0700 Subject: [PATCH 480/588] Add calsys app. 
--- applications/calsys/Chart.yaml | 13 ++++ applications/calsys/README.md | 15 ++++ applications/calsys/charts/csc | 1 + applications/calsys/charts/csc_collector | 1 + .../calsys/values-tucson-teststand.yaml | 71 +++++++++++++++++++ applications/calsys/values.yaml | 1 + .../templates/calsys-application.yaml | 37 ++++++++++ environments/values-tucson-teststand.yaml | 1 + environments/values.yaml | 4 +- shared/charts/csc/templates/service.yaml | 9 ++- 10 files changed, 149 insertions(+), 4 deletions(-) create mode 100644 applications/calsys/Chart.yaml create mode 100644 applications/calsys/README.md create mode 120000 applications/calsys/charts/csc create mode 120000 applications/calsys/charts/csc_collector create mode 100644 applications/calsys/values-tucson-teststand.yaml create mode 120000 applications/calsys/values.yaml create mode 100644 environments/templates/calsys-application.yaml diff --git a/applications/calsys/Chart.yaml b/applications/calsys/Chart.yaml new file mode 100644 index 0000000000..d0150c7f0b --- /dev/null +++ b/applications/calsys/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: calsys +version: 1.0.0 +description: Deployment for the Calibration System CSCs +dependencies: + - name: csc_collector + version: 1.0.0 + - name: csc + alias: gcheaderservice1 + version: 1.0.0 + - name: csc + alias: simulation-gencam + version: 1.0.0 diff --git a/applications/calsys/README.md b/applications/calsys/README.md new file mode 100644 index 0000000000..12601e719b --- /dev/null +++ b/applications/calsys/README.md @@ -0,0 +1,15 @@ +# calsys + +Deployment for the Calibration System CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. 
Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| imageTag | string | `""` | The default image tag for all of the child applications | +| namespace | string | `""` | | diff --git a/applications/calsys/charts/csc b/applications/calsys/charts/csc new file mode 120000 index 0000000000..3a423a6f5f --- /dev/null +++ b/applications/calsys/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/calsys/charts/csc_collector b/applications/calsys/charts/csc_collector new file mode 120000 index 0000000000..38853814a3 --- /dev/null +++ b/applications/calsys/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/calsys/values-tucson-teststand.yaml b/applications/calsys/values-tucson-teststand.yaml new file mode 100644 index 0000000000..4fa54a594a --- /dev/null +++ b/applications/calsys/values-tucson-teststand.yaml @@ -0,0 +1,71 @@ +namespace: &ns calsys +imageTag: &imageTag c0029 + +csc_collector: + namespace: *ns + siteTag: tucson + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + s3EndpointUrl: https://s3.tu.lsst.org + + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + +gcheaderservice1: + enabled: true + namespace: *ns + image: + 
repository: ts-dockerhub.lsst.org/headerservice + tag: ts-v3.1.11_c0029 + pullPolicy: Always + env: + CAMERA: gc1 + TSTAND_HEADERSERVICE: TUCSON + URL_SPEC: --lfa_mode s3 --s3instance tuc + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + +simulation-gencam: + enabled: true + namespace: *ns + classifier: genericcamera1 + image: + repository: ts-dockerhub.lsst.org/genericcamera + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 1 + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + service: + enabled: true + port: 5013 + type: LoadBalancer diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/calsys/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml new file mode 100644 index 0000000000..920ffe53a2 --- /dev/null +++ b/environments/templates/calsys-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.calsys.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: calsys +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: calsys + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: calsys + server: 
https://kubernetes.default.svc + project: default + source: + path: applications/calsys + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index f3eb187f92..2a06b6ebc6 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -5,6 +5,7 @@ vaultPathPrefix: secret/k8s_operator/tucson-teststand.lsst.codes applications: argo-workflows: true auxtel: true + calsys: true eas: true exposurelog: true narrativelog: true diff --git a/environments/values.yaml b/environments/values.yaml index b15b867824..6141256b70 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -36,12 +36,15 @@ applications: # environments and is present here only because it makes parsing easier argocd: true - # -- Eanble the auxtel control system application + # -- Enable the auxtel control system application auxtel: false # -- Enable the butler application butler: false + # -- Enable the calsys control system application + calsys: false + # -- Enable the cert-manager application, required unless the environment # makes separate arrangements to inject a current TLS certificate cert-manager: true diff --git a/shared/charts/csc/templates/service.yaml b/shared/charts/csc/templates/service.yaml index 298a1f80bf..f5188be55e 100644 --- a/shared/charts/csc/templates/service.yaml +++ b/shared/charts/csc/templates/service.yaml @@ -1,15 +1,18 @@ -{{- if .Values.service.use }} +{{- if .Values.service.enabled }} apiVersion: v1 kind: Service metadata: labels: - csc: {{ include "chart.name" . }} + csc: {{ include "csc.name" .
}} name: {{ include "chart.name" . }}-service namespace: {{ .Values.namespace }} spec: + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} ports: - port: {{ .Values.service.port }} selector: - csc: {{ include "chart.name" . }} + csc: {{ include "csc.name" . }} type: {{ .Values.service.type }} {{- end }} From ccb5f41925c8d17e4c9d35ecfd97ce167779ff55 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 11 May 2023 18:20:15 -0700 Subject: [PATCH 481/588] Add simonyitel app. --- applications/simonyitel/Chart.yaml | 79 ++++++ applications/simonyitel/README.md | 15 + applications/simonyitel/charts/csc | 1 + applications/simonyitel/charts/csc_collector | 1 + .../simonyitel/values-tucson-teststand.yaml | 267 ++++++++++++++++++ applications/simonyitel/values.yaml | 1 + .../templates/simonyitel-application.yaml | 37 +++ environments/values-tucson-teststand.yaml | 1 + environments/values.yaml | 3 + shared/charts/csc/templates/job.yaml | 4 +- 10 files changed, 407 insertions(+), 2 deletions(-) create mode 100644 applications/simonyitel/Chart.yaml create mode 100644 applications/simonyitel/README.md create mode 120000 applications/simonyitel/charts/csc create mode 120000 applications/simonyitel/charts/csc_collector create mode 100644 applications/simonyitel/values-tucson-teststand.yaml create mode 120000 applications/simonyitel/values.yaml create mode 100644 environments/templates/simonyitel-application.yaml diff --git a/applications/simonyitel/Chart.yaml b/applications/simonyitel/Chart.yaml new file mode 100644 index 0000000000..ab000119f0 --- /dev/null +++ b/applications/simonyitel/Chart.yaml @@ -0,0 +1,79 @@ +apiVersion: v2 +name: simonyitel +version: 1.0.0 +description: Deployment for the Simonyi Survey Telescope CSCs +dependencies: + - name: csc_collector + version: 1.0.0 + - name: csc + alias: ccheaderservice + version: 1.0.0 + - name: csc + alias: ccoods + version: 
1.0.0 + - name: csc + alias: lasertracker1 + version: 1.0.0 + - name: csc + alias: lasertracker1-sim + version: 1.0.0 + - name: csc + alias: mtaircompressor1 + version: 1.0.0 + - name: csc + alias: mtaircompressor1-sim + version: 1.0.0 + - name: csc + alias: mtaircompressor2 + version: 1.0.0 + - name: csc + alias: mtaircompressor2-sim + version: 1.0.0 + - name: csc + alias: mtaos + version: 1.0.0 + - name: csc + alias: mtcamhexapod + version: 1.0.0 + - name: csc + alias: mtcamhexapod-sim + version: 1.0.0 + - name: csc + alias: mtdome + version: 1.0.0 + - name: csc + alias: mtdome-sim + version: 1.0.0 + - name: csc + alias: mtdometrajectory + version: 1.0.0 + - name: csc + alias: mtm1m3-sim + version: 1.0.0 + - name: csc + alias: mtm2 + version: 1.0.0 + - name: csc + alias: mtm2-sim + version: 1.0.0 + - name: csc + alias: mtm2hexapod + version: 1.0.0 + - name: csc + alias: mtm2hexapod-sim + version: 1.0.0 + - name: csc + alias: mtmount + version: 1.0.0 + - name: csc + alias: mtmount-sim + version: 1.0.0 + - name: csc + alias: mtptg + version: 1.0.0 + - name: csc + alias: mtrotator + version: 1.0.0 + - name: csc + alias: mtrotator-sim + version: 1.0.0 diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md new file mode 100644 index 0000000000..1e94eb32cc --- /dev/null +++ b/applications/simonyitel/README.md @@ -0,0 +1,15 @@ +# simonyitel + +Deployment for the Simonyi Survey Telescope CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| imageTag | string | `""` | The default image tag for all of the child applications | +| namespace | string | `""` | | diff --git a/applications/simonyitel/charts/csc b/applications/simonyitel/charts/csc new file mode 120000 index 0000000000..3a423a6f5f --- /dev/null +++ b/applications/simonyitel/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/simonyitel/charts/csc_collector b/applications/simonyitel/charts/csc_collector new file mode 120000 index 0000000000..38853814a3 --- /dev/null +++ b/applications/simonyitel/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/simonyitel/values-tucson-teststand.yaml b/applications/simonyitel/values-tucson-teststand.yaml new file mode 100644 index 0000000000..f78a2120af --- /dev/null +++ b/applications/simonyitel/values-tucson-teststand.yaml @@ -0,0 +1,267 @@ +namespace: &ns simonyitel +imageTag: &imageTag c0029 + +csc_collector: + namespace: *ns + siteTag: tucson + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + s3EndpointUrl: https://s3.tu.lsst.org + + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + - name: butler-secret + key: butler-secret + +ccheaderservice: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/headerservice + tag: ts-v3.1.11_c0029 + pullPolicy: Always + env: + URL_SPEC: --lfa_mode s3 --s3instance tuc + 
TSTAND_HEADERSERVICE: TUCSON + CAMERA: cc + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + +ccoods: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/ccoods + tag: *imageTag + pullPolicy: Always + env: + DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml + CTRL_OODS_CONFIG_FILE: /etc/ccoods.yaml + butlerSecret: + containerPath: &bS-cP /home/saluser/.lsst + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *bS-cP + nfsMountpoint: + - name: comcam-gen3-butler + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam + - name: comcam-oods-data + containerPath: /data + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /data + configfile: + path: /etc + filename: ccoods.yaml + content: | + defaultInterval: &interval + days: 0 + hours: 0 + minutes: 0 + seconds: 0 + + ingester: + imageStagingDirectory: /data/staging/comcam/oods + butlers: + - butler: + instrument: lsst.obs.lsst.LsstComCam + class: + import : lsst.ctrl.oods.gen3ButlerIngester + name : Gen3ButlerIngester + stagingDirectory : /data/lsstdata/TTS/comcam/oods/gen3butler/raw + badFileDirectory: /data/lsstdata/TTS/comcam/oods/gen3butler/badfiles + repoDirectory : /repo/LSSTComCam + collections: + - LSSTComCam/raw/all + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 30 + batchSize: 20 + scanInterval: + <<: *interval + seconds: 2 + + cacheCleaner: + # ONLY clean out empty directories here, never files + clearEmptyDirectories: + - /data/lsstdata/TTS/comcam/oods/gen3butler/raw + # clean out empty directories and old files from these directories + 
clearEmptyDirectoriesAndOldFiles: + - /data/lsstdata/TTS/comcam/oods/gen3butler/badfiles + - /data/staging/comcam/oods + - /data/staging/comcam/forwarder + scanInterval: + <<: *interval + hours: 1 + filesOlderThan: + <<: *interval + days: 31 + directoriesEmptyForMoreThan: + <<: *interval + days: 2 + +lasertracker1-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/lasertracker + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 1 --simulate 2 + +mtaircompressor1-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtaircompressor + tag: *imageTag + pullPolicy: Always + +mtaircompressor2-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtaircompressor + tag: *imageTag + pullPolicy: Always + +mtaos: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtaos + tag: *imageTag + pullPolicy: Always + butlerSecret: + containerPath: *bS-cP + dbUser: oods + secretPermFixer: + - name: butler-secret + containerPath: *bS-cP + nfsMountpoint: + - name: comcam-gen3-butler + containerPath: /repo/LSSTComCam + readOnly: false + server: comcam-archiver.tu.lsst.org + serverPath: /repo/LSSTComCam + - name: comcam-gen3-oods + containerPath: /data/lsstdata/TTS/comcam + readOnly: true + server: comcam-archiver.tu.lsst.org + serverPath: /lsstdata/TTS/comcam + - name: scratch + containerPath: /scratch + readOnly: false + server: nfs-scratch.tu.lsst.org + serverPath: /scratch + +mtcamhexapod-sim: + enabled: true + namespace: *ns + classifier: mthexapod1 + image: + repository: ts-dockerhub.lsst.org/mthexapod + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate 1 + +mtdome-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtdome + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate 1 + +mtdometrajectory: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtdometrajectory + tag: 
*imageTag + pullPolicy: Always + +mtm1m3-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtm1m3_sim + tag: *imageTag + pullPolicy: Always + +mtm2-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/m2 + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate + +mtm2hexapod-sim: + enabled: true + namespace: *ns + classifier: mthexapod2 + image: + repository: ts-dockerhub.lsst.org/mthexapod + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate 2 + +mtmount-sim: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtmount + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate + +mtptg: + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/ptkernel + tag: *imageTag + pullPolicy: Always + env: + TELESCOPE: MT + +mtrotator-sim: + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/mtrotator + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: --simulate diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/simonyitel/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml new file mode 100644 index 0000000000..e52c222dda --- /dev/null +++ b/environments/templates/simonyitel-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.simonyitel.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: simonyitel +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: simonyitel + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: simonyitel + server: https://kubernetes.default.svc + project: default + source: + path: applications/simonyitel + 
repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 2a06b6ebc6..584d979713 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -13,6 +13,7 @@ applications: obssys: true portal: true sasquatch: true + simonyitel: true squareone: true strimzi: true telegraf: true diff --git a/environments/values.yaml b/environments/values.yaml index 6141256b70..c172819073 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -141,6 +141,9 @@ applications: # -- Enable the siav2 application siav2: false + # -- Enable the simonyitel control system application + simonyitel: false + # -- Enable the ssotap application ssotap: false diff --git a/shared/charts/csc/templates/job.yaml b/shared/charts/csc/templates/job.yaml index 32763b8fc9..cae752716e 100644 --- a/shared/charts/csc/templates/job.yaml +++ b/shared/charts/csc/templates/job.yaml @@ -32,7 +32,7 @@ spec: name: csc-env-config - secretRef: name: ts-salkafka - {{- if or .Values.env .Values.envSecrets }} + {{- if or (or .Values.env .Values.envSecrets) .Values.butlerSecret }} env: {{- range $env_var, $env_value := .Values.env }} - name: {{ $env_var }} @@ -137,7 +137,7 @@ spec: - key: {{ .Values.configfile.filename }} path: {{ .Values.configfile.filename }} {{- end }} - {{- if .Values.secretPermFixer }} + {{- if .Values.secretPermFixer }} {{- range $values := .Values.secretPermFixer }} - name: {{ include "chart.name" $ }}-{{ $values.name }} emptyDir: {} From 497ffb57a4fc8ef5fe40a7d63a59f9d12d3d7ea4 Mon Sep 17 00:00:00 2001 
From: Michael Reuter Date: Mon, 15 May 2023 08:07:27 -0700 Subject: [PATCH 482/588] Add dmocps app. --- applications/dmocps/Chart.yaml | 13 +++++++ applications/dmocps/README.md | 15 ++++++++ applications/dmocps/charts/csc | 1 + applications/dmocps/charts/csc_collector | 1 + .../dmocps/values-tucson-teststand.yaml | 37 +++++++++++++++++++ applications/dmocps/values.yaml | 1 + .../templates/dmocps-application.yaml | 29 +++++++++++++++ environments/values-tucson-teststand.yaml | 1 + environments/values.yaml | 3 ++ 9 files changed, 101 insertions(+) create mode 100644 applications/dmocps/Chart.yaml create mode 100644 applications/dmocps/README.md create mode 120000 applications/dmocps/charts/csc create mode 120000 applications/dmocps/charts/csc_collector create mode 100644 applications/dmocps/values-tucson-teststand.yaml create mode 120000 applications/dmocps/values.yaml create mode 100644 environments/templates/dmocps-application.yaml diff --git a/applications/dmocps/Chart.yaml b/applications/dmocps/Chart.yaml new file mode 100644 index 0000000000..aeab461001 --- /dev/null +++ b/applications/dmocps/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: dmocps +version: 1.0.0 +description: Deployment for the DM OCPS CSCs +dependencies: + - name: csc_collector + version: 1.0.0 + - name: csc + alias: atocps + version: 1.0.0 + - name: csc + alias: ccocps + version: 1.0.0 diff --git a/applications/dmocps/README.md b/applications/dmocps/README.md new file mode 100644 index 0000000000..77c72385d0 --- /dev/null +++ b/applications/dmocps/README.md @@ -0,0 +1,15 @@ +# dmocps + +Deployment for the DM OCPS CSCs + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. 
Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| imageTag | string | `""` | The default image tag for all of the child applications | +| namespace | string | `""` | | diff --git a/applications/dmocps/charts/csc b/applications/dmocps/charts/csc new file mode 120000 index 0000000000..3a423a6f5f --- /dev/null +++ b/applications/dmocps/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/dmocps/charts/csc_collector b/applications/dmocps/charts/csc_collector new file mode 120000 index 0000000000..38853814a3 --- /dev/null +++ b/applications/dmocps/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/dmocps/values-tucson-teststand.yaml b/applications/dmocps/values-tucson-teststand.yaml new file mode 100644 index 0000000000..e7349b5609 --- /dev/null +++ b/applications/dmocps/values-tucson-teststand.yaml @@ -0,0 +1,37 @@ +namespace: &ns uws +imageTag: &imageTag c0029 + +csc_collector: + namespace: *ns + siteTag: tucson + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + +atocps: + enabled: true + namespace: *ns + classifier: ocps1 + image: + repository: ts-dockerhub.lsst.org/dmocps + tag: *imageTag + pullPolicy: 
Always + env: + RUN_ARG: 1 + +ccocps: + enabled: true + namespace: *ns + classifier: ocps2 + image: + repository: ts-dockerhub.lsst.org/dmocps + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 2 diff --git a/applications/dmocps/values.yaml b/applications/dmocps/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/dmocps/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/environments/templates/dmocps-application.yaml b/environments/templates/dmocps-application.yaml new file mode 100644 index 0000000000..ab1c75545e --- /dev/null +++ b/environments/templates/dmocps-application.yaml @@ -0,0 +1,29 @@ +{{- if .Values.dmocps.enabled -}} +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: dmocps + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: uws + server: https://kubernetes.default.svc + project: default + source: + path: applications/dmocps + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 584d979713..6e98ccbbcb 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -6,6 +6,7 @@ applications: argo-workflows: true auxtel: true calsys: true + dmocps: true eas: true exposurelog: true narrativelog: true diff --git a/environments/values.yaml b/environments/values.yaml index c172819073..a6b2b90508 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -51,6 +51,9 @@ 
applications: # -- Eanble the datalinker application datalinker: false + # -- Enable the dmocps control system application + dmocps: false + # -- Enable the eas control system application eas: false From 0f70a5316023abcb01ff3b536ecb3c6e9bc136f1 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 15 May 2023 09:03:51 -0700 Subject: [PATCH 483/588] Add control-system-test app. --- applications/control-system-test/Chart.yaml | 10 +++++ applications/control-system-test/README.md | 15 ++++++++ applications/control-system-test/charts/csc | 1 + .../control-system-test/charts/csc_collector | 1 + .../values-tucson-teststand.yaml | 28 ++++++++++++++ applications/control-system-test/values.yaml | 1 + .../control-system-test-application.yaml | 37 +++++++++++++++++++ environments/values-tucson-teststand.yaml | 1 + environments/values.yaml | 3 ++ 9 files changed, 97 insertions(+) create mode 100644 applications/control-system-test/Chart.yaml create mode 100644 applications/control-system-test/README.md create mode 120000 applications/control-system-test/charts/csc create mode 120000 applications/control-system-test/charts/csc_collector create mode 100644 applications/control-system-test/values-tucson-teststand.yaml create mode 120000 applications/control-system-test/values.yaml create mode 100644 environments/templates/control-system-test-application.yaml diff --git a/applications/control-system-test/Chart.yaml b/applications/control-system-test/Chart.yaml new file mode 100644 index 0000000000..4bb5ded773 --- /dev/null +++ b/applications/control-system-test/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +name: control-system-test +version: 1.0.0 +description: Deployment for the Test CSCs and Integration Testing Workflows +dependencies: + - name: csc_collector + version: 1.0.0 + - name: csc + alias: test42 + version: 1.0.0 diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md new file mode 100644 index 0000000000..c87235782d --- 
/dev/null +++ b/applications/control-system-test/README.md @@ -0,0 +1,15 @@ +# control-system-test + +Deployment for the Test CSCs and Integration Testing Workflows + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| imageTag | string | `""` | The default image tag for all of the child applications | +| namespace | string | `""` | | diff --git a/applications/control-system-test/charts/csc b/applications/control-system-test/charts/csc new file mode 120000 index 0000000000..3a423a6f5f --- /dev/null +++ b/applications/control-system-test/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/control-system-test/charts/csc_collector b/applications/control-system-test/charts/csc_collector new file mode 120000 index 0000000000..38853814a3 --- /dev/null +++ b/applications/control-system-test/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/control-system-test/values-tucson-teststand.yaml b/applications/control-system-test/values-tucson-teststand.yaml new file mode 100644 index 0000000000..0e55653e4c --- /dev/null +++ b/applications/control-system-test/values-tucson-teststand.yaml @@ -0,0 +1,28 @@ +namespace: &ns control-system-test +imageTag: &imageTag c0029 + 
+csc_collector: + namespace: *ns + siteTag: tucson + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + s3EndpointUrl: https://s3.tu.lsst.org + + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + +test42: + enabled: true + namespace: *ns + image: + repository: ts-dockerhub.lsst.org/test + tag: *imageTag + pullPolicy: Always + env: + RUN_ARG: 42 diff --git a/applications/control-system-test/values.yaml b/applications/control-system-test/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/control-system-test/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml new file mode 100644 index 0000000000..8a2055ba23 --- /dev/null +++ b/environments/templates/control-system-test-application.yaml @@ -0,0 +1,37 @@ +{{- if (index .Values "strimzi-registry-operator" "enabled") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: control-system-test +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: control-system-test + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: control-system-test + server: https://kubernetes.default.svc + project: default + source: + path: applications/control-system-test + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - 
"values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 6e98ccbbcb..3f5636a021 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -6,6 +6,7 @@ applications: argo-workflows: true auxtel: true calsys: true + control-system-test: true dmocps: true eas: true exposurelog: true diff --git a/environments/values.yaml b/environments/values.yaml index a6b2b90508..34ca448f97 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -48,6 +48,9 @@ applications: # makes separate arrangements to inject a current TLS certificate cert-manager: true + # -- Enable the control-system-test application + control-system-test: false + # -- Eanble the datalinker application datalinker: false From e8473f3963fbdfd11a3afcd815f9619a50106c0d Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 15 May 2023 12:06:08 -0700 Subject: [PATCH 484/588] Wiring up global parameters. 
--- applications/auxtel/README.md | 9 ++++-- .../auxtel/values-tucson-teststand.yaml | 12 -------- applications/calsys/README.md | 9 ++++-- .../calsys/values-tucson-teststand.yaml | 6 ---- applications/control-system-test/README.md | 9 ++++-- .../values-tucson-teststand.yaml | 6 ---- applications/dmocps/README.md | 9 ++++-- .../dmocps/values-tucson-teststand.yaml | 6 ---- applications/eas/README.md | 9 ++++-- applications/eas/values-tucson-teststand.yaml | 20 ------------- applications/obssys/README.md | 9 ++++-- .../obssys/values-tucson-teststand.yaml | 11 -------- applications/simonyitel/README.md | 9 ++++-- .../simonyitel/values-tucson-teststand.yaml | 19 ------------- .../templates/auxtel-application.yaml | 12 ++++++++ .../templates/calsys-application.yaml | 12 ++++++++ .../control-system-test-application.yaml | 12 ++++++++ .../templates/dmocps-application.yaml | 12 ++++++++ environments/templates/eas-application.yaml | 12 ++++++++ .../templates/obssys-application.yaml | 12 ++++++++ .../templates/simonyitel-application.yaml | 12 ++++++++ environments/values-tucson-teststand.yaml | 4 +++ environments/values.yaml | 23 +++++++++++++++ shared/charts/csc/README.md | 2 +- shared/charts/csc/templates/job.yaml | 3 +- shared/charts/csc/values.yaml | 2 +- .../templates/configmap-env.yaml | 9 +++--- shared/values/values_control_system_apps.yaml | 28 +++++++++++++++++-- 28 files changed, 195 insertions(+), 103 deletions(-) diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index 001c3e2398..9cd93e7ad4 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -6,10 +6,15 @@ Deployment for the Auxiliary Telescope CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. 
Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | -| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| imageTag | string | `""` | The default image tag for all of the child applications | | namespace | string | `""` | | diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index d21a8ffbd9..b24dcc5fda 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -1,12 +1,7 @@ namespace: &ns auxtel -imageTag: &imageTag c0029 csc_collector: namespace: *ns - siteTag: tucson - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 - s3EndpointUrl: https://s3.tu.lsst.org secrets: - name: nexus3-docker @@ -24,7 +19,6 @@ ataos: namespace: *ns image: 
repository: ts-dockerhub.lsst.org/ataos - tag: *imageTag pullPolicy: Always atdome-sim: @@ -32,7 +26,6 @@ atdome-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/atdome - tag: *imageTag pullPolicy: Always env: RUN_ARG: --simulate @@ -42,7 +35,6 @@ atdometrajectory: namespace: *ns image: repository: ts-dockerhub.lsst.org/atdometrajectory - tag: *imageTag pullPolicy: Always atheaderservice: @@ -75,7 +67,6 @@ athexapod-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/athexapod - tag: *imageTag pullPolicy: Always atoods: @@ -83,7 +74,6 @@ atoods: namespace: *ns image: repository: ts-dockerhub.lsst.org/atoods - tag: *imageTag pullPolicy: Always env: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml @@ -163,7 +153,6 @@ atptg: namespace: *ns image: repository: ts-dockerhub.lsst.org/ptkernel - tag: *imageTag pullPolicy: Always env: TELESCOPE: AT @@ -173,7 +162,6 @@ atspectrograph-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/atspec - tag: *imageTag pullPolicy: Always env: RUN_ARG: --simulate diff --git a/applications/calsys/README.md b/applications/calsys/README.md index 12601e719b..12454a1338 100644 --- a/applications/calsys/README.md +++ b/applications/calsys/README.md @@ -6,10 +6,15 @@ Deployment for the Calibration System CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| -| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| imageTag | string | `""` | The default image tag for all of the child applications | | namespace | string | `""` | | diff --git a/applications/calsys/values-tucson-teststand.yaml b/applications/calsys/values-tucson-teststand.yaml index 4fa54a594a..4b40667ec2 100644 --- a/applications/calsys/values-tucson-teststand.yaml +++ b/applications/calsys/values-tucson-teststand.yaml @@ -1,12 +1,7 @@ namespace: &ns calsys -imageTag: &imageTag c0029 csc_collector: namespace: *ns - siteTag: tucson - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 - s3EndpointUrl: https://s3.tu.lsst.org secrets: - name: nexus3-docker @@ -48,7 +43,6 @@ simulation-gencam: classifier: genericcamera1 image: repository: ts-dockerhub.lsst.org/genericcamera - tag: *imageTag pullPolicy: Always env: RUN_ARG: 1 diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md index 
c87235782d..cd5399c328 100644 --- a/applications/control-system-test/README.md +++ b/applications/control-system-test/README.md @@ -6,10 +6,15 @@ Deployment for the Test CSCs and Integration Testing Workflows | Key | Type | Default | Description | |-----|------|---------|-------------| +| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | -| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| imageTag | string | `""` | The default image tag for all of the child applications | | namespace | string | `""` | | diff --git a/applications/control-system-test/values-tucson-teststand.yaml b/applications/control-system-test/values-tucson-teststand.yaml 
index 0e55653e4c..a9a31d0a8a 100644 --- a/applications/control-system-test/values-tucson-teststand.yaml +++ b/applications/control-system-test/values-tucson-teststand.yaml @@ -1,12 +1,7 @@ namespace: &ns control-system-test -imageTag: &imageTag c0029 csc_collector: namespace: *ns - siteTag: tucson - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 - s3EndpointUrl: https://s3.tu.lsst.org secrets: - name: nexus3-docker @@ -22,7 +17,6 @@ test42: namespace: *ns image: repository: ts-dockerhub.lsst.org/test - tag: *imageTag pullPolicy: Always env: RUN_ARG: 42 diff --git a/applications/dmocps/README.md b/applications/dmocps/README.md index 77c72385d0..2142a0a8ef 100644 --- a/applications/dmocps/README.md +++ b/applications/dmocps/README.md @@ -6,10 +6,15 @@ Deployment for the DM OCPS CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| -| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| imageTag | string | `""` | The default image tag for all of the child applications | | namespace | string | `""` | | diff --git a/applications/dmocps/values-tucson-teststand.yaml b/applications/dmocps/values-tucson-teststand.yaml index e7349b5609..a34bec196c 100644 --- a/applications/dmocps/values-tucson-teststand.yaml +++ b/applications/dmocps/values-tucson-teststand.yaml @@ -1,11 +1,7 @@ namespace: &ns uws -imageTag: &imageTag c0029 csc_collector: namespace: *ns - siteTag: tucson - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 secrets: - name: nexus3-docker @@ -20,7 +16,6 @@ atocps: classifier: ocps1 image: repository: ts-dockerhub.lsst.org/dmocps - tag: *imageTag pullPolicy: Always env: RUN_ARG: 1 @@ -31,7 +26,6 @@ ccocps: classifier: ocps2 image: repository: ts-dockerhub.lsst.org/dmocps - tag: *imageTag pullPolicy: Always env: RUN_ARG: 2 diff --git a/applications/eas/README.md 
b/applications/eas/README.md index e3ddb09329..e88535607e 100644 --- a/applications/eas/README.md +++ b/applications/eas/README.md @@ -6,10 +6,15 @@ Deployment for the Environmental Awareness Systems CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | -| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| imageTag | string | `""` | The default image tag for all of the child applications | | namespace | string | `""` | | diff --git a/applications/eas/values-tucson-teststand.yaml b/applications/eas/values-tucson-teststand.yaml index 82646ed4f0..e0888ea0e2 100644 --- 
a/applications/eas/values-tucson-teststand.yaml +++ b/applications/eas/values-tucson-teststand.yaml @@ -1,11 +1,7 @@ namespace: &ns eas -imageTag: &imageTag c0029 csc_collector: namespace: *ns - siteTag: tucson - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 secrets: - name: nexus3-docker @@ -22,7 +18,6 @@ auxtel-ess01-sim: classifier: ess201 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 201 --simulate @@ -33,7 +28,6 @@ auxtel-ess02-sim: classifier: ess202 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 202 --simulate @@ -44,7 +38,6 @@ auxtel-ess03-sim: classifier: ess203 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 203 --simulate @@ -55,7 +48,6 @@ auxtel-ess04-sim: classifier: ess204 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 204 --simulate @@ -66,7 +58,6 @@ calibhill-ess01-sim: classifier: ess301 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 301 --simulate @@ -76,7 +67,6 @@ dimm1-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/dimm - tag: *imageTag pullPolicy: Always env: RUN_ARG: 1 --simulate @@ -86,7 +76,6 @@ dimm2-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/dimm - tag: *imageTag pullPolicy: Always env: RUN_ARG: 2 --simulate @@ -96,7 +85,6 @@ dsm1-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/dsm - tag: *imageTag pullPolicy: Always env: CSC_INDEX: 1 @@ -107,7 +95,6 @@ dsm2-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/dsm - tag: *imageTag pullPolicy: Always env: CSC_INDEX: 2 @@ -119,7 +106,6 @@ mtdome-ess01-sim: classifier: ess101 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 101 --simulate @@ -130,7 +116,6 @@ mtdome-ess02-sim: 
classifier: ess102 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 102 --simulate @@ -141,7 +126,6 @@ mtdome-ess03-sim: classifier: ess103 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 103 --simulate @@ -152,7 +136,6 @@ tma-ess01-sim: classifier: ess1 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 1 --simulate @@ -163,7 +146,6 @@ tma-ess104-sim: classifier: ess104 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 104 --simulate @@ -174,7 +156,6 @@ tma-ess105-sim: classifier: ess105 image: repository: ts-dockerhub.lsst.org/ess - tag: *imageTag pullPolicy: Always env: RUN_ARG: 105 --simulate @@ -184,7 +165,6 @@ weatherforecast: namespace: *ns image: repository: ts-dockerhub.lsst.org/weatherforecast - tag: *imageTag pullPolicy: Always env: RUN_ARG: --state enabled diff --git a/applications/obssys/README.md b/applications/obssys/README.md index 189eac3b68..5b104bd913 100644 --- a/applications/obssys/README.md +++ b/applications/obssys/README.md @@ -6,10 +6,15 @@ Deployment for the Observatory System CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| -| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| imageTag | string | `""` | The default image tag for all of the child applications | | namespace | string | `""` | | diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml index 0231765cf6..34dc0ca2af 100644 --- a/applications/obssys/values-tucson-teststand.yaml +++ b/applications/obssys/values-tucson-teststand.yaml @@ -1,12 +1,7 @@ namespace: &ns obssys -imageTag: &imageTag c0029 csc_collector: namespace: *ns - siteTag: tucson - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 - s3EndpointUrl: https://s3.tu.lsst.org secrets: - name: nexus3-docker @@ -27,7 +22,6 @@ atqueue: classifier: scriptqueue2 image: repository: ts-dockerhub.lsst.org/scriptqueue - tag: *imageTag pullPolicy: Always env: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml @@ -77,7 +71,6 @@ atscheduler: classifier: scheduler2 image: repository: 
ts-dockerhub.lsst.org/scheduler - tag: *imageTag pullPolicy: Always env: INDEX: 2 @@ -106,7 +99,6 @@ authorize: namespace: *ns image: repository: ts-dockerhub.lsst.org/authorize - tag: *imageTag pullPolicy: Always env: RUN_ARG: --state enabled @@ -122,7 +114,6 @@ mtqueue: classifier: scriptqueue1 image: repository: ts-dockerhub.lsst.org/scriptqueue - tag: *imageTag pullPolicy: Always env: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml @@ -172,7 +163,6 @@ mtscheduler: classifier: scheduler1 image: repository: ts-dockerhub.lsst.org/scheduler - tag: *imageTag pullPolicy: Always env: INDEX: 1 @@ -201,7 +191,6 @@ watcher: namespace: *ns image: repository: ts-dockerhub.lsst.org/watcher - tag: *imageTag pullPolicy: Always env: RUN_ARG: --state enabled diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md index 1e94eb32cc..6b1c1cecff 100644 --- a/applications/simonyitel/README.md +++ b/applications/simonyitel/README.md @@ -6,10 +6,15 @@ Deployment for the Simonyi Survey Telescope CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| -| csc_collector.siteTag | string | `""` | The site-specific name used for handling configurable CSCs | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| imageTag | string | `""` | The default image tag for all of the child applications | | namespace | string | `""` | | diff --git a/applications/simonyitel/values-tucson-teststand.yaml b/applications/simonyitel/values-tucson-teststand.yaml index f78a2120af..a4e015f4c7 100644 --- a/applications/simonyitel/values-tucson-teststand.yaml +++ b/applications/simonyitel/values-tucson-teststand.yaml @@ -1,12 +1,7 @@ namespace: &ns simonyitel -imageTag: &imageTag c0029 csc_collector: namespace: *ns - siteTag: tucson - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 - s3EndpointUrl: https://s3.tu.lsst.org secrets: - name: nexus3-docker @@ -49,7 +44,6 @@ ccoods: namespace: *ns image: repository: ts-dockerhub.lsst.org/ccoods - tag: *imageTag pullPolicy: Always env: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml @@ -129,7 +123,6 @@ lasertracker1-sim: namespace: *ns image: repository: 
ts-dockerhub.lsst.org/lasertracker - tag: *imageTag pullPolicy: Always env: RUN_ARG: 1 --simulate 2 @@ -139,7 +132,6 @@ mtaircompressor1-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtaircompressor - tag: *imageTag pullPolicy: Always mtaircompressor2-sim: @@ -147,7 +139,6 @@ mtaircompressor2-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtaircompressor - tag: *imageTag pullPolicy: Always mtaos: @@ -155,7 +146,6 @@ mtaos: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtaos - tag: *imageTag pullPolicy: Always butlerSecret: containerPath: *bS-cP @@ -186,7 +176,6 @@ mtcamhexapod-sim: classifier: mthexapod1 image: repository: ts-dockerhub.lsst.org/mthexapod - tag: *imageTag pullPolicy: Always env: RUN_ARG: --simulate 1 @@ -196,7 +185,6 @@ mtdome-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtdome - tag: *imageTag pullPolicy: Always env: RUN_ARG: --simulate 1 @@ -206,7 +194,6 @@ mtdometrajectory: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtdometrajectory - tag: *imageTag pullPolicy: Always mtm1m3-sim: @@ -214,7 +201,6 @@ mtm1m3-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtm1m3_sim - tag: *imageTag pullPolicy: Always mtm2-sim: @@ -222,7 +208,6 @@ mtm2-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/m2 - tag: *imageTag pullPolicy: Always env: RUN_ARG: --simulate @@ -233,7 +218,6 @@ mtm2hexapod-sim: classifier: mthexapod2 image: repository: ts-dockerhub.lsst.org/mthexapod - tag: *imageTag pullPolicy: Always env: RUN_ARG: --simulate 2 @@ -243,7 +227,6 @@ mtmount-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtmount - tag: *imageTag pullPolicy: Always env: RUN_ARG: --simulate @@ -252,7 +235,6 @@ mtptg: namespace: *ns image: repository: ts-dockerhub.lsst.org/ptkernel - tag: *imageTag pullPolicy: Always env: TELESCOPE: MT @@ -261,7 +243,6 @@ mtrotator-sim: namespace: *ns image: repository: ts-dockerhub.lsst.org/mtrotator - tag: *imageTag pullPolicy: Always env: 
RUN_ARG: --simulate diff --git a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml index 189232c046..7e90b14590 100644 --- a/environments/templates/auxtel-application.yaml +++ b/environments/templates/auxtel-application.yaml @@ -31,6 +31,18 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ .Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: "global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.environment }}.yaml" diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml index 920ffe53a2..dd34165569 100644 --- a/environments/templates/calsys-application.yaml +++ b/environments/templates/calsys-application.yaml @@ -31,6 +31,18 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ .Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: "global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - 
name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.environment }}.yaml" diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml index 8a2055ba23..02c83a35a8 100644 --- a/environments/templates/control-system-test-application.yaml +++ b/environments/templates/control-system-test-application.yaml @@ -31,6 +31,18 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ .Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: "global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.environment }}.yaml" diff --git a/environments/templates/dmocps-application.yaml b/environments/templates/dmocps-application.yaml index ab1c75545e..05ca7587d5 100644 --- a/environments/templates/dmocps-application.yaml +++ b/environments/templates/dmocps-application.yaml @@ -23,6 +23,18 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ .Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: 
"global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.environment }}.yaml" diff --git a/environments/templates/eas-application.yaml b/environments/templates/eas-application.yaml index 5666d2170c..9196b1d9fb 100644 --- a/environments/templates/eas-application.yaml +++ b/environments/templates/eas-application.yaml @@ -31,6 +31,18 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ .Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: "global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.environment }}.yaml" diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml index f849e4fefb..4ec2e2323f 100644 --- a/environments/templates/obssys-application.yaml +++ b/environments/templates/obssys-application.yaml @@ -31,6 +31,18 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ 
.Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: "global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.environment }}.yaml" diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml index e52c222dda..5999f3ed50 100644 --- a/environments/templates/simonyitel-application.yaml +++ b/environments/templates/simonyitel-application.yaml @@ -31,6 +31,18 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ .Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: "global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.environment }}.yaml" diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 3f5636a021..67ae29f4fb 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -20,3 +20,7 @@ applications: strimzi: true telegraf: true telegraf-ds: true + +controlSystemImageTag: c0030 +controlSystemSiteTag: tucson 
+controlSystemS3EndpointUrl: https://s3.tu.lsst.org diff --git a/environments/values.yaml b/environments/values.yaml index 34ca448f97..a19864a768 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -207,3 +207,26 @@ applications: # -- Enable the vo-cutouts application vo-cutouts: false + +# The following settings are used for the control system + +# -- Image tag for the control system deployment +# @default -- None, must be set +controlSystemImageTag: "" + +# -- Site tag for the control system deployment +# @default -- None, must be set +controlSystemSiteTag: "" + +# -- Topic name tag for the control system deployment +controlSystemTopicName: sal + +# -- Kafka broker address for the control system deployment +controlSystemKafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + +# -- Schema registry URL for the control system deployment +controlSystemSchemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + +# -- S3 endpoint (LFA) for the control system deployment +# @default -- None, must be set: "" +controlSystemS3EndpointUrl: "" diff --git a/shared/charts/csc/README.md b/shared/charts/csc/README.md index c9ec6871e0..fbfa1b4d3a 100644 --- a/shared/charts/csc/README.md +++ b/shared/charts/csc/README.md @@ -16,7 +16,7 @@ A Helm chart for deploying the Control System CSCs. | envSecrets | list | `[]` | This section holds specifications for secret injection. If this section is used, each object listed must have the following attributes defined: _name_ (The label for the secret), _secretName_ (The name of the vault store reference. 
Uses the _namespace_ attribute to construct the full name), _secretKey_ (The key in the vault store containing the necessary secret) | | image.pullPolicy | string | `"IfNotPresent"` | The policy to apply when pulling an image for deployment | | image.repository | string | `"lsstts/test"` | The Docker registry name of the container image to use for the CSC | -| image.tag | string | `"develop"` | The tag of the container image to use for the CSC | +| image.tag | string | `nil` | The tag of the container image to use for the CSC | | imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | | isPrimary | bool | `true` | This marks the CSC as the primary object to sync upon system starts. This is set to false when two CSCs of the same flavor are deployed (one real, one simulator) to mark the simulator so it can be filtered out for automatic syncing. | | nameOverride | string | `""` | Provide an alternate name for the application | diff --git a/shared/charts/csc/templates/job.yaml b/shared/charts/csc/templates/job.yaml index cae752716e..6890ac0dc3 100644 --- a/shared/charts/csc/templates/job.yaml +++ b/shared/charts/csc/templates/job.yaml @@ -23,7 +23,8 @@ spec: spec: containers: - name: {{ include "csc.class" . 
}} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ .Values.image.repository }}:{{ $imageTag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} stdin: true tty: true diff --git a/shared/charts/csc/values.yaml b/shared/charts/csc/values.yaml index ed0406f841..feee09b63f 100644 --- a/shared/charts/csc/values.yaml +++ b/shared/charts/csc/values.yaml @@ -13,7 +13,7 @@ image: # -- The Docker registry name of the container image to use for the CSC repository: lsstts/test # -- The tag of the container image to use for the CSC - tag: develop + tag: # -- The policy to apply when pulling an image for deployment pullPolicy: IfNotPresent # -- The list of pull secrets needed for the images. diff --git a/shared/charts/csc_collector/templates/configmap-env.yaml b/shared/charts/csc_collector/templates/configmap-env.yaml index 91c90737c6..913433d6de 100644 --- a/shared/charts/csc_collector/templates/configmap-env.yaml +++ b/shared/charts/csc_collector/templates/configmap-env.yaml @@ -3,8 +3,9 @@ kind: ConfigMap metadata: name: csc-env-configfile data: - LSST_SITE: {{ .Values.siteTag }} - LSST_KAFKA_BROKER_ADDR: {{ .Values.kafkaBrokerAddress }} - LSST_SCHEMA_REGISTRY_URL: {{ .Values.schemaRegistryUrl }} - S3_ENDPOINT_URL: {{ .Values.s3EndpointUrl }} + LSST_SITE: {{ $.Values.global.controlSystemSiteTag }} + LSST_TOPIC_NAME: {{ $.Values.global.controlSystemTopicName }} + LSST_KAFKA_BROKER_ADDR: {{ $.Values.global.controlSystemKafkaBrokerAddress }} + LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystemSchemaRegistryUrl }} + S3_ENDPOINT_URL: {{ $.Values.global.controlSystemS3EndpointUrl }} TS_SALKAFKA_USERNAME: ts-salkafka diff --git a/shared/values/values_control_system_apps.yaml b/shared/values/values_control_system_apps.yaml index ca2d36e455..50b4b94769 100644 --- a/shared/values/values_control_system_apps.yaml +++ 
b/shared/values/values_control_system_apps.yaml @@ -2,11 +2,11 @@ namespace: "" # -- The default image tag for all of the child applications -imageTag: "" +# imageTag: "" csc_collector: # -- The site-specific name used for handling configurable CSCs - siteTag: "" + # siteTag: "" # -- This section holds secret specifications. # Each object listed can have the following attributes defined: @@ -29,3 +29,27 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" From 9f19ea0f97123b098b785b1a82e77d0d0e9bc002 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 15 May 2023 13:40:23 -0700 Subject: [PATCH 485/588] Pass namespaces through global. 
--- applications/auxtel/README.md | 3 +-- .../auxtel/values-tucson-teststand.yaml | 12 ----------- applications/calsys/README.md | 3 +-- .../calsys/values-tucson-teststand.yaml | 6 ------ applications/control-system-test/README.md | 3 +-- .../values-tucson-teststand.yaml | 5 ----- applications/dmocps/README.md | 3 +-- .../dmocps/values-tucson-teststand.yaml | 6 ------ applications/eas/README.md | 3 +-- applications/eas/values-tucson-teststand.yaml | 20 ------------------- applications/obssys/README.md | 3 +-- applications/simonyitel/README.md | 3 +-- .../simonyitel/values-tucson-teststand.yaml | 19 ------------------ .../templates/auxtel-application.yaml | 2 ++ .../templates/calsys-application.yaml | 2 ++ .../control-system-test-application.yaml | 2 ++ .../templates/dmocps-application.yaml | 2 ++ environments/templates/eas-application.yaml | 2 ++ .../templates/obssys-application.yaml | 2 ++ .../templates/simonyitel-application.yaml | 2 ++ environments/values.yaml | 4 ++++ shared/charts/csc/README.md | 1 - .../csc/templates/configfile-configmap.yaml | 2 +- .../csc/templates/entrypoint-configmap.yaml | 2 +- shared/charts/csc/templates/job.yaml | 2 +- .../charts/csc/templates/mountpoint-pvc.yaml | 2 +- shared/charts/csc/templates/service.yaml | 2 +- shared/charts/csc/values.yaml | 2 -- .../csc_collector/templates/vault-secret.yaml | 2 +- shared/values/values_control_system_apps.yaml | 13 ++++-------- 30 files changed, 35 insertions(+), 100 deletions(-) diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index 9cd93e7ad4..e3c3504db2 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -6,9 +6,9 @@ Deployment for the Auxiliary Telescope CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. 
Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | @@ -17,4 +17,3 @@ Deployment for the Auxiliary Telescope CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| namespace | string | `""` | | diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index b24dcc5fda..95cd977115 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -1,8 +1,4 @@ -namespace: &ns auxtel - csc_collector: - namespace: *ns - secrets: - name: nexus3-docker key: pull-secret @@ -16,14 +12,12 @@ csc_collector: ataos: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/ataos pullPolicy: Always atdome-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/atdome pullPolicy: Always @@ -32,14 +26,12 @@ atdome-sim: atdometrajectory: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/atdometrajectory pullPolicy: Always atheaderservice: enabled: true - namespace: *ns image: repository: 
ts-dockerhub.lsst.org/headerservice tag: ts-v3.1.11_c0029 @@ -64,14 +56,12 @@ atheaderservice: athexapod-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/athexapod pullPolicy: Always atoods: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/atoods pullPolicy: Always @@ -150,7 +140,6 @@ atoods: atptg: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/ptkernel pullPolicy: Always @@ -159,7 +148,6 @@ atptg: atspectrograph-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/atspec pullPolicy: Always diff --git a/applications/calsys/README.md b/applications/calsys/README.md index 12454a1338..602a93199e 100644 --- a/applications/calsys/README.md +++ b/applications/calsys/README.md @@ -6,9 +6,9 @@ Deployment for the Calibration System CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | @@ -17,4 +17,3 @@ Deployment for the Calibration System CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| namespace | string | `""` | | diff --git a/applications/calsys/values-tucson-teststand.yaml b/applications/calsys/values-tucson-teststand.yaml index 4b40667ec2..8b9766ff98 100644 --- a/applications/calsys/values-tucson-teststand.yaml +++ b/applications/calsys/values-tucson-teststand.yaml @@ -1,8 +1,4 @@ -namespace: &ns calsys - csc_collector: - namespace: *ns - secrets: - name: nexus3-docker key: pull-secret @@ -14,7 +10,6 @@ csc_collector: gcheaderservice1: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/headerservice tag: ts-v3.1.11_c0029 @@ -39,7 +34,6 @@ gcheaderservice1: simulation-gencam: enabled: true - namespace: *ns classifier: genericcamera1 image: repository: ts-dockerhub.lsst.org/genericcamera diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md index cd5399c328..ee96e268a0 100644 --- a/applications/control-system-test/README.md +++ b/applications/control-system-test/README.md @@ -6,9 +6,9 @@ Deployment for the Test CSCs and Integration Testing Workflows | Key | Type | Default | Description | 
|-----|------|---------|-------------| -| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | @@ -17,4 +17,3 @@ Deployment for the Test CSCs and Integration Testing Workflows | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| namespace | string | `""` | | diff --git a/applications/control-system-test/values-tucson-teststand.yaml b/applications/control-system-test/values-tucson-teststand.yaml index a9a31d0a8a..51346cbc0f 100644 --- a/applications/control-system-test/values-tucson-teststand.yaml +++ b/applications/control-system-test/values-tucson-teststand.yaml @@ -1,8 +1,4 @@ -namespace: &ns control-system-test - csc_collector: - namespace: *ns - secrets: - name: nexus3-docker key: pull-secret @@ -14,7 +10,6 @@ csc_collector: test42: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/test pullPolicy: Always diff --git 
a/applications/dmocps/README.md b/applications/dmocps/README.md index 2142a0a8ef..7de092f4d8 100644 --- a/applications/dmocps/README.md +++ b/applications/dmocps/README.md @@ -6,9 +6,9 @@ Deployment for the DM OCPS CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | @@ -17,4 +17,3 @@ Deployment for the DM OCPS CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| namespace | string | `""` | | diff --git a/applications/dmocps/values-tucson-teststand.yaml b/applications/dmocps/values-tucson-teststand.yaml index a34bec196c..a8d3c2a85f 100644 --- a/applications/dmocps/values-tucson-teststand.yaml +++ b/applications/dmocps/values-tucson-teststand.yaml @@ -1,8 +1,4 @@ -namespace: &ns uws - csc_collector: - namespace: *ns - secrets: - name: nexus3-docker key: pull-secret @@ 
-12,7 +8,6 @@ csc_collector: atocps: enabled: true - namespace: *ns classifier: ocps1 image: repository: ts-dockerhub.lsst.org/dmocps @@ -22,7 +17,6 @@ atocps: ccocps: enabled: true - namespace: *ns classifier: ocps2 image: repository: ts-dockerhub.lsst.org/dmocps diff --git a/applications/eas/README.md b/applications/eas/README.md index e88535607e..7e154eae62 100644 --- a/applications/eas/README.md +++ b/applications/eas/README.md @@ -6,9 +6,9 @@ Deployment for the Environmental Awareness Systems CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | @@ -17,4 +17,3 @@ Deployment for the Environmental Awareness Systems CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| namespace | string | `""` | | diff --git a/applications/eas/values-tucson-teststand.yaml b/applications/eas/values-tucson-teststand.yaml index e0888ea0e2..107055acf0 100644 --- a/applications/eas/values-tucson-teststand.yaml +++ b/applications/eas/values-tucson-teststand.yaml @@ -1,8 +1,4 @@ -namespace: &ns eas - csc_collector: - namespace: *ns - secrets: - name: nexus3-docker key: pull-secret @@ -14,7 +10,6 @@ csc_collector: auxtel-ess01-sim: enabled: true - namespace: *ns classifier: ess201 image: repository: ts-dockerhub.lsst.org/ess @@ -24,7 +19,6 @@ auxtel-ess01-sim: auxtel-ess02-sim: enabled: true - namespace: *ns classifier: ess202 image: repository: ts-dockerhub.lsst.org/ess @@ -34,7 +28,6 @@ auxtel-ess02-sim: auxtel-ess03-sim: enabled: true - namespace: *ns classifier: ess203 image: repository: ts-dockerhub.lsst.org/ess @@ -44,7 +37,6 @@ auxtel-ess03-sim: auxtel-ess04-sim: enabled: true - namespace: *ns classifier: ess204 image: repository: ts-dockerhub.lsst.org/ess @@ -54,7 +46,6 @@ auxtel-ess04-sim: calibhill-ess01-sim: enabled: true - namespace: *ns classifier: ess301 image: repository: 
ts-dockerhub.lsst.org/ess @@ -64,7 +55,6 @@ calibhill-ess01-sim: dimm1-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/dimm pullPolicy: Always @@ -73,7 +63,6 @@ dimm1-sim: dimm2-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/dimm pullPolicy: Always @@ -82,7 +71,6 @@ dimm2-sim: dsm1-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/dsm pullPolicy: Always @@ -92,7 +80,6 @@ dsm1-sim: dsm2-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/dsm pullPolicy: Always @@ -102,7 +89,6 @@ dsm2-sim: mtdome-ess01-sim: enabled: true - namespace: *ns classifier: ess101 image: repository: ts-dockerhub.lsst.org/ess @@ -112,7 +98,6 @@ mtdome-ess01-sim: mtdome-ess02-sim: enabled: true - namespace: *ns classifier: ess102 image: repository: ts-dockerhub.lsst.org/ess @@ -122,7 +107,6 @@ mtdome-ess02-sim: mtdome-ess03-sim: enabled: true - namespace: *ns classifier: ess103 image: repository: ts-dockerhub.lsst.org/ess @@ -132,7 +116,6 @@ mtdome-ess03-sim: tma-ess01-sim: enabled: true - namespace: *ns classifier: ess1 image: repository: ts-dockerhub.lsst.org/ess @@ -142,7 +125,6 @@ tma-ess01-sim: tma-ess104-sim: enabled: true - namespace: *ns classifier: ess104 image: repository: ts-dockerhub.lsst.org/ess @@ -152,7 +134,6 @@ tma-ess104-sim: tma-ess105-sim: enabled: true - namespace: *ns classifier: ess105 image: repository: ts-dockerhub.lsst.org/ess @@ -162,7 +143,6 @@ tma-ess105-sim: weatherforecast: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/weatherforecast pullPolicy: Always diff --git a/applications/obssys/README.md b/applications/obssys/README.md index 5b104bd913..ddc8b78e21 100644 --- a/applications/obssys/README.md +++ b/applications/obssys/README.md @@ -6,9 +6,9 @@ Deployment for the Observatory System CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector | object | `{"secrets":[]}` | The default 
image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | @@ -17,4 +17,3 @@ Deployment for the Observatory System CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| namespace | string | `""` | | diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md index 6b1c1cecff..23ed7cb5c3 100644 --- a/applications/simonyitel/README.md +++ b/applications/simonyitel/README.md @@ -6,9 +6,9 @@ Deployment for the Simonyi Survey Telescope CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector | object | `{"secrets":[]}` | The default image tag for all of the child applications imageTag: "" | | csc_collector.secrets | list | `[]` | This section holds secret specifications. 
Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | @@ -17,4 +17,3 @@ Deployment for the Simonyi Survey Telescope CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| namespace | string | `""` | | diff --git a/applications/simonyitel/values-tucson-teststand.yaml b/applications/simonyitel/values-tucson-teststand.yaml index a4e015f4c7..281956b2fe 100644 --- a/applications/simonyitel/values-tucson-teststand.yaml +++ b/applications/simonyitel/values-tucson-teststand.yaml @@ -1,8 +1,4 @@ -namespace: &ns simonyitel - csc_collector: - namespace: *ns - secrets: - name: nexus3-docker key: pull-secret @@ -16,7 +12,6 @@ csc_collector: ccheaderservice: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/headerservice tag: ts-v3.1.11_c0029 @@ -41,7 +36,6 @@ ccheaderservice: ccoods: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/ccoods pullPolicy: Always @@ -120,7 +114,6 @@ ccoods: lasertracker1-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/lasertracker pullPolicy: Always @@ -129,21 
+122,18 @@ lasertracker1-sim: mtaircompressor1-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtaircompressor pullPolicy: Always mtaircompressor2-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtaircompressor pullPolicy: Always mtaos: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtaos pullPolicy: Always @@ -172,7 +162,6 @@ mtaos: mtcamhexapod-sim: enabled: true - namespace: *ns classifier: mthexapod1 image: repository: ts-dockerhub.lsst.org/mthexapod @@ -182,7 +171,6 @@ mtcamhexapod-sim: mtdome-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtdome pullPolicy: Always @@ -191,21 +179,18 @@ mtdome-sim: mtdometrajectory: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtdometrajectory pullPolicy: Always mtm1m3-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtm1m3_sim pullPolicy: Always mtm2-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/m2 pullPolicy: Always @@ -214,7 +199,6 @@ mtm2-sim: mtm2hexapod-sim: enabled: true - namespace: *ns classifier: mthexapod2 image: repository: ts-dockerhub.lsst.org/mthexapod @@ -224,7 +208,6 @@ mtm2hexapod-sim: mtmount-sim: enabled: true - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtmount pullPolicy: Always @@ -232,7 +215,6 @@ mtmount-sim: RUN_ARG: --simulate mtptg: - namespace: *ns image: repository: ts-dockerhub.lsst.org/ptkernel pullPolicy: Always @@ -240,7 +222,6 @@ mtptg: TELESCOPE: MT mtrotator-sim: - namespace: *ns image: repository: ts-dockerhub.lsst.org/mtrotator pullPolicy: Always diff --git a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml index 7e90b14590..12dd1f9e7d 100644 --- a/environments/templates/auxtel-application.yaml +++ b/environments/templates/auxtel-application.yaml @@ -31,6 +31,8 @@ spec: value: "https://{{ .Values.fqdn }}" - name: 
"global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "auxtel" - name: "global.controlSystemImageTag" value: {{ .Values.controlSystemImageTag | quote }} - name: "global.controlSystemSiteTag" diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml index dd34165569..7f7d7f5e38 100644 --- a/environments/templates/calsys-application.yaml +++ b/environments/templates/calsys-application.yaml @@ -31,6 +31,8 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "calsys" - name: "global.controlSystemImageTag" value: {{ .Values.controlSystemImageTag | quote }} - name: "global.controlSystemSiteTag" diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml index 02c83a35a8..a1efceff31 100644 --- a/environments/templates/control-system-test-application.yaml +++ b/environments/templates/control-system-test-application.yaml @@ -31,6 +31,8 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "control-system-test" - name: "global.controlSystemImageTag" value: {{ .Values.controlSystemImageTag | quote }} - name: "global.controlSystemSiteTag" diff --git a/environments/templates/dmocps-application.yaml b/environments/templates/dmocps-application.yaml index 05ca7587d5..3911677456 100644 --- a/environments/templates/dmocps-application.yaml +++ b/environments/templates/dmocps-application.yaml @@ -23,6 +23,8 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "uws" - name: "global.controlSystemImageTag" value: {{ 
.Values.controlSystemImageTag | quote }} - name: "global.controlSystemSiteTag" diff --git a/environments/templates/eas-application.yaml b/environments/templates/eas-application.yaml index 9196b1d9fb..f8c39ed3fd 100644 --- a/environments/templates/eas-application.yaml +++ b/environments/templates/eas-application.yaml @@ -31,6 +31,8 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "eas" - name: "global.controlSystemImageTag" value: {{ .Values.controlSystemImageTag | quote }} - name: "global.controlSystemSiteTag" diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml index 4ec2e2323f..a856462112 100644 --- a/environments/templates/obssys-application.yaml +++ b/environments/templates/obssys-application.yaml @@ -31,6 +31,8 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "obssys" - name: "global.controlSystemImageTag" value: {{ .Values.controlSystemImageTag | quote }} - name: "global.controlSystemSiteTag" diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml index 5999f3ed50..112934afc2 100644 --- a/environments/templates/simonyitel-application.yaml +++ b/environments/templates/simonyitel-application.yaml @@ -31,6 +31,8 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "simonyitel" - name: "global.controlSystemImageTag" value: {{ .Values.controlSystemImageTag | quote }} - name: "global.controlSystemSiteTag" diff --git a/environments/values.yaml b/environments/values.yaml index a19864a768..01e03befc1 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -210,6 
+210,10 @@ applications: # The following settings are used for the control system +# -- Application namespace for the control system deployment +# @default -- None, must be set +controlSystemAppNamespace: "" + # -- Image tag for the control system deployment # @default -- None, must be set controlSystemImageTag: "" diff --git a/shared/charts/csc/README.md b/shared/charts/csc/README.md index fbfa1b4d3a..86ce313979 100644 --- a/shared/charts/csc/README.md +++ b/shared/charts/csc/README.md @@ -20,7 +20,6 @@ A Helm chart for deploying the Control System CSCs. | imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | | isPrimary | bool | `true` | This marks the CSC as the primary object to sync upon system starts. This is set to false when two CSCs of the same flavor are deployed (one real, one simulator) to mark the simulator so it can be filtered out for automatic syncing. | | nameOverride | string | `""` | Provide an alternate name for the application | -| namespace | string | `""` | Namespace for the given CSC application | | nfsMountpoint | list | `[]` | This section holds the information necessary to create a NFS mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _path_ (The path inside the container to mount), _readOnly_ (This sets if the NFS mount is read only or read/write), _server_ (The hostname of the NFS server), _serverPath_ (The path exported by the NFS server) | | nodeSelector | object | `{}` | This allows the specification of using specific nodes to run the pod | | pvcMountpoint | list | `[]` | This section holds the information necessary to create a volume mount for the container. 
If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _path_ (The path inside the container to mount), _accessMode_ (This sets the required access mode for the volume mount), _claimSize_ (The requested physical disk space size for the volume mount), _storageClass_ (The Kubernetes provided storage class), _ids.uid_ (OPTIONAL: An alternative UID for mounting), _ids.gid_ (OPTIONAL: An alternative GID for mounting) | diff --git a/shared/charts/csc/templates/configfile-configmap.yaml b/shared/charts/csc/templates/configfile-configmap.yaml index 63fe6ca33c..3bab1bfbdf 100644 --- a/shared/charts/csc/templates/configfile-configmap.yaml +++ b/shared/charts/csc/templates/configfile-configmap.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "chart.name" . }}-configfile - namespace: {{ .Values.namespace }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} data: {{ .Values.configfile.filename }}: {{ .Values.configfile.content | toYaml | indent 4 }} diff --git a/shared/charts/csc/templates/entrypoint-configmap.yaml b/shared/charts/csc/templates/entrypoint-configmap.yaml index 5a9597536e..e9f347899b 100644 --- a/shared/charts/csc/templates/entrypoint-configmap.yaml +++ b/shared/charts/csc/templates/entrypoint-configmap.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "chart.name" . }}-entrypoint - namespace: {{ .Values.namespace }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} data: .startup.sh: {{ .Values.entrypoint | toYaml | indent 4 }} diff --git a/shared/charts/csc/templates/job.yaml b/shared/charts/csc/templates/job.yaml index 6890ac0dc3..3913701abd 100644 --- a/shared/charts/csc/templates/job.yaml +++ b/shared/charts/csc/templates/job.yaml @@ -3,7 +3,7 @@ apiVersion: batch/v1 kind: Job metadata: name: {{ include "chart.name" . 
}} - namespace: {{ .Values.namespace }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} labels: {{- include "csc.labels" . | nindent 4 }} {{- with .Values.annotations }} diff --git a/shared/charts/csc/templates/mountpoint-pvc.yaml b/shared/charts/csc/templates/mountpoint-pvc.yaml index ccc9ca871a..63d69fd88f 100644 --- a/shared/charts/csc/templates/mountpoint-pvc.yaml +++ b/shared/charts/csc/templates/mountpoint-pvc.yaml @@ -5,7 +5,7 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: name: {{ include "chart.name" . }}-{{ $values.name }}-pvc - namespace: {{ .Values.namespace }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} {{- if $values.ids }} annotations: {{- if $values.ids.uid }} diff --git a/shared/charts/csc/templates/service.yaml b/shared/charts/csc/templates/service.yaml index f5188be55e..584813660d 100644 --- a/shared/charts/csc/templates/service.yaml +++ b/shared/charts/csc/templates/service.yaml @@ -5,7 +5,7 @@ metadata: labels: csc: {{ include "csc.name" . }} name: {{ include "chart.name" . }}-service - namespace: {{ .Values.namespace }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} spec: {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} loadBalancerIP: {{ .Values.service.loadBalancerIP }} diff --git a/shared/charts/csc/values.yaml b/shared/charts/csc/values.yaml index feee09b63f..5a190bcf50 100644 --- a/shared/charts/csc/values.yaml +++ b/shared/charts/csc/values.yaml @@ -1,7 +1,5 @@ # -- Flag to enable the given CSC application enabled: false -# -- Namespace for the given CSC application -namespace: "" # -- Provide an alternate name for the application nameOverride: "" # -- This marks the CSC as the primary object to sync upon system starts. 
diff --git a/shared/charts/csc_collector/templates/vault-secret.yaml b/shared/charts/csc_collector/templates/vault-secret.yaml index 6e9fbe351b..9f5b7ac80c 100644 --- a/shared/charts/csc_collector/templates/vault-secret.yaml +++ b/shared/charts/csc_collector/templates/vault-secret.yaml @@ -4,7 +4,7 @@ apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: name: {{ $secret.name }} - namespace: {{ $.Values.namespace }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} spec: path: {{ $.Values.global.vaultSecretsPath }}/{{ $secret.key }} type: {{ default "Opaque" $secret.type }} diff --git a/shared/values/values_control_system_apps.yaml b/shared/values/values_control_system_apps.yaml index 50b4b94769..70e29b7d0b 100644 --- a/shared/values/values_control_system_apps.yaml +++ b/shared/values/values_control_system_apps.yaml @@ -1,13 +1,4 @@ -# The namespace for the application -namespace: "" - -# -- The default image tag for all of the child applications -# imageTag: "" - csc_collector: - # -- The site-specific name used for handling configurable CSCs - # siteTag: "" - # -- This section holds secret specifications. # Each object listed can have the following attributes defined: # _name_ (The name used by pods to access the secret) @@ -30,6 +21,10 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + # -- Image tag for the control system deployment # @default -- Set by ArgoCD controlSystemImageTag: "" From 5e5b9d54f98775b8240e421f88be6d01fb19bfc3 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 16 May 2023 09:41:30 -0700 Subject: [PATCH 486/588] Formatting for Chart.yaml. 
--- applications/auxtel/Chart.yaml | 74 ++++---- applications/calsys/Chart.yaml | 16 +- applications/control-system-test/Chart.yaml | 10 +- applications/dmocps/Chart.yaml | 16 +- applications/eas/Chart.yaml | 190 ++++++++++---------- applications/obssys/Chart.yaml | 40 ++--- applications/simonyitel/Chart.yaml | 148 +++++++-------- 7 files changed, 247 insertions(+), 247 deletions(-) diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index 7267774587..bfcf914e14 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -3,40 +3,40 @@ name: auxtel version: 1.0.0 description: Deployment for the Auxiliary Telescope CSCs dependencies: - - name: csc_collector - version: 1.0.0 - - name: hexapod-sim - version: 1.0.0 - - name: csc - alias: ataos - version: 1.0.0 - - name: csc - alias: atdome - version: 1.0.0 - - name: csc - alias: atdome-sim - version: 1.0.0 - - name: csc - alias: atdometrajectory - version: 1.0.0 - - name: csc - alias: atheaderservice - version: 1.0.0 - - name: csc - alias: athexapod - version: 1.0.0 - - name: csc - alias: athexapod-sim - version: 1.0.0 - - name: csc - alias: atoods - version: 1.0.0 - - name: csc - alias: atptg - version: 1.0.0 - - name: csc - alias: atspectrograph - version: 1.0.0 - - name: csc - alias: atspectrograph-sim - version: 1.0.0 +- name: csc_collector + version: 1.0.0 +- name: hexapod-sim + version: 1.0.0 +- name: csc + alias: ataos + version: 1.0.0 +- name: csc + alias: atdome + version: 1.0.0 +- name: csc + alias: atdome-sim + version: 1.0.0 +- name: csc + alias: atdometrajectory + version: 1.0.0 +- name: csc + alias: atheaderservice + version: 1.0.0 +- name: csc + alias: athexapod + version: 1.0.0 +- name: csc + alias: athexapod-sim + version: 1.0.0 +- name: csc + alias: atoods + version: 1.0.0 +- name: csc + alias: atptg + version: 1.0.0 +- name: csc + alias: atspectrograph + version: 1.0.0 +- name: csc + alias: atspectrograph-sim + version: 1.0.0 diff --git 
a/applications/calsys/Chart.yaml b/applications/calsys/Chart.yaml index d0150c7f0b..011979f109 100644 --- a/applications/calsys/Chart.yaml +++ b/applications/calsys/Chart.yaml @@ -3,11 +3,11 @@ name: calsys version: 1.0.0 description: Deployment for the Calibration System CSCs dependencies: - - name: csc_collector - version: 1.0.0 - - name: csc - alias: gcheaderservice1 - version: 1.0.0 - - name: csc - alias: simulation-gencam - version: 1.0.0 +- name: csc_collector + version: 1.0.0 +- name: csc + alias: gcheaderservice1 + version: 1.0.0 +- name: csc + alias: simulation-gencam + version: 1.0.0 diff --git a/applications/control-system-test/Chart.yaml b/applications/control-system-test/Chart.yaml index 4bb5ded773..bcc8c6bb50 100644 --- a/applications/control-system-test/Chart.yaml +++ b/applications/control-system-test/Chart.yaml @@ -3,8 +3,8 @@ name: control-system-test version: 1.0.0 description: Deployment for the Test CSCs and Integration Testing Workflows dependencies: - - name: csc_collector - version: 1.0.0 - - name: csc - alias: test42 - version: 1.0.0 +- name: csc_collector + version: 1.0.0 +- name: csc + alias: test42 + version: 1.0.0 diff --git a/applications/dmocps/Chart.yaml b/applications/dmocps/Chart.yaml index aeab461001..dd3827f119 100644 --- a/applications/dmocps/Chart.yaml +++ b/applications/dmocps/Chart.yaml @@ -3,11 +3,11 @@ name: dmocps version: 1.0.0 description: Deployment for the DM OCPS CSCs dependencies: - - name: csc_collector - version: 1.0.0 - - name: csc - alias: atocps - version: 1.0.0 - - name: csc - alias: ccocps - version: 1.0.0 +- name: csc_collector + version: 1.0.0 +- name: csc + alias: atocps + version: 1.0.0 +- name: csc + alias: ccocps + version: 1.0.0 diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml index b09c1e206e..6af654f0f0 100644 --- a/applications/eas/Chart.yaml +++ b/applications/eas/Chart.yaml @@ -3,98 +3,98 @@ name: eas version: 1.0.0 description: Deployment for the Environmental Awareness 
Systems CSCs dependencies: - - name: csc_collector - version: 1.0.0 - - name: csc - alias: auxtel-ess01 - version: 1.0.0 - - name: csc - alias: auxtel-ess01-sim - version: 1.0.0 - - name: csc - alias: auxtel-ess02 - version: 1.0.0 - - name: csc - alias: auxtel-ess02-sim - version: 1.0.0 - - name: csc - alias: auxtel-ess03 - version: 1.0.0 - - name: csc - alias: auxtel-ess03-sim - version: 1.0.0 - - name: csc - alias: auxtel-ess04 - version: 1.0.0 - - name: csc - alias: auxtel-ess04-sim - version: 1.0.0 - - name: csc - alias: calibhill-ess01 - version: 1.0.0 - - name: csc - alias: calibhill-ess01-sim - version: 1.0.0 - - name: csc - alias: dimm1 - version: 1.0.0 - - name: csc - alias: dimm1-sim - version: 1.0.0 - - name: csc - alias: dimm2 - version: 1.0.0 - - name: csc - alias: dimm2-sim - version: 1.0.0 - - name: csc - alias: dsm1 - version: 1.0.0 - - name: csc - alias: dsm1-sim - version: 1.0.0 - - name: csc - alias: dsm2 - version: 1.0.0 - - name: csc - alias: dsm2-sim - version: 1.0.0 - - name: csc - alias: mtdome-ess01 - version: 1.0.0 - - name: csc - alias: mtdome-ess01-sim - version: 1.0.0 - - name: csc - alias: mtdome-ess02 - version: 1.0.0 - - name: csc - alias: mtdome-ess02-sim - version: 1.0.0 - - name: csc - alias: mtdome-ess03 - version: 1.0.0 - - name: csc - alias: mtdome-ess03-sim - version: 1.0.0 - - name: csc - alias: tma-ess01 - version: 1.0.0 - - name: csc - alias: tma-ess01-sim - version: 1.0.0 - - name: csc - alias: tma-ess104 - version: 1.0.0 - - name: csc - alias: tma-ess104-sim - version: 1.0.0 - - name: csc - alias: tma-ess105 - version: 1.0.0 - - name: csc - alias: tma-ess105-sim - version: 1.0.0 - - name: csc - alias: weatherforecast - version: 1.0.0 +- name: csc_collector + version: 1.0.0 +- name: csc + alias: auxtel-ess01 + version: 1.0.0 +- name: csc + alias: auxtel-ess01-sim + version: 1.0.0 +- name: csc + alias: auxtel-ess02 + version: 1.0.0 +- name: csc + alias: auxtel-ess02-sim + version: 1.0.0 +- name: csc + alias: auxtel-ess03 + 
version: 1.0.0 +- name: csc + alias: auxtel-ess03-sim + version: 1.0.0 +- name: csc + alias: auxtel-ess04 + version: 1.0.0 +- name: csc + alias: auxtel-ess04-sim + version: 1.0.0 +- name: csc + alias: calibhill-ess01 + version: 1.0.0 +- name: csc + alias: calibhill-ess01-sim + version: 1.0.0 +- name: csc + alias: dimm1 + version: 1.0.0 +- name: csc + alias: dimm1-sim + version: 1.0.0 +- name: csc + alias: dimm2 + version: 1.0.0 +- name: csc + alias: dimm2-sim + version: 1.0.0 +- name: csc + alias: dsm1 + version: 1.0.0 +- name: csc + alias: dsm1-sim + version: 1.0.0 +- name: csc + alias: dsm2 + version: 1.0.0 +- name: csc + alias: dsm2-sim + version: 1.0.0 +- name: csc + alias: mtdome-ess01 + version: 1.0.0 +- name: csc + alias: mtdome-ess01-sim + version: 1.0.0 +- name: csc + alias: mtdome-ess02 + version: 1.0.0 +- name: csc + alias: mtdome-ess02-sim + version: 1.0.0 +- name: csc + alias: mtdome-ess03 + version: 1.0.0 +- name: csc + alias: mtdome-ess03-sim + version: 1.0.0 +- name: csc + alias: tma-ess01 + version: 1.0.0 +- name: csc + alias: tma-ess01-sim + version: 1.0.0 +- name: csc + alias: tma-ess104 + version: 1.0.0 +- name: csc + alias: tma-ess104-sim + version: 1.0.0 +- name: csc + alias: tma-ess105 + version: 1.0.0 +- name: csc + alias: tma-ess105-sim + version: 1.0.0 +- name: csc + alias: weatherforecast + version: 1.0.0 diff --git a/applications/obssys/Chart.yaml b/applications/obssys/Chart.yaml index e86d40a82f..8e25270e58 100644 --- a/applications/obssys/Chart.yaml +++ b/applications/obssys/Chart.yaml @@ -3,23 +3,23 @@ name: obssys version: 1.0.0 description: Deployment for the Observatory System CSCs dependencies: - - name: csc_collector - version: 1.0.0 - - name: csc - alias: atqueue - version: 1.0.0 - - name: csc - alias: atscheduler - version: 1.0.0 - - name: csc - alias: authorize - version: 1.0.0 - - name: csc - alias: mtqueue - version: 1.0.0 - - name: csc - alias: mtscheduler - version: 1.0.0 - - name: csc - alias: watcher - version: 1.0.0 +- 
name: csc_collector + version: 1.0.0 +- name: csc + alias: atqueue + version: 1.0.0 +- name: csc + alias: atscheduler + version: 1.0.0 +- name: csc + alias: authorize + version: 1.0.0 +- name: csc + alias: mtqueue + version: 1.0.0 +- name: csc + alias: mtscheduler + version: 1.0.0 +- name: csc + alias: watcher + version: 1.0.0 diff --git a/applications/simonyitel/Chart.yaml b/applications/simonyitel/Chart.yaml index ab000119f0..ef31522b69 100644 --- a/applications/simonyitel/Chart.yaml +++ b/applications/simonyitel/Chart.yaml @@ -3,77 +3,77 @@ name: simonyitel version: 1.0.0 description: Deployment for the Simonyi Survey Telescope CSCs dependencies: - - name: csc_collector - version: 1.0.0 - - name: csc - alias: ccheaderservice - version: 1.0.0 - - name: csc - alias: ccoods - version: 1.0.0 - - name: csc - alias: lasertracker1 - version: 1.0.0 - - name: csc - alias: lasertracker1-sim - version: 1.0.0 - - name: csc - alias: mtaircompressor1 - version: 1.0.0 - - name: csc - alias: mtaircompressor1-sim - version: 1.0.0 - - name: csc - alias: mtaircompressor2 - version: 1.0.0 - - name: csc - alias: mtaircompressor2-sim - version: 1.0.0 - - name: csc - alias: mtaos - version: 1.0.0 - - name: csc - alias: mtcamhexapod - version: 1.0.0 - - name: csc - alias: mtcamhexapod-sim - version: 1.0.0 - - name: csc - alias: mtdome - version: 1.0.0 - - name: csc - alias: mtdome-sim - version: 1.0.0 - - name: csc - alias: mtdometrajectory - version: 1.0.0 - - name: csc - alias: mtm1m3-sim - version: 1.0.0 - - name: csc - alias: mtm2 - version: 1.0.0 - - name: csc - alias: mtm2-sim - version: 1.0.0 - - name: csc - alias: mtm2hexapod - version: 1.0.0 - - name: csc - alias: mtm2hexapod-sim - version: 1.0.0 - - name: csc - alias: mtmount - version: 1.0.0 - - name: csc - alias: mtmount-sim - version: 1.0.0 - - name: csc - alias: mtptg - version: 1.0.0 - - name: csc - alias: mtrotator - version: 1.0.0 - - name: csc - alias: mtrotator-sim - version: 1.0.0 +- name: csc_collector + version: 
1.0.0 +- name: csc + alias: ccheaderservice + version: 1.0.0 +- name: csc + alias: ccoods + version: 1.0.0 +- name: csc + alias: lasertracker1 + version: 1.0.0 +- name: csc + alias: lasertracker1-sim + version: 1.0.0 +- name: csc + alias: mtaircompressor1 + version: 1.0.0 +- name: csc + alias: mtaircompressor1-sim + version: 1.0.0 +- name: csc + alias: mtaircompressor2 + version: 1.0.0 +- name: csc + alias: mtaircompressor2-sim + version: 1.0.0 +- name: csc + alias: mtaos + version: 1.0.0 +- name: csc + alias: mtcamhexapod + version: 1.0.0 +- name: csc + alias: mtcamhexapod-sim + version: 1.0.0 +- name: csc + alias: mtdome + version: 1.0.0 +- name: csc + alias: mtdome-sim + version: 1.0.0 +- name: csc + alias: mtdometrajectory + version: 1.0.0 +- name: csc + alias: mtm1m3-sim + version: 1.0.0 +- name: csc + alias: mtm2 + version: 1.0.0 +- name: csc + alias: mtm2-sim + version: 1.0.0 +- name: csc + alias: mtm2hexapod + version: 1.0.0 +- name: csc + alias: mtm2hexapod-sim + version: 1.0.0 +- name: csc + alias: mtmount + version: 1.0.0 +- name: csc + alias: mtmount-sim + version: 1.0.0 +- name: csc + alias: mtptg + version: 1.0.0 +- name: csc + alias: mtrotator + version: 1.0.0 +- name: csc + alias: mtrotator-sim + version: 1.0.0 From 131ca99ada1ee33217b396f5b253bcc3f1910437 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 16 May 2023 09:46:51 -0700 Subject: [PATCH 487/588] Add script for updating shared chart versions. 
--- pyproject.toml | 1 + src/phalanx/control_system/__init__.py | 0 .../update_shared_chart_version.py | 79 +++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 src/phalanx/control_system/__init__.py create mode 100644 src/phalanx/control_system/update_shared_chart_version.py diff --git a/pyproject.toml b/pyproject.toml index 9b7834e8eb..a74db26743 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,7 @@ requires-python = ">=3.11" [project.scripts] phalanx = "phalanx.cli:main" +update-shared-chart-version = "phalanx.control_system.update_shared_chart_version:run" [project.urls] Homepage = "https://phalanx.lsst.io" diff --git a/src/phalanx/control_system/__init__.py b/src/phalanx/control_system/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/phalanx/control_system/update_shared_chart_version.py b/src/phalanx/control_system/update_shared_chart_version.py new file mode 100644 index 0000000000..ec49782814 --- /dev/null +++ b/src/phalanx/control_system/update_shared_chart_version.py @@ -0,0 +1,79 @@ +import argparse +import pathlib + +import yaml + +APPS_DIR = "applications" + +DIR_MAP = {"csc": "csc", "collector": "csc_collector"} + + +def shared_chart(appdir: pathlib.Path, shared_dir: str) -> bool: + """Determine if app directory has templates dir as link. + + Parameters + ---------- + appdir: `pathlib.Path` + The application directory to check. + shared_dir: `str` + The shared directory to make sure the link resolves to. + + Returns + ------- + `bool`: True if the link resolves to the requested shared dir. 
+ """ + try: + chart_dir = appdir / "charts" / shared_dir + return ( + chart_dir.is_symlink() and chart_dir.resolve().name == shared_dir + ) + except OSError: + return False + + +def main(opts: argparse.Namespace) -> None: + print( + f"Updating {opts.app_type} apps Helm chart " + f"to version {opts.chart_version}" + ) + + apps = pathlib.PosixPath(APPS_DIR) + dirlist = list(apps.iterdir()) + for appdir in dirlist: + if not shared_chart(appdir, DIR_MAP[opts.app_type]): + continue + + chart = appdir / "Chart.yaml" + + with chart.open() as ifile: + values = yaml.safe_load(ifile) + + dependencies = values["dependencies"] + for dependency in dependencies: + if dependency["name"] == DIR_MAP[opts.app_type]: + dependency["version"] = opts.chart_version + + # print(appdir, values) + + with chart.open("w") as ofile: + yaml.dump(values, ofile, sort_keys=False) + + +def run() -> None: + description = [ + "Update version for apps using the csc or shared Helm chart" + ] + parser = argparse.ArgumentParser( + description=" ".join(description), + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "app_type", + choices=list(DIR_MAP.keys()), + help="Specify the application type to set the chart version for.", + ) + parser.add_argument( + "chart_version", help="The version of the Helm chart to set." + ) + args = parser.parse_args() + main(args) From a0cbd7d707bf715e7055ebc4778f05560c1c52b9 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 12 Jun 2023 18:02:22 -0700 Subject: [PATCH 488/588] Checkpoint love work. 
--- applications/love/Chart.yaml | 16 ++ applications/love/README.md | 19 ++ applications/love/charts/csc | 1 + applications/love/charts/csc_collector | 1 + .../love/charts/love-manager/Chart.yaml | 4 + .../love/charts/love-manager/README.md | 87 +++++++++ .../love-manager/templates/_helpers.tpl | 174 +++++++++++++++++ .../templates/database-service.yaml | 11 ++ .../templates/database-statefulset.yaml | 58 ++++++ .../templates/manager-deployment.yaml | 55 ++++++ .../love-manager/templates/manager-hpa.yaml | 32 ++++ .../templates/manager-service.yaml | 11 ++ .../templates/redis-deployment.yaml | 45 +++++ .../love-manager/templates/redis-service.yaml | 11 ++ .../love-manager/templates/vault-secret.yaml | 9 + .../templates/view-backup-cronjob.yaml | 83 ++++++++ .../love/charts/love-manager/values.yaml | 180 ++++++++++++++++++ .../love/charts/love-nginx/Chart.yaml | 4 + applications/love/charts/love-nginx/README.md | 38 ++++ .../charts/love-nginx/templates/_helpers.tpl | 47 +++++ .../charts/love-nginx/templates/config.yaml | 8 + .../charts/love-nginx/templates/ingress.yaml | 27 +++ .../love-nginx/templates/love-config.yaml | 8 + .../templates/nginx-deployment.yaml | 89 +++++++++ .../charts/love-nginx/templates/service.yaml | 16 ++ .../love-nginx/templates/volumeclaim.yaml | 12 ++ .../love/charts/love-nginx/values.yaml | 103 ++++++++++ .../love/charts/love-producer/Chart.yaml | 4 + .../love/charts/love-producer/README.md | 20 ++ .../love-producer/templates/_helpers.tpl | 53 ++++++ .../love-producer/templates/deployment.yaml | 68 +++++++ .../love/charts/love-producer/values.yaml | 30 +++ .../love/values-tucson-teststand.yaml | 174 +++++++++++++++++ applications/love/values.yaml | 1 + environments/templates/love-application.yaml | 51 +++++ environments/values-tucson-teststand.yaml | 1 + environments/values.yaml | 3 + 37 files changed, 1554 insertions(+) create mode 100644 applications/love/Chart.yaml create mode 100644 applications/love/README.md create mode 120000 
applications/love/charts/csc create mode 120000 applications/love/charts/csc_collector create mode 100644 applications/love/charts/love-manager/Chart.yaml create mode 100644 applications/love/charts/love-manager/README.md create mode 100644 applications/love/charts/love-manager/templates/_helpers.tpl create mode 100644 applications/love/charts/love-manager/templates/database-service.yaml create mode 100644 applications/love/charts/love-manager/templates/database-statefulset.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-deployment.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-hpa.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-service.yaml create mode 100644 applications/love/charts/love-manager/templates/redis-deployment.yaml create mode 100644 applications/love/charts/love-manager/templates/redis-service.yaml create mode 100644 applications/love/charts/love-manager/templates/vault-secret.yaml create mode 100644 applications/love/charts/love-manager/templates/view-backup-cronjob.yaml create mode 100644 applications/love/charts/love-manager/values.yaml create mode 100644 applications/love/charts/love-nginx/Chart.yaml create mode 100644 applications/love/charts/love-nginx/README.md create mode 100644 applications/love/charts/love-nginx/templates/_helpers.tpl create mode 100644 applications/love/charts/love-nginx/templates/config.yaml create mode 100644 applications/love/charts/love-nginx/templates/ingress.yaml create mode 100644 applications/love/charts/love-nginx/templates/love-config.yaml create mode 100644 applications/love/charts/love-nginx/templates/nginx-deployment.yaml create mode 100644 applications/love/charts/love-nginx/templates/service.yaml create mode 100644 applications/love/charts/love-nginx/templates/volumeclaim.yaml create mode 100644 applications/love/charts/love-nginx/values.yaml create mode 100644 applications/love/charts/love-producer/Chart.yaml 
create mode 100644 applications/love/charts/love-producer/README.md create mode 100644 applications/love/charts/love-producer/templates/_helpers.tpl create mode 100644 applications/love/charts/love-producer/templates/deployment.yaml create mode 100644 applications/love/charts/love-producer/values.yaml create mode 100644 applications/love/values-tucson-teststand.yaml create mode 120000 applications/love/values.yaml create mode 100644 environments/templates/love-application.yaml diff --git a/applications/love/Chart.yaml b/applications/love/Chart.yaml new file mode 100644 index 0000000000..bda5f7ab69 --- /dev/null +++ b/applications/love/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: love +version: 1.0.0 +description: Deployment for the LSST Operators Visualization Environment +dependencies: +- name: csc_collector + version: 1.0.0 +- name: csc + alias: love-commander + version: 1.0.0 +- name: love-manager + version: 1.0.0 +- name: love-nginx + version: 1.0.0 +- name: love-producer + version: 1.0.0 diff --git a/applications/love/README.md b/applications/love/README.md new file mode 100644 index 0000000000..051b49d522 --- /dev/null +++ b/applications/love/README.md @@ -0,0 +1,19 @@ +# love + +Deployment for the LSST Operators Visualization Environment + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | +| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/love/charts/csc b/applications/love/charts/csc new file mode 120000 index 0000000000..294046490f --- /dev/null +++ b/applications/love/charts/csc @@ -0,0 +1 @@ +../../../shared/charts/csc \ No newline at end of file diff --git a/applications/love/charts/csc_collector b/applications/love/charts/csc_collector new file mode 120000 index 0000000000..3ced684acb --- /dev/null +++ b/applications/love/charts/csc_collector @@ -0,0 +1 @@ +../../../shared/charts/csc_collector \ No newline at end of file diff --git a/applications/love/charts/love-manager/Chart.yaml b/applications/love/charts/love-manager/Chart.yaml new file mode 100644 index 0000000000..cee16201a6 --- /dev/null +++ b/applications/love/charts/love-manager/Chart.yaml @@ -0,0 +1,4 @@ +name: love-manager +apiVersion: v2 +version: 1.0.0 +description: Helm chart for the LOVE manager service. 
diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md new file mode 100644 index 0000000000..ad1deed8da --- /dev/null +++ b/applications/love/charts/love-manager/README.md @@ -0,0 +1,87 @@ +# love-manager + +Helm chart for the LOVE manager service. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the LOVE manager pods | +| autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| database.affinity | object | `{}` | Affinity rules for the LOVE database pods | +| database.env.POSTGRES_DB | string | `"postgres"` | Define the database type | +| database.env.POSTGRES_USER | string | `"postgres"` | Define the database user | +| database.envSecrets.POSTGRES_PASSWORD | string | `"db-pass"` | The database password secret key name | +| database.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the database image | +| database.image.repository | string | `"postgres"` | The database image to use | +| database.image.tag | string | `"12.0"` | The tag to use for the database image | +| database.nodeSelector | object | `{}` | Node selection rules for the LOVE database pods | +| database.port | int | `5432` | The database port number | +| database.resources | object | `{}` | Resource specifications for the LOVE database pods | +| database.storage.accessMode | string | `"ReadWriteMany"` | The access mode for the database storage | +| database.storage.claimSize | 
string | `"2Gi"` | The size of the database storage request | +| database.storage.name | string | `"love-manager-database"` | Label for the database storage point | +| database.storage.path | string | `"/var/lib/postgresql/data"` | Path within the running container | +| database.storage.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | +| database.tolerations | list | `[]` | Toleration specifications for the LOVE database pods | +| env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager | +| env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| env.DB_NAME | string | `"postgres"` | The name of the database being used for the LOVE manager. Must match `database.env.POSTGRES_DB` | +| env.DB_PORT | int | `5432` | The port for the database Must match `database.port` | +| env.DB_USER | string | `"postgres"` | The database user needed for access from the LOVE manager. 
Must match `database.env.POSTGRES_USER` | +| env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | +| env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | +| envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager admin user password secret key name | +| envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager cmd_user user password secret key name | +| envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager process connection password secret key name | +| envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | +| envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager secret secret key name | +| envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager user user password secret key name | +| image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager image | +| image.repository | string | `"lsstts/love-manager"` | The LOVE manager image to use | +| image.tag | string | `nil` | | +| nodeSelector | object | `{}` | Node selection rules for the LOVE manager pods | +| ports.container | int | `8000` | The port on the container for normal communications | +| ports.node | int | `30000` | The port on the node for normal communcations | +| readinessProbe | object | `{}` | Configuration for the LOVE manager pods readiness probe | +| redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | +| redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name | +| redis.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the redis image | +| redis.image.repository | string | `"redis"` | The redis image to use | +| redis.image.tag | string | `"5.0.3"` | The tag to use for the redis image | +| redis.nodeSelector | object | `{}` | Node selection rules for the LOVE redis pods | +| redis.port | int | `6379` | The redis port number | +| redis.resources | object | `{}` | Resource specifications for the LOVE redis pods | +| redis.tolerations | list | `[]` | Toleration specifications for the LOVE redis pods | +| replicas | int | `1` | Set the default number of LOVE manager pod replicas | +| resources | object | `{}` | Resource specifications for the LOVE manager pods | +| tolerations | list | `[]` | Toleration specifications for the LOVE manager pods | +| viewBackup.affinity | object | `{}` | Affinity rules for the LOVE view backup pods | +| viewBackup.enabled | bool | `false` | Whether view backup is active | +| viewBackup.env | object 
| `{}` | Place to specify additional environment variables for the view backup job | +| viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image | +| viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use | +| viewBackup.image.tag | string | `"develop"` | The tag to use for the view backup image | +| viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods | +| viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods | +| viewBackup.restartPolicy | string | `"Never"` | The restart policy type for the view backup cronjob | +| viewBackup.schedule | string | `"0 0 1 1 *"` | The view backup job schedule in cron format | +| viewBackup.tolerations | list | `[]` | Toleration specifications for the LOVE view backup pods | +| viewBackup.ttlSecondsAfterFinished | string | `""` | Time after view backup job finishes before deletion (ALPHA) | diff --git a/applications/love/charts/love-manager/templates/_helpers.tpl b/applications/love/charts/love-manager/templates/_helpers.tpl new file mode 100644 index 0000000000..661706d2ca --- /dev/null +++ b/applications/love/charts/love-manager/templates/_helpers.tpl @@ -0,0 +1,174 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "love-manager.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "love-manager.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "love-manager.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "love-manager.labels" -}} +helm.sh/chart: {{ include "love-manager.chart" . }} +{{ include "love-manager.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "love-manager.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-manager.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Handle environment parameters + */}} +{{- define "helpers.envFromList" -}} +{{- $secretName := .secretName }} +{{- range $var, $value := .env }} +{{- $item := dict "var" $var "value" $value "secretName" $secretName }} +{{ include "helpers.envType" $item }} +{{- end }} +{{- end }} + +{{/* +Determine type of environment +*/}} +{{- define "helpers.envType" -}} +- name: {{ .var }} +{{- if ne .secretName "" }} + valueFrom: + secretKeyRef: + name: {{ .secretName }}-secrets + key: {{ .value }} +{{- else }} + value: {{ .value | quote }} +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name for database. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "love-manager.database.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- printf "%s-database" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s-database" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Common labels - database +*/}} +{{- define "love-manager.database.labels" -}} +helm.sh/chart: {{ include "love-manager.chart" . }} +{{ include "love-manager.database.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels - database +*/}} +{{- define "love-manager.database.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-manager.name" . }} +app.kubernetes.io/instance: {{ include "love-manager.database.fullname" . }} +{{- end }} + +{{/* +Create a default fully qualified app name for redis. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "love-manager.redis.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- printf "%s-redis" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s-redis" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Common labels - redis +*/}} +{{- define "love-manager.redis.labels" -}} +helm.sh/chart: {{ include "love-manager.chart" . }} +{{ include "love-manager.redis.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels - redis +*/}} +{{- define "love-manager.redis.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-manager.name" . 
}} +app.kubernetes.io/instance: {{ include "love-manager.redis.fullname" . }} +{{- end }} + +{{/* +Create a default fully qualified app name for the view backup. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "love-manager.view-backup.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- printf "%s-view-backup" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s-view-backup" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Common labels - view backup +*/}} +{{- define "love-manager.view-backup.labels" -}} +helm.sh/chart: {{ include "love-manager.chart" . }} +{{ include "love-manager.view-backup.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels - view backup +*/}} +{{- define "love-manager.view-backup.selectorLabels" -}} +type: love-manager-view-backup-job +{{- end }} diff --git a/applications/love/charts/love-manager/templates/database-service.yaml b/applications/love/charts/love-manager/templates/database-service.yaml new file mode 100644 index 0000000000..520d57000d --- /dev/null +++ b/applications/love/charts/love-manager/templates/database-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "love-manager.database.fullname" . }}-service + namespace: {{ $.Values.global.controlSystemAppNamespace }} +spec: + selector: + app.kubernetes.io/instance: {{ include "love-manager.database.fullname" . 
}} + ports: + - port: {{ .Values.database.port }} + diff --git a/applications/love/charts/love-manager/templates/database-statefulset.yaml b/applications/love/charts/love-manager/templates/database-statefulset.yaml new file mode 100644 index 0000000000..010aa93358 --- /dev/null +++ b/applications/love/charts/love-manager/templates/database-statefulset.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "love-manager.database.fullname" . }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} + labels: + {{- include "love-manager.database.labels" . | nindent 4 }} +spec: + serviceName: {{ include "love-manager.database.fullname" . }}-service + selector: + matchLabels: + {{- include "love-manager.database.selectorLabels" . | nindent 6 }} + replicas: {{ .Values.database.replicas | default 1 }} + template: + metadata: + labels: + {{- include "love-manager.database.selectorLabels" . | nindent 8 }} + spec: + containers: + - name: {{ include "love-manager.database.fullname" . 
}} + image: "{{ .Values.database.image.repository }}:{{ .Values.database.image.tag }}" + imagePullPolicy: {{ .Values.database.image.pullPolicy }} + ports: + - containerPort: {{ .Values.database.port }} + volumeMounts: + - mountPath: {{ .Values.database.storage.path }} + name: {{ .Values.database.storage.name }}-pvc + env: + {{- $data := dict "env" .Values.database.env "secretName" "" }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- $data := dict "env" .Values.database.envSecrets "secretName" .Values.envSecretKeyName }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- with $.Values.database.resources }} + resources: + {{- toYaml $.Values.database.resources | nindent 10 }} + {{- end }} + {{- with $.Values.database.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.database.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.database.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: {{ .Values.database.storage.name }}-pvc + spec: + accessModes: + - {{ .Values.database.storage.accessMode | quote }} + storageClassName: {{ .Values.database.storage.storageClass }} + resources: + requests: + storage: {{ .Values.database.storage.claimSize }} diff --git a/applications/love/charts/love-manager/templates/manager-deployment.yaml b/applications/love/charts/love-manager/templates/manager-deployment.yaml new file mode 100644 index 0000000000..39abc8590d --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "love-manager.fullname" . }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} + labels: + {{- include "love-manager.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "love-manager.selectorLabels" . 
| nindent 6 }} + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + template: + metadata: + labels: + {{- include "love-manager.selectorLabels" . | nindent 8 }} + spec: + containers: + - name: {{ include "love-manager.fullname" . }} + {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ .Values.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.ports.container }} + env: + {{- $data := dict "env" .Values.env "secretName" "" }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- $data := dict "secretName" .Values.envSecretKeyName "env" .Values.envSecrets }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- with $.Values.resources }} + resources: + {{- toYaml $.Values.resources | nindent 10 }} + {{- end }} + {{- with $.Values.readinessProbe }} + readinessProbe: + {{- toYaml $.Values.readinessProbe | nindent 10 }} + {{- end }} + {{- if $.Values.image.nexus3 }} + imagePullSecrets: + - name: nexus3-docker + {{- end }} + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-hpa.yaml b/applications/love/charts/love-manager/templates/manager-hpa.yaml new file mode 100644 index 0000000000..f7e1b8c2ea --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-hpa.yaml @@ -0,0 +1,32 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "love-manager.fullname" . }} + labels: + {{- include "love-manager.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "love-manager.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-service.yaml b/applications/love/charts/love-manager/templates/manager-service.yaml new file mode 100644 index 0000000000..0f9fc95e34 --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "love-manager.fullname" . }}-service + namespace: {{ .Values.namespace }} +spec: + selector: + app.kubernetes.io/instance: {{ include "love-manager.fullname" . }} + ports: + - port: {{ .Values.ports.container }} + diff --git a/applications/love/charts/love-manager/templates/redis-deployment.yaml b/applications/love/charts/love-manager/templates/redis-deployment.yaml new file mode 100644 index 0000000000..bc4421a7e4 --- /dev/null +++ b/applications/love/charts/love-manager/templates/redis-deployment.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "love-manager.redis.fullname" . }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} + labels: + {{- include "love-manager.redis.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "love-manager.redis.selectorLabels" . 
| nindent 6 }} + replicas: {{ .Values.redis.replicas | default 1 }} + template: + metadata: + labels: + {{- include "love-manager.redis.selectorLabels" . | nindent 8 }} + spec: + containers: + - name: {{ include "love-manager.redis.fullname" . }} + image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}" + imagePullPolicy: {{ .Values.redis.image.pullPolicy }} + command: [ "redis-server", "--appendonly", "yes", "--requirepass", "$(REDIS_PASS)" ] + ports: + - containerPort: {{ .Values.redis.port }} + env: + {{- $data := dict "env" .Values.redis.env "secretName" "" }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- $data := dict "env" .Values.redis.envSecrets "secretName" .Values.envSecretKeyName }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- with $.Values.redis.resources }} + resources: + {{- toYaml $.Values.redis.resources | nindent 10 }} + {{- end }} + {{- with $.Values.redis.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.redis.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.redis.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} diff --git a/applications/love/charts/love-manager/templates/redis-service.yaml b/applications/love/charts/love-manager/templates/redis-service.yaml new file mode 100644 index 0000000000..5afec4bc11 --- /dev/null +++ b/applications/love/charts/love-manager/templates/redis-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "love-manager.redis.fullname" . }}-service + namespace: {{ $.Values.global.controlSystemAppNamespace }} +spec: + selector: + app.kubernetes.io/instance: {{ include "love-manager.redis.fullname" . 
}} + ports: + - port: {{ .Values.redis.port }} + diff --git a/applications/love/charts/love-manager/templates/vault-secret.yaml b/applications/love/charts/love-manager/templates/vault-secret.yaml new file mode 100644 index 0000000000..e6e927d144 --- /dev/null +++ b/applications/love/charts/love-manager/templates/vault-secret.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ $.Values.envSecretKeyName }}-secrets + namespace: {{ $.Values.global.controlSystemAppNamespace }} +spec: + path: {{ $.Values.global.vaultSecretsPath }}/ts/software/{{ $.Values.envSecretKeyName }} + type: Opaque diff --git a/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml new file mode 100644 index 0000000000..b3103f5078 --- /dev/null +++ b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml @@ -0,0 +1,83 @@ +{{- if .Values.viewBackup.enabled }} +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "love-manager.view-backup.fullname" . }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} + labels: + {{- include "love-manager.view-backup.labels" . | nindent 4 }} +spec: + concurrencyPolicy: Forbid + schedule: {{ .Values.viewBackup.schedule | quote }} + jobTemplate: + metadata: + labels: + {{- include "love-manager.view-backup.labels" . | nindent 8 }} + spec: + completions: 1 + {{- if .Values.viewBackup.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.viewBackup.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + labels: + {{- include "love-manager.view-backup.labels" . | nindent 12 }} + spec: + containers: + - name: {{ include "love-manager.view-backup.fullname" . 
}} + {{- $imageTag := .Values.viewBackup.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ .Values.viewBackup.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ .Values.viewBackup.image.pullPolicy }} + envFrom: + - configMapRef: + name: csc-env-config + env: + - name: PGHOST + value: {{ .Values.env.DB_HOST | quote }} + - name: PGPORT + value: {{ .Values.database.port | quote }} + - name: PGDATABASE + value: {{ .Values.database.env.POSTGRES_DB | quote }} + - name: PGUSER + value: {{ .Values.database.env.POSTGRES_USER | quote }} + - name: LOVE_SITE + value: {{ .Values.env.LOVE_SITE | quote }} + {{- range $env_var, $env_value := .Values.viewBackup.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + - name: PGPASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.namespace }}-secrets + key: {{ .Values.database.envSecrets.POSTGRES_PASSWORD }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: lfa + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: lfa + key: aws-secret-access-key + {{- with $.Values.viewBackup.resources }} + resources: + {{- toYaml $.Values.viewBackup.resources | nindent 16 }} + {{- end }} + restartPolicy: {{ .Values.viewBackup.restartPolicy }} + imagePullSecrets: + - name: nexus3-docker + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 12 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml $ | nindent 12 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml $ | nindent 12 }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml new file mode 100644 index 0000000000..e0598107f0 --- /dev/null +++ b/applications/love/charts/love-manager/values.yaml @@ -0,0 +1,180 @@ +image: + # -- The LOVE manager image to use + repository: lsstts/love-manager + # str -- The tag to use for the LOVE 
manager image + tag: + # -- The pull policy on the LOVE manager image + pullPolicy: IfNotPresent +ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 +# -- The top-level secret key name that houses the rest of the secrets +envSecretKeyName: love +env: + # -- The site tag where LOVE is being run + LOVE_SITE: local + # -- The external URL from the NGINX server for LOVE + SERVER_URL: love.lsst.local + # -- Set the manager to use LFA storage + REMOTE_STORAGE: true + # -- Set the hostname for the Jira instance + JIRA_API_HOSTNAME: jira.lsstcorp.org + # -- Set the Jira project ID + JIRA_PROJECT_ID: 14601 + # -- Set the URL for the OLE instance + OLE_API_HOSTNAME: site.lsst.local + # -- Set the URI for the 1st LDAP server + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local + # -- Set the URI for the 2nd LDAP server + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local + # -- Set the URI for the 3rd LDAP server + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local + # -- The URL path for the LOVE producer websocket host + LOVE_PRODUCER_WEBSOCKET_HOST: love-service/manager/ws/subscription + # -- Label for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_HOSTNAME: love-commander-service + # -- Port number for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_PORT: 5000 + # -- The type of database engine being used for the LOVE manager + DB_ENGINE: postgresql + # -- The name of the database being used for the LOVE manager. + # Must match `database.env.POSTGRES_DB` + DB_NAME: postgres + # -- The database user needed for access from the LOVE manager. 
+ # Must match `database.env.POSTGRES_USER` + DB_USER: postgres + # -- The name of the database service + DB_HOST: love-manager-database-service + # -- The port for the database + # Must match `database.port` + DB_PORT: 5432 + # -- The name of the redis service + REDIS_HOST: love-manager-redis-service +envSecrets: + # -- The LOVE manager secret secret key name + SECRET_KEY: manager-secret-key + # -- The LOVE manager process connection password secret key name + PROCESS_CONNECTION_PASS: process-connection-pass + # -- The LOVE manager admin user password secret key name + ADMIN_USER_PASS: admin-user-pass + # -- The LOVE manager user user password secret key name + USER_USER_PASS: user-user-pass + # -- The LOVE manager cmd_user user password secret key name + CMD_USER_PASS: cmd-user-pass + # -- The database password secret key name. + # Must match `database.envSecrets.POSTGRES_PASSWORD` + DB_PASS: db-pass + # -- The redis password secret key name. + # Must match `redis.envSecrets.REDIS_PASS` + REDIS_PASS: redis-pass +# -- Set the default number of LOVE manager pod replicas +replicas: 1 +autoscaling: + # -- Whether automatic horizontal scaling is active + enabled: true + # -- The allowed minimum number of replicas + minReplicas: 1 + # -- The allowed maximum number of replicas + maxReplicas: 100 + # -- The percentage of CPU utilization that will trigger the scaling + targetCPUUtilizationPercentage: 80 + # -- (int) The percentage of memory utilization that will trigger the scaling + targetMemoryUtilizationPercentage: "" +# -- Resource specifications for the LOVE manager pods +resources: {} +# -- Node selection rules for the LOVE manager pods +nodeSelector: {} +# -- Toleration specifications for the LOVE manager pods +tolerations: [] +# -- Affinity rules for the LOVE manager pods +affinity: {} +# -- Configuration for the LOVE manager pods readiness probe +readinessProbe: {} +database: + image: + # -- The database image to use + repository: postgres + # -- The tag to use 
for the database image + tag: "12.0" + # -- The pull policy for the database image + pullPolicy: IfNotPresent + # -- The database port number + port: 5432 + storage: + # -- Label for the database storage point + name: love-manager-database + # -- Path within the running container + path: /var/lib/postgresql/data + # -- The storage class to request the disk allocation from + storageClass: local-store + # -- The access mode for the database storage + accessMode: ReadWriteMany + # -- The size of the database storage request + claimSize: 2Gi + # -- Resource specifications for the LOVE database pods + resources: {} + # -- Node selection rules for the LOVE database pods + nodeSelector: {} + # -- Toleration specifications for the LOVE database pods + tolerations: [] + # -- Affinity rules for the LOVE database pods + affinity: {} + env: + # -- Define the database type + POSTGRES_DB: postgres + # -- Define the database user + POSTGRES_USER: postgres + envSecrets: + # -- The database password secret key name + POSTGRES_PASSWORD: db-pass +redis: + image: + # -- The redis image to use + repository: redis + # -- The tag to use for the redis image + tag: 5.0.3 + # -- The pull policy for the redis image + pullPolicy: IfNotPresent + envSecrets: + # -- The redis password secret key name + REDIS_PASS: redis-pass + # -- The redis port number + port: 6379 + # -- Resource specifications for the LOVE redis pods + resources: {} + # -- Node selection rules for the LOVE redis pods + nodeSelector: {} + # -- Toleration specifications for the LOVE redis pods + tolerations: [] + # -- Affinity rules for the LOVE redis pods + affinity: {} +viewBackup: + # -- Whether view backup is active + enabled: false + image: + # -- The view backup image to use + repository: lsstts/love-view-backup + # -- The tag to use for the view backup image + tag: develop + # -- The pull policy to use for the view backup image + pullPolicy: IfNotPresent + # -- Place to specify additional environment variables for the 
view backup job + env: {} + # -- The view backup job schedule in cron format + schedule: "0 0 1 1 *" + # -- The restart policy type for the view backup cronjob + restartPolicy: Never + # -- Time after view backup job finishes before deletion (ALPHA) + ttlSecondsAfterFinished: "" + # -- Resource specifications for the LOVE view backup pods + resources: {} + # -- Node selection rules for the LOVE view backup pods + nodeSelector: {} + # -- Toleration specifications for the LOVE view backup pods + tolerations: [] + # -- Affinity rules for the LOVE view backup pods + affinity: {} diff --git a/applications/love/charts/love-nginx/Chart.yaml b/applications/love/charts/love-nginx/Chart.yaml new file mode 100644 index 0000000000..53060a9776 --- /dev/null +++ b/applications/love/charts/love-nginx/Chart.yaml @@ -0,0 +1,4 @@ +name: love-nginx +apiVersion: v2 +version: 1.0.0 +description: Helm chart for the LOVE Nginx server. diff --git a/applications/love/charts/love-nginx/README.md b/applications/love/charts/love-nginx/README.md new file mode 100644 index 0000000000..c5ce11091e --- /dev/null +++ b/applications/love/charts/love-nginx/README.md @@ -0,0 +1,38 @@ +# love-nginx + +Helm chart for the LOVE Nginx server. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the NGINX pod | +| image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the NGINX image | +| image.repository | string | `"nginx"` | The NGINX image to use | +| image.tag | string | `"1.14.2"` | The tag to use for the NGINX image | +| imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. 
If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | +| ingress.annotations | object | `{}` | Annotations for the NGINX ingress | +| ingress.className | string | `""` | Assign the Ingress class name | +| ingress.hostname | string | `"love.local"` | Hostname for the NGINX ingress | +| ingress.httpPath | string | `"/"` | Path name associated with the NGINX ingress | +| ingress.pathType | string | `""` | Set the Kubernetes path type for the NGINX ingress | +| initContainers.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the frontend image | +| initContainers.frontend.image.repository | string | `"lsstts/love-frontend"` | The frontend image to use | +| initContainers.frontend.image.tag | string | `nil` | | +| initContainers.manager.command | list | `["/bin/sh","-c","mkdir -p /usr/src/love-manager/media/thumbnails; mkdir -p /usr/src/love-manager/media/configs; cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -uv /usr/src/love/manager/ui_framework/fixtures/thumbnails/* /usr/src/love-manager/media/thumbnails; cp -uv /usr/src/love/manager/api/fixtures/configs/* /usr/src/love-manager/media/configs"]` | The command to execute for the love-manager static content | +| initContainers.manager.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the love-manager static content image | +| initContainers.manager.image.repository | string | `"lsstts/love-manager"` | The static love-manager content image to use | +| initContainers.manager.image.tag | string | `nil` | | +| loveConfig | string | `"{\n \"alarms\": {\n \"minSeveritySound\": \"serious\",\n \"minSeverityNotification\": \"warning\"\n },\n \"camFeeds\": {\n \"generic\": \"/gencam\",\n \"allSky\": \"/gencam\"\n }\n}\n"` | Configuration specificiation for the LOVE service | +| namespace | string | `"love"` | The overall namespace for the application | +| nginxConfig | 
string | `"server {\n listen 80;\n server_name localhost;\n location / {\n root /usr/src/love-frontend;\n try_files $uri$args $uri$args/ $uri/ /index.html;\n }\n location /manager {\n proxy_pass http://love-manager-service:8000;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header Host $host;\n proxy_redirect off;\n }\n location /manager/static {\n alias /usr/src/love-manager/static;\n }\n location /manager/media {\n alias /usr/src/love-manager/media;\n }\n}\n"` | Configuration specification for the NGINX service | +| nodeSelector | object | `{}` | Node selection rules for the NGINX pod | +| ports.container | int | `80` | Container port for the NGINX service | +| ports.node | int | `30000` | Node port for the NGINX service | +| resources | object | `{}` | Resource specifications for the NGINX pod | +| serviceType | string | `"ClusterIP"` | Service type specification | +| staticStore.accessMode | string | `"ReadWriteMany"` | The access mode for the NGINX static store | +| staticStore.claimSize | string | `"2Gi"` | The size of the NGINX static store request | +| staticStore.name | string | `"love-nginx-static"` | Label for the NGINX static store | +| staticStore.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | +| tolerations | list | `[]` | Toleration specifications for the NGINX pod | diff --git a/applications/love/charts/love-nginx/templates/_helpers.tpl b/applications/love/charts/love-nginx/templates/_helpers.tpl new file mode 100644 index 0000000000..75e97afd42 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "love-nginx.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "love-nginx.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "love-nginx.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "love-nginx.labels" -}} +helm.sh/chart: {{ include "love-nginx.chart" . }} +{{ include "love-nginx.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "love-nginx.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-nginx.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/love/charts/love-nginx/templates/config.yaml b/applications/love/charts/love-nginx/templates/config.yaml new file mode 100644 index 0000000000..c1eee8e834 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/config.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-conf +data: + nginx.conf: | +{{ .Values.nginxConfig | indent 4 }} diff --git a/applications/love/charts/love-nginx/templates/ingress.yaml b/applications/love/charts/love-nginx/templates/ingress.yaml new file mode 100644 index 0000000000..1f093ed75a --- /dev/null +++ b/applications/love/charts/love-nginx/templates/ingress.yaml @@ -0,0 +1,27 @@ +--- +{{- if eq .Values.serviceType "ClusterIP" }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ .Release.Name }}-ingress + namespace: {{ $.Values.global.controlSystemAppNamespace }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + rules: + - host: {{ .Values.ingress.hostname }} + http: + paths: + - path: {{ .Values.ingress.httpPath }} + pathType: {{ default "Prefix" .Values.ingress.pathType }} + backend: + service: + name: {{ .Release.Name }}-service + port: + number: {{ .Values.ports.container }} +{{- end }} diff --git a/applications/love/charts/love-nginx/templates/love-config.yaml b/applications/love/charts/love-nginx/templates/love-config.yaml new file mode 100644 index 0000000000..190a9e29a2 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/love-config.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: love-conf +data: + default.json: | +{{ .Values.loveConfig | indent 4 }} diff --git a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml new file mode 100644 index 0000000000..c8ac9facca --- /dev/null +++ b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "love-nginx.fullname" . }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} + labels: + {{- include "love-nginx.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "love-nginx.selectorLabels" . | nindent 6 }} + replicas: {{ .Values.replicas | default 1 }} + template: + metadata: + labels: + {{- include "love-nginx.selectorLabels" . 
| nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + initContainers: + - name: love-frontend + {{- $feImageTag := .Values.initContainers.frontend.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ .Values.initContainers.frontend.image.repository }}:{{ $feImageTag }}" + imagePullPolicy: {{ .Values.initContainers.frontend.image.pullPolicy }} + command: ["/bin/sh", "-c", "cp -Rv /usr/src/love/ /usr/src/love-frontend"] + volumeMounts: + - mountPath: /usr/src + name: {{ .Values.staticStore.name }} + - name: love-manager-static + {{- $mgImageTag := .Values.initContainers.manager.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ .Values.initContainers.manager.image.repository }}:{{ $mgImageTag }}" + imagePullPolicy: {{ .Values.initContainers.manager.image.pullPolicy }} + {{- with .Values.initContainers.manager.command }} + command: + {{- range $item := $.Values.initContainers.manager.command }} + - {{ $item | quote }} + {{- end }} + {{- end }} + volumeMounts: + - mountPath: /usr/src + name: {{ .Values.staticStore.name }} + containers: + - name: {{ include "love-nginx.fullname" . 
}} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: {{ .Values.ports.container }} + volumeMounts: + - mountPath: /etc/nginx/conf.d + readOnly: true + name: nginx-conf + - mountPath: /usr/src + name: {{ .Values.staticStore.name }} + - mountPath: /usr/src/love-manager/media/configs + name: love-conf + {{- with $.Values.resources }} + resources: + {{- toYaml $.Values.resources | nindent 10 }} + {{- end }} + volumes: + - name: nginx-conf + configMap: + name: nginx-conf + items: + - key: nginx.conf + path: nginx.conf + - name: {{ .Values.staticStore.name }} + persistentVolumeClaim: + claimName: {{ .Values.staticStore.name }}-pvc + - name: love-conf + configMap: + name: love-conf + items: + - key: default.json + path: default.json + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} diff --git a/applications/love/charts/love-nginx/templates/service.yaml b/applications/love/charts/love-nginx/templates/service.yaml new file mode 100644 index 0000000000..fea87197b3 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/service.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "love-nginx.fullname" . }}-service + namespace: {{ $.Values.global.controlSystemAppNamespace }} +spec: + selector: + app.kubernetes.io/instance: {{ include "love-nginx.fullname" . 
}} + type: {{ .Values.serviceType }} + ports: + - port: {{ .Values.ports.container }} + targetPort: {{ .Values.ports.container }} + {{- if ne .Values.serviceType "ClusterIP" }} + nodePort: {{ .Values.ports.node }} + {{- end }} diff --git a/applications/love/charts/love-nginx/templates/volumeclaim.yaml b/applications/love/charts/love-nginx/templates/volumeclaim.yaml new file mode 100644 index 0000000000..3f09833420 --- /dev/null +++ b/applications/love/charts/love-nginx/templates/volumeclaim.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.staticStore.name }}-pvc +spec: + accessModes: + - {{ .Values.staticStore.accessMode | quote }} + resources: + requests: + storage: {{ .Values.staticStore.claimSize }} + storageClassName: {{ .Values.staticStore.storageClass }} diff --git a/applications/love/charts/love-nginx/values.yaml b/applications/love/charts/love-nginx/values.yaml new file mode 100644 index 0000000000..43e96cb8e8 --- /dev/null +++ b/applications/love/charts/love-nginx/values.yaml @@ -0,0 +1,103 @@ +# -- The overall namespace for the application +namespace: love +image: + # -- The NGINX image to use + repository: nginx + # -- The tag to use for the NGINX image + tag: 1.14.2 + # -- The pull policy on the NGINX image + pullPolicy: IfNotPresent +# -- Service type specification +serviceType: ClusterIP +ports: + # -- Container port for the NGINX service + container: 80 + # -- Node port for the NGINX service + node: 30000 +ingress: + # -- Hostname for the NGINX ingress + hostname: love.local + # -- Path name associated with the NGINX ingress + httpPath: / + # -- Set the Kubernetes path type for the NGINX ingress + pathType: "" + # -- Assign the Ingress class name + className: "" + # -- Annotations for the NGINX ingress + annotations: {} +# -- Configuration specification for the NGINX service +nginxConfig: | + server { + listen 80; + server_name localhost; + location / { + root /usr/src/love-frontend; + try_files 
$uri$args $uri$args/ $uri/ /index.html; + } + location /manager { + proxy_pass http://love-manager-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /manager/static { + alias /usr/src/love-manager/static; + } + location /manager/media { + alias /usr/src/love-manager/media; + } + } +# -- Configuration specificiation for the LOVE service +loveConfig: | + { + "alarms": { + "minSeveritySound": "serious", + "minSeverityNotification": "warning" + }, + "camFeeds": { + "generic": "/gencam", + "allSky": "/gencam" + } + } +# -- The list of pull secrets needed for the images. +# If this section is used, each object listed can have the following attributes defined: +# _name_ (The label identifying the pull-secret to use) +imagePullSecrets: [] +initContainers: + frontend: + image: + # -- The frontend image to use + repository: lsstts/love-frontend + # str -- The tag to use for the frontend image + tag: + # -- The pull policy to use for the frontend image + pullPolicy: IfNotPresent + manager: + image: + # -- The static love-manager content image to use + repository: lsstts/love-manager + # str -- The tag to use for the love-manager static content image + tag: + # -- The pull policy to use for the love-manager static content image + pullPolicy: IfNotPresent + # -- The command to execute for the love-manager static content + command: ["/bin/sh", "-c", "mkdir -p /usr/src/love-manager/media/thumbnails; mkdir -p /usr/src/love-manager/media/configs; cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -uv /usr/src/love/manager/ui_framework/fixtures/thumbnails/* /usr/src/love-manager/media/thumbnails; cp -uv /usr/src/love/manager/api/fixtures/configs/* /usr/src/love-manager/media/configs"] +staticStore: + # -- Label for the NGINX static store + name: love-nginx-static + # -- The storage class to request the disk allocation from + 
storageClass: local-store + # -- The access mode for the NGINX static store + accessMode: ReadWriteMany + # -- The size of the NGINX static store request + claimSize: 2Gi +# -- Resource specifications for the NGINX pod +resources: {} +# -- Node selection rules for the NGINX pod +nodeSelector: {} +# -- Toleration specifications for the NGINX pod +tolerations: [] +# -- Affinity rules for the NGINX pod +affinity: {} diff --git a/applications/love/charts/love-producer/Chart.yaml b/applications/love/charts/love-producer/Chart.yaml new file mode 100644 index 0000000000..101bd0ad9e --- /dev/null +++ b/applications/love/charts/love-producer/Chart.yaml @@ -0,0 +1,4 @@ +name: love-producer +apiVersion: v2 +version: 1.0.0 +description: Helm chart for the LOVE producers. diff --git a/applications/love/charts/love-producer/README.md b/applications/love/charts/love-producer/README.md new file mode 100644 index 0000000000..a894d0ad9a --- /dev/null +++ b/applications/love/charts/love-producer/README.md @@ -0,0 +1,20 @@ +# love-producer + +Helm chart for the LOVE producers. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the LOVE producer pods | +| env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | +| envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | +| image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | +| image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | +| image.tag | string | `nil` | | +| nodeSelector | object | `{}` | Node selection rules for the LOVE producer pods | +| podAnnotations | object | `{}` | This allows the specification of pod annotations. 
| +| producers | object | `{}` | This sections sets the list of producers to use. The producers should be specified like: _name_: _CSC name:index_ Example: ataos: ATAOS:0 | +| replicaCount | int | `1` | Set the replica count for the LOVE producers | +| resources | object | `{}` | Resource specifications for the LOVE producer pods | +| tolerations | list | `[]` | Toleration specifications for the LOVE producer pods | diff --git a/applications/love/charts/love-producer/templates/_helpers.tpl b/applications/love/charts/love-producer/templates/_helpers.tpl new file mode 100644 index 0000000000..b012503f89 --- /dev/null +++ b/applications/love/charts/love-producer/templates/_helpers.tpl @@ -0,0 +1,53 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "love-producer.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "love-producer.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create app name from release and producer name. +*/}} +{{- define "love-producer.appName" -}} +{{ printf "%s-%s" .Release.Name .Producer | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "love-producer.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "love-producer.labels" -}} +helm.sh/chart: {{ include "love-producer.chart" . }} +{{ include "love-producer.selectorLabels" . }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "love-producer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-producer.name" . }} +{{- end }} diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml new file mode 100644 index 0000000000..34473e45c1 --- /dev/null +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -0,0 +1,68 @@ +{{- range $producer, $csc := .Values.producers }} +{{ $appName := printf "%s-%s" $.Release.Name $producer | trunc 63 | trimSuffix "-" }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ $appName }} + labels: + {{- include "love-producer.labels" $ | nindent 4 }} +spec: + replicas: {{ $.Values.replicaCount }} + selector: + matchLabels: + {{- include "love-producer.selectorLabels" $ | nindent 6 }} + app.kubernetes.io/instance: {{ $appName }} + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml $ | nindent 8 }} + {{- end }} + labels: + {{- include "love-producer.selectorLabels" $ | nindent 8 }} + app.kubernetes.io/instance: {{ $appName }} + spec: + containers: + - name: {{ $appName }} + {{- $imageTag := $.Values.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ $.Values.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + envFrom: + - configMapRef: + name: csc-env-config + - secretRef: + name: ts-salkafka + env: + - name: LOVE_CSC_PRODUCER + value: {{ $csc | quote }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end 
}} + {{- range $env_var, $env_value := $.Values.envSecrets }} + - name: {{ $env_var }} + valueFrom: + secretKeyRef: + name: {{ $.Values.namespace }}-secrets + key: {{ $env_value }} + {{- end }} + {{- with $.Values.resources }} + resources: + {{- toYaml $.Values.resources | nindent 12 }} + {{- end }} + imagePullSecrets: + - name: nexus3-docker + {{- with $.Values.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/love-producer/values.yaml b/applications/love/charts/love-producer/values.yaml new file mode 100644 index 0000000000..f79fc5b13e --- /dev/null +++ b/applications/love/charts/love-producer/values.yaml @@ -0,0 +1,30 @@ +# -- Set the replica count for the LOVE producers +replicaCount: 1 +image: + # -- The LOVE producer image to use + repository: lsstts/love-producer + # str -- The tag to use for the LOVE producer image + tag: + # -- The pull policy on the LOVE producer image + pullPolicy: IfNotPresent +# -- This section holds a set of key, value pairs for environmental variables +env: + WEBSOCKET_HOST: love-nginx/manager/ws/subscription +# -- This section holds a set of key, value pairs for secrets +envSecrets: + PROCESS_CONNECTION_PASS: process-connection-pass +# -- This sections sets the list of producers to use. +# The producers should be specified like: +# _name_: _CSC name:index_ +# Example: ataos: ATAOS:0 +producers: {} +# -- This allows the specification of pod annotations. 
+podAnnotations: {} +# -- Resource specifications for the LOVE producer pods +resources: {} +# -- Node selection rules for the LOVE producer pods +nodeSelector: {} +# -- Toleration specifications for the LOVE producer pods +tolerations: [] +# -- Affinity rules for the LOVE producer pods +affinity: {} diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml new file mode 100644 index 0000000000..5ef1fee5e1 --- /dev/null +++ b/applications/love/values-tucson-teststand.yaml @@ -0,0 +1,174 @@ +csc_collector: + secrets: + - name: nexus3-docker + key: pull-secret + type: kubernetes.io/dockerconfigjson + - name: ts-salkafka + key: ts/software/ts-salkafka + - name: lfa + key: ts/software/lfa + +love-commander: + enabled: true + image: + repository: ts-dockerhub.lsst.org/love-commander + pullPolicy: Always + env: + S3_INSTANCE: tuc + envSecrets: + - name: AWS_ACCESS_KEY_ID + secretName: lfa + secretKey: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + secretName: lfa + secretKey: aws-secret-access-key + - name: MYS3_ACCESS_KEY + secretName: lfa + secretKey: aws-access-key-id + - name: MYS3_SECRET_KEY + secretName: lfa + secretKey: aws-secret-access-key + service: + use: true + port: 5000 + type: ClusterIP + +love-manager: + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + SERVER_URL: love.tu.lsst.org + OLE_API_HOSTNAME: tucson-teststand.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.tu.lsst.org + REDIS_CONFIG_EXPIRY: 5 + REDIS_CONFIG_CAPACITY: 5000 + LOVE_SITE: tucson + envSecrets: + AUTHLIST_USER_PASS: authlist-user-pass + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + resources: + requests: + cpu: 250m + memory: 500Mi + limits: + cpu: 750m + memory: 1000Mi + readinessProbe: + 
tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + database: + image: + repository: postgres + tag: '12.0' + pullPolicy: IfNotPresent + storage: + name: love-manager-database + path: /var/lib/postgresql + storageClass: rook-ceph-block + accessMode: ReadWriteOnce + claimSize: 2Gi + redis: + image: + repository: redis + tag: 5.0.3 + pullPolicy: IfNotPresent + viewBackup: + enabled: true + image: + repository: ts-dockerhub.lsst.org/love-view-backup + pullPolicy: Always + schedule: 0 12 * * * + +love-nginx: + image: + repository: nginx + tag: 1.13.1 + pullPolicy: Always + ingress: + hostname: love.tu.lsst.org + annotations: + kubernetes.io/ingress.class: nginx + imagePullSecrets: + - name: nexus3-docker + initContainers: + frontend: + image: + repository: ts-dockerhub.lsst.org/love-frontend + pullPolicy: Always + manager: + image: + repository: ts-dockerhub.lsst.org/love-manager-static + pullPolicy: Always + command: + - /bin/sh + - -c + - cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -Rv /usr/src/love/manager/media + /usr/src/love-manager + staticStore: + name: love-nginx-static + storageClass: rook-ceph-block + accessMode: ReadWriteOnce + claimSize: 2Gi + nginxConfig: | + server { + listen 80; + server_name localhost; + location / { + root /usr/src/love-frontend; + try_files $uri$args $uri$args/ $uri/ /index.html; + } + location /manager { + proxy_pass http://love-manager-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /media { + alias /usr/src/love-manager/media; + } + location /manager/static { + alias /usr/src/love-manager/static; + } + location /manager/media { + alias /usr/src/love-manager/media; + } + location /simcam { + proxy_pass http://simulation-gencam-service.calsys:5013; + proxy_redirect off; + } + } + loveConfig: | + { + "alarms": { + "minSeveritySound": "serious", + 
"minSeverityNotification": "warning" + }, + "camFeeds": { + "simcam": "/simcam" + }, + "efd": { + "defaultEfdInstance": "tucson_teststand_efd" + } + } + +love-producer: + image: + repository: ts-dockerhub.lsst.org/love-producer + pullPolicy: Always + env: + WEBSOCKET_HOST: love-nginx-service/manager/ws/subscription + producers: + ataos: ATAOS:0 + atcamera: ATCamera:0 diff --git a/applications/love/values.yaml b/applications/love/values.yaml new file mode 120000 index 0000000000..22e98f1fe2 --- /dev/null +++ b/applications/love/values.yaml @@ -0,0 +1 @@ +../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/environments/templates/love-application.yaml b/environments/templates/love-application.yaml new file mode 100644 index 0000000000..f899ba53dd --- /dev/null +++ b/environments/templates/love-application.yaml @@ -0,0 +1,51 @@ +{{- if .Values.love.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: love +spec: + finalizers: + - kubernetes +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: love + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: love + server: https://kubernetes.default.svc + project: default + source: + path: applications/love + repoURL: {{ .Values.repoURL }} + targetRevision: {{ .Values.targetRevision }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.controlSystemAppNamespace" + value: "love" + - name: "global.controlSystemImageTag" + value: {{ .Values.controlSystemImageTag | quote }} + - name: "global.controlSystemSiteTag" + value: {{ .Values.controlSystemSiteTag | quote }} + - name: "global.controlSystemTopicName" + value: {{ .Values.controlSystemTopicName | quote }} + - name: 
"global.controlSystemKafkaBrokerAddress" + value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemSchemaRegistryUrl" + value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} + - name: "global.controlSystemS3EndpointUrl" + value: {{ .Values.controlSystemS3EndpointUrl | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 67ae29f4fb..460d054fd7 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -10,6 +10,7 @@ applications: dmocps: true eas: true exposurelog: true + love: true narrativelog: true nublado: true obssys: true diff --git a/environments/values.yaml b/environments/values.yaml index 01e03befc1..36750105e1 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -91,6 +91,9 @@ applications: # -- Enable the livetap application livetap: false + # -- Enable the love control system application + love: false + # -- Enable the mobu application mobu: false From 36ff064c2d2778c1f501ca631f50e555c109d3a6 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 12 Jul 2023 12:35:08 -0700 Subject: [PATCH 489/588] Checkpoint love work. --- .../love/charts/love-producer/README.md | 12 ++--- .../love-producer/templates/deployment.yaml | 52 ++++++++++++++----- .../love/charts/love-producer/values.yaml | 26 ++++++---- .../love/values-tucson-teststand.yaml | 9 +++- 4 files changed, 69 insertions(+), 30 deletions(-) diff --git a/applications/love/charts/love-producer/README.md b/applications/love/charts/love-producer/README.md index a894d0ad9a..6c10b9b910 100644 --- a/applications/love/charts/love-producer/README.md +++ b/applications/love/charts/love-producer/README.md @@ -6,15 +6,15 @@ Helm chart for the LOVE producers. 
| Key | Type | Default | Description | |-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the LOVE producer pods | +| affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | +| annotations | object | `{}` | This allows for the specification of pod annotations. | | env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | | envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | | image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | | image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | | image.tag | string | `nil` | | -| nodeSelector | object | `{}` | Node selection rules for the LOVE producer pods | -| podAnnotations | object | `{}` | This allows the specification of pod annotations. | -| producers | object | `{}` | This sections sets the list of producers to use. The producers should be specified like: _name_: _CSC name:index_ Example: ataos: ATAOS:0 | +| nodeSelector | object | `{}` | Node selection rules applied to all LOVE producer pods | +| producers | obj | `[]` | This sections sets the list of producers to use. The producers are collected into producer groups and a CSC producers will be assigned to a given container. The producers should be specified like: _name_: The top-level name for the producer group. 
_cscs_: Map of _CSC name:index_ Example: ataos: ATAOS:0 The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | | replicaCount | int | `1` | Set the replica count for the LOVE producers | -| resources | object | `{}` | Resource specifications for the LOVE producer pods | -| tolerations | list | `[]` | Toleration specifications for the LOVE producer pods | +| resources | object | `{}` | Resource specifications applied to all LOVE producer pods | +| tolerations | list | `[]` | Toleration specifications applied to all LOVE producer pods | diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml index 34473e45c1..e2026a0e44 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -1,5 +1,5 @@ -{{- range $producer, $csc := .Values.producers }} -{{ $appName := printf "%s-%s" $.Release.Name $producer | trunc 63 | trimSuffix "-" }} +{{- range $producer := .Values.producers }} +{{ $appName := printf "%s-%s" $.Release.Name $producer.name | trunc 63 | trimSuffix "-" }} --- apiVersion: apps/v1 kind: Deployment @@ -15,7 +15,7 @@ spec: app.kubernetes.io/instance: {{ $appName }} template: metadata: - {{- with $.Values.podAnnotations }} + {{- with $.Values.annotations }} annotations: {{- toYaml $ | nindent 8 }} {{- end }} @@ -24,7 +24,8 @@ spec: app.kubernetes.io/instance: {{ $appName }} spec: containers: - - name: {{ $appName }} + {{- range $cName, $csc := $producer.cscs }} + - name: {{ $cName }} {{- $imageTag := $.Values.image.tag | default $.Values.global.controlSystemImageTag }} image: "{{ $.Values.image.repository }}:{{ $imageTag }}" imagePullPolicy: {{ $.Values.image.pullPolicy }} @@ -44,25 +45,50 @@ spec: - name: {{ $env_var }} 
valueFrom: secretKeyRef: - name: {{ $.Values.namespace }}-secrets + name: love-secrets key: {{ $env_value }} {{- end }} - {{- with $.Values.resources }} + {{- if or $.Values.resources $producer.resources }} + {{- $resources := "" }} + {{- if $producer.resources }} + {{- $resources = $producer.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} resources: - {{- toYaml $.Values.resources | nindent 12 }} + {{- toYaml $resources | nindent 12 }} {{- end }} + {{- end }} imagePullSecrets: - name: nexus3-docker - {{- with $.Values.nodeSelector }} + {{- if or $.Values.nodeSelector $producer.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $producer.nodeSelector }} + {{- $nodeSelector = $producer.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} nodeSelector: - {{- toYaml $ | nindent 8 }} + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $producer.affinity }} + {{- $affinity := "" }} + {{- if $producer.affinity }} + {{- $affinity = $producer.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} {{- end }} - {{- with $.Values.affinity }} affinity: - {{- toYaml $ | nindent 8 }} + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $producer.tolerations }} + {{- $tolerations := "" }} + {{- if $producer.tolerations }} + {{- $tolerations = $producer.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} {{- end }} - {{- with $.Values.tolerations }} tolerations: - {{- toYaml $ | nindent 8 }} + {{- toYaml $tolerations | nindent 8 }} {{- end }} {{- end }} diff --git a/applications/love/charts/love-producer/values.yaml b/applications/love/charts/love-producer/values.yaml index f79fc5b13e..161c42fe9f 100644 --- a/applications/love/charts/love-producer/values.yaml +++ b/applications/love/charts/love-producer/values.yaml @@ -13,18 +13,26 @@ env: # -- This section holds a set of key, value pairs for secrets envSecrets: 
PROCESS_CONNECTION_PASS: process-connection-pass -# -- This sections sets the list of producers to use. +# -- (obj) This sections sets the list of producers to use. +# The producers are collected into producer groups and a CSC producers +# will be assigned to a given container. # The producers should be specified like: -# _name_: _CSC name:index_ +# _name_: The top-level name for the producer group. +# _cscs_: Map of _CSC name:index_ # Example: ataos: ATAOS:0 -producers: {} -# -- This allows the specification of pod annotations. -podAnnotations: {} -# -- Resource specifications for the LOVE producer pods +# The following attributes are optional +# _resources_ (A resource object specification) +# _nodeSelector_ (A node selector object specification) +# _tolerations_ (A list of tolerations) +# _affinity_ (An affinity object specification) +producers: [] +# -- This allows for the specification of pod annotations. +annotations: {} +# -- Resource specifications applied to all LOVE producer pods resources: {} -# -- Node selection rules for the LOVE producer pods +# -- Node selection rules applied to all LOVE producer pods nodeSelector: {} -# -- Toleration specifications for the LOVE producer pods +# -- Toleration specifications applied to all LOVE producer pods tolerations: [] -# -- Affinity rules for the LOVE producer pods +# -- Affinity rules applied to all LOVE producer pods affinity: {} diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index 5ef1fee5e1..e3c5ad2c12 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -170,5 +170,10 @@ love-producer: env: WEBSOCKET_HOST: love-nginx-service/manager/ws/subscription producers: - ataos: ATAOS:0 - atcamera: ATCamera:0 + - name: auxtel + cscs: + ataos: ATAOS:0 + atdome: ATDome:0 + - name: latiss + cscs: + atcamera: ATCamera:0 From 7d3963b0b0d02a57acb52ecf263d148e32ece1ef Mon Sep 17 00:00:00 2001 From: 
Michael Reuter Date: Wed, 12 Jul 2023 14:40:33 -0700 Subject: [PATCH 490/588] Checkpoint love work. --- .../love/values-tucson-teststand.yaml | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index e3c5ad2c12..ccb37068a3 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -174,6 +174,73 @@ love-producer: cscs: ataos: ATAOS:0 atdome: ATDome:0 + atdometrajectory: ATDomeTrajectory:0 + athexapod: ATHexapod:0 + atmcs: ATMCS:0 + atpneumatics: ATPneumatics:0 + atptg: ATPtg:0 + - name: comcam + cscs: + cccamera: CCCamera:0 + ccheaderservice: CCHeaderService:0 + ccocps: OCPS:2 + ccoods: CCOODS:0 + - name: eas + cscs: + auxteless01: ESS:201 + auxteless02: ESS:202 + auxteless03: ESS:203 + auxteless04: ESS:204 + calibhilless01: ESS:301 + dimm1: DIMM:1 + dimm2: DIMM:2 + dsm1: DSM:1 + dsm2: DSM:2 + mtdomeess01: ESS:101 + mtdomeess02: ESS:102 + mtdomeess03: ESS:103 + tmaess01: ESS:1 + tmaess104: ESS:104 + tmaess105: ESS:105 + weatherforecast: WeatherForecast:0 + - name: genericcamera + cscs: + gcheaderservice1: GCHeaderService:1 + genericcamera1: GenericCamera:1 - name: latiss cscs: atcamera: ATCamera:0 + atheaderservice: ATHeaderService:0 + atocps: OCPS:1 + atoods: ATOODS:0 + atspectrograph: ATSpectrograph:0 + - name: mtm1m3 + cscs: + mtm1m3: MTM1M3:0 + - name: mtm2 + cscs: + mtm2: MTM2:0 + - name: obssys + cscs: + atscheduler: Scheduler:2 + atscriptqueue: ScriptQueue:2 + authorize: Authorize:0 + love: LOVE:0 + mtscheduler: Scheduler:1 + mtscriptqueue: ScriptQueue:1 + watcher: Watcher:0 + - name: simonyitel + cscs: + camerahexapod: MTHexapod:1 + m2hexapod: MTHexapod:2 + mtaos: MTAOS:0 + mtdome: MTDome:0 + mtdometrajectory: MTDomeTrajectory:0 + mtmount: MTMount:0 + mtptg: MTPtg:0 + mtrotator: MTRotator:0 + - name: simonyitel-support + cscs: + lasertracker1: LaserTracker:1 + mtaircompressor1: 
MTAirCompressor:1 + mtaircompressor2: MTAirCompressor:2 From ea1c2a9b2678493d5679d158b86373fc35c89e8d Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 17 Aug 2023 12:20:17 -0700 Subject: [PATCH 491/588] Updates for love. --- applications/auxtel/README.md | 8 +- applications/love/README.md | 125 +++++++++++++++++- .../love/charts/love-manager/README.md | 2 + .../templates/redis-configmap.yaml | 8 ++ .../templates/redis-deployment.yaml | 13 +- .../love/charts/love-manager/values.yaml | 5 + applications/love/charts/love-nginx/README.md | 2 +- .../templates/nginx-deployment.yaml | 2 +- .../love/charts/love-nginx/values.yaml | 2 +- .../love/values-tucson-teststand.yaml | 43 +++--- 10 files changed, 184 insertions(+), 26 deletions(-) create mode 100644 applications/love/charts/love-manager/templates/redis-configmap.yaml diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index e3c3504db2..933e46278f 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -6,7 +6,6 @@ Deployment for the Auxiliary Telescope CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | @@ -17,3 +16,10 @@ Deployment for the Auxiliary Telescope CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| hexapod-sim.enabled | bool | `false` | Flag to enable the given CSC application | +| hexapod-sim.image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | +| hexapod-sim.image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | +| hexapod-sim.image.repository | string | `"ts-dockerhub.lsst.org/hexapod_simulator"` | The Docker registry name of the container image | +| hexapod-sim.image.tag | string | `"latest"` | The tag of the container image | +| hexapod-sim.namespace | string | `"auxtel"` | This is the namespace in which the hexapod controller simulator will be placed | diff --git a/applications/love/README.md b/applications/love/README.md index 051b49d522..ab9e7a1cdf 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -6,7 +6,6 @@ Deployment for the LSST Operators Visualization Environment | Key | Type | Default | Description | 
|-----|------|---------|-------------| -| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | @@ -17,3 +16,127 @@ Deployment for the LSST Operators Visualization Environment | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| love-manager.affinity | object | `{}` | Affinity rules for the LOVE manager pods | +| love-manager.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| love-manager.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| love-manager.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| love-manager.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| love-manager.database.affinity | object | `{}` | Affinity rules for the LOVE database pods | +| love-manager.database.env.POSTGRES_DB | string | `"postgres"` | Define the database type | +| love-manager.database.env.POSTGRES_USER | string | `"postgres"` | Define the database user | +| love-manager.database.envSecrets.POSTGRES_PASSWORD | string | `"db-pass"` | The database password secret key name | +| love-manager.database.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the database image | +| love-manager.database.image.repository | string | `"postgres"` | The database image to use | +| love-manager.database.image.tag | string | `"12.0"` | The tag to use for the database image | +| love-manager.database.nodeSelector | object | `{}` | Node selection rules for the LOVE database pods | +| love-manager.database.port | int | `5432` | The database port number | +| love-manager.database.resources | object | `{}` | Resource specifications for the LOVE database pods | +| love-manager.database.storage.accessMode | string | `"ReadWriteMany"` | The access mode for the database storage | +| love-manager.database.storage.claimSize | string | `"2Gi"` | The size of the database storage request | +| love-manager.database.storage.name | string | `"love-manager-database"` | Label for the database storage 
point | +| love-manager.database.storage.path | string | `"/var/lib/postgresql/data"` | Path within the running container | +| love-manager.database.storage.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | +| love-manager.database.tolerations | list | `[]` | Toleration specifications for the LOVE database pods | +| love-manager.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| love-manager.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| love-manager.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| love-manager.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| love-manager.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| love-manager.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager | +| love-manager.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| love-manager.env.DB_NAME | string | `"postgres"` | The name of the database being used for the LOVE manager. Must match `database.env.POSTGRES_DB` | +| love-manager.env.DB_PORT | int | `5432` | The port for the database Must match `database.port` | +| love-manager.env.DB_USER | string | `"postgres"` | The database user needed for access from the LOVE manager. 
Must match `database.env.POSTGRES_USER` | +| love-manager.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| love-manager.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| love-manager.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | +| love-manager.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| love-manager.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| love-manager.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| love-manager.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| love-manager.envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | +| love-manager.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager admin user password secret key name | +| love-manager.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager cmd_user user password secret key name | +| love-manager.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager process connection password secret key name | +| love-manager.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | +| love-manager.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager secret secret key name | +| love-manager.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager user user password secret key name | +| love-manager.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager image | +| love-manager.image.repository | string | `"lsstts/love-manager"` | The LOVE manager image to use | +| love-manager.image.tag | string | `nil` | | +| love-manager.nodeSelector | object | `{}` | Node selection rules for the LOVE manager pods | +| love-manager.ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.ports.node | int | `30000` | The port on the node for normal communcations | +| love-manager.readinessProbe | object | `{}` | Configuration for the LOVE manager pods readiness probe | +| love-manager.redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | +| love-manager.redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | +| love-manager.redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name | +| love-manager.redis.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the redis image | +| love-manager.redis.image.repository | string | `"redis"` | The redis image to use | +| love-manager.redis.image.tag | string | `"5.0.3"` | The tag to use for the redis image | +| love-manager.redis.nodeSelector | object | `{}` | Node selection rules for the LOVE redis pods | +| love-manager.redis.port | int | `6379` | The redis port number | +| love-manager.redis.resources | object | `{}` | Resource specifications for the LOVE redis pods | +| love-manager.redis.tolerations | list | `[]` | Toleration specifications for the LOVE redis pods | +| love-manager.replicas | int | `1` | Set the default number of LOVE manager pod replicas 
| +| love-manager.resources | object | `{}` | Resource specifications for the LOVE manager pods | +| love-manager.tolerations | list | `[]` | Toleration specifications for the LOVE manager pods | +| love-manager.viewBackup.affinity | object | `{}` | Affinity rules for the LOVE view backup pods | +| love-manager.viewBackup.enabled | bool | `false` | Whether view backup is active | +| love-manager.viewBackup.env | object | `{}` | Place to specify additional environment variables for the view backup job | +| love-manager.viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image | +| love-manager.viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use | +| love-manager.viewBackup.image.tag | string | `"develop"` | The tag to use for the view backup image | +| love-manager.viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods | +| love-manager.viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods | +| love-manager.viewBackup.restartPolicy | string | `"Never"` | The restart policy type for the view backup cronjob | +| love-manager.viewBackup.schedule | string | `"0 0 1 1 *"` | The view backup job schedule in cron format | +| love-manager.viewBackup.tolerations | list | `[]` | Toleration specifications for the LOVE view backup pods | +| love-manager.viewBackup.ttlSecondsAfterFinished | string | `""` | Time after view backup job finishes before deletion (ALPHA) | +| love-nginx.affinity | object | `{}` | Affinity rules for the NGINX pod | +| love-nginx.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the NGINX image | +| love-nginx.image.repository | string | `"nginx"` | The NGINX image to use | +| love-nginx.image.tag | string | `"1.14.2"` | The tag to use for the NGINX image | +| love-nginx.imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. 
If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | +| love-nginx.ingress.annotations | object | `{}` | Annotations for the NGINX ingress | +| love-nginx.ingress.className | string | `"nginx"` | Assign the Ingress class name | +| love-nginx.ingress.hostname | string | `"love.local"` | Hostname for the NGINX ingress | +| love-nginx.ingress.httpPath | string | `"/"` | Path name associated with the NGINX ingress | +| love-nginx.ingress.pathType | string | `""` | Set the Kubernetes path type for the NGINX ingress | +| love-nginx.initContainers.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the frontend image | +| love-nginx.initContainers.frontend.image.repository | string | `"lsstts/love-frontend"` | The frontend image to use | +| love-nginx.initContainers.frontend.image.tag | string | `nil` | | +| love-nginx.initContainers.manager.command | list | `["/bin/sh","-c","mkdir -p /usr/src/love-manager/media/thumbnails; mkdir -p /usr/src/love-manager/media/configs; cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -uv /usr/src/love/manager/ui_framework/fixtures/thumbnails/* /usr/src/love-manager/media/thumbnails; cp -uv /usr/src/love/manager/api/fixtures/configs/* /usr/src/love-manager/media/configs"]` | The command to execute for the love-manager static content | +| love-nginx.initContainers.manager.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the love-manager static content image | +| love-nginx.initContainers.manager.image.repository | string | `"lsstts/love-manager"` | The static love-manager content image to use | +| love-nginx.initContainers.manager.image.tag | string | `nil` | | +| love-nginx.loveConfig | string | `"{\n \"alarms\": {\n \"minSeveritySound\": \"serious\",\n \"minSeverityNotification\": \"warning\"\n },\n \"camFeeds\": {\n \"generic\": \"/gencam\",\n \"allSky\": \"/gencam\"\n }\n}\n"` | 
Configuration specificiation for the LOVE service | +| love-nginx.namespace | string | `"love"` | The overall namespace for the application | +| love-nginx.nginxConfig | string | `"server {\n listen 80;\n server_name localhost;\n location / {\n root /usr/src/love-frontend;\n try_files $uri$args $uri$args/ $uri/ /index.html;\n }\n location /manager {\n proxy_pass http://love-manager-service:8000;\n proxy_http_version 1.1;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header Host $host;\n proxy_redirect off;\n }\n location /manager/static {\n alias /usr/src/love-manager/static;\n }\n location /manager/media {\n alias /usr/src/love-manager/media;\n }\n}\n"` | Configuration specification for the NGINX service | +| love-nginx.nodeSelector | object | `{}` | Node selection rules for the NGINX pod | +| love-nginx.ports.container | int | `80` | Container port for the NGINX service | +| love-nginx.ports.node | int | `30000` | Node port for the NGINX service | +| love-nginx.resources | object | `{}` | Resource specifications for the NGINX pod | +| love-nginx.serviceType | string | `"ClusterIP"` | Service type specification | +| love-nginx.staticStore.accessMode | string | `"ReadWriteMany"` | The access mode for the NGINX static store | +| love-nginx.staticStore.claimSize | string | `"2Gi"` | The size of the NGINX static store request | +| love-nginx.staticStore.name | string | `"love-nginx-static"` | Label for the NGINX static store | +| love-nginx.staticStore.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | +| love-nginx.tolerations | list | `[]` | Toleration specifications for the NGINX pod | +| love-producer.affinity | object | `{}` | Affinity rules applied to all LOVE producer pods | +| love-producer.annotations | object | `{}` | This allows for the specification of pod annotations. 
| +| love-producer.env | object | `{"WEBSOCKET_HOST":"love-nginx/manager/ws/subscription"}` | This section holds a set of key, value pairs for environmental variables | +| love-producer.envSecrets | object | `{"PROCESS_CONNECTION_PASS":"process-connection-pass"}` | This section holds a set of key, value pairs for secrets | +| love-producer.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE producer image | +| love-producer.image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | +| love-producer.image.tag | string | `nil` | | +| love-producer.nodeSelector | object | `{}` | Node selection rules applied to all LOVE producer pods | +| love-producer.producers | obj | `[]` | This sections sets the list of producers to use. The producers are collected into producer groups and a CSC producers will be assigned to a given container. The producers should be specified like: _name_: The top-level name for the producer group. _cscs_: Map of _CSC name:index_ Example: ataos: ATAOS:0 The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | +| love-producer.replicaCount | int | `1` | Set the replica count for the LOVE producers | +| love-producer.resources | object | `{}` | Resource specifications applied to all LOVE producer pods | +| love-producer.tolerations | list | `[]` | Toleration specifications applied to all LOVE producer pods | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index ad1deed8da..fc06207495 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -46,6 +46,7 @@ Helm chart for the LOVE manager service. 
| env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | | env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | | env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | | envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | | envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager admin user password secret key name | | envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager cmd_user user password secret key name | @@ -62,6 +63,7 @@ Helm chart for the LOVE manager service. | ports.node | int | `30000` | The port on the node for normal communcations | | readinessProbe | object | `{}` | Configuration for the LOVE manager pods readiness probe | | redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | +| redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | | redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name | | redis.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the redis image | | redis.image.repository | string | `"redis"` | The redis image to use | diff --git a/applications/love/charts/love-manager/templates/redis-configmap.yaml b/applications/love/charts/love-manager/templates/redis-configmap.yaml new file mode 100644 index 0000000000..fcff21243d --- /dev/null +++ b/applications/love/charts/love-manager/templates/redis-configmap.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: redis-conf +data: + redis.conf: | +{{ .Values.redis.config | indent 4 }} diff --git a/applications/love/charts/love-manager/templates/redis-deployment.yaml b/applications/love/charts/love-manager/templates/redis-deployment.yaml index bc4421a7e4..3e27f50898 100644 --- 
a/applications/love/charts/love-manager/templates/redis-deployment.yaml +++ b/applications/love/charts/love-manager/templates/redis-deployment.yaml @@ -19,7 +19,7 @@ spec: - name: {{ include "love-manager.redis.fullname" . }} image: "{{ .Values.redis.image.repository }}:{{ .Values.redis.image.tag }}" imagePullPolicy: {{ .Values.redis.image.pullPolicy }} - command: [ "redis-server", "--appendonly", "yes", "--requirepass", "$(REDIS_PASS)" ] + command: [ "redis-server", "/data/redis.conf", "--appendonly", "yes", "--requirepass", "$(REDIS_PASS)" ] ports: - containerPort: {{ .Values.redis.port }} env: @@ -27,10 +27,21 @@ spec: {{- include "helpers.envFromList" $data | indent 10 }} {{- $data := dict "env" .Values.redis.envSecrets "secretName" .Values.envSecretKeyName }} {{- include "helpers.envFromList" $data | indent 10 }} + volumeMounts: + - mountPath: /data/redis.conf + readOnly: true + name: redis-conf {{- with $.Values.redis.resources }} resources: {{- toYaml $.Values.redis.resources | nindent 10 }} {{- end }} + volumes: + - name: redis-conf + configMap: + name: redis-conf + items: + - key: redis.conf + path: redis.conf {{- with $.Values.redis.nodeSelector }} nodeSelector: {{- toYaml $ | nindent 8 }} diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index e0598107f0..1adf351455 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -17,6 +17,8 @@ env: LOVE_SITE: local # -- The external URL from the NGINX server for LOVE SERVER_URL: love.lsst.local + # -- The Kubernetes sub-path for LOVE + URL_SUBPATH: /love # -- Set the manager to use LFA storage REMOTE_STORAGE: true # -- Set the hostname for the Jira instance @@ -152,6 +154,9 @@ redis: tolerations: [] # -- Affinity rules for the LOVE redis pods affinity: {} + # -- Configuration specification for the redis service + config: | + timeout 60 viewBackup: # -- Whether view backup is active 
enabled: false diff --git a/applications/love/charts/love-nginx/README.md b/applications/love/charts/love-nginx/README.md index c5ce11091e..c383628235 100644 --- a/applications/love/charts/love-nginx/README.md +++ b/applications/love/charts/love-nginx/README.md @@ -12,7 +12,7 @@ Helm chart for the LOVE Nginx server. | image.tag | string | `"1.14.2"` | The tag to use for the NGINX image | | imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | | ingress.annotations | object | `{}` | Annotations for the NGINX ingress | -| ingress.className | string | `""` | Assign the Ingress class name | +| ingress.className | string | `"nginx"` | Assign the Ingress class name | | ingress.hostname | string | `"love.local"` | Hostname for the NGINX ingress | | ingress.httpPath | string | `"/"` | Path name associated with the NGINX ingress | | ingress.pathType | string | `""` | Set the Kubernetes path type for the NGINX ingress | diff --git a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml index c8ac9facca..93000d0cc2 100644 --- a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml +++ b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml @@ -24,7 +24,7 @@ spec: {{- $feImageTag := .Values.initContainers.frontend.image.tag | default $.Values.global.controlSystemImageTag }} image: "{{ .Values.initContainers.frontend.image.repository }}:{{ $feImageTag }}" imagePullPolicy: {{ .Values.initContainers.frontend.image.pullPolicy }} - command: ["/bin/sh", "-c", "cp -Rv /usr/src/love/ /usr/src/love-frontend"] + command: ["/bin/sh", "-c", "mkdir -p /usr/src/love-frontend; cp -Rv /usr/src/love/ /usr/src/love-frontend"] volumeMounts: - mountPath: /usr/src name: {{ .Values.staticStore.name }} diff --git 
a/applications/love/charts/love-nginx/values.yaml b/applications/love/charts/love-nginx/values.yaml index 43e96cb8e8..bb8a20b5fb 100644 --- a/applications/love/charts/love-nginx/values.yaml +++ b/applications/love/charts/love-nginx/values.yaml @@ -22,7 +22,7 @@ ingress: # -- Set the Kubernetes path type for the NGINX ingress pathType: "" # -- Assign the Ingress class name - className: "" + className: nginx # -- Annotations for the NGINX ingress annotations: {} # -- Configuration specification for the NGINX service diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index ccb37068a3..ac5d117a38 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -38,7 +38,7 @@ love-manager: repository: ts-dockerhub.lsst.org/love-manager pullPolicy: Always env: - SERVER_URL: love.tu.lsst.org + SERVER_URL: tucson-teststand.lsst.codes OLE_API_HOSTNAME: tucson-teststand.lsst.codes AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org @@ -46,6 +46,7 @@ love-manager: REDIS_CONFIG_EXPIRY: 5 REDIS_CONFIG_CAPACITY: 5000 LOVE_SITE: tucson + LOVE_PRODUCER_WEBSOCKET_HOST: love-nginx-service/love/manager/ws/subscription envSecrets: AUTHLIST_USER_PASS: authlist-user-pass AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password @@ -69,7 +70,7 @@ love-manager: database: image: repository: postgres - tag: '12.0' + tag: '15.0' pullPolicy: IfNotPresent storage: name: love-manager-database @@ -80,8 +81,10 @@ love-manager: redis: image: repository: redis - tag: 5.0.3 + tag: '7' pullPolicy: IfNotPresent + config: | + timeout 60 viewBackup: enabled: true image: @@ -92,18 +95,19 @@ love-manager: love-nginx: image: repository: nginx - tag: 1.13.1 + tag: 1.25.1 pullPolicy: Always ingress: - hostname: love.tu.lsst.org + hostname: tucson-teststand.lsst.codes + httpPath: /love annotations: - kubernetes.io/ingress.class: nginx + 
nginx.ingress.kubernetes.io/ssl-redirect: "true" imagePullSecrets: - name: nexus3-docker initContainers: frontend: image: - repository: ts-dockerhub.lsst.org/love-frontend + repository: ts-dockerhub.lsst.org/love-frontend-k8s pullPolicy: Always manager: image: @@ -112,8 +116,7 @@ love-nginx: command: - /bin/sh - -c - - cp -Rv /usr/src/love/manager/static /usr/src/love-manager; cp -Rv /usr/src/love/manager/media - /usr/src/love-manager + - mkdir -p /usr/src/love-manager; cp -Rv /usr/src/love/manager/media /usr/src/love-manager; cp -Rv /usr/src/love/manager/static /usr/src/love-manager staticStore: name: love-nginx-static storageClass: rook-ceph-block @@ -123,11 +126,11 @@ love-nginx: server { listen 80; server_name localhost; - location / { + location /love { root /usr/src/love-frontend; - try_files $uri$args $uri$args/ $uri/ /index.html; + try_files $uri$args $uri$args/ $uri/ /love/index.html; } - location /manager { + location /love/manager { proxy_pass http://love-manager-service:8000; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; @@ -135,18 +138,18 @@ love-nginx: proxy_set_header Host $host; proxy_redirect off; } - location /media { + location /love/media { alias /usr/src/love-manager/media; } - location /manager/static { + location /love/manager/static { alias /usr/src/love-manager/static; } - location /manager/media { + location /love/manager/media { alias /usr/src/love-manager/media; } - location /simcam { - proxy_pass http://simulation-gencam-service.calsys:5013; - proxy_redirect off; + location /love/simcam { + proxy_pass http://simulation-gencam-service.calsys:5013/; + proxy_set_header Host $host/love; } } loveConfig: | @@ -156,7 +159,7 @@ love-nginx: "minSeverityNotification": "warning" }, "camFeeds": { - "simcam": "/simcam" + "simcam": "/love/simcam" }, "efd": { "defaultEfdInstance": "tucson_teststand_efd" @@ -168,7 +171,7 @@ love-producer: repository: ts-dockerhub.lsst.org/love-producer pullPolicy: Always env: - WEBSOCKET_HOST: 
love-nginx-service/manager/ws/subscription + WEBSOCKET_HOST: love-nginx-service/love/manager/ws/subscription producers: - name: auxtel cscs: From 3ac1d07af24aebd3b14ab1b880cb0e494863c3e2 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 17 Aug 2023 15:38:14 -0700 Subject: [PATCH 492/588] Rename dmocps to uws. --- applications/{dmocps => uws}/Chart.yaml | 4 ++-- applications/{dmocps => uws}/README.md | 4 ++-- applications/{dmocps => uws}/charts/csc | 0 applications/{dmocps => uws}/charts/csc_collector | 0 applications/{dmocps => uws}/values-tucson-teststand.yaml | 0 applications/{dmocps => uws}/values.yaml | 0 .../{dmocps-application.yaml => uws-application.yaml} | 6 +++--- environments/values-tucson-teststand.yaml | 2 +- environments/values.yaml | 7 ++++--- 9 files changed, 12 insertions(+), 11 deletions(-) rename applications/{dmocps => uws}/Chart.yaml (73%) rename applications/{dmocps => uws}/README.md (96%) rename applications/{dmocps => uws}/charts/csc (100%) rename applications/{dmocps => uws}/charts/csc_collector (100%) rename applications/{dmocps => uws}/values-tucson-teststand.yaml (100%) rename applications/{dmocps => uws}/values.yaml (100%) rename environments/templates/{dmocps-application.yaml => uws-application.yaml} (94%) diff --git a/applications/dmocps/Chart.yaml b/applications/uws/Chart.yaml similarity index 73% rename from applications/dmocps/Chart.yaml rename to applications/uws/Chart.yaml index dd3827f119..0e0bd25da8 100644 --- a/applications/dmocps/Chart.yaml +++ b/applications/uws/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 -name: dmocps +name: uws version: 1.0.0 -description: Deployment for the DM OCPS CSCs +description: Deployment for the UWS and DM OCPS CSCs dependencies: - name: csc_collector version: 1.0.0 diff --git a/applications/dmocps/README.md b/applications/uws/README.md similarity index 96% rename from applications/dmocps/README.md rename to applications/uws/README.md index 7de092f4d8..7216017493 100644 --- 
a/applications/dmocps/README.md +++ b/applications/uws/README.md @@ -1,6 +1,6 @@ -# dmocps +# uws -Deployment for the DM OCPS CSCs +Deployment for the UWS and DM OCPS CSCs ## Values diff --git a/applications/dmocps/charts/csc b/applications/uws/charts/csc similarity index 100% rename from applications/dmocps/charts/csc rename to applications/uws/charts/csc diff --git a/applications/dmocps/charts/csc_collector b/applications/uws/charts/csc_collector similarity index 100% rename from applications/dmocps/charts/csc_collector rename to applications/uws/charts/csc_collector diff --git a/applications/dmocps/values-tucson-teststand.yaml b/applications/uws/values-tucson-teststand.yaml similarity index 100% rename from applications/dmocps/values-tucson-teststand.yaml rename to applications/uws/values-tucson-teststand.yaml diff --git a/applications/dmocps/values.yaml b/applications/uws/values.yaml similarity index 100% rename from applications/dmocps/values.yaml rename to applications/uws/values.yaml diff --git a/environments/templates/dmocps-application.yaml b/environments/templates/uws-application.yaml similarity index 94% rename from environments/templates/dmocps-application.yaml rename to environments/templates/uws-application.yaml index 3911677456..6c05734d0b 100644 --- a/environments/templates/dmocps-application.yaml +++ b/environments/templates/uws-application.yaml @@ -1,8 +1,8 @@ -{{- if .Values.dmocps.enabled -}} +{{- if .Values.uws.enabled -}} apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: dmocps + name: uws namespace: argocd finalizers: - resources-finalizer.argocd.argoproj.io @@ -12,7 +12,7 @@ spec: server: https://kubernetes.default.svc project: default source: - path: applications/dmocps + path: applications/uws repoURL: {{ .Values.repoURL }} targetRevision: {{ .Values.targetRevision }} helm: diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 460d054fd7..39df92db51 100644 --- 
a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -7,7 +7,6 @@ applications: auxtel: true calsys: true control-system-test: true - dmocps: true eas: true exposurelog: true love: true @@ -19,6 +18,7 @@ applications: simonyitel: true squareone: true strimzi: true + uws: true telegraf: true telegraf-ds: true diff --git a/environments/values.yaml b/environments/values.yaml index 36750105e1..1f1f6aa4c6 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -54,9 +54,6 @@ applications: # -- Eanble the datalinker application datalinker: false - # -- Enable the dmocps control system application - dmocps: false - # -- Enable the eas control system application eas: false @@ -204,6 +201,10 @@ applications: # -- Enable the times-square application times-square: false + # -- Enable the uws application. This includes the dmocps control system + # application. + uws: false + # -- Enable the vault-secrets-operator application. This is required for all # environments. vault-secrets-operator: true From 96e38eddaaca0e6fa47a91a27417a3d10129fdb3 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 17 Aug 2023 16:26:55 -0700 Subject: [PATCH 493/588] Add uws-api-server chart and app. 
--- applications/uws/Chart.yaml | 2 + applications/uws/README.md | 28 ++++++- .../uws/charts/uws-api-server/Chart.yaml | 9 +++ .../uws/charts/uws-api-server/README.md | 34 +++++++++ .../uws-api-server/templates/_helpers.tpl | 45 ++++++++++++ .../uws-api-server/templates/configmap.yaml | 27 +++++++ .../templates/deployment-client.yaml | 35 +++++++++ .../templates/deployment-server.yaml | 57 +++++++++++++++ .../uws-api-server/templates/ingress.yaml | 24 ++++++ .../charts/uws-api-server/templates/rbac.yaml | 28 +++++++ .../uws-api-server/templates/service.yaml | 13 ++++ .../templates/vault-secrets.yaml | 13 ++++ .../uws-api-server/templates/volumes.yaml | 73 +++++++++++++++++++ .../uws/charts/uws-api-server/values.yaml | 73 +++++++++++++++++++ applications/uws/values-tucson-teststand.yaml | 62 ++++++++++++++++ 15 files changed, 522 insertions(+), 1 deletion(-) create mode 100644 applications/uws/charts/uws-api-server/Chart.yaml create mode 100644 applications/uws/charts/uws-api-server/README.md create mode 100644 applications/uws/charts/uws-api-server/templates/_helpers.tpl create mode 100644 applications/uws/charts/uws-api-server/templates/configmap.yaml create mode 100644 applications/uws/charts/uws-api-server/templates/deployment-client.yaml create mode 100644 applications/uws/charts/uws-api-server/templates/deployment-server.yaml create mode 100644 applications/uws/charts/uws-api-server/templates/ingress.yaml create mode 100644 applications/uws/charts/uws-api-server/templates/rbac.yaml create mode 100644 applications/uws/charts/uws-api-server/templates/service.yaml create mode 100644 applications/uws/charts/uws-api-server/templates/vault-secrets.yaml create mode 100644 applications/uws/charts/uws-api-server/templates/volumes.yaml create mode 100644 applications/uws/charts/uws-api-server/values.yaml diff --git a/applications/uws/Chart.yaml b/applications/uws/Chart.yaml index 0e0bd25da8..ea6539c2c5 100644 --- a/applications/uws/Chart.yaml +++ 
b/applications/uws/Chart.yaml @@ -11,3 +11,5 @@ dependencies: - name: csc alias: ccocps version: 1.0.0 +- name: uws-api-server + version: 1.5.0 diff --git a/applications/uws/README.md b/applications/uws/README.md index 7216017493..f5d98a8214 100644 --- a/applications/uws/README.md +++ b/applications/uws/README.md @@ -6,7 +6,6 @@ Deployment for the UWS and DM OCPS CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | @@ -17,3 +16,30 @@ Deployment for the UWS and DM OCPS CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| uws-api-server.basePath | string | `"uws-server"` | The base path for the client ingress | +| uws-api-server.butlerPg | object | `{}` | Configuration for Postgres backed butlers The object must have the following attributes defined: _secretKey_ (A label that points to the VaultSecret for the postgres credentials) _containerPath_ (The directory location in the container for the Butler secret) _dbUser_ (The database user name for butler access) | +| uws-api-server.client.enabled | bool | `false` | Turn on the UWS client system if desired | +| uws-api-server.createNamespace | bool | `false` | Temporary flag to make service deploy own namespace. Doing this to not disrupt other sites. | +| uws-api-server.hostname | string | `""` | Hostname for the client ingress | +| uws-api-server.image.repository | string | `"lsstdm/uws-api-server"` | The Docker registry name of the UWS server container image | +| uws-api-server.image.tag | string | `"latest"` | The tag of the UWS server container image | +| uws-api-server.job.image.repository | string | `"lsstsqre/centos"` | The Docker registry name of the UWS job container image | +| uws-api-server.job.image.tag | string | `"d_latest"` | The tag of the UWS job container image | +| uws-api-server.job.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS job container | +| uws-api-server.job.securityContext.runAsGroup | int | `202` | Set the GID for the UWS job container entrypoint | +| uws-api-server.job.securityContext.runAsUser | int | `1000` | Set the UID for the UWS job container entrypoint | +| uws-api-server.logLevel | string | `"WARNING"` | Log level of server. 
Set to "DEBUG" for highest verbosity | +| uws-api-server.replicaCount | int | `1` | Set the replica count for the UWS server | +| uws-api-server.server.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS server container | +| uws-api-server.server.securityContext.runAsGroup | int | `202` | Set the GID for the UWS server container entrypoint | +| uws-api-server.server.securityContext.runAsUser | int | `1000` | Set the UID for the UWS server container entrypoint | +| uws-api-server.targetCluster | string | `""` | Target Kubernetes cluster | +| uws-api-server.vaultPathPrefix | string | `""` | Site-specific Vault path for secrets. | +| uws-api-server.volumes | list | `[]` | Central data volumes to be mounted in job containers. Each object listed can have the following attributes defined: _name_ (A label identifier for the data volume mount) _server_ (The hostname for the NFS server with the data volume mount) _claimName_ (The PVC claim name for the data volume mount) _mountPath_ (The mount path in the server container for the data volume mount) _exportPath_ (The export path on the NFS server for the data volume mount) _subPath_ (A possible sub path for the data volume mount) _readOnly_ (Flag to mark the data volume mount as read only or read/write) | +| uws-api-server.workingVolume.claimName | string | `""` | The PVC claim name for the working volume | +| uws-api-server.workingVolume.exportPath | string | `""` | The export path on the NFS server for the working volume | +| uws-api-server.workingVolume.mountPath | string | `"/uws"` | The mount path in the server container for the working volume | +| uws-api-server.workingVolume.name | string | `"job-files"` | A label identifier for the working volume | +| uws-api-server.workingVolume.server | string | `""` | The hostname for the NFS server with the working volume | +| uws-api-server.workingVolume.subPath | string | `""` | A possible sub path for the working volume mount | 
diff --git a/applications/uws/charts/uws-api-server/Chart.yaml b/applications/uws/charts/uws-api-server/Chart.yaml new file mode 100644 index 0000000000..c8b882a7fe --- /dev/null +++ b/applications/uws/charts/uws-api-server/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: uws-api-server +version: 1.5.0 +description: Helm chart for deploying the Universal Worker Service API Server +maintainers: + - name: Kian-Tat Lim + email: ktl@slac.stanford.edu + - name: Michael Reuter + email: mareuter@lsst.org diff --git a/applications/uws/charts/uws-api-server/README.md b/applications/uws/charts/uws-api-server/README.md new file mode 100644 index 0000000000..e176c46975 --- /dev/null +++ b/applications/uws/charts/uws-api-server/README.md @@ -0,0 +1,34 @@ +# uws-api-server + +Helm chart for deploying the Universal Worker Service API Server + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| basePath | string | `"uws-server"` | The base path for the client ingress | +| butlerPg | object | `{}` | Configuration for Postgres backed butlers The object must have the following attributes defined: _secretKey_ (A label that points to the VaultSecret for the postgres credentials) _containerPath_ (The directory location in the container for the Butler secret) _dbUser_ (The database user name for butler access) | +| client.enabled | bool | `false` | Turn on the UWS client system if desired | +| createNamespace | bool | `false` | Temporary flag to make service deploy own namespace. Doing this to not disrupt other sites. 
| +| hostname | string | `""` | Hostname for the client ingress | +| image.repository | string | `"lsstdm/uws-api-server"` | The Docker registry name of the UWS server container image | +| image.tag | string | `"latest"` | The tag of the UWS server container image | +| job.image.repository | string | `"lsstsqre/centos"` | The Docker registry name of the UWS job container image | +| job.image.tag | string | `"d_latest"` | The tag of the UWS job container image | +| job.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS job container | +| job.securityContext.runAsGroup | int | `202` | Set the GID for the UWS job container entrypoint | +| job.securityContext.runAsUser | int | `1000` | Set the UID for the UWS job container entrypoint | +| logLevel | string | `"WARNING"` | Log level of server. Set to "DEBUG" for highest verbosity | +| replicaCount | int | `1` | Set the replica count for the UWS server | +| server.securityContext.fsGroup | int | `202` | Set the filesystem GID for the mounted volumes in the UWS server container | +| server.securityContext.runAsGroup | int | `202` | Set the GID for the UWS server container entrypoint | +| server.securityContext.runAsUser | int | `1000` | Set the UID for the UWS server container entrypoint | +| targetCluster | string | `""` | Target Kubernetes cluster | +| vaultPathPrefix | string | `""` | Site-specific Vault path for secrets. | +| volumes | list | `[]` | Central data volumes to be mounted in job containers. 
Each object listed can have the following attributes defined: _name_ (A label identifier for the data volume mount) _server_ (The hostname for the NFS server with the data volume mount) _claimName_ (The PVC claim name for the data volume mount) _mountPath_ (The mount path in the server container for the data volume mount) _exportPath_ (The export path on the NFS server for the data volume mount) _subPath_ (A possible sub path for the data volume mount) _readOnly_ (Flag to mark the data volume mount as read only or read/write) | +| workingVolume.claimName | string | `""` | The PVC claim name for the working volume | +| workingVolume.exportPath | string | `""` | The export path on the NFS server for the working volume | +| workingVolume.mountPath | string | `"/uws"` | The mount path in the server container for the working volume | +| workingVolume.name | string | `"job-files"` | A label identifier for the working volume | +| workingVolume.server | string | `""` | The hostname for the NFS server with the working volume | +| workingVolume.subPath | string | `""` | A possible sub path for the working volume mount | diff --git a/applications/uws/charts/uws-api-server/templates/_helpers.tpl b/applications/uws/charts/uws-api-server/templates/_helpers.tpl new file mode 100644 index 0000000000..16ab573354 --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/_helpers.tpl @@ -0,0 +1,45 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "uws-api-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "uws-api-server.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "uws-api-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "uws-api-server.labels" -}} +app.kubernetes.io/name: {{ include "uws-api-server.name" . }} +helm.sh/chart: {{ include "uws-api-server.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} diff --git a/applications/uws/charts/uws-api-server/templates/configmap.yaml b/applications/uws/charts/uws-api-server/templates/configmap.yaml new file mode 100644 index 0000000000..b147ff8d8a --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/configmap.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-configmap +data: + config: | + workingVolume: + {{- toYaml .Values.workingVolume | nindent 6 }} + volumes: + {{- toYaml .Values.volumes | nindent 6 }} + server: + service: {{ .Release.Name }}-server + port: 8080 + protocol: "http" + basePath: "/api/v1" + logLevel: "{{ .Values.logLevel }}" + job: + image: + repository: "{{ .Values.job.image.repository }}" + tag: "{{ .Values.job.image.tag }}" + securityContext: + {{- toYaml .Values.job.securityContext | nindent 8 }} + {{- if .Values.butlerPg }} + butlerPg: + containerPath: "{{ .Values.butlerPg.containerPath }}" + 
dbUser: "{{ .Values.butlerPg.dbUser }}" + {{- end }} diff --git a/applications/uws/charts/uws-api-server/templates/deployment-client.yaml b/applications/uws/charts/uws-api-server/templates/deployment-client.yaml new file mode 100644 index 0000000000..0785af1199 --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/deployment-client.yaml @@ -0,0 +1,35 @@ +{{- if .Values.client.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-client + labels: + app: {{ .Release.Name }}-client + chart: {{ template "uws-api-server.chart" . }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Release.Name }}-client + template: + metadata: + labels: + app: {{ .Release.Name }}-client + spec: + volumes: + # Server configuration + - name: config + configMap: + name: {{ .Release.Name }}-configmap + containers: + - name: uws-api-client + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: Always + command: ["/bin/bash", "-c", "sleep 1000d"] + volumeMounts: + # Server configuration + - name: config + subPath: config + mountPath: /etc/config/uws.yaml +{{- end }} diff --git a/applications/uws/charts/uws-api-server/templates/deployment-server.yaml b/applications/uws/charts/uws-api-server/templates/deployment-server.yaml new file mode 100644 index 0000000000..02f3c8556d --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/deployment-server.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-server + labels: + app: {{ .Release.Name }}-server + chart: {{ template "uws-api-server.chart" . 
}} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ .Release.Name }}-server + template: + metadata: + labels: + app: {{ .Release.Name }}-server + spec: + securityContext: + {{ toYaml .Values.server.securityContext | nindent 8 }} + serviceAccountName: {{ .Release.Name }}-job-manager + volumes: + # Server configuration + - name: config + configMap: + name: {{ .Release.Name }}-configmap + # Volume to host job data + - name: "{{ .Values.workingVolume.name }}" + persistentVolumeClaim: + claimName: "{{ .Values.workingVolume.claimName }}" + {{- range .Values.volumes }} + - name: "{{ .name }}" + persistentVolumeClaim: + claimName: "{{ .claimName }}" + {{- end }} + containers: + - name: uws-api-server + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: Always + command: ["/bin/bash", "-c", "cd server && python3 server.py"] + ports: + - containerPort: 8080 + volumeMounts: + # Working directory for job data + - name: "{{ .Values.workingVolume.name }}" + mountPath: "{{ .Values.workingVolume.mountPath }}" + subPath: "{{ .Values.workingVolume.subPath }}" + # Server configuration + - name: config + subPath: config + mountPath: /etc/config/uws.yaml + # Shared data volumes (environment-specific) + {{- range .Values.volumes }} + - name: "{{ .name }}" + mountPath: "{{ .mountPath }}" + subPath: "{{ .subPath }}" + readOnly: {{ .readOnly }} + {{- end }} diff --git a/applications/uws/charts/uws-api-server/templates/ingress.yaml b/applications/uws/charts/uws-api-server/templates/ingress.yaml new file mode 100644 index 0000000000..bb2e67a650 --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/ingress.yaml @@ -0,0 +1,24 @@ +--- +kind: Ingress +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "uws-api-server.fullname" $ }}-ingress + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/use-regex: 'true' + 
nginx.ingress.kubernetes.io/auth-type: basic + nginx.ingress.kubernetes.io/auth-secret: uws-server-basic-auth + nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required' +spec: + rules: + - host: {{ .Values.hostname }} + http: + paths: + - path: /{{ .Values.basePath }}(/|$)(.*) + pathType: "ImplementationSpecific" + backend: + service: + name: {{ .Release.Name }}-server + port: + number: 80 diff --git a/applications/uws/charts/uws-api-server/templates/rbac.yaml b/applications/uws/charts/uws-api-server/templates/rbac.yaml new file mode 100644 index 0000000000..e2248ca9dd --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/rbac.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-job-manager + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Release.Name }}-job-manager-role +rules: + - apiGroups: ["batch"] + resources: ["jobs", "jobs/status", "configmaps"] + verbs: ["get", "list", "watch", "create", "delete"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Release.Name }}-job-manager-rolebinding +roleRef: + kind: Role + name: {{ .Release.Name }}-job-manager-role + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-job-manager diff --git a/applications/uws/charts/uws-api-server/templates/service.yaml b/applications/uws/charts/uws-api-server/templates/service.yaml new file mode 100644 index 0000000000..d59529eadb --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/service.yaml @@ -0,0 +1,13 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ .Release.Name }}-server + labels: + app: {{ .Release.Name }}-server + chart: {{ template "uws-api-server.chart" . 
}} +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + app: {{ .Release.Name }}-server diff --git a/applications/uws/charts/uws-api-server/templates/vault-secrets.yaml b/applications/uws/charts/uws-api-server/templates/vault-secrets.yaml new file mode 100644 index 0000000000..475ccec3fa --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/vault-secrets.yaml @@ -0,0 +1,13 @@ +{{- if .Values.butlerPg }} +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "uws-api-server.fullname" . }}-butler-secret + namespace: uws + labels: + app.kubernetes.io/name: {{ include "uws-api-server.name" . }} +spec: + path: {{ required "vaultPathPrefix must be set" .Values.vaultPathPrefix }}/{{ required "butlerPg.secretKey must be set" .Values.butlerPg.secretKey }} + type: Opaque +{{- end }} diff --git a/applications/uws/charts/uws-api-server/templates/volumes.yaml b/applications/uws/charts/uws-api-server/templates/volumes.yaml new file mode 100644 index 0000000000..8c12257f3d --- /dev/null +++ b/applications/uws/charts/uws-api-server/templates/volumes.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "uws-api-server.fullname" $ }}-uws-server-pv + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} + name: {{ template "uws-api-server.chart" $ }}-uws-server +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + server: {{ .Values.workingVolume.server }} + path: {{ .Values.workingVolume.exportPath }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.workingVolume.claimName }} + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} +spec: + resources: + requests: + storage: 1Gi + accessModes: + - ReadWriteMany + storageClassName: "" + selector: + matchLabels: + name: {{ template "uws-api-server.chart" $ }}-uws-server + +{{- 
range .Values.volumes }} +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: {{ template "uws-api-server.fullname" $ }}-{{ .name }}-pv + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} + name: {{ template "uws-api-server.chart" $ }}-{{ .name }} +spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteMany + nfs: + server: {{ .server }} + path: {{ .exportPath }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .claimName }} + labels: + app: {{ template "uws-api-server.fullname" $ }} + chart: {{ template "uws-api-server.chart" $ }} +spec: + resources: + requests: + storage: 1Gi + accessModes: + - ReadWriteMany + storageClassName: "" + selector: + matchLabels: + name: {{ template "uws-api-server.chart" $ }}-{{ .name }} +{{- end }} diff --git a/applications/uws/charts/uws-api-server/values.yaml b/applications/uws/charts/uws-api-server/values.yaml new file mode 100644 index 0000000000..fc61e04fbc --- /dev/null +++ b/applications/uws/charts/uws-api-server/values.yaml @@ -0,0 +1,73 @@ +# -- Set the replica count for the UWS server +replicaCount: 1 +image: + # -- The Docker registry name of the UWS server container image + repository: lsstdm/uws-api-server + # -- The tag of the UWS server container image + tag: latest +# -- Target Kubernetes cluster +targetCluster: "" +# -- Hostname for the client ingress +hostname: "" +# -- The base path for the client ingress +basePath: "uws-server" +# -- Log level of server. Set to "DEBUG" for highest verbosity +logLevel: "WARNING" +# -- Site-specific Vault path for secrets. 
+vaultPathPrefix: "" +server: + securityContext: + # -- Set the UID for the UWS server container entrypoint + runAsUser: 1000 + # -- Set the GID for the UWS server container entrypoint + runAsGroup: 202 + # -- Set the filesystem GID for the mounted volumes in the UWS server container + fsGroup: 202 +client: + # -- Turn on the UWS client system if desired + enabled: false +job: + image: + # -- The Docker registry name of the UWS job container image + repository: "lsstsqre/centos" + # -- The tag of the UWS job container image + tag: "d_latest" + securityContext: + # -- Set the UID for the UWS job container entrypoint + runAsUser: 1000 + # -- Set the GID for the UWS job container entrypoint + runAsGroup: 202 + # -- Set the filesystem GID for the mounted volumes in the UWS job container + fsGroup: 202 +# -- Configuration for Postgres backed butlers +# The object must have the following attributes defined: +# _secretKey_ (A label that points to the VaultSecret for the postgres credentials) +# _containerPath_ (The directory location in the container for the Butler secret) +# _dbUser_ (The database user name for butler access) +butlerPg: {} +workingVolume: + # -- A label identifier for the working volume + name: job-files + # -- The hostname for the NFS server with the working volume + server: "" + # -- The export path on the NFS server for the working volume + exportPath: "" + # -- The PVC claim name for the working volume + claimName: "" + # -- The mount path in the server container for the working volume + mountPath: "/uws" + # -- A possible sub path for the working volume mount + subPath: "" +# -- Central data volumes to be mounted in job containers. 
+# Each object listed can have the following attributes defined: +# _name_ (A label identifier for the data volume mount) +# _server_ (The hostname for the NFS server with the data volume mount) +# _claimName_ (The PVC claim name for the data volume mount) +# _mountPath_ (The mount path in the server container for the data volume mount) +# _exportPath_ (The export path on the NFS server for the data volume mount) +# _subPath_ (A possible sub path for the data volume mount) +# _readOnly_ (Flag to mark the data volume mount as read only or read/write) +volumes: [] +# -- Temporary flag to make service deploy own namespace. +# Doing this to not disrupt other sites. +createNamespace: false diff --git a/applications/uws/values-tucson-teststand.yaml b/applications/uws/values-tucson-teststand.yaml index a8d3c2a85f..211ceec91f 100644 --- a/applications/uws/values-tucson-teststand.yaml +++ b/applications/uws/values-tucson-teststand.yaml @@ -1,3 +1,65 @@ +uws-api-server: + targetCluster: "tucson-teststand" + hostname: tucson-teststand.lsst.codes + image: + tag: latest + logLevel: INFO + vaultPathPrefix: secret/k8s_operator/tucson-teststand.lsst.codes + butlerPg: + secretKey: butler-secret + containerPath: /home/lsst/.lsst + dbUser: oods + workingVolume: + name: job-files + server: nfs-scratch.tu.lsst.org + exportPath: "/scratch" + claimName: uws-server-pvc + mountPath: "/uws" + subPath: "uws" + volumes: + - name: project + server: nfs-project.tu.lsst.org + claimName: project-pvc + mountPath: "/project" + exportPath: "/project" + subPath: "" + readOnly: false + - name: home + server: nfs-jhome.tu.lsst.org + claimName: home-pvc + mountPath: "/jhome" + exportPath: "/jhome" + subPath: "" + readOnly: false + - name: repo-latiss + server: nfs-auxtel.tu.lsst.org + claimName: repo-latiss-pvc + mountPath: "/repo/LATISS" + exportPath: "/auxtel/repo/LATISS" + subPath: "" + readOnly: false + - name: repo-comcam + server: comcam-archiver.tu.lsst.org + claimName: repo-comcam-pvc + 
mountPath: "/repo/LSSTComCam" + exportPath: "/repo/LSSTComCam" + subPath: "" + readOnly: false + - name: data-auxtel + server: nfs-auxtel.tu.lsst.org + claimName: data-auxtel-pvc + mountPath: "/data/lsstdata/TTS/auxtel" + exportPath: "/auxtel/lsstdata/TTS/auxtel" + subPath: "" + readOnly: true + - name: data-comcam + server: comcam-archiver.tu.lsst.org + claimName: data-comcam-pvc + mountPath: "/data/lsstdata/TTS/comcam" + exportPath: "/lsstdata/TTS/comcam" + subPath: "" + readOnly: true + csc_collector: secrets: - name: nexus3-docker From 7d31043dd4a57c6d81ca480542caab02e8f4c5c6 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 8 Sep 2023 11:14:10 -0700 Subject: [PATCH 494/588] Make pre-commit happy after large rebase. --- docs/extras/schemas/environment.json | 28 +++++++++++++++++++ environments/README.md | 14 ++++++++++ .../update_shared_chart_version.py | 12 ++++++-- 3 files changed, 52 insertions(+), 2 deletions(-) diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index f31d0a264b..c59efb1b8f 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -157,6 +157,34 @@ "default": null, "description": "Branch of the Git repository holding Argo CD configuration. 
This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", "title": "Git repository branch" + }, + "controlSystemAppNamespace": { + "title": "Controlsystemappnamespace", + "type": "string" + }, + "controlSystemImageTag": { + "title": "Controlsystemimagetag", + "type": "string" + }, + "controlSystemSiteTag": { + "title": "Controlsystemsitetag", + "type": "string" + }, + "controlSystemTopicName": { + "title": "Controlsystemtopicname", + "type": "string" + }, + "controlSystemKafkaBrokerAddress": { + "title": "Controlsystemkafkabrokeraddress", + "type": "string" + }, + "controlSystemSchemaRegistryUrl": { + "title": "Controlsystemschemaregistryurl", + "type": "string" + }, + "controlSystemS3EndpointUrl": { + "title": "Controlsystems3endpointurl", + "type": "string" } }, "required": [ diff --git a/environments/README.md b/environments/README.md index 71f5b10c8d..fdeb4c8a24 100644 --- a/environments/README.md +++ b/environments/README.md @@ -7,9 +7,12 @@ | applications.alert-stream-broker | bool | `false` | Enable the alert-stream-broker application | | applications.argo-workflows | bool | `false` | Enable the argo-workflows application | | applications.argocd | bool | `true` | Enable the Argo CD application. 
This must be enabled for all environments and is present here only because it makes parsing easier | +| applications.auxtel | bool | `false` | Enable the auxtel control system application | | applications.butler | bool | `false` | Enable the butler application | | applications.cert-manager | bool | `true` | Enable the cert-manager application, required unless the environment makes separate arrangements to inject a current TLS certificate | +| applications.control-system-test | bool | `false` | Enable the control-system-test application | | applications.datalinker | bool | `false` | Eanble the datalinker application | +| applications.eas | bool | `false` | Enable the eas control system application | | applications.exposurelog | bool | `false` | Enable the exposurelog application | | applications.gafaelfawr | bool | `true` | Enable the Gafaelfawr application. This is required by Phalanx since most other applications use `GafaelfawrIngress` | | applications.giftless | bool | `false` | Enable the giftless application | @@ -19,6 +22,7 @@ | applications.kubernetes-replicator | bool | `false` | Enable the kubernetes-replicator application | | applications.linters | bool | `false` | Enable the linters application | | applications.livetap | bool | `false` | Enable the livetap application | +| applications.love | bool | `false` | Enable the love control system application | | applications.mobu | bool | `false` | Enable the mobu application | | applications.monitoring | bool | `false` | Enable the monitoring application | | applications.narrativelog | bool | `false` | Enable the narrativelog application | @@ -26,6 +30,7 @@ | applications.noteburst | bool | `false` | Enable the noteburst application (required by times-square) | | applications.nublado | bool | `false` | Enable the nublado application (v3 of the Notebook Aspect) | | applications.obsloctap | bool | `false` | Enable the obsloctap application | +| applications.obssys | bool | `false` | Enable the obssys control 
system application | | applications.onepassword-connect | bool | `false` | Enable the onepassword-connect application | | applications.ook | bool | `false` | Enable the ook application | | applications.plot-navigator | bool | `false` | Enable the plot-navigator application | @@ -43,6 +48,7 @@ | applications.semaphore | bool | `false` | Enable the semaphore application | | applications.sherlock | bool | `false` | Enable the sherlock application | | applications.siav2 | bool | `false` | Enable the siav2 application | +| applications.simonyitel | bool | `false` | Enable the simonyitel control system application | | applications.sqlproxy-cross-project | bool | `false` | Enable the sqlproxy-cross-project application | | applications.squarebot | bool | `false` | Enable the squarebot application | | applications.squareone | bool | `false` | Enable the squareone application | @@ -53,9 +59,17 @@ | applications.telegraf | bool | `false` | Enable the telegraf application | | applications.telegraf-ds | bool | `false` | Enable the telegraf-ds application | | applications.times-square | bool | `false` | Enable the times-square application | +| applications.uws | bool | `false` | Enable the uws application. This includes the dmocps control system application. | | applications.vault-secrets-operator | bool | `true` | Enable the vault-secrets-operator application. This is required for all environments. 
| | applications.vo-cutouts | bool | `false` | Enable the vo-cutouts application | | butlerRepositoryIndex | string | None, must be set | Butler repository index to use for this environment | +| controlSystemAppNamespace | string | None, must be set | Application namespace for the control system deployment | +| controlSystemImageTag | string | None, must be set | Image tag for the control system deployment | +| controlSystemKafkaBrokerAddress | string | `"sasquatch-kafka-brokers.sasquatch:9092"` | Kafka broker address for the control system deployment | +| controlSystemS3EndpointUrl | string | None, must be set: "" | S3 endpoint (LFA) for the control system deployment | +| controlSystemSchemaRegistryUrl | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL for the control system deployment | +| controlSystemSiteTag | string | None, must be set | Site tag for the control system deployment | +| controlSystemTopicName | string | `"sal"` | Topic name tag for the control system deployment | | fqdn | string | None, must be set | Fully-qualified domain name where the environment is running | | name | string | None, must be set | Name of the environment | | repoUrl | string | `"https://github.com/lsst-sqre/phalanx.git"` | URL of the repository for all applications | diff --git a/src/phalanx/control_system/update_shared_chart_version.py b/src/phalanx/control_system/update_shared_chart_version.py index ec49782814..874e4f06e2 100644 --- a/src/phalanx/control_system/update_shared_chart_version.py +++ b/src/phalanx/control_system/update_shared_chart_version.py @@ -1,3 +1,5 @@ +"""Script for updating shared chart versions.""" + import argparse import pathlib @@ -32,6 +34,13 @@ def shared_chart(appdir: pathlib.Path, shared_dir: str) -> bool: def main(opts: argparse.Namespace) -> None: + """Execute shared chart version update. + + Parameters + ---------- + opts: `argparse.Namespace` + The command-line options. 
+ """ print( f"Updating {opts.app_type} apps Helm chart " f"to version {opts.chart_version}" @@ -53,13 +62,12 @@ def main(opts: argparse.Namespace) -> None: if dependency["name"] == DIR_MAP[opts.app_type]: dependency["version"] = opts.chart_version - # print(appdir, values) - with chart.open("w") as ofile: yaml.dump(values, ofile, sort_keys=False) def run() -> None: + """Script run function.""" description = [ "Update version for apps using the csc or shared Helm chart" ] From 0ec28bdc4bb799714560614b363df553d1f28258 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 8 Sep 2023 11:33:31 -0700 Subject: [PATCH 495/588] Update TTS tag for kafka deployment. --- environments/values-tucson-teststand.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 39df92db51..8adbe05318 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -22,6 +22,6 @@ applications: telegraf: true telegraf-ds: true -controlSystemImageTag: c0030 +controlSystemImageTag: k0001 controlSystemSiteTag: tucson controlSystemS3EndpointUrl: https://s3.tu.lsst.org From 6b6d037bc359105e255ab5e1925956d4aae22033 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 8 Sep 2023 13:59:00 -0700 Subject: [PATCH 496/588] Add integration-testing chart. 
--- applications/control-system-test/Chart.yaml | 3 + applications/control-system-test/README.md | 11 +- .../charts/integration-testing/Chart.yaml | 4 + .../charts/integration-testing/README.md | 16 + .../templates/cleanup-reports-workflow.yaml | 29 ++ .../templates/controller-configmap.yaml | 19 ++ .../templates/imaging-workflow.yaml | 251 ++++++++++++++ .../templates/job-workflow-template.yaml | 55 +++ .../integration-testing/templates/rbac.yaml | 31 ++ .../templates/restart-workflow.yaml | 142 ++++++++ .../templates/saved-reports-pvc.yaml | 10 + .../templates/shutdown-workflow.yaml | 54 +++ .../templates/testing-workflow.yaml | 318 ++++++++++++++++++ .../charts/integration-testing/values.yaml | 18 + .../values-tucson-teststand.yaml | 5 + applications/control-system-test/values.yaml | 54 ++- 16 files changed, 1018 insertions(+), 2 deletions(-) create mode 100644 applications/control-system-test/charts/integration-testing/Chart.yaml create mode 100644 applications/control-system-test/charts/integration-testing/README.md create mode 100644 applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml create mode 100644 applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml create mode 100644 applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml create mode 100644 applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml create mode 100644 applications/control-system-test/charts/integration-testing/templates/rbac.yaml create mode 100644 applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml create mode 100644 applications/control-system-test/charts/integration-testing/templates/saved-reports-pvc.yaml create mode 100644 applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml create mode 100644 
applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml create mode 100644 applications/control-system-test/charts/integration-testing/values.yaml mode change 120000 => 100644 applications/control-system-test/values.yaml diff --git a/applications/control-system-test/Chart.yaml b/applications/control-system-test/Chart.yaml index bcc8c6bb50..e7dd94e727 100644 --- a/applications/control-system-test/Chart.yaml +++ b/applications/control-system-test/Chart.yaml @@ -8,3 +8,6 @@ dependencies: - name: csc alias: test42 version: 1.0.0 +- name: integration-testing + version: 1.0.0 + condition: integration-testing.enabled diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md index ee96e268a0..aaf35fbe85 100644 --- a/applications/control-system-test/README.md +++ b/applications/control-system-test/README.md @@ -6,7 +6,6 @@ Deployment for the Test CSCs and Integration Testing Workflows | Key | Type | Default | Description | |-----|------|---------|-------------| -| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | @@ -17,3 +16,13 @@ Deployment for the Test CSCs and Integration Testing Workflows | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| integration-testing.enabled | bool | `false` | | +| integration-testing.envEfd | string | `nil` | The Name of the EFD instance. 
| +| integration-testing.image.tag | string | `nil` | The image tag for the Integration Test runner container | +| integration-testing.persistentVolume.claimName | string | `"saved-reports"` | PVC name for saving the reports | +| integration-testing.persistentVolume.storage | string | `"1Gi"` | Storage size request for the PVC | +| integration-testing.reportLocation | string | `"/home/saluser/robotframework_EFD/Reports"` | Container location of the RobotFramework reports | +| integration-testing.s3Bucket | string | `nil` | The S3 bucket name to use | +| integration-testing.serviceAccount | string | `"integration-tests"` | This sets the service account name | +| integration-testing.workflowName | string | `"integration-test-workflow"` | Name for the top-level workflow | diff --git a/applications/control-system-test/charts/integration-testing/Chart.yaml b/applications/control-system-test/charts/integration-testing/Chart.yaml new file mode 100644 index 0000000000..16458ad1e8 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/Chart.yaml @@ -0,0 +1,4 @@ +apiVersion: v2 +name: integration-testing +description: Helm chart for Integration Testing Workflows. +version: 1.0.0 diff --git a/applications/control-system-test/charts/integration-testing/README.md b/applications/control-system-test/charts/integration-testing/README.md new file mode 100644 index 0000000000..cee995e935 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/README.md @@ -0,0 +1,16 @@ +# integration-testing + +Helm chart for Integration Testing Workflows. + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| envEfd | string | `nil` | The Name of the EFD instance. 
| +| image.tag | string | `nil` | The image tag for the Integration Test runner container | +| persistentVolume.claimName | string | `"saved-reports"` | PVC name for saving the reports | +| persistentVolume.storage | string | `"1Gi"` | Storage size request for the PVC | +| reportLocation | string | `"/home/saluser/robotframework_EFD/Reports"` | Container location of the RobotFramework reports | +| s3Bucket | string | `nil` | The S3 bucket name to use | +| serviceAccount | string | `"integration-tests"` | This sets the service account name | +| workflowName | string | `"integration-test-workflow"` | Name for the top-level workflow | diff --git a/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml new file mode 100644 index 0000000000..3c7ca95b65 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml @@ -0,0 +1,29 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: cleanup-reports-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + entrypoint: cleanup-reports + templates: + - name: cleanup-reports + container: + image: alpine:latest + command: [sh, -c] + args: ["rm /pvc/*.*"] + volumeMounts: + - name: testreports + mountPath: /pvc diff --git a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml 
b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml new file mode 100644 index 0000000000..bac2f013b0 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: integration-test-controller-configmap + labels: + # Note that this label is required for the informer to detect this ConfigMap. + workflows.argoproj.io/configmap-type: Parameter +data: + artifactRepository: | # However, all nested maps must be strings + s3: + endpoint: {{ $.Values.global.controlSystemS3EndpointUrl }} + bucket: {{ .Values.s3Bucket }} + insecure: false + accessKeySecret: + name: lfa + key: aws-access-key-id + secretKeySecret: + name: lfa + key: aws-secret-access-key diff --git a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml new file mode 100644 index 0000000000..450a014722 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml @@ -0,0 +1,251 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: imaging-test-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230601" + entrypoint: run-tests + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: + - name: auxtel-housekeeping + templateRef: + name: integration-test-job-template 
+ template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Housekeeping.list" + - name: jobname + value: auxtel-housekeeping + - name: maintel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_MainTel_Housekeeping.list" + - name: jobname + value: maintel-housekeeping + - name: auxtel-image-verification + depends: auxtel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Image_Verification.list" + - name: jobname + value: auxtel-image-verification + - name: auxtel-latiss-daytime-checkout + depends: auxtel-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_LATISS_Checkout.list" + - name: jobname + value: auxtel-latiss-daytime-checkout + - name: auxtel-telescope-dome-daytime-checkout + depends: auxtel-latiss-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" + - name: jobname + value: auxtel-telescope-dome-daytime-checkout + - name: auxtel-prep-flat + depends: auxtel-telescope-dome-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Prep_Flat.list" + - name: jobname + value: auxtel-prep-flat + - name: auxtel-flat-calibrations + depends: auxtel-prep-flat + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Flat_Calibrations.list" + - 
name: jobname + value: auxtel-flat-calibrations + - name: auxtel-ptc-calibrations + depends: auxtel-flat-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_PTC_Calibrations.list" + - name: jobname + value: auxtel-ptc-calibrations + - name: auxtel-prep-onsky + depends: auxtel-ptc-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Prep_Onsky.list" + - name: jobname + value: auxtel-prep-onsky + - name: auxtel-cwfs-align + depends: auxtel-prep-onsky + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_WEP_Align.list" + - name: jobname + value: auxtel-cwfs-align + - name: auxtel-acq-take-seq-pointing + depends: auxtel-cwfs-align + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_POINTING.list" + - name: jobname + value: auxtel-acq-take-seq-pointing + - name: auxtel-acq-take-seq-verify + depends: auxtel-acq-take-seq-pointing + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_VERIFY.list" + - name: jobname + value: auxtel-acq-take-seq-verify + - name: auxtel-acq-take-seq-test + depends: auxtel-acq-take-seq-verify + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_TEST.list" + - name: jobname + value: auxtel-acq-take-seq-test + - name: auxtel-acq-take-seq-nominal + depends: auxtel-acq-take-seq-test + templateRef: + name: 
integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_NOMINAL.list" + - name: jobname + value: auxtel-acq-take-seq-nominal + - name: auxtel-stop + depends: auxtel-acq-take-seq-nominal + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Stop.list" + - name: jobname + value: auxtel-stop + - name: auxtel-shutdown + depends: auxtel-stop + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Shutdown.list" + - name: jobname + value: auxtel-shutdown + - name: enable-atcs + depends: auxtel-shutdown + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enable_ATCS.list" + - name: jobname + value: enable-atcs + - name: comcam-image-verification + depends: maintel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Image_Verification.list" + - name: jobname + value: comcam-image-verification + - name: comcam-calibrations + depends: comcam-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Calibrations.list" + - name: jobname + value: comcam-calibrations + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" 
}} diff --git a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml new file mode 100644 index 0000000000..4ac473ecb2 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml @@ -0,0 +1,55 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: integration-test-job-template +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + entrypoint: inttest-template + imagePullSecrets: + - name: nexus3-docker + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + templates: + - name: inttest-template + inputs: + parameters: + - name: integrationtest + value: "-A Run-Robot.list" + - name: jobname + value: "myjob" + outputs: + parameters: + - name: job-name + valueFrom: + jsonPath: '{.metadata.name}' + metadata: + labels: + argocd.argoproj.io/instance: integration-testing + securityContext: + runAsUser: 73006 + runAsGroup: 73006 + fsGroup: 73006 + container: + name: test-{{ printf "{{inputs.parameters.jobname}}" }} + {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystemImageTag }} + image: "ts-dockerhub.lsst.org/integrationtests:{{ $imageTag }}" + imagePullPolicy: Always + envFrom: + - configMapRef: + name: csc-env-config + - secretRef: + name: ts-salkafka + env: + - name: ENV_EFD + value: {{ .Values.envEfd }} + - name: RUN_ARG + value: {{ printf "'{{inputs.parameters.integrationtest}}'" }} + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + readOnly: false diff --git a/applications/control-system-test/charts/integration-testing/templates/rbac.yaml b/applications/control-system-test/charts/integration-testing/templates/rbac.yaml new file 
mode 100644 index 0000000000..a417c9c9e7 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/rbac.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Values.serviceAccount }}-role +rules: + - apiGroups: ["batch"] + resources: ["jobs", "jobs/status", "configmaps"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["argoproj.io"] + resources: ["workflowtaskresults"] + verbs: ["create", "patch"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Values.serviceAccount }}-rolebinding +roleRef: + kind: Role + name: {{ .Values.serviceAccount }}-role + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccount }} diff --git a/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml new file mode 100644 index 0000000000..bfe96f0150 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml @@ -0,0 +1,142 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: restart-test-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230601" + entrypoint: run-tests + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: 
+ - name: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Offline.list" + - name: jobname + value: cameras-offline + - name: standby + depends: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Standby.list" + - name: jobname + value: standby + - name: disabled + depends: standby + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Disabled.list" + - name: jobname + value: disabled + - name: enabled + depends: disabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enabled.list" + - name: jobname + value: enabled + - name: auxtel-housekeeping + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Housekeeping.list" + - name: jobname + value: auxtel-housekeeping + - name: maintel-housekeeping + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_MainTel_Housekeeping.list" + - name: jobname + value: maintel-housekeeping + - name: auxtel-image-verification + depends: auxtel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Image_Verification.list" + - name: jobname + value: auxtel-image-verification + - name: comcam-image-verification + depends: maintel-housekeeping + templateRef: + name: integration-test-job-template + 
template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Image_Verification.list" + - name: jobname + value: comcam-image-verification + - name: love-stress-test + depends: auxtel-image-verification && comcam-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_LOVE_Stress_Test.list" + - name: jobname + value: love-stress-test + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }} diff --git a/applications/control-system-test/charts/integration-testing/templates/saved-reports-pvc.yaml b/applications/control-system-test/charts/integration-testing/templates/saved-reports-pvc.yaml new file mode 100644 index 0000000000..53ed38d981 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/saved-reports-pvc.yaml @@ -0,0 +1,10 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ .Values.persistentVolume.claimName }} +spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: rook-ceph-block + resources: + requests: + storage: {{ .Values.persistentVolume.storage }} diff --git a/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml new file mode 100644 index 0000000000..3ff64caf36 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml @@ -0,0 +1,54 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: shutdown-workflow + labels: + 
workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230327" + entrypoint: run-tests + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: + - name: shutdown + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Shutdown.list" + - name: jobname + value: shutdown + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }} diff --git a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml new file mode 100644 index 0000000000..5453e43e46 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml @@ -0,0 +1,318 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: {{ .Values.workflowName }} + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: 
{{ .Values.persistentVolume.claimName }} + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20230327" + entrypoint: run-tests + onExit: save-reports + templates: + - name: run-tests + dag: + tasks: + - name: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Offline.list" + - name: jobname + value: cameras-offline + - name: standby + depends: cameras-offline + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Standby.list" + - name: jobname + value: standby + - name: disabled + depends: standby + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Disabled.list" + - name: jobname + value: disabled + - name: enabled + depends: disabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enabled.list" + - name: jobname + value: enabled + - name: auxtel-housekeeping + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Housekeeping.list" + - name: jobname + value: auxtel-housekeeping + - name: maintel-housekeeping + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_MainTel_Housekeeping.list" + - name: jobname + value: maintel-housekeeping + - name: auxtel-image-verification + depends: auxtel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + 
arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Image_Verification.list" + - name: jobname + value: auxtel-image-verification + - name: auxtel-latiss-daytime-checkout + depends: auxtel-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_LATISS_Checkout.list" + - name: jobname + value: auxtel-latiss-daytime-checkout + - name: auxtel-telescope-dome-daytime-checkout + depends: auxtel-latiss-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" + - name: jobname + value: auxtel-telescope-dome-daytime-checkout + - name: auxtel-prep-flat + depends: auxtel-telescope-dome-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Prep_Flat.list" + - name: jobname + value: auxtel-prep-flat + - name: auxtel-flat-calibrations + depends: auxtel-prep-flat + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Flat_Calibrations.list" + - name: jobname + value: auxtel-flat-calibrations + - name: auxtel-ptc-calibrations + depends: auxtel-flat-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_PTC_Calibrations.list" + - name: jobname + value: auxtel-ptc-calibrations + - name: auxtel-prep-onsky + depends: auxtel-ptc-calibrations + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A 
Test_Report_AuxTel_Prep_Onsky.list" + - name: jobname + value: auxtel-prep-onsky + - name: auxtel-cwfs-align + depends: auxtel-prep-onsky + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_WEP_Align.list" + - name: jobname + value: auxtel-cwfs-align + - name: auxtel-acq-take-seq-pointing + depends: auxtel-cwfs-align + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_POINTING.list" + - name: jobname + value: auxtel-acq-take-seq-pointing + - name: auxtel-acq-take-seq-verify + depends: auxtel-acq-take-seq-pointing + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_VERIFY.list" + - name: jobname + value: auxtel-acq-take-seq-verify + - name: auxtel-acq-take-seq-test + depends: auxtel-acq-take-seq-verify + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_TEST.list" + - name: jobname + value: auxtel-acq-take-seq-test + - name: auxtel-acq-take-seq-nominal + depends: auxtel-acq-take-seq-test + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Acq_Take_Seq_NOMINAL.list" + - name: jobname + value: auxtel-acq-take-seq-nominal + - name: auxtel-stop + depends: auxtel-acq-take-seq-nominal + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Stop.list" + - name: jobname + value: auxtel-stop + - name: auxtel-shutdown + depends: auxtel-stop + templateRef: + name: 
integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Shutdown.list" + - name: jobname + value: auxtel-shutdown + - name: enable-atcs + depends: auxtel-shutdown + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Enable_ATCS.list" + - name: jobname + value: enable-atcs + - name: comcam-image-verification + depends: maintel-housekeeping + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Image_Verification.list" + - name: jobname + value: comcam-image-verification + - name: comcam-calibrations + depends: comcam-image-verification + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_BigCamera_Calibrations.list" + - name: jobname + value: comcam-calibrations + - name: love-stress-test + depends: comcam-calibrations && enable-atcs + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_LOVE_Stress_Test.list" + - name: jobname + value: love-stress-test + - name: shutdown + depends: love-stress-test + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Shutdown.list" + - name: jobname + value: shutdown + + - name: save-reports + container: + image: alpine:latest + volumeMounts: + - name: testreports + mountPath: {{ .Values.reportLocation }} + outputs: + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/ + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }} diff --git 
a/applications/control-system-test/charts/integration-testing/values.yaml b/applications/control-system-test/charts/integration-testing/values.yaml new file mode 100644 index 0000000000..67083d7190 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/values.yaml @@ -0,0 +1,18 @@ +# -- The Name of the EFD instance. +envEfd: +# -- The S3 bucket name to use +s3Bucket: +# -- Container location of the RobotFramework reports +reportLocation: /home/saluser/robotframework_EFD/Reports +image: + # -- The image tag for the Integration Test runner container + tag: +# -- Name for the top-level workflow +workflowName: integration-test-workflow +# -- This sets the service account name +serviceAccount: integration-tests +persistentVolume: + # -- PVC name for saving the reports + claimName: saved-reports + # -- Storage size request for the PVC + storage: 1Gi diff --git a/applications/control-system-test/values-tucson-teststand.yaml b/applications/control-system-test/values-tucson-teststand.yaml index 51346cbc0f..9ec18f218e 100644 --- a/applications/control-system-test/values-tucson-teststand.yaml +++ b/applications/control-system-test/values-tucson-teststand.yaml @@ -15,3 +15,8 @@ test42: pullPolicy: Always env: RUN_ARG: 42 + +integration-testing: + enabled: true + envEfd: tucson_teststand_efd + s3Bucket: rubinobs-lfa-tuc diff --git a/applications/control-system-test/values.yaml b/applications/control-system-test/values.yaml deleted file mode 120000 index 22e98f1fe2..0000000000 --- a/applications/control-system-test/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/control-system-test/values.yaml b/applications/control-system-test/values.yaml new file mode 100644 index 0000000000..353a68dc0c --- /dev/null +++ b/applications/control-system-test/values.yaml @@ -0,0 +1,53 @@ +csc_collector: + # -- This section holds secret specifications. 
+ # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +integration-testing: + enabled: false + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" From e6463092cd6b27edf2f63c92c720bae6ce148bfc Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 8 Sep 2023 14:00:36 -0700 Subject: [PATCH 497/588] Add back missing application. 
--- environments/README.md | 1 + environments/values.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/environments/README.md b/environments/README.md index fdeb4c8a24..4464f09423 100644 --- a/environments/README.md +++ b/environments/README.md @@ -9,6 +9,7 @@ | applications.argocd | bool | `true` | Enable the Argo CD application. This must be enabled for all environments and is present here only because it makes parsing easier | | applications.auxtel | bool | `false` | Enable the auxtel control system application | | applications.butler | bool | `false` | Enable the butler application | +| applications.calsys | bool | `false` | Enable the calsys control system application | | applications.cert-manager | bool | `true` | Enable the cert-manager application, required unless the environment makes separate arrangements to inject a current TLS certificate | | applications.control-system-test | bool | `false` | Enable the control-system-test application | | applications.datalinker | bool | `false` | Eanble the datalinker application | diff --git a/environments/values.yaml b/environments/values.yaml index 1f1f6aa4c6..4a8149473c 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -43,6 +43,7 @@ applications: butler: false # -- Enable the calsys control system application + calsys: false # -- Enable the cert-manager application, required unless the environment # makes separate arrangements to inject a current TLS certificate From 5bc1b5c6b30adc89a5ba6390d31cbf60995fa76e Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 11 Sep 2023 12:33:34 -0700 Subject: [PATCH 498/588] Change love-manager target CPU utilization. 
--- applications/love/values-tucson-teststand.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index ac5d117a38..a80f47b16a 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -54,7 +54,7 @@ love-manager: enabled: true minReplicas: 2 maxReplicas: 25 - targetCPUUtilizationPercentage: 50 + targetCPUUtilizationPercentage: 75 resources: requests: cpu: 250m From 81a861b52c0d3e65e4f3f3b65e7fb530ca8039d7 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 11 Sep 2023 19:54:43 -0700 Subject: [PATCH 499/588] Switch to file references. --- applications/auxtel/Chart.yaml | 12 +++++ applications/auxtel/charts/csc | 1 - applications/auxtel/charts/csc_collector | 1 - applications/auxtel/values.yaml | 51 ++++++++++++++++++- applications/calsys/Chart.yaml | 3 ++ applications/calsys/charts/csc | 1 - applications/calsys/charts/csc_collector | 1 - applications/calsys/values.yaml | 51 ++++++++++++++++++- applications/control-system-test/Chart.yaml | 2 + applications/control-system-test/charts/csc | 1 - .../control-system-test/charts/csc_collector | 1 - applications/eas/Chart.yaml | 32 ++++++++++++ applications/eas/charts/csc | 1 - applications/eas/charts/csc_collector | 1 - applications/eas/values.yaml | 51 ++++++++++++++++++- applications/love/Chart.yaml | 2 + applications/love/charts/csc | 1 - applications/love/charts/csc_collector | 1 - applications/love/values.yaml | 51 ++++++++++++++++++- applications/obssys/Chart.yaml | 7 +++ applications/obssys/charts/csc | 1 - applications/obssys/charts/csc_collector | 1 - applications/obssys/values.yaml | 51 ++++++++++++++++++- applications/simonyitel/Chart.yaml | 25 +++++++++ applications/simonyitel/charts/csc | 1 - applications/simonyitel/charts/csc_collector | 1 - applications/simonyitel/values.yaml | 51 ++++++++++++++++++- applications/uws/Chart.yaml | 3 
++ applications/uws/charts/csc | 1 - applications/uws/charts/csc_collector | 1 - applications/uws/values.yaml | 51 ++++++++++++++++++- {shared/charts => charts}/csc/Chart.yaml | 2 +- {shared/charts => charts}/csc/README.md | 0 .../csc/templates/_helpers.tpl | 0 .../csc/templates/configfile-configmap.yaml | 0 .../csc/templates/entrypoint-configmap.yaml | 0 .../charts => charts}/csc/templates/job.yaml | 0 .../csc/templates/mountpoint-pvc.yaml | 0 .../csc/templates/service.yaml | 0 {shared/charts => charts}/csc/values.yaml | 0 .../csc_collector/Chart.yaml | 2 +- .../charts => charts}/csc_collector/README.md | 0 .../templates/configmap-env.yaml | 0 .../csc_collector/templates/vault-secret.yaml | 0 .../csc_collector/values.yaml | 0 shared/values/values_control_system_apps.yaml | 50 ------------------ 46 files changed, 438 insertions(+), 75 deletions(-) delete mode 120000 applications/auxtel/charts/csc delete mode 120000 applications/auxtel/charts/csc_collector mode change 120000 => 100644 applications/auxtel/values.yaml delete mode 120000 applications/calsys/charts/csc delete mode 120000 applications/calsys/charts/csc_collector mode change 120000 => 100644 applications/calsys/values.yaml delete mode 120000 applications/control-system-test/charts/csc delete mode 120000 applications/control-system-test/charts/csc_collector delete mode 120000 applications/eas/charts/csc delete mode 120000 applications/eas/charts/csc_collector mode change 120000 => 100644 applications/eas/values.yaml delete mode 120000 applications/love/charts/csc delete mode 120000 applications/love/charts/csc_collector mode change 120000 => 100644 applications/love/values.yaml delete mode 120000 applications/obssys/charts/csc delete mode 120000 applications/obssys/charts/csc_collector mode change 120000 => 100644 applications/obssys/values.yaml delete mode 120000 applications/simonyitel/charts/csc delete mode 120000 applications/simonyitel/charts/csc_collector mode change 120000 => 100644 
applications/simonyitel/values.yaml delete mode 120000 applications/uws/charts/csc delete mode 120000 applications/uws/charts/csc_collector mode change 120000 => 100644 applications/uws/values.yaml rename {shared/charts => charts}/csc/Chart.yaml (99%) rename {shared/charts => charts}/csc/README.md (100%) rename {shared/charts => charts}/csc/templates/_helpers.tpl (100%) rename {shared/charts => charts}/csc/templates/configfile-configmap.yaml (100%) rename {shared/charts => charts}/csc/templates/entrypoint-configmap.yaml (100%) rename {shared/charts => charts}/csc/templates/job.yaml (100%) rename {shared/charts => charts}/csc/templates/mountpoint-pvc.yaml (100%) rename {shared/charts => charts}/csc/templates/service.yaml (100%) rename {shared/charts => charts}/csc/values.yaml (100%) rename {shared/charts => charts}/csc_collector/Chart.yaml (88%) rename {shared/charts => charts}/csc_collector/README.md (100%) rename {shared/charts => charts}/csc_collector/templates/configmap-env.yaml (100%) rename {shared/charts => charts}/csc_collector/templates/vault-secret.yaml (100%) rename {shared/charts => charts}/csc_collector/values.yaml (100%) delete mode 100644 shared/values/values_control_system_apps.yaml diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index bfcf914e14..3fb42ed764 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -5,38 +5,50 @@ description: Deployment for the Auxiliary Telescope CSCs dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: hexapod-sim version: 1.0.0 - name: csc alias: ataos version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atdome version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atdome-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atdometrajectory version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atheaderservice version: 
1.0.0 + repository: "file://../../charts/csc" - name: csc alias: athexapod version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: athexapod-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atoods version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atptg version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atspectrograph version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atspectrograph-sim version: 1.0.0 + repository: "file://../../charts/csc" diff --git a/applications/auxtel/charts/csc b/applications/auxtel/charts/csc deleted file mode 120000 index 294046490f..0000000000 --- a/applications/auxtel/charts/csc +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc \ No newline at end of file diff --git a/applications/auxtel/charts/csc_collector b/applications/auxtel/charts/csc_collector deleted file mode 120000 index 3ced684acb..0000000000 --- a/applications/auxtel/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector \ No newline at end of file diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml deleted file mode 120000 index 22e98f1fe2..0000000000 --- a/applications/auxtel/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml new file mode 100644 index 0000000000..70e29b7d0b --- /dev/null +++ b/applications/auxtel/values.yaml @@ -0,0 +1,50 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
+ secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" diff --git a/applications/calsys/Chart.yaml b/applications/calsys/Chart.yaml index 011979f109..6c9670d485 100644 --- a/applications/calsys/Chart.yaml +++ b/applications/calsys/Chart.yaml @@ -5,9 +5,12 @@ description: Deployment for the Calibration System CSCs dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: csc alias: gcheaderservice1 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: simulation-gencam version: 1.0.0 + repository: "file://../../charts/csc" diff --git a/applications/calsys/charts/csc b/applications/calsys/charts/csc deleted file mode 120000 index 3a423a6f5f..0000000000 --- a/applications/calsys/charts/csc +++ /dev/null @@ -1 +0,0 @@ 
-../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/calsys/charts/csc_collector b/applications/calsys/charts/csc_collector deleted file mode 120000 index 38853814a3..0000000000 --- a/applications/calsys/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml deleted file mode 120000 index 22e98f1fe2..0000000000 --- a/applications/calsys/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml new file mode 100644 index 0000000000..70e29b7d0b --- /dev/null +++ b/applications/calsys/values.yaml @@ -0,0 +1,50 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" diff --git a/applications/control-system-test/Chart.yaml b/applications/control-system-test/Chart.yaml index e7dd94e727..e7b199e393 100644 --- a/applications/control-system-test/Chart.yaml +++ b/applications/control-system-test/Chart.yaml @@ -5,9 +5,11 @@ description: Deployment for the Test CSCs and Integration Testing Workflows dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: csc alias: test42 version: 1.0.0 + repository: "file://../../charts/csc" - name: integration-testing version: 1.0.0 condition: integration-testing.enabled diff --git a/applications/control-system-test/charts/csc b/applications/control-system-test/charts/csc deleted file mode 120000 index 3a423a6f5f..0000000000 --- a/applications/control-system-test/charts/csc +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc/ \ No newline at end of file diff 
--git a/applications/control-system-test/charts/csc_collector b/applications/control-system-test/charts/csc_collector deleted file mode 120000 index 38853814a3..0000000000 --- a/applications/control-system-test/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml index 6af654f0f0..b9651de186 100644 --- a/applications/eas/Chart.yaml +++ b/applications/eas/Chart.yaml @@ -5,96 +5,128 @@ description: Deployment for the Environmental Awareness Systems CSCs dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: csc alias: auxtel-ess01 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: auxtel-ess01-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: auxtel-ess02 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: auxtel-ess02-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: auxtel-ess03 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: auxtel-ess03-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: auxtel-ess04 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: auxtel-ess04-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: calibhill-ess01 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: calibhill-ess01-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: dimm1 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: dimm1-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: dimm2 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: dimm2-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: dsm1 version: 1.0.0 + repository: "file://../../charts/csc" - 
name: csc alias: dsm1-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: dsm2 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: dsm2-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-ess01 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-ess01-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-ess02 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-ess02-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-ess03 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-ess03-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: tma-ess01 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: tma-ess01-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: tma-ess104 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: tma-ess104-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: tma-ess105 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: tma-ess105-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: weatherforecast version: 1.0.0 + repository: "file://../../charts/csc" diff --git a/applications/eas/charts/csc b/applications/eas/charts/csc deleted file mode 120000 index 294046490f..0000000000 --- a/applications/eas/charts/csc +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc \ No newline at end of file diff --git a/applications/eas/charts/csc_collector b/applications/eas/charts/csc_collector deleted file mode 120000 index 3ced684acb..0000000000 --- a/applications/eas/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector \ No newline at end of file diff --git a/applications/eas/values.yaml b/applications/eas/values.yaml deleted file 
mode 120000 index 22e98f1fe2..0000000000 --- a/applications/eas/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/eas/values.yaml b/applications/eas/values.yaml new file mode 100644 index 0000000000..70e29b7d0b --- /dev/null +++ b/applications/eas/values.yaml @@ -0,0 +1,50 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" diff --git a/applications/love/Chart.yaml 
b/applications/love/Chart.yaml index bda5f7ab69..57212fe5f3 100644 --- a/applications/love/Chart.yaml +++ b/applications/love/Chart.yaml @@ -5,9 +5,11 @@ description: Deployment for the LSST Operators Visualization Environment dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: csc alias: love-commander version: 1.0.0 + repository: "file://../../charts/csc" - name: love-manager version: 1.0.0 - name: love-nginx diff --git a/applications/love/charts/csc b/applications/love/charts/csc deleted file mode 120000 index 294046490f..0000000000 --- a/applications/love/charts/csc +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc \ No newline at end of file diff --git a/applications/love/charts/csc_collector b/applications/love/charts/csc_collector deleted file mode 120000 index 3ced684acb..0000000000 --- a/applications/love/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector \ No newline at end of file diff --git a/applications/love/values.yaml b/applications/love/values.yaml deleted file mode 120000 index 22e98f1fe2..0000000000 --- a/applications/love/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/love/values.yaml b/applications/love/values.yaml new file mode 100644 index 0000000000..70e29b7d0b --- /dev/null +++ b/applications/love/values.yaml @@ -0,0 +1,50 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" diff --git a/applications/obssys/Chart.yaml b/applications/obssys/Chart.yaml index 8e25270e58..f53d3ded25 100644 --- a/applications/obssys/Chart.yaml +++ b/applications/obssys/Chart.yaml @@ -5,21 +5,28 @@ description: Deployment for the Observatory System CSCs dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: csc alias: atqueue version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atscheduler version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: authorize version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtqueue version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtscheduler version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: watcher version: 1.0.0 + repository: "file://../../charts/csc" diff --git 
a/applications/obssys/charts/csc b/applications/obssys/charts/csc deleted file mode 120000 index 3a423a6f5f..0000000000 --- a/applications/obssys/charts/csc +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/obssys/charts/csc_collector b/applications/obssys/charts/csc_collector deleted file mode 120000 index 38853814a3..0000000000 --- a/applications/obssys/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/obssys/values.yaml b/applications/obssys/values.yaml deleted file mode 120000 index 22e98f1fe2..0000000000 --- a/applications/obssys/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/obssys/values.yaml b/applications/obssys/values.yaml new file mode 100644 index 0000000000..70e29b7d0b --- /dev/null +++ b/applications/obssys/values.yaml @@ -0,0 +1,50 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" diff --git a/applications/simonyitel/Chart.yaml b/applications/simonyitel/Chart.yaml index ef31522b69..a2020f0818 100644 --- a/applications/simonyitel/Chart.yaml +++ b/applications/simonyitel/Chart.yaml @@ -5,75 +5,100 @@ description: Deployment for the Simonyi Survey Telescope CSCs dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: csc alias: ccheaderservice version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: ccoods version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: lasertracker1 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: lasertracker1-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtaircompressor1 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtaircompressor1-sim version: 1.0.0 + 
repository: "file://../../charts/csc" - name: csc alias: mtaircompressor2 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtaircompressor2-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtaos version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtcamhexapod version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtcamhexapod-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdometrajectory version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtm1m3-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtm2 version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtm2-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtm2hexapod version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtm2hexapod-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtmount version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtmount-sim version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtptg version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtrotator version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtrotator-sim version: 1.0.0 + repository: "file://../../charts/csc" diff --git a/applications/simonyitel/charts/csc b/applications/simonyitel/charts/csc deleted file mode 120000 index 3a423a6f5f..0000000000 --- a/applications/simonyitel/charts/csc +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/simonyitel/charts/csc_collector b/applications/simonyitel/charts/csc_collector deleted file mode 120000 index 
38853814a3..0000000000 --- a/applications/simonyitel/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml deleted file mode 120000 index 22e98f1fe2..0000000000 --- a/applications/simonyitel/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml new file mode 100644 index 0000000000..70e29b7d0b --- /dev/null +++ b/applications/simonyitel/values.yaml @@ -0,0 +1,50 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" diff --git a/applications/uws/Chart.yaml b/applications/uws/Chart.yaml index ea6539c2c5..46ed611daa 100644 --- a/applications/uws/Chart.yaml +++ b/applications/uws/Chart.yaml @@ -5,11 +5,14 @@ description: Deployment for the UWS and DM OCPS CSCs dependencies: - name: csc_collector version: 1.0.0 + repository: "file://../../charts/csc_collector" - name: csc alias: atocps version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: ccocps version: 1.0.0 + repository: "file://../../charts/csc" - name: uws-api-server version: 1.5.0 diff --git a/applications/uws/charts/csc b/applications/uws/charts/csc deleted file mode 120000 index 3a423a6f5f..0000000000 --- a/applications/uws/charts/csc +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc/ \ No newline at end of file diff --git a/applications/uws/charts/csc_collector b/applications/uws/charts/csc_collector deleted file 
mode 120000 index 38853814a3..0000000000 --- a/applications/uws/charts/csc_collector +++ /dev/null @@ -1 +0,0 @@ -../../../shared/charts/csc_collector/ \ No newline at end of file diff --git a/applications/uws/values.yaml b/applications/uws/values.yaml deleted file mode 120000 index 22e98f1fe2..0000000000 --- a/applications/uws/values.yaml +++ /dev/null @@ -1 +0,0 @@ -../../shared/values/values_control_system_apps.yaml \ No newline at end of file diff --git a/applications/uws/values.yaml b/applications/uws/values.yaml new file mode 100644 index 0000000000..70e29b7d0b --- /dev/null +++ b/applications/uws/values.yaml @@ -0,0 +1,50 @@ +csc_collector: + # -- This section holds secret specifications. + # Each object listed can have the following attributes defined: + # _name_ (The name used by pods to access the secret) + # _key_ (The key in the vault store where the secret resides) + # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) + secrets: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + + # -- Application namespacce for the control system deployment + # @default -- Set by ArgoCD + controlSystemAppNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemImageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemSiteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + controlSystemTopicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + controlSystemKafkaBrokerAddress: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + controlSystemSchemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + controlSystemS3EndpointUrl: "" diff --git a/shared/charts/csc/Chart.yaml b/charts/csc/Chart.yaml similarity index 99% rename from shared/charts/csc/Chart.yaml rename to charts/csc/Chart.yaml index 39e973dab7..6ddcdf43e1 100644 --- a/shared/charts/csc/Chart.yaml +++ b/charts/csc/Chart.yaml @@ -1,4 +1,4 @@ name: csc apiVersion: v2 version: 1.0.0 -description: A Helm chart for deploying the Control System CSCs. \ No newline at end of file +description: A Helm chart for deploying the Control System CSCs. 
diff --git a/shared/charts/csc/README.md b/charts/csc/README.md similarity index 100% rename from shared/charts/csc/README.md rename to charts/csc/README.md diff --git a/shared/charts/csc/templates/_helpers.tpl b/charts/csc/templates/_helpers.tpl similarity index 100% rename from shared/charts/csc/templates/_helpers.tpl rename to charts/csc/templates/_helpers.tpl diff --git a/shared/charts/csc/templates/configfile-configmap.yaml b/charts/csc/templates/configfile-configmap.yaml similarity index 100% rename from shared/charts/csc/templates/configfile-configmap.yaml rename to charts/csc/templates/configfile-configmap.yaml diff --git a/shared/charts/csc/templates/entrypoint-configmap.yaml b/charts/csc/templates/entrypoint-configmap.yaml similarity index 100% rename from shared/charts/csc/templates/entrypoint-configmap.yaml rename to charts/csc/templates/entrypoint-configmap.yaml diff --git a/shared/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml similarity index 100% rename from shared/charts/csc/templates/job.yaml rename to charts/csc/templates/job.yaml diff --git a/shared/charts/csc/templates/mountpoint-pvc.yaml b/charts/csc/templates/mountpoint-pvc.yaml similarity index 100% rename from shared/charts/csc/templates/mountpoint-pvc.yaml rename to charts/csc/templates/mountpoint-pvc.yaml diff --git a/shared/charts/csc/templates/service.yaml b/charts/csc/templates/service.yaml similarity index 100% rename from shared/charts/csc/templates/service.yaml rename to charts/csc/templates/service.yaml diff --git a/shared/charts/csc/values.yaml b/charts/csc/values.yaml similarity index 100% rename from shared/charts/csc/values.yaml rename to charts/csc/values.yaml diff --git a/shared/charts/csc_collector/Chart.yaml b/charts/csc_collector/Chart.yaml similarity index 88% rename from shared/charts/csc_collector/Chart.yaml rename to charts/csc_collector/Chart.yaml index 7eb70158e4..a58aa429b0 100644 --- a/shared/charts/csc_collector/Chart.yaml +++ 
b/charts/csc_collector/Chart.yaml @@ -1,4 +1,4 @@ name: csc_collector apiVersion: v2 version: 1.0.0 -description: A Helm chart provided shared information for Control System CSCs. \ No newline at end of file +description: A Helm chart provided shared information for Control System CSCs. diff --git a/shared/charts/csc_collector/README.md b/charts/csc_collector/README.md similarity index 100% rename from shared/charts/csc_collector/README.md rename to charts/csc_collector/README.md diff --git a/shared/charts/csc_collector/templates/configmap-env.yaml b/charts/csc_collector/templates/configmap-env.yaml similarity index 100% rename from shared/charts/csc_collector/templates/configmap-env.yaml rename to charts/csc_collector/templates/configmap-env.yaml diff --git a/shared/charts/csc_collector/templates/vault-secret.yaml b/charts/csc_collector/templates/vault-secret.yaml similarity index 100% rename from shared/charts/csc_collector/templates/vault-secret.yaml rename to charts/csc_collector/templates/vault-secret.yaml diff --git a/shared/charts/csc_collector/values.yaml b/charts/csc_collector/values.yaml similarity index 100% rename from shared/charts/csc_collector/values.yaml rename to charts/csc_collector/values.yaml diff --git a/shared/values/values_control_system_apps.yaml b/shared/values/values_control_system_apps.yaml deleted file mode 100644 index 70e29b7d0b..0000000000 --- a/shared/values/values_control_system_apps.yaml +++ /dev/null @@ -1,50 +0,0 @@ -csc_collector: - # -- This section holds secret specifications. - # Each object listed can have the following attributes defined: - # _name_ (The name used by pods to access the secret) - # _key_ (The key in the vault store where the secret resides) - # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) - secrets: [] - -# The following will be set by parameters injected by Argo CD and should not -# be set in the individual environment values files. 
-global: - # -- Base URL for the environment - # @default -- Set by Argo CD - baseUrl: "" - - # -- Host name for ingress - # @default -- Set by Argo CD - host: "" - - # -- Base path for Vault secrets - # @default -- Set by Argo CD - vaultSecretsPath: "" - - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - controlSystemKafkaBrokerAddress: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" From 9a4b3ef4348c663743e16c244e902ab2112b0840 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 08:43:27 -0700 Subject: [PATCH 500/588] Fix application enabling. 
--- environments/templates/auxtel-application.yaml | 2 +- environments/templates/calsys-application.yaml | 2 +- environments/templates/control-system-test-application.yaml | 2 +- environments/templates/eas-application.yaml | 2 +- environments/templates/love-application.yaml | 2 +- environments/templates/obssys-application.yaml | 2 +- environments/templates/simonyitel-application.yaml | 2 +- environments/templates/uws-application.yaml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml index 12dd1f9e7d..ada9684060 100644 --- a/environments/templates/auxtel-application.yaml +++ b/environments/templates/auxtel-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.auxtel.enabled -}} +{{- if (index .Values "applications" "auxtel") -}} apiVersion: v1 kind: Namespace metadata: diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml index 7f7d7f5e38..8a13485f0a 100644 --- a/environments/templates/calsys-application.yaml +++ b/environments/templates/calsys-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.calsys.enabled -}} +{{- if (index .Values "applications" "calsys") -}} apiVersion: v1 kind: Namespace metadata: diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml index a1efceff31..c10c858604 100644 --- a/environments/templates/control-system-test-application.yaml +++ b/environments/templates/control-system-test-application.yaml @@ -1,4 +1,4 @@ -{{- if (index .Values "strimzi-registry-operator" "enabled") -}} +{{- if (index .Values "applications" "control-system-test") -}} apiVersion: v1 kind: Namespace metadata: diff --git a/environments/templates/eas-application.yaml b/environments/templates/eas-application.yaml index f8c39ed3fd..1b1515842c 100644 --- a/environments/templates/eas-application.yaml +++ 
b/environments/templates/eas-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.eas.enabled -}} +{{- if (index .Values "applications" "eas") -}} apiVersion: v1 kind: Namespace metadata: diff --git a/environments/templates/love-application.yaml b/environments/templates/love-application.yaml index f899ba53dd..225ee1ea7a 100644 --- a/environments/templates/love-application.yaml +++ b/environments/templates/love-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.love.enabled -}} +{{- if (index .Values "applications" "love") -}} apiVersion: v1 kind: Namespace metadata: diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml index a856462112..147b9d6feb 100644 --- a/environments/templates/obssys-application.yaml +++ b/environments/templates/obssys-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.obssys.enabled -}} +{{- if (index .Values "applications" "obssys") -}} apiVersion: v1 kind: Namespace metadata: diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml index 112934afc2..0a4a0bd32c 100644 --- a/environments/templates/simonyitel-application.yaml +++ b/environments/templates/simonyitel-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.simonyitel.enabled -}} +{{- if (index .Values "applications" "simonyitel") -}} apiVersion: v1 kind: Namespace metadata: diff --git a/environments/templates/uws-application.yaml b/environments/templates/uws-application.yaml index 6c05734d0b..6a6e2e79c4 100644 --- a/environments/templates/uws-application.yaml +++ b/environments/templates/uws-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.uws.enabled -}} +{{- if (index .Values "applications" "uws") -}} apiVersion: argoproj.io/v1alpha1 kind: Application metadata: From fbc194d46382178d07241ed457348a2c67998104 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 08:52:28 -0700 Subject: [PATCH 501/588] Fix environment file issue. 
--- environments/templates/auxtel-application.yaml | 2 +- environments/templates/calsys-application.yaml | 2 +- environments/templates/control-system-test-application.yaml | 2 +- environments/templates/eas-application.yaml | 2 +- environments/templates/love-application.yaml | 2 +- environments/templates/obssys-application.yaml | 2 +- environments/templates/simonyitel-application.yaml | 2 +- environments/templates/uws-application.yaml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml index ada9684060..1f8e02c511 100644 --- a/environments/templates/auxtel-application.yaml +++ b/environments/templates/auxtel-application.yaml @@ -47,5 +47,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml index 8a13485f0a..80919b72e6 100644 --- a/environments/templates/calsys-application.yaml +++ b/environments/templates/calsys-application.yaml @@ -47,5 +47,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml index c10c858604..c9d0f8485b 100644 --- a/environments/templates/control-system-test-application.yaml +++ b/environments/templates/control-system-test-application.yaml @@ -47,5 +47,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/eas-application.yaml 
b/environments/templates/eas-application.yaml index 1b1515842c..5849dc0aac 100644 --- a/environments/templates/eas-application.yaml +++ b/environments/templates/eas-application.yaml @@ -47,5 +47,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/love-application.yaml b/environments/templates/love-application.yaml index 225ee1ea7a..d66c60fa58 100644 --- a/environments/templates/love-application.yaml +++ b/environments/templates/love-application.yaml @@ -47,5 +47,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml index 147b9d6feb..8fd38eebc2 100644 --- a/environments/templates/obssys-application.yaml +++ b/environments/templates/obssys-application.yaml @@ -47,5 +47,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml index 0a4a0bd32c..354c885fb4 100644 --- a/environments/templates/simonyitel-application.yaml +++ b/environments/templates/simonyitel-application.yaml @@ -47,5 +47,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} diff --git a/environments/templates/uws-application.yaml b/environments/templates/uws-application.yaml index 6a6e2e79c4..7768a1a8bd 100644 --- a/environments/templates/uws-application.yaml +++ b/environments/templates/uws-application.yaml @@ -39,5 
+39,5 @@ spec: value: {{ .Values.controlSystemS3EndpointUrl | quote }} valueFiles: - "values.yaml" - - "values-{{ .Values.environment }}.yaml" + - "values-{{ .Values.name }}.yaml" {{- end -}} From 9a259d88612ed87982f6c50f81e4749d07ab5d6a Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 09:15:47 -0700 Subject: [PATCH 502/588] Add namespace to uws app. --- environments/templates/uws-application.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/environments/templates/uws-application.yaml b/environments/templates/uws-application.yaml index 7768a1a8bd..83d73b8601 100644 --- a/environments/templates/uws-application.yaml +++ b/environments/templates/uws-application.yaml @@ -1,4 +1,12 @@ {{- if (index .Values "applications" "uws") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: uws +spec: + finalizers: + - kubernetes +--- apiVersion: argoproj.io/v1alpha1 kind: Application metadata: From 4c5fe84997c388c5734d22c024726319ea77cb1d Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 09:36:17 -0700 Subject: [PATCH 503/588] Update hexapod-sim chart. 
--- applications/auxtel/Chart.yaml | 1 + applications/auxtel/README.md | 2 +- applications/auxtel/charts/hexapod-sim/README.md | 1 - .../charts/hexapod-sim/templates/_helpers.tpl | 7 +++++++ .../charts/hexapod-sim/templates/deployment.yaml | 16 +++++++--------- .../charts/hexapod-sim/templates/service.yaml | 8 +++----- .../auxtel/charts/hexapod-sim/values.yaml | 2 -- applications/auxtel/values.yaml | 3 +++ 8 files changed, 22 insertions(+), 18 deletions(-) create mode 100644 applications/auxtel/charts/hexapod-sim/templates/_helpers.tpl diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index 3fb42ed764..164a102ede 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -8,6 +8,7 @@ dependencies: repository: "file://../../charts/csc_collector" - name: hexapod-sim version: 1.0.0 + condition: hexapod-sim.enabled - name: csc alias: ataos version: 1.0.0 diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index 933e46278f..c597b11987 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -17,7 +17,7 @@ Deployment for the Auxiliary Telescope CSCs | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| -| hexapod-sim.enabled | bool | `false` | Flag to enable the given CSC application | +| hexapod-sim.enabled | bool | `false` | | | hexapod-sim.image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | | hexapod-sim.image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | | hexapod-sim.image.repository | string | `"ts-dockerhub.lsst.org/hexapod_simulator"` | The Docker registry name of the container image | diff --git a/applications/auxtel/charts/hexapod-sim/README.md b/applications/auxtel/charts/hexapod-sim/README.md index 0fae7a3137..b5a01ae11b 100644 --- a/applications/auxtel/charts/hexapod-sim/README.md +++ b/applications/auxtel/charts/hexapod-sim/README.md @@ -6,7 +6,6 @@ Chart for the hexapod simulator that supports the ATHexapod | Key | Type | Default | Description | |-----|------|---------|-------------| -| enabled | bool | `false` | Flag to enable the given CSC application | | image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | | image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | | image.repository | string | `"ts-dockerhub.lsst.org/hexapod_simulator"` | The Docker registry name of the container image | diff --git a/applications/auxtel/charts/hexapod-sim/templates/_helpers.tpl b/applications/auxtel/charts/hexapod-sim/templates/_helpers.tpl new file mode 100644 index 0000000000..b0b8517ad5 --- /dev/null +++ b/applications/auxtel/charts/hexapod-sim/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "hexapod-sim.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml b/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml index 1461b78ba9..67ca984209 100644 --- a/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml +++ b/applications/auxtel/charts/hexapod-sim/templates/deployment.yaml @@ -1,29 +1,27 @@ -{{- if .Values.enabled -}} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ .Release.Name }} + name: {{ include "hexapod-sim.name" . }} namespace: {{ .Values.namespace }} labels: - app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} spec: replicas: 1 selector: matchLabels: - app: {{ .Release.Name }} - app.kubernetes.io/instance: {{ $.Release.Name }} + app: {{ include "hexapod-sim.name" . }} + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} template: metadata: labels: - app: {{ .Release.Name }} - app.kubernetes.io/instance: {{ $.Release.Name }} + app: {{ include "hexapod-sim.name" . }} + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} spec: containers: - - name: {{ .Release.Name }} + - name: {{ include "hexapod-sim.name" . 
}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} stdin: true tty: true imagePullSecrets: - name: nexus3-docker -{{- end -}} diff --git a/applications/auxtel/charts/hexapod-sim/templates/service.yaml b/applications/auxtel/charts/hexapod-sim/templates/service.yaml index 3c3d375ead..64bd7bf413 100644 --- a/applications/auxtel/charts/hexapod-sim/templates/service.yaml +++ b/applications/auxtel/charts/hexapod-sim/templates/service.yaml @@ -1,16 +1,14 @@ -{{- if .Values.enabled -}} apiVersion: v1 kind: Service metadata: labels: - app.kubernetes.io/instance: {{ .Release.Name }} - name: {{ .Release.Name }} + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} + name: {{ include "hexapod-sim.name" . }} namespace: {{ .Values.namespace }} spec: ports: - port: 50000 targetPort: 50000 selector: - app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/instance: {{ include "hexapod-sim.name" . }} type: ClusterIP -{{- end -}} diff --git a/applications/auxtel/charts/hexapod-sim/values.yaml b/applications/auxtel/charts/hexapod-sim/values.yaml index 699b4dd6b2..e3daccc617 100644 --- a/applications/auxtel/charts/hexapod-sim/values.yaml +++ b/applications/auxtel/charts/hexapod-sim/values.yaml @@ -1,5 +1,3 @@ -# -- Flag to enable the given CSC application -enabled: false # -- This is the namespace in which the hexapod controller simulator will be placed namespace: auxtel # -- This section holds the configuration of the container image diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml index 70e29b7d0b..fee74bcbd1 100644 --- a/applications/auxtel/values.yaml +++ b/applications/auxtel/values.yaml @@ -6,6 +6,9 @@ csc_collector: # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) secrets: [] +hexapod-sim: + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. 
global: From 42a3e56f62d3ed0dcbb09763ed5a1d0bf24800f8 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 11:46:24 -0700 Subject: [PATCH 504/588] Fixup love charts. --- .../charts/love-manager/templates/_helpers.tpl | 18 +++++++++--------- .../charts/love-nginx/templates/_helpers.tpl | 2 +- .../charts/love-nginx/templates/ingress.yaml | 4 ++-- .../love-nginx/templates/nginx-deployment.yaml | 2 +- .../charts/love-nginx/templates/service.yaml | 4 ++-- .../love-producer/templates/_helpers.tpl | 2 +- .../love-producer/templates/deployment.yaml | 2 +- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/applications/love/charts/love-manager/templates/_helpers.tpl b/applications/love/charts/love-manager/templates/_helpers.tpl index 661706d2ca..e5165d7e20 100644 --- a/applications/love/charts/love-manager/templates/_helpers.tpl +++ b/applications/love/charts/love-manager/templates/_helpers.tpl @@ -15,8 +15,8 @@ If release name contains chart name it will be used as a full name. {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- if contains .Release.Name $name }} +{{- $name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} @@ -43,7 +43,7 @@ Selector labels */}} {{- define "love-manager.selectorLabels" -}} app.kubernetes.io/name: {{ include "love-manager.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/instance: {{ include "love-manager.name" . }} {{- end }} {{/* @@ -82,8 +82,8 @@ If release name contains chart name it will be used as a full name. 
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- printf "%s-database" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- if contains .Release.Name $name }} +{{- printf "%s-database" $name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s-database" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} @@ -116,8 +116,8 @@ If release name contains chart name it will be used as a full name. {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- printf "%s-redis" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- if contains .Release.Name $name }} +{{- printf "%s-redis" $name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s-redis" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} @@ -150,8 +150,8 @@ If release name contains chart name it will be used as a full name. {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} {{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- printf "%s-view-backup" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- if contains .Release.Name $name }} +{{- printf "%s-view-backup" $name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s-view-backup" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} diff --git a/applications/love/charts/love-nginx/templates/_helpers.tpl b/applications/love/charts/love-nginx/templates/_helpers.tpl index 75e97afd42..82f31f6cdf 100644 --- a/applications/love/charts/love-nginx/templates/_helpers.tpl +++ b/applications/love/charts/love-nginx/templates/_helpers.tpl @@ -43,5 +43,5 @@ Selector labels */}} {{- define "love-nginx.selectorLabels" -}} app.kubernetes.io/name: {{ include "love-nginx.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/instance: {{ include "love-nginx.name" . }} {{- end }} diff --git a/applications/love/charts/love-nginx/templates/ingress.yaml b/applications/love/charts/love-nginx/templates/ingress.yaml index 1f093ed75a..693b94ff2c 100644 --- a/applications/love/charts/love-nginx/templates/ingress.yaml +++ b/applications/love/charts/love-nginx/templates/ingress.yaml @@ -3,7 +3,7 @@ apiVersion: networking.k8s.io/v1 kind: Ingress metadata: - name: {{ .Release.Name }}-ingress + name: {{ include "love-nginx.name" . }}-ingress namespace: {{ $.Values.global.controlSystemAppNamespace }} {{- with .Values.ingress.annotations }} annotations: @@ -21,7 +21,7 @@ spec: pathType: {{ default "Prefix" .Values.ingress.pathType }} backend: service: - name: {{ .Release.Name }}-service + name: {{ include "love-nginx.name" . }}-service port: number: {{ .Values.ports.container }} {{- end }} diff --git a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml index 93000d0cc2..f85aa77195 100644 --- a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml +++ b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "love-nginx.fullname" . }} + name: {{ include "love-nginx.name" . }} namespace: {{ $.Values.global.controlSystemAppNamespace }} labels: {{- include "love-nginx.labels" . | nindent 4 }} diff --git a/applications/love/charts/love-nginx/templates/service.yaml b/applications/love/charts/love-nginx/templates/service.yaml index fea87197b3..70b2972a87 100644 --- a/applications/love/charts/love-nginx/templates/service.yaml +++ b/applications/love/charts/love-nginx/templates/service.yaml @@ -2,11 +2,11 @@ apiVersion: v1 kind: Service metadata: - name: {{ include "love-nginx.fullname" . }}-service + name: {{ include "love-nginx.name" . 
}}-service namespace: {{ $.Values.global.controlSystemAppNamespace }} spec: selector: - app.kubernetes.io/instance: {{ include "love-nginx.fullname" . }} + app.kubernetes.io/instance: {{ include "love-nginx.name" . }} type: {{ .Values.serviceType }} ports: - port: {{ .Values.ports.container }} diff --git a/applications/love/charts/love-producer/templates/_helpers.tpl b/applications/love/charts/love-producer/templates/_helpers.tpl index b012503f89..af6ce0dc52 100644 --- a/applications/love/charts/love-producer/templates/_helpers.tpl +++ b/applications/love/charts/love-producer/templates/_helpers.tpl @@ -27,7 +27,7 @@ If release name contains chart name it will be used as a full name. Create app name from release and producer name. */}} {{- define "love-producer.appName" -}} -{{ printf "%s-%s" .Release.Name .Producer | trunc 63 | trimSuffix "-" }} +{{ printf "%s-producer-%s" .Release.Name .Producer | trunc 63 | trimSuffix "-" }} {{- end }} {{/* diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml index e2026a0e44..be2a6ef4a0 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -1,5 +1,5 @@ {{- range $producer := .Values.producers }} -{{ $appName := printf "%s-%s" $.Release.Name $producer.name | trunc 63 | trimSuffix "-" }} +{{ $appName := printf "%s-producer-%s" $.Release.Name $producer.name | trunc 63 | trimSuffix "-" }} --- apiVersion: apps/v1 kind: Deployment From e905f579b10ac1c31b390fddcee4c120cb7f6aec Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 14:18:21 -0700 Subject: [PATCH 505/588] Chart updates to make things run. 
--- .../charts/love-manager/templates/manager-deployment.yaml | 2 -- .../charts/love-manager/templates/view-backup-cronjob.yaml | 4 ++-- .../love/charts/love-nginx/templates/nginx-deployment.yaml | 2 +- .../love/charts/love-producer/templates/deployment.yaml | 4 ++-- charts/csc/templates/_helpers.tpl | 2 +- charts/csc/templates/job.yaml | 4 ++-- charts/csc_collector/templates/configmap-env.yaml | 4 ++-- 7 files changed, 10 insertions(+), 12 deletions(-) diff --git a/applications/love/charts/love-manager/templates/manager-deployment.yaml b/applications/love/charts/love-manager/templates/manager-deployment.yaml index 39abc8590d..f48f0591f3 100644 --- a/applications/love/charts/love-manager/templates/manager-deployment.yaml +++ b/applications/love/charts/love-manager/templates/manager-deployment.yaml @@ -37,10 +37,8 @@ spec: readinessProbe: {{- toYaml $.Values.readinessProbe | nindent 10 }} {{- end }} - {{- if $.Values.image.nexus3 }} imagePullSecrets: - name: nexus3-docker - {{- end }} {{- with $.Values.nodeSelector }} nodeSelector: {{- toYaml $ | nindent 8 }} diff --git a/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml index b3103f5078..14e6185b2b 100644 --- a/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml +++ b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml @@ -30,7 +30,7 @@ spec: imagePullPolicy: {{ .Values.viewBackup.image.pullPolicy }} envFrom: - configMapRef: - name: csc-env-config + name: csc-env-config env: - name: PGHOST value: {{ .Values.env.DB_HOST | quote }} @@ -49,7 +49,7 @@ spec: - name: PGPASSWORD valueFrom: secretKeyRef: - name: {{ .Values.namespace }}-secrets + name: love-secrets key: {{ .Values.database.envSecrets.POSTGRES_PASSWORD }} - name: AWS_ACCESS_KEY_ID valueFrom: diff --git a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml 
b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml index f85aa77195..2b7b48fcff 100644 --- a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml +++ b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml @@ -42,7 +42,7 @@ spec: - mountPath: /usr/src name: {{ .Values.staticStore.name }} containers: - - name: {{ include "love-nginx.fullname" . }} + - name: {{ include "love-nginx.name" . }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml index be2a6ef4a0..2be364334e 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -31,9 +31,9 @@ spec: imagePullPolicy: {{ $.Values.image.pullPolicy }} envFrom: - configMapRef: - name: csc-env-config + name: csc-env-config - secretRef: - name: ts-salkafka + name: ts-salkafka env: - name: LOVE_CSC_PRODUCER value: {{ $csc | quote }} diff --git a/charts/csc/templates/_helpers.tpl b/charts/csc/templates/_helpers.tpl index 21668ec926..4a3024ae08 100644 --- a/charts/csc/templates/_helpers.tpl +++ b/charts/csc/templates/_helpers.tpl @@ -73,5 +73,5 @@ Selector labels csc: {{ include "chart.name" . }} csc-name: {{ include "csc.name" . }} csc-class: {{ include "csc.class" . 
}} -csc-is-primary: {{ .Values.isPrimary }} +csc-is-primary: {{ .Values.isPrimary | quote }} {{- end -}} diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml index 3913701abd..9bddc84e71 100644 --- a/charts/csc/templates/job.yaml +++ b/charts/csc/templates/job.yaml @@ -30,9 +30,9 @@ spec: tty: true envFrom: - configMapRef: - name: csc-env-config + name: csc-env-config - secretRef: - name: ts-salkafka + name: ts-salkafka {{- if or (or .Values.env .Values.envSecrets) .Values.butlerSecret }} env: {{- range $env_var, $env_value := .Values.env }} diff --git a/charts/csc_collector/templates/configmap-env.yaml b/charts/csc_collector/templates/configmap-env.yaml index 913433d6de..072fb602c3 100644 --- a/charts/csc_collector/templates/configmap-env.yaml +++ b/charts/csc_collector/templates/configmap-env.yaml @@ -1,10 +1,10 @@ apiVersion: v1 kind: ConfigMap metadata: - name: csc-env-configfile + name: csc-env-config data: LSST_SITE: {{ $.Values.global.controlSystemSiteTag }} - LSST_TOPIC_NAME: {{ $.Values.global.controlSystemTopicName }} + LSST_TOPIC_SUBNAME: {{ $.Values.global.controlSystemTopicName }} LSST_KAFKA_BROKER_ADDR: {{ $.Values.global.controlSystemKafkaBrokerAddress }} LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystemSchemaRegistryUrl }} S3_ENDPOINT_URL: {{ $.Values.global.controlSystemS3EndpointUrl }} From e9af97f293dca8e9dab106c32978c965e3ecfcbb Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 15:04:01 -0700 Subject: [PATCH 506/588] Change broker address for TTS. 
--- environments/values-tucson-teststand.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 8adbe05318..79146b7628 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -23,5 +23,6 @@ applications: telegraf-ds: true controlSystemImageTag: k0001 +controlSystemKafkaBrokerAddress: sasquatch-tts-kafka-bootstrap.lsst.codes:9094 controlSystemSiteTag: tucson controlSystemS3EndpointUrl: https://s3.tu.lsst.org From 015cfb5337d026e7eb17809ad40eb8c0b2ff9cf9 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 15:47:48 -0700 Subject: [PATCH 507/588] Change kafka auth handling. --- .../templates/job-workflow-template.yaml | 7 +++++-- .../love/charts/love-producer/templates/deployment.yaml | 7 +++++-- charts/csc/templates/job.yaml | 9 ++++++--- charts/csc_collector/templates/configmap-env.yaml | 2 +- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml index 4ac473ecb2..bf19252611 100644 --- a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml @@ -42,13 +42,16 @@ spec: envFrom: - configMapRef: name: csc-env-config - - secretRef: - name: ts-salkafka env: - name: ENV_EFD value: {{ .Values.envEfd }} - name: RUN_ARG value: {{ printf "'{{inputs.parameters.integrationtest}}'" }} + - name: LSST_SASL_PLAIN_PASSWORD + valueFrom: + secretKeyRef: + name: ts-salkafka + key: ts-salkafka-password volumeMounts: - name: testreports mountPath: {{ .Values.reportLocation }} diff --git a/applications/love/charts/love-producer/templates/deployment.yaml 
b/applications/love/charts/love-producer/templates/deployment.yaml index 2be364334e..5721fc772d 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -32,11 +32,14 @@ spec: envFrom: - configMapRef: name: csc-env-config - - secretRef: - name: ts-salkafka env: - name: LOVE_CSC_PRODUCER value: {{ $csc | quote }} + - name: LSST_KAFKA_SECURITY_PASSWORD + valueFrom: + secretKeyRef: + name: ts-salkafka + key: ts-salkafka-password {{- range $env_var, $env_value := $.Values.env }} - name: {{ $env_var }} value: {{ $env_value | quote }} diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml index 9bddc84e71..a7b55b151a 100644 --- a/charts/csc/templates/job.yaml +++ b/charts/csc/templates/job.yaml @@ -31,10 +31,13 @@ spec: envFrom: - configMapRef: name: csc-env-config - - secretRef: - name: ts-salkafka - {{- if or (or .Values.env .Values.envSecrets) .Values.butlerSecret }} env: + - name: LSST_KAFKA_SECURITY_PASSWORD + valueFrom: + secretKeyRef: + name: ts-salkafka + key: ts-salkafka-password + {{- if or (or .Values.env .Values.envSecrets) .Values.butlerSecret }} {{- range $env_var, $env_value := .Values.env }} - name: {{ $env_var }} value: {{ $env_value | quote }} diff --git a/charts/csc_collector/templates/configmap-env.yaml b/charts/csc_collector/templates/configmap-env.yaml index 072fb602c3..6f04ed0987 100644 --- a/charts/csc_collector/templates/configmap-env.yaml +++ b/charts/csc_collector/templates/configmap-env.yaml @@ -6,6 +6,6 @@ data: LSST_SITE: {{ $.Values.global.controlSystemSiteTag }} LSST_TOPIC_SUBNAME: {{ $.Values.global.controlSystemTopicName }} LSST_KAFKA_BROKER_ADDR: {{ $.Values.global.controlSystemKafkaBrokerAddress }} + LSST_KAFKA_SECURITY_USERNAME: ts-salkafka LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystemSchemaRegistryUrl }} S3_ENDPOINT_URL: {{ $.Values.global.controlSystemS3EndpointUrl }} - TS_SALKAFKA_USERNAME: ts-salkafka 
From 54609bbbe6d53fbb0db11dfb37c1b9207d9b5274 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 12 Sep 2023 17:26:39 -0700 Subject: [PATCH 508/588] Add doc stubs for applications. --- docs/applications/auxtel/index.rst | 16 ++++++++++++++++ docs/applications/auxtel/values.md | 12 ++++++++++++ docs/applications/calsys/index.rst | 16 ++++++++++++++++ docs/applications/calsys/values.md | 12 ++++++++++++ docs/applications/control-system-test/index.rst | 16 ++++++++++++++++ docs/applications/control-system-test/values.md | 12 ++++++++++++ docs/applications/eas/index.rst | 16 ++++++++++++++++ docs/applications/eas/values.md | 12 ++++++++++++ docs/applications/love/index.rst | 16 ++++++++++++++++ docs/applications/love/values.md | 12 ++++++++++++ docs/applications/obssys/index.rst | 16 ++++++++++++++++ docs/applications/obssys/values.md | 12 ++++++++++++ docs/applications/simonyitel/index.rst | 16 ++++++++++++++++ docs/applications/simonyitel/values.md | 12 ++++++++++++ docs/applications/uws/index.rst | 16 ++++++++++++++++ docs/applications/uws/values.md | 12 ++++++++++++ 16 files changed, 224 insertions(+) create mode 100644 docs/applications/auxtel/index.rst create mode 100644 docs/applications/auxtel/values.md create mode 100644 docs/applications/calsys/index.rst create mode 100644 docs/applications/calsys/values.md create mode 100644 docs/applications/control-system-test/index.rst create mode 100644 docs/applications/control-system-test/values.md create mode 100644 docs/applications/eas/index.rst create mode 100644 docs/applications/eas/values.md create mode 100644 docs/applications/love/index.rst create mode 100644 docs/applications/love/values.md create mode 100644 docs/applications/obssys/index.rst create mode 100644 docs/applications/obssys/values.md create mode 100644 docs/applications/simonyitel/index.rst create mode 100644 docs/applications/simonyitel/values.md create mode 100644 docs/applications/uws/index.rst create mode 100644 
docs/applications/uws/values.md diff --git a/docs/applications/auxtel/index.rst b/docs/applications/auxtel/index.rst new file mode 100644 index 0000000000..4a55253196 --- /dev/null +++ b/docs/applications/auxtel/index.rst @@ -0,0 +1,16 @@ +.. px-app:: auxtel + +###################################################### +auxtel — Auxiliary Telescope Control System Components +###################################################### + +.. jinja:: auxtel + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/auxtel/values.md b/docs/applications/auxtel/values.md new file mode 100644 index 0000000000..f038df7882 --- /dev/null +++ b/docs/applications/auxtel/values.md @@ -0,0 +1,12 @@ +```{px-app-values} auxtel +``` + +# AuxTel Helm values reference + +Helm values reference table for the {px-app}`auxtel` application. + +```{include} ../../../applications/auxtel/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/calsys/index.rst b/docs/applications/calsys/index.rst new file mode 100644 index 0000000000..cf332a2033 --- /dev/null +++ b/docs/applications/calsys/index.rst @@ -0,0 +1,16 @@ +.. px-app:: calsys + +###################################################### +calsys — Calibration Systems Control System Components +###################################################### + +.. jinja:: calsys + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/calsys/values.md b/docs/applications/calsys/values.md new file mode 100644 index 0000000000..c7f9cb8582 --- /dev/null +++ b/docs/applications/calsys/values.md @@ -0,0 +1,12 @@ +```{px-app-values} calsys +``` + +# CalSys Helm values reference + +Helm values reference table for the {px-app}`calsys` application. 
+ +```{include} ../../../applications/calsys/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/control-system-test/index.rst b/docs/applications/control-system-test/index.rst new file mode 100644 index 0000000000..4cd2127866 --- /dev/null +++ b/docs/applications/control-system-test/index.rst @@ -0,0 +1,16 @@ +.. px-app:: control-system-test + +################################################################### +control-system-test — Systems for Testing Control System Components +################################################################### + +.. jinja:: control-system-test + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/control-system-test/values.md b/docs/applications/control-system-test/values.md new file mode 100644 index 0000000000..ae16e7ad84 --- /dev/null +++ b/docs/applications/control-system-test/values.md @@ -0,0 +1,12 @@ +```{px-app-values} control-system-test +``` + +# Control-System-Test Helm values reference + +Helm values reference table for the {px-app}`control-system-test` application. + +```{include} ../../../applications/control-system-test/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/eas/index.rst b/docs/applications/eas/index.rst new file mode 100644 index 0000000000..50bda5caf0 --- /dev/null +++ b/docs/applications/eas/index.rst @@ -0,0 +1,16 @@ +.. px-app:: eas + +############################################################## +eas — Environmental Awareness System Control System Components +############################################################## + +.. jinja:: eas + :file: applications/_summary.rst.jinja + +Guides +====== + +.. 
toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/eas/values.md b/docs/applications/eas/values.md new file mode 100644 index 0000000000..bc1a032bec --- /dev/null +++ b/docs/applications/eas/values.md @@ -0,0 +1,12 @@ +```{px-app-values} eas +``` + +# EAS Helm values reference + +Helm values reference table for the {px-app}`eas` application. + +```{include} ../../../applications/eas/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/love/index.rst b/docs/applications/love/index.rst new file mode 100644 index 0000000000..77befff33e --- /dev/null +++ b/docs/applications/love/index.rst @@ -0,0 +1,16 @@ +.. px-app:: love + +############################################### +love — LSST Observers Visualization Environment +############################################### + +.. jinja:: love + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/love/values.md b/docs/applications/love/values.md new file mode 100644 index 0000000000..84ad08c4a3 --- /dev/null +++ b/docs/applications/love/values.md @@ -0,0 +1,12 @@ +```{px-app-values} love +``` + +# LOVE Helm values reference + +Helm values reference table for the {px-app}`love` application. + +```{include} ../../../applications/love/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/obssys/index.rst b/docs/applications/obssys/index.rst new file mode 100644 index 0000000000..f08fe702db --- /dev/null +++ b/docs/applications/obssys/index.rst @@ -0,0 +1,16 @@ +.. px-app:: obssys + +###################################################### +obssys — Observatory Systems Control System Components +###################################################### + +.. jinja:: obssys + :file: applications/_summary.rst.jinja + +Guides +====== + +.. 
toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/obssys/values.md b/docs/applications/obssys/values.md new file mode 100644 index 0000000000..71ac5786bc --- /dev/null +++ b/docs/applications/obssys/values.md @@ -0,0 +1,12 @@ +```{px-app-values} obssys +``` + +# ObsSys Helm values reference + +Helm values reference table for the {px-app}`obssys` application. + +```{include} ../../../applications/obssys/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/simonyitel/index.rst b/docs/applications/simonyitel/index.rst new file mode 100644 index 0000000000..fb033a76c5 --- /dev/null +++ b/docs/applications/simonyitel/index.rst @@ -0,0 +1,16 @@ +.. px-app:: simonyitel + +######################################################## +simonyitel — Simonyi Telescope Control System Components +######################################################## + +.. jinja:: simonyitel + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/simonyitel/values.md b/docs/applications/simonyitel/values.md new file mode 100644 index 0000000000..6347b6bd5e --- /dev/null +++ b/docs/applications/simonyitel/values.md @@ -0,0 +1,12 @@ +```{px-app-values} simonyitel +``` + +# SimonyiTel Helm values reference + +Helm values reference table for the {px-app}`simonyitel` application. + +```{include} ../../../applications/simonyitel/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/uws/index.rst b/docs/applications/uws/index.rst new file mode 100644 index 0000000000..58452d7109 --- /dev/null +++ b/docs/applications/uws/index.rst @@ -0,0 +1,16 @@ +.. px-app:: uws + +####################################### +uws — Universal Worker Service for OCPS +####################################### + +.. 
jinja:: uws + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values \ No newline at end of file diff --git a/docs/applications/uws/values.md b/docs/applications/uws/values.md new file mode 100644 index 0000000000..a5ceb06fa2 --- /dev/null +++ b/docs/applications/uws/values.md @@ -0,0 +1,12 @@ +```{px-app-values} uws +``` + +# UWS Helm values reference + +Helm values reference table for the {px-app}`uws` application. + +```{include} ../../../applications/uws/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file From 13cd0e234d8d1de85c4ad545b966cd53b0210080 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 07:51:16 -0700 Subject: [PATCH 509/588] Try diffent broker. --- environments/values-tucson-teststand.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 79146b7628..e2485b6f22 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -23,6 +23,6 @@ applications: telegraf-ds: true controlSystemImageTag: k0001 -controlSystemKafkaBrokerAddress: sasquatch-tts-kafka-bootstrap.lsst.codes:9094 +controlSystemKafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 controlSystemSiteTag: tucson controlSystemS3EndpointUrl: https://s3.tu.lsst.org From df8a02be1cb0bd7f366dd5507906cb51383b5876 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 12:58:56 -0700 Subject: [PATCH 510/588] Add forgotten CSCs to auxtel. 
--- applications/auxtel/Chart.yaml | 16 ++++++++++++++++ applications/auxtel/values-tucson-teststand.yaml | 12 ++++++++++++ 2 files changed, 28 insertions(+) diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index 164a102ede..5108dee9b0 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -37,10 +37,26 @@ dependencies: alias: athexapod-sim version: 1.0.0 repository: "file://../../charts/csc" +- name: csc + alias: atmcs + version: 1.0.0 + repository: "file://../../charts/csc" +- name: csc + alias: atmcs-sim + version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atoods version: 1.0.0 repository: "file://../../charts/csc" +- name: csc + alias: atpneumatics + version: 1.0.0 + repository: "file://../../charts/csc" +- name: csc + alias: atpneumatics-sim + version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: atptg version: 1.0.0 diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index 95cd977115..400a3874b7 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -60,6 +60,12 @@ athexapod-sim: repository: ts-dockerhub.lsst.org/athexapod pullPolicy: Always +atmcs-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/atmcs_sim + pullPolicy: Always + atoods: enabled: true image: @@ -138,6 +144,12 @@ atoods: <<: *interval days: 2 +atpneumatics-sim: + enabled: true + image: + repository: ts-dockerhub.lsst.org/at_pneumatics_sim + pullPolicy: Always + atptg: enabled: true image: From f8e07dc34114a7dae9e285a5c9a68df598f3ea88 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 13:08:10 -0700 Subject: [PATCH 511/588] Adding ESS:106. 
--- applications/eas/Chart.yaml | 8 ++++++++ applications/eas/values-tucson-teststand.yaml | 9 +++++++++ applications/love/values-tucson-teststand.yaml | 1 + 3 files changed, 18 insertions(+) diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml index b9651de186..67d54a48d4 100644 --- a/applications/eas/Chart.yaml +++ b/applications/eas/Chart.yaml @@ -78,6 +78,14 @@ dependencies: alias: dsm2-sim version: 1.0.0 repository: "file://../../charts/csc" +- name: csc + alias: m2-ess106 + version: 1.0.0 + repository: "file://../../charts/csc" +- name: csc + alias: m2-ess106-sim + version: 1.0.0 + repository: "file://../../charts/csc" - name: csc alias: mtdome-ess01 version: 1.0.0 diff --git a/applications/eas/values-tucson-teststand.yaml b/applications/eas/values-tucson-teststand.yaml index 107055acf0..3d6fd6c584 100644 --- a/applications/eas/values-tucson-teststand.yaml +++ b/applications/eas/values-tucson-teststand.yaml @@ -87,6 +87,15 @@ dsm2-sim: CSC_INDEX: 2 RUN_ARG: --simulate 2 --state enabled +m2-ess106-sim: + enabled: true + classifier: ess106 + image: + repository: ts-dockerhub.lsst.org/ess + pullPolicy: Always + env: + RUN_ARG: 106 --simulate + mtdome-ess01-sim: enabled: true classifier: ess101 diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index a80f47b16a..9eb0f4cd34 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -199,6 +199,7 @@ love-producer: dimm2: DIMM:2 dsm1: DSM:1 dsm2: DSM:2 + m2ess106: ESS:106 mtdomeess01: ESS:101 mtdomeess02: ESS:102 mtdomeess03: ESS:103 From 54cd191a5b37502d0d7f832373708248eafa80f7 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 13:42:42 -0700 Subject: [PATCH 512/588] Fixes for simonyitel CSCs. 
--- applications/simonyitel/values-tucson-teststand.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/applications/simonyitel/values-tucson-teststand.yaml b/applications/simonyitel/values-tucson-teststand.yaml index 281956b2fe..db1d4f7780 100644 --- a/applications/simonyitel/values-tucson-teststand.yaml +++ b/applications/simonyitel/values-tucson-teststand.yaml @@ -125,12 +125,16 @@ mtaircompressor1-sim: image: repository: ts-dockerhub.lsst.org/mtaircompressor pullPolicy: Always + env: + RUN_ARG: 1 --simulate --state disabled mtaircompressor2-sim: enabled: true image: repository: ts-dockerhub.lsst.org/mtaircompressor pullPolicy: Always + env: + RUN_ARG: 2 --simulate --state disabled mtaos: enabled: true @@ -215,6 +219,7 @@ mtmount-sim: RUN_ARG: --simulate mtptg: + enabled: true image: repository: ts-dockerhub.lsst.org/ptkernel pullPolicy: Always @@ -222,6 +227,7 @@ mtptg: TELESCOPE: MT mtrotator-sim: + enabled: true image: repository: ts-dockerhub.lsst.org/mtrotator pullPolicy: Always From 0225623e1e2fb91a0733570823e8ff490b36962a Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 14:50:09 -0700 Subject: [PATCH 513/588] Fix mounts for scriptqueues. 
--- applications/obssys/values-tucson-teststand.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml index 34dc0ca2af..dba7536218 100644 --- a/applications/obssys/values-tucson-teststand.yaml +++ b/applications/obssys/values-tucson-teststand.yaml @@ -37,13 +37,13 @@ atqueue: - name: auxtel-gen3-butler containerPath: /repo/LATISS readOnly: false - server: auxtel-archiver.tu.lsst.org - serverPath: /repo/LATISS + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS - name: auxtel-gen3-oods containerPath: /data/lsstdata/TTS/auxtel readOnly: true - server: auxtel-archiver.tu.lsst.org - serverPath: /lsstdata/TTS/auxtel + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata/TTS/auxtel - name: comcam-gen3-butler containerPath: /repo/LSSTComCam readOnly: false @@ -129,13 +129,13 @@ mtqueue: - name: auxtel-gen3-butler containerPath: /repo/LATISS readOnly: false - server: auxtel-archiver.tu.lsst.org - serverPath: /repo/LATISS + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS - name: auxtel-gen3-oods containerPath: /data/lsstdata/TTS/auxtel readOnly: true - server: auxtel-archiver.tu.lsst.org - serverPath: /lsstdata/TTS/auxtel + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata/TTS/auxtel - name: comcam-gen3-butler containerPath: /repo/LSSTComCam readOnly: false From bc4edd8bcc90653285980cfecc70f57ec1a34ca3 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 15:01:56 -0700 Subject: [PATCH 514/588] Fix mounts for atoods. 
--- applications/auxtel/values-tucson-teststand.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index 400a3874b7..a467448b4d 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -84,13 +84,13 @@ atoods: - name: auxtel-gen3-butler containerPath: /repo/LATISS readOnly: false - server: auxtel-archiver.tu.lsst.org - serverPath: /repo/LATISS + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/repo/LATISS - name: auxtel-oods-data containerPath: /data readOnly: false - server: auxtel-archiver.tu.lsst.org - serverPath: /data + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel configfile: path: /etc filename: atoods.yaml From f1a7fdfd97610273a1c1aa9f981bba64693cc542 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 15:04:56 -0700 Subject: [PATCH 515/588] Fix butler secret name. --- charts/csc/templates/job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml index a7b55b151a..4403110a15 100644 --- a/charts/csc/templates/job.yaml +++ b/charts/csc/templates/job.yaml @@ -147,7 +147,7 @@ spec: emptyDir: {} - name: {{ include "chart.name" $ }}-raw-{{ $values.name }} secret: - secretName: {{ $.Values.namespace }}-{{ or $values.secretName $values.name }} + secretName: {{ or $values.secretName $values.name }} defaultMode: 0600 {{- end }} {{- end }} From d43c821f87ad13a6085d25c8321852ceb4986b0e Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 15:19:57 -0700 Subject: [PATCH 516/588] Remove auto-enable on Watcher. 
--- applications/obssys/values-tucson-teststand.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml index dba7536218..2c4888af34 100644 --- a/applications/obssys/values-tucson-teststand.yaml +++ b/applications/obssys/values-tucson-teststand.yaml @@ -192,5 +192,3 @@ watcher: image: repository: ts-dockerhub.lsst.org/watcher pullPolicy: Always - env: - RUN_ARG: --state enabled From 64e63c80601a968a3598321c21f22c76754c52a4 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 15:31:16 -0700 Subject: [PATCH 517/588] Fix tag for love-view-backup. --- applications/love/README.md | 2 +- applications/love/charts/love-manager/README.md | 2 +- applications/love/charts/love-manager/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/love/README.md b/applications/love/README.md index ab9e7a1cdf..3466ba895a 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -91,7 +91,7 @@ Deployment for the LSST Operators Visualization Environment | love-manager.viewBackup.env | object | `{}` | Place to specify additional environment variables for the view backup job | | love-manager.viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image | | love-manager.viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use | -| love-manager.viewBackup.image.tag | string | `"develop"` | The tag to use for the view backup image | +| love-manager.viewBackup.image.tag | string | `nil` | The tag to use for the view backup image | | love-manager.viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods | | love-manager.viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods | | love-manager.viewBackup.restartPolicy | string | `"Never"` | The restart policy type for 
the view backup cronjob | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index fc06207495..b24102d153 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -80,7 +80,7 @@ Helm chart for the LOVE manager service. | viewBackup.env | object | `{}` | Place to specify additional environment variables for the view backup job | | viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image | | viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use | -| viewBackup.image.tag | string | `"develop"` | The tag to use for the view backup image | +| viewBackup.image.tag | string | `nil` | The tag to use for the view backup image | | viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods | | viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods | | viewBackup.restartPolicy | string | `"Never"` | The restart policy type for the view backup cronjob | diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index 1adf351455..18f501c873 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -164,7 +164,7 @@ viewBackup: # -- The view backup image to use repository: lsstts/love-view-backup # -- The tag to use for the view backup image - tag: develop + tag: # -- The pull policy to use for the view backup image pullPolicy: IfNotPresent # -- Place to specify additional environment variables for the view backup job From 0161c360af7c0df72b5036f7d22cb69cadf4b9a9 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 13 Sep 2023 15:48:21 -0700 Subject: [PATCH 518/588] Fix love-commander service. 
--- applications/love/values-tucson-teststand.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index 9eb0f4cd34..03badf97cd 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -29,7 +29,7 @@ love-commander: secretName: lfa secretKey: aws-secret-access-key service: - use: true + enabled: true port: 5000 type: ClusterIP From 826db20b9814916d06bb5a768feda9329472da08 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 14 Sep 2023 09:25:41 -0700 Subject: [PATCH 519/588] Updates to nublado for kafka. --- applications/nublado/README.md | 1 + .../nublado/templates/controller-deployment.yaml | 7 +++++++ applications/nublado/templates/vault-secrets.yaml | 12 ++++++++++++ applications/nublado/values-tucson-teststand.yaml | 15 ++++++++++----- applications/nublado/values.yaml | 5 ++++- 5 files changed, 34 insertions(+), 6 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index fb4bd65db2..d6f6f231d0 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -58,6 +58,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.nodeSelector | object | `{}` | Node selector rules for user lab pods | | controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | | controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | +| controller.config.lab.kafkaSecret | bool | `false` | Add the Kafka secret to the user pods | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. 
Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | | controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | diff --git a/applications/nublado/templates/controller-deployment.yaml b/applications/nublado/templates/controller-deployment.yaml index 800fc2cb41..e854a79f84 100644 --- a/applications/nublado/templates/controller-deployment.yaml +++ b/applications/nublado/templates/controller-deployment.yaml @@ -45,6 +45,13 @@ spec: name: "nublado-secret" key: "slack-webhook" {{- end }} + {{- if .Values.controller.config.lab.kafkaSecret }} + - name: LSST_KAFKA_SECURITY_PASSWORD + valueFrom: + secretKeyRef: + name: kafka-secret + key: ts-salkafka-password + {{- end }} ports: - name: "http" containerPort: 8080 diff --git a/applications/nublado/templates/vault-secrets.yaml b/applications/nublado/templates/vault-secrets.yaml index 592042f21b..9211c270a0 100644 --- a/applications/nublado/templates/vault-secrets.yaml +++ b/applications/nublado/templates/vault-secrets.yaml @@ -59,3 +59,15 @@ spec: path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson {{- end }} +{{- if .Values.controller.config.lab.kafkaSecret }} +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: kafka-secret + labels: + {{- include "nublado.labels" . 
| nindent 4 }} +spec: + path: "{{- .Values.global.vaultSecretsPath }}/ts/software/ts-salkafka" + type: Opaque +{{- end }} \ No newline at end of file diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 84d54b6ecf..9f594fa942 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -8,18 +8,21 @@ controller: num_releases: 0 num_weeklies: 3 num_dailies: 2 - cycle: 32 - recommended_tag: "recommended_c0032" + cycle: null + recommended_tag: "recommended_k0001" lab: extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - LSST_DDS_INTERFACE: net1 - LSST_DDS_PARTITION_PREFIX: tucson LSST_SITE: tucson - PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" + LSST_TOPIC_SUBNAME: sal + LSST_KAFKA_SECURITY_USERNAME: ts-salkafka + LSST_KAFKA_BROKER_ADDR: sasquatch-kafka-brokers.sasquatch:9092 + LSST_SCHEMA_REGISTRY_URL: http://sasquatch-schema-registry.sasquatch:8081 + PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" + kafkaSecret: true initContainers: - name: "inithome" image: @@ -31,6 +34,8 @@ controller: volumeName: "home" pullSecret: "pull-secret" secrets: + - secretName: "kafka-secret" + secretKey: "ts-salkafka-password" - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" volumes: diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 3ca8799eed..a713dfa678 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -294,7 +294,10 @@ controller: # that key. secrets: [] - # -- Available lab sizes. Sizes must be chosen from `fine`, + # -- Add the Kafka secret to the user pods + kafkaSecret: false + + # -- Available lab sizes. 
Names must be chosen from `fine`, # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, # `gargantuan`, and `colossal` in that order. Each should specify the # maximum CPU equivalents and memory. SI suffixes for memory are From 803220b4bad20451b72f6eb00f423a0c627e0203 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 18 Sep 2023 11:23:39 -0700 Subject: [PATCH 520/588] Using conditions to control deployments. --- applications/auxtel/Chart.yaml | 10 ++ applications/auxtel/README.md | 12 +- .../auxtel/values-tucson-teststand.yaml | 5 - applications/auxtel/values.yaml | 41 ++++++ applications/calsys/Chart.yaml | 2 + applications/calsys/README.md | 2 + applications/calsys/values.yaml | 8 ++ applications/control-system-test/README.md | 2 +- .../values-tucson-teststand.yaml | 1 - applications/control-system-test/values.yaml | 1 + applications/eas/Chart.yaml | 32 +++++ applications/eas/README.md | 32 +++++ applications/eas/values-tucson-teststand.yaml | 1 - applications/eas/values.yaml | 128 ++++++++++++++++++ .../love/values-tucson-teststand.yaml | 1 - applications/obssys/Chart.yaml | 1 + applications/obssys/README.md | 1 + .../obssys/values-tucson-teststand.yaml | 5 - applications/obssys/values.yaml | 4 + applications/simonyitel/Chart.yaml | 36 +++++ applications/simonyitel/README.md | 23 ++++ .../simonyitel/values-tucson-teststand.yaml | 3 - applications/simonyitel/values.yaml | 92 +++++++++++++ applications/uws/Chart.yaml | 7 + applications/uws/README.md | 3 + applications/uws/values.yaml | 12 ++ charts/csc/templates/job.yaml | 2 - 27 files changed, 447 insertions(+), 20 deletions(-) diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index 5108dee9b0..87ef2753b8 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -16,10 +16,12 @@ dependencies: - name: csc alias: atdome version: 1.0.0 + condition: atdome.enabled repository: "file://../../charts/csc" - name: csc alias: atdome-sim version: 1.0.0 + 
condition: atdome-sim.enabled repository: "file://../../charts/csc" - name: csc alias: atdometrajectory @@ -32,18 +34,22 @@ dependencies: - name: csc alias: athexapod version: 1.0.0 + condition: athexapod.enabled repository: "file://../../charts/csc" - name: csc alias: athexapod-sim version: 1.0.0 + condition: athexapod-sim.enabled repository: "file://../../charts/csc" - name: csc alias: atmcs version: 1.0.0 + condition: atmcs.enabled repository: "file://../../charts/csc" - name: csc alias: atmcs-sim version: 1.0.0 + condition: atmcs-sim.enabled repository: "file://../../charts/csc" - name: csc alias: atoods @@ -52,10 +58,12 @@ dependencies: - name: csc alias: atpneumatics version: 1.0.0 + condition: atpneumatics.enabled repository: "file://../../charts/csc" - name: csc alias: atpneumatics-sim version: 1.0.0 + condition: atpneumatics-sim.enabled repository: "file://../../charts/csc" - name: csc alias: atptg @@ -64,8 +72,10 @@ dependencies: - name: csc alias: atspectrograph version: 1.0.0 + condition: atspectrograph.enabled repository: "file://../../charts/csc" - name: csc alias: atspectrograph-sim version: 1.0.0 + condition: atspectrograph-sim.enabled repository: "file://../../charts/csc" diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index c597b11987..4ba653fd10 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -16,8 +16,18 @@ Deployment for the Auxiliary Telescope CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| atdome-sim.enabled | bool | `false` | Enable the ATDome simulator CSC | +| atdome.enabled | bool | `false` | Enable the ATDome CSC | +| athexapod-sim.enabled | bool | `false` | Enable the ATHexapod simulator CSC | +| athexapod.enabled | bool | `false` | Enable the ATHexapod CSC | 
+| atmcs-sim.enabled | bool | `false` | Enable the ATMCS simulator CSC | +| atmcs.enabled | bool | `false` | Enable the ATMCS CSC | +| atpneumatics-sim.enabled | bool | `false` | Enable the ATPneumatics simulator CSC | +| atpneumatics.enabled | bool | `false` | Enable the ATPneumatics CSC | +| atspectrograph-sim.enabled | bool | `false` | Enable the ATSpectograph simulator CSC | +| atspectrograph.enabled | bool | `false` | Enable the ATSpectrograph CSC | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | -| hexapod-sim.enabled | bool | `false` | | +| hexapod-sim.enabled | bool | `false` | Enable the hexapod controller simulator | | hexapod-sim.image | object | `{"pullPolicy":"Always","repository":"ts-dockerhub.lsst.org/hexapod_simulator","tag":"latest"}` | This section holds the configuration of the container image | | hexapod-sim.image.pullPolicy | string | `"Always"` | The policy to apply when pulling an image for deployment | | hexapod-sim.image.repository | string | `"ts-dockerhub.lsst.org/hexapod_simulator"` | The Docker registry name of the container image | diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index a467448b4d..fb2ac30f9c 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -11,7 +11,6 @@ csc_collector: key: butler-secret ataos: - enabled: true image: repository: ts-dockerhub.lsst.org/ataos pullPolicy: Always @@ -25,13 +24,11 @@ atdome-sim: RUN_ARG: --simulate atdometrajectory: - enabled: true image: repository: ts-dockerhub.lsst.org/atdometrajectory pullPolicy: Always atheaderservice: - enabled: true image: repository: ts-dockerhub.lsst.org/headerservice tag: 
ts-v3.1.11_c0029 @@ -67,7 +64,6 @@ atmcs-sim: pullPolicy: Always atoods: - enabled: true image: repository: ts-dockerhub.lsst.org/atoods pullPolicy: Always @@ -151,7 +147,6 @@ atpneumatics-sim: pullPolicy: Always atptg: - enabled: true image: repository: ts-dockerhub.lsst.org/ptkernel pullPolicy: Always diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml index fee74bcbd1..7eaf54f067 100644 --- a/applications/auxtel/values.yaml +++ b/applications/auxtel/values.yaml @@ -7,6 +7,47 @@ csc_collector: secrets: [] hexapod-sim: + # -- Enable the hexapod controller simulator + enabled: false + +atdome: + # -- Enable the ATDome CSC + enabled: false + +atdome-sim: + # -- Enable the ATDome simulator CSC + enabled: false + +athexapod: + # -- Enable the ATHexapod CSC + enabled: false + +athexapod-sim: + # -- Enable the ATHexapod simulator CSC + enabled: false + +atmcs: + # -- Enable the ATMCS CSC + enabled: false + +atmcs-sim: + # -- Enable the ATMCS simulator CSC + enabled: false + +atpneumatics: + # -- Enable the ATPneumatics CSC + enabled: false + +atpneumatics-sim: + # -- Enable the ATPneumatics simulator CSC + enabled: false + +atspectrograph: + # -- Enable the ATSpectrograph CSC + enabled: false + +atspectrograph-sim: + # -- Enable the ATSpectograph simulator CSC enabled: false # The following will be set by parameters injected by Argo CD and should not diff --git a/applications/calsys/Chart.yaml b/applications/calsys/Chart.yaml index 6c9670d485..cc151abdad 100644 --- a/applications/calsys/Chart.yaml +++ b/applications/calsys/Chart.yaml @@ -9,8 +9,10 @@ dependencies: - name: csc alias: gcheaderservice1 version: 1.0.0 + condition: gcheaderservice1.enabled repository: "file://../../charts/csc" - name: csc alias: simulation-gencam version: 1.0.0 + condition: simulation-gencam.enabled repository: "file://../../charts/csc" diff --git a/applications/calsys/README.md b/applications/calsys/README.md index 602a93199e..d6973f6cc9 100644 --- 
a/applications/calsys/README.md +++ b/applications/calsys/README.md @@ -7,6 +7,7 @@ Deployment for the Calibration System CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| gcheaderservice1.enabled | bool | `false` | Enable the GCHeaderService:1 CSC | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | @@ -17,3 +18,4 @@ Deployment for the Calibration System CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| simulation-gencam.enabled | bool | `false` | Enable the GenericCamera:1 CSC | diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml index 70e29b7d0b..0de6fb8210 100644 --- a/applications/calsys/values.yaml +++ b/applications/calsys/values.yaml @@ -6,6 +6,14 @@ csc_collector: # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) secrets: [] +gcheaderservice1: + # -- Enable the GCHeaderService:1 CSC + enabled: false + +simulation-gencam: + # -- Enable the GenericCamera:1 CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files.
global: diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md index aaf35fbe85..76c57582e9 100644 --- a/applications/control-system-test/README.md +++ b/applications/control-system-test/README.md @@ -17,7 +17,7 @@ Deployment for the Test CSCs and Integration Testing Workflows | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | -| integration-testing.enabled | bool | `false` | | +| integration-testing.enabled | bool | `false` | Enable the integration testing system | | integration-testing.envEfd | string | `nil` | The Name of the EFD instance. 
| | integration-testing.image.tag | string | `nil` | The image tag for the Integration Test runner container | | integration-testing.persistentVolume.claimName | string | `"saved-reports"` | PVC name for saving the reports | diff --git a/applications/control-system-test/values-tucson-teststand.yaml b/applications/control-system-test/values-tucson-teststand.yaml index 9ec18f218e..361e1468e2 100644 --- a/applications/control-system-test/values-tucson-teststand.yaml +++ b/applications/control-system-test/values-tucson-teststand.yaml @@ -9,7 +9,6 @@ csc_collector: key: ts/software/lfa test42: - enabled: true image: repository: ts-dockerhub.lsst.org/test pullPolicy: Always diff --git a/applications/control-system-test/values.yaml b/applications/control-system-test/values.yaml index 353a68dc0c..0062c56078 100644 --- a/applications/control-system-test/values.yaml +++ b/applications/control-system-test/values.yaml @@ -7,6 +7,7 @@ csc_collector: secrets: [] integration-testing: + # -- Enable the integration testing system enabled: false # The following will be set by parameters injected by Argo CD and should not diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml index 67d54a48d4..0fcd82fdea 100644 --- a/applications/eas/Chart.yaml +++ b/applications/eas/Chart.yaml @@ -9,130 +9,162 @@ dependencies: - name: csc alias: auxtel-ess01 version: 1.0.0 + condition: auxtel-ess01.enabled repository: "file://../../charts/csc" - name: csc alias: auxtel-ess01-sim version: 1.0.0 + condition: auxtel-ess01-sim.enabled repository: "file://../../charts/csc" - name: csc alias: auxtel-ess02 version: 1.0.0 + condition: auxtel-ess02.enabled repository: "file://../../charts/csc" - name: csc alias: auxtel-ess02-sim version: 1.0.0 + condition: auxtel-ess02-sim.enabled repository: "file://../../charts/csc" - name: csc alias: auxtel-ess03 version: 1.0.0 + condition: auxtel-ess03.enabled repository: "file://../../charts/csc" - name: csc alias: auxtel-ess03-sim version: 1.0.0 + 
condition: auxtel-ess03-sim.enabled repository: "file://../../charts/csc" - name: csc alias: auxtel-ess04 version: 1.0.0 + condition: auxtel-ess04.enabled repository: "file://../../charts/csc" - name: csc alias: auxtel-ess04-sim version: 1.0.0 + condition: auxtel-ess04-sim.enabled repository: "file://../../charts/csc" - name: csc alias: calibhill-ess01 version: 1.0.0 + condition: calibhill-ess01.enabled repository: "file://../../charts/csc" - name: csc alias: calibhill-ess01-sim version: 1.0.0 + condition: calibhill-ess01-sim.enabled repository: "file://../../charts/csc" - name: csc alias: dimm1 version: 1.0.0 + condition: dimm1.enabled repository: "file://../../charts/csc" - name: csc alias: dimm1-sim version: 1.0.0 + condition: dimm1-sim.enabled repository: "file://../../charts/csc" - name: csc alias: dimm2 version: 1.0.0 + condition: dimm2.enabled repository: "file://../../charts/csc" - name: csc alias: dimm2-sim version: 1.0.0 + condition: dimm2-sim.enabled repository: "file://../../charts/csc" - name: csc alias: dsm1 version: 1.0.0 + condition: dsm1.enabled repository: "file://../../charts/csc" - name: csc alias: dsm1-sim version: 1.0.0 + condition: dsm1-sim.enabled repository: "file://../../charts/csc" - name: csc alias: dsm2 version: 1.0.0 + condition: dsm2.enabled repository: "file://../../charts/csc" - name: csc alias: dsm2-sim version: 1.0.0 + condition: dsm2-sim.enabled repository: "file://../../charts/csc" - name: csc alias: m2-ess106 version: 1.0.0 + condition: m2-ess106.enabled repository: "file://../../charts/csc" - name: csc alias: m2-ess106-sim version: 1.0.0 + condition: m2-ess106-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome-ess01 version: 1.0.0 + condition: mtdome-ess01.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome-ess01-sim version: 1.0.0 + condition: mtdome-ess01-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome-ess02 version: 1.0.0 + condition: 
mtdome-ess02.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome-ess02-sim version: 1.0.0 + condition: mtdome-ess02-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome-ess03 version: 1.0.0 + condition: mtdome-ess03.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome-ess03-sim version: 1.0.0 + condition: mtdome-ess03-sim.enabled repository: "file://../../charts/csc" - name: csc alias: tma-ess01 version: 1.0.0 + condition: tma-ess01.enabled repository: "file://../../charts/csc" - name: csc alias: tma-ess01-sim version: 1.0.0 + condition: tma-ess01-sim.enabled repository: "file://../../charts/csc" - name: csc alias: tma-ess104 version: 1.0.0 + condition: tma-ess104.enabled repository: "file://../../charts/csc" - name: csc alias: tma-ess104-sim version: 1.0.0 + condition: tma-ess104-sim.enabled repository: "file://../../charts/csc" - name: csc alias: tma-ess105 version: 1.0.0 + condition: tma-ess105.enabled repository: "file://../../charts/csc" - name: csc alias: tma-ess105-sim version: 1.0.0 + condition: tma-ess105-sim.enabled repository: "file://../../charts/csc" - name: csc alias: weatherforecast diff --git a/applications/eas/README.md b/applications/eas/README.md index 7e154eae62..c0421d5d55 100644 --- a/applications/eas/README.md +++ b/applications/eas/README.md @@ -6,7 +6,25 @@ Deployment for the Environmental Awareness Systems CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| auxtel-ess01-sim.enabled | bool | `false` | Enable the ESS:201 simulator CSC | +| auxtel-ess01.enabled | bool | `false` | Enable the ESS:201 CSC | +| auxtel-ess02-sim.enabled | bool | `false` | Enable the ESS:202 simulator CSC | +| auxtel-ess02.enabled | bool | `false` | Enable the ESS:202 CSC | +| auxtel-ess03-sim.enabled | bool | `false` | Enable the ESS:203 simulator CSC | +| auxtel-ess03.enabled | bool | `false` | Enable the ESS:203 CSC | +| auxtel-ess04-sim.enabled | bool | `false` | 
Enable the ESS:204 simulator CSC | +| auxtel-ess04.enabled | bool | `false` | Enable the ESS:204 CSC | +| calibhill-ess01-sim.enabled | bool | `false` | Enable the ESS:301 simulator CSC | +| calibhill-ess01.enabled | bool | `false` | Enable the ESS:301 CSC | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | +| dimm1-sim.enabled | bool | `false` | Enable the DIMM:1 simulator CSC | +| dimm1.enabled | bool | `false` | Enable the DIMM:1 CSC | +| dimm2-sim.enabled | bool | `false` | Enable the DIMM:2 simulator CSC | +| dimm2.enabled | bool | `false` | Enable the DIMM:2 CSC | +| dsm1-sim.enabled | bool | `false` | Enable the DSM:1 simulator CSC | +| dsm1.enabled | bool | `false` | Enable the DSM:1 CSC | +| dsm2-sim.enabled | bool | `false` | Enable the DSM:2 simulator CSC | +| dsm2.enabled | bool | `false` | Enable the DSM:2 CSC | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | @@ -17,3 +35,17 @@ Deployment for the Environmental Awareness Systems CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| m2-ess106-sim.enabled | bool | `false` | Enable the ESS:106 simulator CSC | +| m2-ess106.enabled | bool | `false` | Enable the ESS:106 CSC | +| mtdome-ess01-sim.enabled | bool | `false` | Enable the ESS:101 simulator CSC | +| mtdome-ess01.enabled | bool | 
`false` | Enable the ESS:101 CSC | +| mtdome-ess02-sim.enabled | bool | `false` | Enable the ESS:102 simulator CSC | +| mtdome-ess02.enabled | bool | `false` | Enable the ESS:102 CSC | +| mtdome-ess03-sim.enabled | bool | `false` | Enable the ESS:103 simulator CSC | +| mtdome-ess03.enabled | bool | `false` | Enable the ESS:103 CSC | +| tma-ess01-sim.enabled | bool | `false` | Enable the ESS:1 simulator CSC | +| tma-ess01.enabled | bool | `false` | Enable the ESS:1 CSC | +| tma-ess104-sim.enabled | bool | `false` | Enable the ESS:104 simulator CSC | +| tma-ess104.enabled | bool | `false` | Enable the ESS:104 CSC | +| tma-ess105-sim.enabled | bool | `false` | Enable the ESS:105 simulator CSC | +| tma-ess105.enabled | bool | `false` | Enable the ESS:105 CSC | diff --git a/applications/eas/values-tucson-teststand.yaml b/applications/eas/values-tucson-teststand.yaml index 3d6fd6c584..5e35acc017 100644 --- a/applications/eas/values-tucson-teststand.yaml +++ b/applications/eas/values-tucson-teststand.yaml @@ -151,7 +151,6 @@ tma-ess105-sim: RUN_ARG: 105 --simulate weatherforecast: - enabled: true image: repository: ts-dockerhub.lsst.org/weatherforecast pullPolicy: Always diff --git a/applications/eas/values.yaml b/applications/eas/values.yaml index 70e29b7d0b..bf4a033685 100644 --- a/applications/eas/values.yaml +++ b/applications/eas/values.yaml @@ -6,6 +6,134 @@ csc_collector: # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
secrets: [] +auxtel-ess01: + # -- Enable the ESS:201 CSC + enabled: false + +auxtel-ess01-sim: + # -- Enable the ESS:201 simulator CSC + enabled: false + +auxtel-ess02: + # -- Enable the ESS:202 CSC + enabled: false + +auxtel-ess02-sim: + # -- Enable the ESS:202 simulator CSC + enabled: false + +auxtel-ess03: + # -- Enable the ESS:203 CSC + enabled: false + +auxtel-ess03-sim: + # -- Enable the ESS:203 simulator CSC + enabled: false + +auxtel-ess04: + # -- Enable the ESS:204 CSC + enabled: false + +auxtel-ess04-sim: + # -- Enable the ESS:204 simulator CSC + enabled: false + +calibhill-ess01: + # -- Enable the ESS:301 CSC + enabled: false + +calibhill-ess01-sim: + # -- Enable the ESS:301 simulator CSC + enabled: false + +dimm1: + # -- Enable the DIMM:1 CSC + enabled: false + +dimm1-sim: + # -- Enable the DIMM:1 simulator CSC + enabled: false + +dimm2: + # -- Enable the DIMM:2 CSC + enabled: false + +dimm2-sim: + # -- Enable the DIMM:2 simulator CSC + enabled: false + +dsm1: + # -- Enable the DSM:1 CSC + enabled: false + +dsm1-sim: + # -- Enable the DSM:1 simulator CSC + enabled: false + +dsm2: + # -- Enable the DSM:2 CSC + enabled: false + +dsm2-sim: + # -- Enable the DSM:2 simulator CSC + enabled: false + +m2-ess106: + # -- Enable the ESS:106 CSC + enabled: false + +m2-ess106-sim: + # -- Enable the ESS:106 simulator CSC + enabled: false + +mtdome-ess01: + # -- Enable the ESS:101 CSC + enabled: false + +mtdome-ess01-sim: + # -- Enable the ESS:101 simulator CSC + enabled: false + +mtdome-ess02: + # -- Enable the ESS:102 CSC + enabled: false + +mtdome-ess02-sim: + # -- Enable the ESS:102 simulator CSC + enabled: false + +mtdome-ess03: + # -- Enable the ESS:103 CSC + enabled: false + +mtdome-ess03-sim: + # -- Enable the ESS:103 simulator CSC + enabled: false + +tma-ess01: + # -- Enable the ESS:1 CSC + enabled: false + +tma-ess01-sim: + # -- Enable the ESS:1 simulator CSC + enabled: false + +tma-ess104: + # -- Enable the ESS:104 CSC + enabled: false + +tma-ess104-sim: + 
# -- Enable the ESS:104 simulator CSC + enabled: false + +tma-ess105: + # -- Enable the ESS:105 CSC + enabled: false + +tma-ess105-sim: + # -- Enable the ESS:105 simulator CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. global: diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index 03badf97cd..2987dde0cd 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -9,7 +9,6 @@ csc_collector: key: ts/software/lfa love-commander: - enabled: true image: repository: ts-dockerhub.lsst.org/love-commander pullPolicy: Always diff --git a/applications/obssys/Chart.yaml b/applications/obssys/Chart.yaml index f53d3ded25..644cd7bb9a 100644 --- a/applications/obssys/Chart.yaml +++ b/applications/obssys/Chart.yaml @@ -17,6 +17,7 @@ dependencies: - name: csc alias: authorize version: 1.0.0 + condition: authorize.enabled repository: "file://../../charts/csc" - name: csc alias: mtqueue diff --git a/applications/obssys/README.md b/applications/obssys/README.md index ddc8b78e21..bb31ba2946 100644 --- a/applications/obssys/README.md +++ b/applications/obssys/README.md @@ -6,6 +6,7 @@ Deployment for the Observatory System CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| authorize.enabled | bool | `false` | Enable the Authorize CSC | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml index 2c4888af34..8c39859d67 100644 --- a/applications/obssys/values-tucson-teststand.yaml +++ b/applications/obssys/values-tucson-teststand.yaml @@ -17,7 +17,6 @@ csc_collector: key: ts/software/love atqueue: - enabled: true namespace: *ns classifier: scriptqueue2 image: @@ -66,7 +65,6 @@ atqueue: serverPath: /obs-env atscheduler: - enabled: true namespace: *ns classifier: scheduler2 image: @@ -109,7 +107,6 @@ authorize: secretKey: authlist-user-pass mtqueue: - enabled: true namespace: *ns classifier: scriptqueue1 image: @@ -158,7 +155,6 @@ mtqueue: serverPath: /obs-env mtscheduler: - enabled: true namespace: *ns classifier: scheduler1 image: @@ -187,7 +183,6 @@ mtscheduler: serverPath: /scratch/scheduler watcher: - enabled: true namespace: *ns image: repository: ts-dockerhub.lsst.org/watcher diff --git a/applications/obssys/values.yaml b/applications/obssys/values.yaml index 70e29b7d0b..18e5ad4e86 100644 --- a/applications/obssys/values.yaml +++ b/applications/obssys/values.yaml @@ -6,6 +6,10 @@ csc_collector: # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) secrets: [] +authorize: + # -- Enable the Authorize CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. 
global: diff --git a/applications/simonyitel/Chart.yaml b/applications/simonyitel/Chart.yaml index a2020f0818..b944e4dfda 100644 --- a/applications/simonyitel/Chart.yaml +++ b/applications/simonyitel/Chart.yaml @@ -9,34 +9,42 @@ dependencies: - name: csc alias: ccheaderservice version: 1.0.0 + condition: ccheaderservice.enabled repository: "file://../../charts/csc" - name: csc alias: ccoods version: 1.0.0 + condition: ccoods.enabled repository: "file://../../charts/csc" - name: csc alias: lasertracker1 version: 1.0.0 + condition: lasertracker1.enabled repository: "file://../../charts/csc" - name: csc alias: lasertracker1-sim version: 1.0.0 + condition: lasertracker1-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtaircompressor1 version: 1.0.0 + condition: mtaircompressor1.enabled repository: "file://../../charts/csc" - name: csc alias: mtaircompressor1-sim version: 1.0.0 + condition: mtaircompressor1-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtaircompressor2 version: 1.0.0 + condition: mtaircompressor2.enabled repository: "file://../../charts/csc" - name: csc alias: mtaircompressor2-sim version: 1.0.0 + condition: mtaircompressor2-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtaos @@ -45,50 +53,76 @@ dependencies: - name: csc alias: mtcamhexapod version: 1.0.0 + condition: mtcamhexapod.enabled repository: "file://../../charts/csc" - name: csc alias: mtcamhexapod-sim version: 1.0.0 + condition: mtcamhexapod-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome version: 1.0.0 + condition: mtdome.enabled repository: "file://../../charts/csc" - name: csc alias: mtdome-sim version: 1.0.0 + condition: mtdome-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtdometrajectory version: 1.0.0 repository: "file://../../charts/csc" +- name: csc + alias: mtheaderservice + version: 1.0.0 + condition: mtheaderservice.enabled + repository: "file://../../charts/csc" +- 
name: csc + alias: mtm1m3 + version: 1.0.0 + condition: mtm1m3.enabled + repository: "file://../../charts/csc" - name: csc alias: mtm1m3-sim version: 1.0.0 + condition: mtm1m3-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtm2 version: 1.0.0 + condition: mtm2.enabled repository: "file://../../charts/csc" - name: csc alias: mtm2-sim version: 1.0.0 + condition: mtm2-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtm2hexapod version: 1.0.0 + condition: mtm2hexapod.enabled repository: "file://../../charts/csc" - name: csc alias: mtm2hexapod-sim version: 1.0.0 + condition: mtm2hexapod-sim.enabled repository: "file://../../charts/csc" - name: csc alias: mtmount version: 1.0.0 + condition: mtmount.enabled repository: "file://../../charts/csc" - name: csc alias: mtmount-sim version: 1.0.0 + condition: mtmount-sim.enabled + repository: "file://../../charts/csc" +- name: csc + alias: mtoods + version: 1.0.0 + condition: mtoods.enabled repository: "file://../../charts/csc" - name: csc alias: mtptg @@ -97,8 +131,10 @@ dependencies: - name: csc alias: mtrotator version: 1.0.0 + condition: mtrotator.enabled repository: "file://../../charts/csc" - name: csc alias: mtrotator-sim version: 1.0.0 + condition: mtrotator-sim.enabled repository: "file://../../charts/csc" diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md index 23ed7cb5c3..4084da4050 100644 --- a/applications/simonyitel/README.md +++ b/applications/simonyitel/README.md @@ -6,6 +6,8 @@ Deployment for the Simonyi Survey Telescope CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| +| ccheaderservice.enabled | bool | `false` | Enable the CCHeaderService CSC | +| ccoods.enabled | bool | `false` | Enable the CCOODS CSC | | csc_collector.secrets | list | `[]` | This section holds secret specifications. 
Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | @@ -17,3 +19,24 @@ Deployment for the Simonyi Survey Telescope CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| lasertracker1-sim.enabled | bool | `false` | Enable the LaserTracker:1 simulator CSC | +| lasertracker1.enabled | bool | `false` | Enable the LaserTracker:1 CSC | +| m1m3-sim.enabled | bool | `false` | Enable the MTM1M3 simulator CSC | +| m1m3.enabled | bool | `false` | Enable the MTM1M3 hardware simulator CSC | +| mtaircompressor1-sim.enabled | bool | `false` | Enable the MTAirCompressor:1 simulator CSC | +| mtaircompressor1.enabled | bool | `false` | Enable the MTAirCompressor:1 CSC | +| mtaircompressor2-sim.enabled | bool | `false` | Enable the MTAirCompressor:2 simulator CSC | +| mtaircompressor2.enabled | bool | `false` | Enable the MTAirCompressor:2 CSC | +| mtcamhexapod-sim.enabled | bool | `false` | Enable the MTHexapod:1 simulator CSC | +| mtcamhexapod.enabled | bool | `false` | Enable the MTHexapod:1 CSC | +| mtdome-sim.enabled | bool | `false` | Enable the MTDome simulator CSC | +| mtdome.enabled | bool | `false` | Enable the MTDome CSC | +| mtheaderservice.enabled | bool | `false` | Enable the MTHeaderService CSC | +| mtm2-sim.enabled | bool | `false` | Enable the MTM2 simulator CSC | +| mtm2.enabled | bool | `false` | Enable the MTM2 CSC | +| mtm2hexapod-sim.enabled | bool | `false` | Enable the MTHexapod:2 
simulator CSC | +| mtm2hexapod.enabled | bool | `false` | Enable the MTHexapod:2 CSC | +| mtmount-sim.enabled | bool | `false` | Enable the MTMount simulator CSC | +| mtmount.enabled | bool | `false` | Enable the MTMount CSC | +| mtrotator-sim.enabled | bool | `false` | Enable the MTRotator simulator CSC | +| mtrotator.enabled | bool | `false` | Enable the MTRotator CSC | diff --git a/applications/simonyitel/values-tucson-teststand.yaml b/applications/simonyitel/values-tucson-teststand.yaml index db1d4f7780..4a8d76d4a1 100644 --- a/applications/simonyitel/values-tucson-teststand.yaml +++ b/applications/simonyitel/values-tucson-teststand.yaml @@ -137,7 +137,6 @@ mtaircompressor2-sim: RUN_ARG: 2 --simulate --state disabled mtaos: - enabled: true image: repository: ts-dockerhub.lsst.org/mtaos pullPolicy: Always @@ -182,7 +181,6 @@ mtdome-sim: RUN_ARG: --simulate 1 mtdometrajectory: - enabled: true image: repository: ts-dockerhub.lsst.org/mtdometrajectory pullPolicy: Always @@ -219,7 +217,6 @@ mtmount-sim: RUN_ARG: --simulate mtptg: - enabled: true image: repository: ts-dockerhub.lsst.org/ptkernel pullPolicy: Always diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml index 70e29b7d0b..2d35fe23ce 100644 --- a/applications/simonyitel/values.yaml +++ b/applications/simonyitel/values.yaml @@ -6,6 +6,98 @@ csc_collector: # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
secrets: [] +ccheaderservice: + # -- Enable the CCHeaderService CSC + enabled: false + +ccoods: + # -- Enable the CCOODS CSC + enabled: false + +lasertracker1: + # -- Enable the LaserTracker:1 CSC + enabled: false + +lasertracker1-sim: + # -- Enable the LaserTracker:1 simulator CSC + enabled: false + +mtaircompressor1: + # -- Enable the MTAirCompressor:1 CSC + enabled: false + +mtaircompressor1-sim: + # -- Enable the MTAirCompressor:1 simulator CSC + enabled: false + +mtaircompressor2: + # -- Enable the MTAirCompressor:2 CSC + enabled: false + +mtaircompressor2-sim: + # -- Enable the MTAirCompressor:2 simulator CSC + enabled: false + +mtcamhexapod: + # -- Enable the MTHexapod:1 CSC + enabled: false + +mtcamhexapod-sim: + # -- Enable the MTHexapod:1 simulator CSC + enabled: false + +mtdome: + # -- Enable the MTDome CSC + enabled: false + +mtdome-sim: + # -- Enable the MTDome simulator CSC + enabled: false + +mtheaderservice: + # -- Enable the MTHeaderService CSC + enabled: false + +m1m3: + # -- Enable the MTM1M3 hardware simulator CSC + enabled: false + +m1m3-sim: + # -- Enable the MTM1M3 simulator CSC + enabled: false + +mtm2: + # -- Enable the MTM2 CSC + enabled: false + +mtm2-sim: + # -- Enable the MTM2 simulator CSC + enabled: false + +mtm2hexapod: + # -- Enable the MTHexapod:2 CSC + enabled: false + +mtm2hexapod-sim: + # -- Enable the MTHexapod:2 simulator CSC + enabled: false + +mtmount: + # -- Enable the MTMount CSC + enabled: false + +mtmount-sim: + # -- Enable the MTMount simulator CSC + enabled: false + +mtrotator: + # -- Enable the MTRotator CSC + enabled: false + +mtrotator-sim: + # -- Enable the MTRotator simulator CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. 
global: diff --git a/applications/uws/Chart.yaml b/applications/uws/Chart.yaml index 46ed611daa..8124b868bc 100644 --- a/applications/uws/Chart.yaml +++ b/applications/uws/Chart.yaml @@ -9,10 +9,17 @@ dependencies: - name: csc alias: atocps version: 1.0.0 + condition: atocps.enabled repository: "file://../../charts/csc" - name: csc alias: ccocps version: 1.0.0 + condition: ccocps.enabled + repository: "file://../../charts/csc" +- name: csc + alias: mtocps + version: 1.0.0 + condition: mtocps.enabled repository: "file://../../charts/csc" - name: uws-api-server version: 1.5.0 diff --git a/applications/uws/README.md b/applications/uws/README.md index f5d98a8214..a2cb92764e 100644 --- a/applications/uws/README.md +++ b/applications/uws/README.md @@ -16,7 +16,10 @@ Deployment for the UWS and DM OCPS CSCs | global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| atocps.enabled | bool | `false` | Enable the OCPS:1 CSC | +| ccocps.enabled | bool | `false` | Enable the OCPS:2 CSC | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| +| mtocps.enabled | bool | `false` | Enable the OCPS:3 CSC | | uws-api-server.basePath | string | `"uws-server"` | The base path for the client ingress | | uws-api-server.butlerPg | object | `{}` | Configuration for Postgres backed butlers The object must have the following attributes defined: _secretKey_ (A label that points to the VaultSecret for the postgres credentials) _containerPath_ (The directory location in the container for the Butler secret) _dbUser_ (The database user name for butler access) | | uws-api-server.client.enabled | bool | `false` | Turn on the UWS client system if desired | diff --git a/applications/uws/values.yaml b/applications/uws/values.yaml index 70e29b7d0b..d53832c26c 100644 --- a/applications/uws/values.yaml +++ b/applications/uws/values.yaml @@ -6,6 +6,18 @@ csc_collector: # _type_ (OPTIONAL: The secret type. Defaults to Opaque.) secrets: [] +atocps: + # -- Enable the OCPS:1 CSC + enabled: false + +ccocps: + # -- Enable the OCPS:2 CSC + enabled: false + +mtocps: + # -- Enable the OCPS:3 CSC + enabled: false + # The following will be set by parameters injected by Argo CD and should not # be set in the individual environment values files. global: diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml index 4403110a15..78340416a9 100644 --- a/charts/csc/templates/job.yaml +++ b/charts/csc/templates/job.yaml @@ -1,4 +1,3 @@ -{{- if .Values.enabled -}} apiVersion: batch/v1 kind: Job metadata: @@ -182,4 +181,3 @@ spec: tolerations: {{- toYaml $ | nindent 8 }} {{- end }} -{{- end -}} From 835a8028b83d265ff5e5232c1685f30d88ab91f1 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 20 Sep 2023 16:27:39 -0700 Subject: [PATCH 521/588] Add ocps-uws-job app. 
--- applications/ocps-uws-job/Chart.yaml | 3 +++ .../templates/ocps-uws-job-application.yaml | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 applications/ocps-uws-job/Chart.yaml create mode 100644 environments/templates/ocps-uws-job-application.yaml diff --git a/applications/ocps-uws-job/Chart.yaml b/applications/ocps-uws-job/Chart.yaml new file mode 100644 index 0000000000..aa41890510 --- /dev/null +++ b/applications/ocps-uws-job/Chart.yaml @@ -0,0 +1,3 @@ +apiVersion: v2 +name: ocps-uws-job +version: 1.0.0 diff --git a/environments/templates/ocps-uws-job-application.yaml b/environments/templates/ocps-uws-job-application.yaml new file mode 100644 index 0000000000..7df1fe253e --- /dev/null +++ b/environments/templates/ocps-uws-job-application.yaml @@ -0,0 +1,18 @@ +{{- if .Values.applications.uws -}} +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: ocps-uws-job + namespace: argocd + finalizers: + - resources-finalizer.argocd.argoproj.io +spec: + destination: + namespace: ocps-uws-job + server: https://kubernetes.default.svc + project: default + source: + path: applications/ocps-uws-job + repoURL: {{ .Values.repoUrl }} + targetRevision: {{ .Values.targetRevision }} +{{- end -}} \ No newline at end of file From 23fa3984bfd686b86945b1887e609896bec7d602 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 21 Sep 2023 11:35:10 -0700 Subject: [PATCH 522/588] Add DDS stuff back to nublado. 
--- applications/nublado/templates/vault-secrets.yaml | 2 +- applications/nublado/values-tucson-teststand.yaml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/applications/nublado/templates/vault-secrets.yaml b/applications/nublado/templates/vault-secrets.yaml index 9211c270a0..a2ef9908ac 100644 --- a/applications/nublado/templates/vault-secrets.yaml +++ b/applications/nublado/templates/vault-secrets.yaml @@ -70,4 +70,4 @@ metadata: spec: path: "{{- .Values.global.vaultSecretsPath }}/ts/software/ts-salkafka" type: Opaque -{{- end }} \ No newline at end of file +{{- end }} diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 9f594fa942..d92d96d3e4 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -11,11 +11,14 @@ controller: cycle: null recommended_tag: "recommended_k0001" lab: + pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" LSST_SITE: tucson + LSST_DDS_INTERFACE: net1 + LSST_DDS_PARTITION_PREFIX: tucson LSST_TOPIC_SUBNAME: sal LSST_KAFKA_SECURITY_USERNAME: ts-salkafka LSST_KAFKA_BROKER_ADDR: sasquatch-kafka-brokers.sasquatch:9092 @@ -32,7 +35,6 @@ controller: volumeMounts: - containerPath: "/home" volumeName: "home" - pullSecret: "pull-secret" secrets: - secretName: "kafka-secret" secretKey: "ts-salkafka-password" From cfd64b148c9c1e3edd7139d103deccbaf0180ae4 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 25 Sep 2023 09:44:23 -0700 Subject: [PATCH 523/588] Try different secret approach for nublado.
--- applications/nublado/README.md | 7 +------ applications/nublado/templates/controller-deployment.yaml | 7 ------- applications/nublado/templates/vault-secrets.yaml | 2 -- applications/nublado/values-tucson-teststand.yaml | 3 +-- applications/nublado/values.yaml | 3 --- 5 files changed, 2 insertions(+), 20 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index d6f6f231d0..a6053d5e8f 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -53,12 +53,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab | | controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | -| controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumeMounts` (similar to the main `volumeMountss` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | -| controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. 
| -| controller.config.lab.nodeSelector | object | `{}` | Node selector rules for user lab pods | -| controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | -| controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | -| controller.config.lab.kafkaSecret | bool | `false` | Add the Kafka secret to the user pods | +| controller.config.lab.initcontainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image reference), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with `allowPrivilegeEscalation` true. Otherwise it will, run as UID 1000. | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | | controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. 
`large` with `small` as the default) | diff --git a/applications/nublado/templates/controller-deployment.yaml b/applications/nublado/templates/controller-deployment.yaml index e854a79f84..800fc2cb41 100644 --- a/applications/nublado/templates/controller-deployment.yaml +++ b/applications/nublado/templates/controller-deployment.yaml @@ -45,13 +45,6 @@ spec: name: "nublado-secret" key: "slack-webhook" {{- end }} - {{- if .Values.controller.config.lab.kafkaSecret }} - - name: LSST_KAFKA_SECURITY_PASSWORD - valueFrom: - secretKeyRef: - name: kafka-secret - key: ts-salkafka-password - {{- end }} ports: - name: "http" containerPort: 8080 diff --git a/applications/nublado/templates/vault-secrets.yaml b/applications/nublado/templates/vault-secrets.yaml index a2ef9908ac..134653ec4f 100644 --- a/applications/nublado/templates/vault-secrets.yaml +++ b/applications/nublado/templates/vault-secrets.yaml @@ -59,7 +59,6 @@ spec: path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson {{- end }} -{{- if .Values.controller.config.lab.kafkaSecret }} --- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret @@ -70,4 +69,3 @@ metadata: spec: path: "{{- .Values.global.vaultSecretsPath }}/ts/software/ts-salkafka" type: Opaque -{{- end }} diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index d92d96d3e4..f9bf0805fb 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -20,12 +20,11 @@ controller: LSST_DDS_INTERFACE: net1 LSST_DDS_PARTITION_PREFIX: tucson LSST_TOPIC_SUBNAME: sal - LSST_KAFKA_SECURITY_USERNAME: ts-salkafka + LSST_KAFKA_PASSFILE: "/opt/lsst/software/jupyterlab/secrets/kafka_credentials.txt" LSST_KAFKA_BROKER_ADDR: sasquatch-kafka-brokers.sasquatch:9092 LSST_SCHEMA_REGISTRY_URL: http://sasquatch-schema-registry.sasquatch:8081 PGPASSFILE: 
"/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" - kafkaSecret: true initContainers: - name: "inithome" image: diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index a713dfa678..f19b4d5603 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -294,9 +294,6 @@ controller: # that key. secrets: [] - # -- Add the Kafka secret to the user pods - kafkaSecret: false - # -- Available lab sizes. Names must be chosen from `fine`, # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, # `gargantuan`, and `colossal` in that order. Each should specify the From ca7c4af3fe4e67df4eddf53c9e3c702550e34c77 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 27 Sep 2023 13:27:45 -0700 Subject: [PATCH 524/588] Fix headerservice tags. --- applications/auxtel/values-tucson-teststand.yaml | 1 - applications/calsys/values-tucson-teststand.yaml | 1 - applications/simonyitel/values-tucson-teststand.yaml | 1 - 3 files changed, 3 deletions(-) diff --git a/applications/auxtel/values-tucson-teststand.yaml b/applications/auxtel/values-tucson-teststand.yaml index fb2ac30f9c..d961044765 100644 --- a/applications/auxtel/values-tucson-teststand.yaml +++ b/applications/auxtel/values-tucson-teststand.yaml @@ -31,7 +31,6 @@ atdometrajectory: atheaderservice: image: repository: ts-dockerhub.lsst.org/headerservice - tag: ts-v3.1.11_c0029 pullPolicy: Always env: URL_SPEC: --lfa_mode s3 --s3instance tuc diff --git a/applications/calsys/values-tucson-teststand.yaml b/applications/calsys/values-tucson-teststand.yaml index 8b9766ff98..c4101cde0d 100644 --- a/applications/calsys/values-tucson-teststand.yaml +++ b/applications/calsys/values-tucson-teststand.yaml @@ -12,7 +12,6 @@ gcheaderservice1: enabled: true image: repository: ts-dockerhub.lsst.org/headerservice - tag: ts-v3.1.11_c0029 pullPolicy: Always env: CAMERA: gc1 diff --git a/applications/simonyitel/values-tucson-teststand.yaml 
b/applications/simonyitel/values-tucson-teststand.yaml index 4a8d76d4a1..d8d1d03ccf 100644 --- a/applications/simonyitel/values-tucson-teststand.yaml +++ b/applications/simonyitel/values-tucson-teststand.yaml @@ -14,7 +14,6 @@ ccheaderservice: enabled: true image: repository: ts-dockerhub.lsst.org/headerservice - tag: ts-v3.1.11_c0029 pullPolicy: Always env: URL_SPEC: --lfa_mode s3 --s3instance tuc From 8c34fdc78589bb7e8f1686cc33e35b401c8f4b52 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 28 Sep 2023 09:19:30 -0700 Subject: [PATCH 525/588] Fix configfile and entrypoint in CSC chart. --- charts/csc/templates/job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml index 78340416a9..c053c1019f 100644 --- a/charts/csc/templates/job.yaml +++ b/charts/csc/templates/job.yaml @@ -126,7 +126,7 @@ spec: {{- if .Values.entrypoint }} - name: entrypoint configMap: - name: {{ .Release.Name }}-entrypoint + name: {{ include "chart.name" $ }}-entrypoint defaultMode: 0755 items: - key: .startup.sh @@ -135,7 +135,7 @@ spec: {{- if .Values.configfile }} - name: configfile configMap: - name: {{ .Release.Name }}-configfile + name: {{ include "chart.name" $ }}-configfile items: - key: {{ .Values.configfile.filename }} path: {{ .Values.configfile.filename }} From a079a92c0a42689368acac27ca73319efcf6efcb Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 29 Sep 2023 17:14:02 -0700 Subject: [PATCH 526/588] Make doc build work. 
--- .../obssys/values-tucson-teststand.yaml | 8 +- docs/applications/index.rst | 13 +++ docs/extras/schemas/environment.json | 84 +++++++++++++++---- src/phalanx/models/environments.py | 25 ++++++ 4 files changed, 112 insertions(+), 18 deletions(-) diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml index 8c39859d67..ef83dd67ad 100644 --- a/applications/obssys/values-tucson-teststand.yaml +++ b/applications/obssys/values-tucson-teststand.yaml @@ -27,11 +27,11 @@ atqueue: RUN_ARG: 2 --state enabled USER_USERNAME: user butlerSecret: - containerPath: &bS-cP /home/saluser/.lsst + containerPath: &abS-cP /home/saluser/.lsst dbUser: oods secretPermFixer: - name: butler-secret - containerPath: *bS-cP + containerPath: *abS-cP nfsMountpoint: - name: auxtel-gen3-butler containerPath: /repo/LATISS @@ -117,11 +117,11 @@ mtqueue: RUN_ARG: 1 --state enabled USER_USERNAME: user butlerSecret: - containerPath: &bS-cP /home/saluser/.lsst + containerPath: &mbS-cP /home/saluser/.lsst dbUser: oods secretPermFixer: - name: butler-secret - containerPath: *bS-cP + containerPath: *mbS-cP nfsMountpoint: - name: auxtel-gen3-butler containerPath: /repo/LATISS diff --git a/docs/applications/index.rst b/docs/applications/index.rst index d9c5c08f91..2a16ada6b6 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -83,3 +83,16 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde prompt-proto-service-latiss/index prompt-proto-service-lsstcam/index prompt-proto-service-lsstcomcam/index + +.. 
toctree:: + :maxdepth: 1 + :caption: Rubin Observatory Control System + + auxtel/index + calsys/index + control-system-test/index + eas/index + love/index + obssys/index + simonyitel/index + uws/index diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index c59efb1b8f..1e6bb2bea5 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -159,32 +159,88 @@ "title": "Git repository branch" }, "controlSystemAppNamespace": { - "title": "Controlsystemappnamespace", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Controlsystemappnamespace" }, "controlSystemImageTag": { - "title": "Controlsystemimagetag", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Controlsystemimagetag" }, "controlSystemSiteTag": { - "title": "Controlsystemsitetag", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Controlsystemsitetag" }, "controlSystemTopicName": { - "title": "Controlsystemtopicname", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Controlsystemtopicname" }, "controlSystemKafkaBrokerAddress": { - "title": "Controlsystemkafkabrokeraddress", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Controlsystemkafkabrokeraddress" }, "controlSystemSchemaRegistryUrl": { - "title": "Controlsystemschemaregistryurl", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Controlsystemschemaregistryurl" }, "controlSystemS3EndpointUrl": { - "title": "Controlsystems3endpointurl", - "type": "string" + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "title": 
"Controlsystems3Endpointurl" } }, "required": [ diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index db384cd8a0..f15afbabc2 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -235,6 +235,31 @@ class EnvironmentConfig(EnvironmentBaseConfig): ), ) + control_system_app_namespace: str | None = None + """Set the namespace for the control system components. + + Each control system application consists of many components that need to + know what namespace to which they belong. + """ + + control_system_image_tag: str | None = None + """The image tag to use for control system containers.""" + + control_system_site_tag: str | None = None + """The tag that tells the control system component where it is running.""" + + control_system_topic_name: str | None = None + """The Kafka identifier for control system topics.""" + + control_system_kafka_broker_address: str | None = None + """The Kafka broker address for the control system components.""" + + control_system_schema_registry_url: str | None = None + """The Schema Registry URL for the control system components.""" + + control_system_s3_endpoint_url: str | None = None + """The S3 URL for the environment specific LFA.""" + model_config = ConfigDict(extra="forbid") @classmethod From a8dcb81d532be945d9b0da31833c1db08fcb4cca Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 2 Oct 2023 09:29:27 -0700 Subject: [PATCH 527/588] Add some applications docs. 
--- docs/applications/auxtel/index.rst | 2 ++ docs/applications/calsys/index.rst | 2 ++ docs/applications/control-system-test/index.rst | 2 ++ docs/applications/eas/index.rst | 2 ++ docs/applications/love/index.rst | 2 ++ docs/applications/obssys/index.rst | 2 ++ docs/applications/simonyitel/index.rst | 2 ++ docs/applications/uws/index.rst | 2 ++ 8 files changed, 16 insertions(+) diff --git a/docs/applications/auxtel/index.rst b/docs/applications/auxtel/index.rst index 4a55253196..44fe7c6b0a 100644 --- a/docs/applications/auxtel/index.rst +++ b/docs/applications/auxtel/index.rst @@ -4,6 +4,8 @@ auxtel — Auxiliary Telescope Control System Components ###################################################### +The auxtel application houses the CSCs associated with the Auxiliary Telescope. Simulation environments use simulators for all CSCs except the ATAOS, ATDomeTrajectory, ATHeaderService, ATOODS and ATPtg. Those environments also contain a simulator for the low-level controller of the ATHexapod. + .. jinja:: auxtel :file: applications/_summary.rst.jinja diff --git a/docs/applications/calsys/index.rst b/docs/applications/calsys/index.rst index cf332a2033..7a2c26ff97 100644 --- a/docs/applications/calsys/index.rst +++ b/docs/applications/calsys/index.rst @@ -4,6 +4,8 @@ calsys — Calibration Systems Control System Components ###################################################### +The calsys application houses CSCs associated with calibration systems across both the Simonyi Survey Telescope and the Auxiliary Telescope. It also contains the simulation generic camera systems (GenericCamera:1 and GCHeaderService:1). Simulation environments currently do not have any systems besides the afore mentionend simulation generic camera. + .. 
jinja:: calsys :file: applications/_summary.rst.jinja diff --git a/docs/applications/control-system-test/index.rst b/docs/applications/control-system-test/index.rst index 4cd2127866..0afb4d752a 100644 --- a/docs/applications/control-system-test/index.rst +++ b/docs/applications/control-system-test/index.rst @@ -4,6 +4,8 @@ control-system-test — Systems for Testing Control System Components ################################################################### +The control-system-test application houses a CSC (Test:42) and the control system integration testing infrastructure. These systems are meant for testing the control system for cycle upgrades and other potentially distruptive software changes. + .. jinja:: control-system-test :file: applications/_summary.rst.jinja diff --git a/docs/applications/eas/index.rst b/docs/applications/eas/index.rst index 50bda5caf0..b8558ffc68 100644 --- a/docs/applications/eas/index.rst +++ b/docs/applications/eas/index.rst @@ -4,6 +4,8 @@ eas — Environmental Awareness System Control System Components ############################################################## +The eas application houses CSCs associated with the Environmental Awareness System. Simulation environments use simulators for all CSCs except the WeatherForecast CSC. + .. jinja:: eas :file: applications/_summary.rst.jinja diff --git a/docs/applications/love/index.rst b/docs/applications/love/index.rst index 77befff33e..1041b545c8 100644 --- a/docs/applications/love/index.rst +++ b/docs/applications/love/index.rst @@ -4,6 +4,8 @@ love — LSST Observers Visualization Environment ############################################### +The love application houses all of the systems that make up the LSST Observers Visualization Envrionment. It consists of a visualization front-end, a set of managers to coordinate information exchange, prodcuers that gather the topic traffic from each CSC, a commander to allow control of CSCs and various support applications. + .. 
jinja:: love :file: applications/_summary.rst.jinja diff --git a/docs/applications/obssys/index.rst b/docs/applications/obssys/index.rst index f08fe702db..f82f6cc578 100644 --- a/docs/applications/obssys/index.rst +++ b/docs/applications/obssys/index.rst @@ -4,6 +4,8 @@ obssys — Observatory Systems Control System Components ###################################################### +The obssys application houses the CSCs associated with high-level Observatory control and monitoring. All CSCs within this group use real applications (no simulators) across all environments. + .. jinja:: obssys :file: applications/_summary.rst.jinja diff --git a/docs/applications/simonyitel/index.rst b/docs/applications/simonyitel/index.rst index fb033a76c5..3bdbe4fc12 100644 --- a/docs/applications/simonyitel/index.rst +++ b/docs/applications/simonyitel/index.rst @@ -4,6 +4,8 @@ simonyitel — Simonyi Telescope Control System Components ######################################################## +The simonyitel application houses all the CSCs associated with the Simonyi Survey Telescope. Simulation environments use simulators except for the CCHeaderService, CCOODS, MTAOS, MTDomeTrajectory and MTPtg CSCs. + .. jinja:: simonyitel :file: applications/_summary.rst.jinja diff --git a/docs/applications/uws/index.rst b/docs/applications/uws/index.rst index 58452d7109..e23e1df5bb 100644 --- a/docs/applications/uws/index.rst +++ b/docs/applications/uws/index.rst @@ -4,6 +4,8 @@ uws — Universal Worker Service for OCPS ####################################### +The uws application houses services and CSCs associated with the Universal Worker System. The UWS consists of a server that accepts requests to run DM specific jobs, such as calibrations. The application also contains the OCPS CSCs that are associated with each camera. Simulation envrionments do not use simulators for these CSCs. + .. 
jinja:: uws :file: applications/_summary.rst.jinja From 9895341e127d6956736918bcfff8857efa9f290c Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 3 Oct 2023 16:43:01 -0700 Subject: [PATCH 528/588] Add message.keyFormat CLI option for kafdrop. --- applications/sasquatch/values-tucson-teststand.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 98618642fb..8c418de2fd 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -175,6 +175,7 @@ kafka-connect-manager: topicsRegex: ".*GCHeaderService|.*GenericCamera" kafdrop: + cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" ingress: enabled: true hostname: tucson-teststand.lsst.codes From 1865803a0660ce57678ee90a72b18454d6c16580 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 4 Oct 2023 16:39:30 -0700 Subject: [PATCH 529/588] Change kafka connect key converter to json. --- applications/sasquatch/README.md | 4 +++- applications/sasquatch/charts/strimzi-kafka/README.md | 4 +++- .../sasquatch/charts/strimzi-kafka/templates/connect.yaml | 5 +++-- applications/sasquatch/charts/strimzi-kafka/values.yaml | 5 +++++ applications/sasquatch/values-tucson-teststand.yaml | 4 ++++ 5 files changed, 18 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 9d24b6a36c..c7d14f71e9 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -385,7 +385,9 @@ Rubin Observatory's telemetry service. | source-kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | | square-events.cluster.name | string | `"sasquatch"` | | | strimzi-kafka.cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. 
| -| strimzi-kafka.connect.enabled | bool | `false` | Enable Kafka Connect. | +| strimzi-kafka.connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | +| strimzi-kafka.connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | +| strimzi-kafka.connect.enabled | bool | `true` | Enable Kafka Connect. | | strimzi-kafka.connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | strimzi-kafka.kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 83d73ae9bf..329d30a030 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -7,7 +7,9 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | Key | Type | Default | Description | |-----|------|---------|-------------| | cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | -| connect.enabled | bool | `false` | Enable Kafka Connect. | +| connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | +| connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | +| connect.enabled | bool | `true` | Enable Kafka Connect. 
| | connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml index c825464d75..1a5d3e51c8 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/connect.yaml @@ -29,8 +29,9 @@ spec: config.storage.replication.factor: -1 offset.storage.replication.factor: -1 status.storage.replication.factor: -1 - key.converter: io.confluent.connect.avro.AvroConverter - key.converter.schemas.enable: true + {{- range $key, $value := .Values.connect.config }} + {{ $key }}: {{ $value }} + {{- end }} key.converter.schema.registry.url: http://sasquatch-schema-registry.sasquatch:8081 value.converter: io.confluent.connect.avro.AvroConverter value.converter.schemas.enable: true diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index d10b1468e9..c7e9205b2c 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -120,6 +120,11 @@ connect: image: ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655 # -- Number of Kafka Connect replicas to run. 
replicas: 3 + config: + # -- Set the converter for the message key + key.converter: io.confluent.connect.avro.AvroConverter + # -- Enable converted schemas for the message key + key.converter.schemas.enable: true registry: ingress: diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 8c418de2fd..fa3936bb35 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -34,6 +34,10 @@ strimzi-kafka: nginx.ingress.kubernetes.io/rewrite-target: /$2 hostname: tucson-teststand.lsst.codes path: /schema-registry(/|$)(.*) + connect: + config: + key.converter: org.apache.kafka.connect.storage.StringConverter + key.converter.schemas.enable: false influxdb: persistence: From 333c2fb4397f4bbbb6c0193a9c70975051e6b265 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 5 Oct 2023 15:09:04 -0700 Subject: [PATCH 530/588] Switch to JSON key converter. --- applications/sasquatch/values-tucson-teststand.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index fa3936bb35..21b43bafba 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -36,7 +36,7 @@ strimzi-kafka: path: /schema-registry(/|$)(.*) connect: config: - key.converter: org.apache.kafka.connect.storage.StringConverter + key.converter: org.apache.kafka.connect.json.JsonConverter key.converter.schemas.enable: false influxdb: From 3f6540b03c2a3a62efbc9904b8aba8ceee7ccd50 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 6 Oct 2023 15:16:46 -0700 Subject: [PATCH 531/588] Turn off auto topic creation in kafka. 
--- applications/sasquatch/values-tucson-teststand.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 21b43bafba..968314a830 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -1,5 +1,7 @@ strimzi-kafka: kafka: + config: + auto.create.topics.enable: false storage: storageClassName: rook-ceph-block externalListener: From de34c0c4be9627aab6b4f395ff897e8775e5c8d5 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 17 Oct 2023 15:27:53 -0700 Subject: [PATCH 532/588] Updates for integration-testing chart. --- .../templates/imaging-workflow.yaml | 13 ++++++++++++- .../templates/job-workflow-template.yaml | 2 +- .../templates/testing-workflow.yaml | 13 ++++++++++++- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml index 450a014722..d950967bc8 100644 --- a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml @@ -80,8 +80,19 @@ spec: value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" - name: jobname value: auxtel-telescope-dome-daytime-checkout - - name: auxtel-prep-flat + - name: auxtel-slew-take-image-daytime-checkout depends: auxtel-telescope-dome-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Telescope_Slew_and_Take_Image_Checkout.list" + - name: jobname + value: auxtel-telescope-slew-take-image-daytime-checkout + - name: auxtel-prep-flat + depends: auxtel-slew-take-image-daytime-checkout templateRef: 
name: integration-test-job-template template: inttest-template diff --git a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml index bf19252611..8e064db527 100644 --- a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml @@ -47,7 +47,7 @@ spec: value: {{ .Values.envEfd }} - name: RUN_ARG value: {{ printf "'{{inputs.parameters.integrationtest}}'" }} - - name: LSST_SASL_PLAIN_PASSWORD + - name: LSST_KAFKA_SECURITY_PASSWORD valueFrom: secretKeyRef: name: ts-salkafka diff --git a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml index 5453e43e46..b4d2c98a9b 100644 --- a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml @@ -125,8 +125,19 @@ spec: value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" - name: jobname value: auxtel-telescope-dome-daytime-checkout - - name: auxtel-prep-flat + - name: auxtel-slew-take-image-daytime-checkout depends: auxtel-telescope-dome-daytime-checkout + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_AuxTel_Telescope_Slew_and_Take_Image_Checkout.list" + - name: jobname + value: auxtel-telescope-slew-take-image-daytime-checkout + - name: auxtel-prep-flat + depends: auxtel-telescope-slew-take-image-daytime-checkout templateRef: name: integration-test-job-template template: inttest-template From 55e7c837d4353eb58dc2527dbd99afe24f4e239b Mon Sep 17 00:00:00 2001 From: 
Michael Reuter Date: Fri, 20 Oct 2023 09:10:50 -0700 Subject: [PATCH 533/588] Update version of Kafdrop. --- applications/sasquatch/values-tucson-teststand.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 968314a830..7373d1bc9e 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -181,6 +181,8 @@ kafka-connect-manager: topicsRegex: ".*GCHeaderService|.*GenericCamera" kafdrop: + image: + tag: 4.0.0 cmdArgs: "--message.format=AVRO --message.keyFormat=DEFAULT --topic.deleteEnabled=false --topic.createEnabled=false" ingress: enabled: true From 4b919a001471416cc53ede84d8709377706851d0 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 20 Oct 2023 13:24:15 -0700 Subject: [PATCH 534/588] Add variable for Kafka topic replication factor. --- charts/csc_collector/templates/configmap-env.yaml | 1 + docs/extras/schemas/environment.json | 12 ++++++++++++ environments/README.md | 1 + environments/templates/auxtel-application.yaml | 2 ++ environments/templates/calsys-application.yaml | 2 ++ .../templates/control-system-test-application.yaml | 2 ++ environments/templates/eas-application.yaml | 2 ++ environments/templates/love-application.yaml | 2 ++ environments/templates/obssys-application.yaml | 2 ++ environments/templates/simonyitel-application.yaml | 2 ++ environments/templates/uws-application.yaml | 2 ++ environments/values.yaml | 3 +++ src/phalanx/models/environments.py | 3 +++ 13 files changed, 36 insertions(+) diff --git a/charts/csc_collector/templates/configmap-env.yaml b/charts/csc_collector/templates/configmap-env.yaml index 6f04ed0987..5e6eec6629 100644 --- a/charts/csc_collector/templates/configmap-env.yaml +++ b/charts/csc_collector/templates/configmap-env.yaml @@ -6,6 +6,7 @@ data: LSST_SITE: {{ $.Values.global.controlSystemSiteTag }} LSST_TOPIC_SUBNAME: {{ 
$.Values.global.controlSystemTopicName }} LSST_KAFKA_BROKER_ADDR: {{ $.Values.global.controlSystemKafkaBrokerAddress }} + LSST_KAFKA_REPLICATION_FACTOR: {{ $.Values.global.controlSystemKafkaTopicReplicationFactor | quote }} LSST_KAFKA_SECURITY_USERNAME: ts-salkafka LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystemSchemaRegistryUrl }} S3_ENDPOINT_URL: {{ $.Values.global.controlSystemS3EndpointUrl }} diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index 1e6bb2bea5..992ed5f30f 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -218,6 +218,18 @@ "default": null, "title": "Controlsystemkafkabrokeraddress" }, + "controlSystemKafkaTopicReplicationFactor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "title": "Controlsystemkafkatopicreplicationfactor" + }, "controlSystemSchemaRegistryUrl": { "anyOf": [ { diff --git a/environments/README.md b/environments/README.md index 4464f09423..81a1ca1d52 100644 --- a/environments/README.md +++ b/environments/README.md @@ -67,6 +67,7 @@ | controlSystemAppNamespace | string | None, must be set | Application namespacce for the control system deployment | | controlSystemImageTag | string | None, must be set | Image tag for the control system deployment | | controlSystemKafkaBrokerAddress | string | `"sasquatch-kafka-brokers.sasquatch:9092"` | Kafka broker address for the control system deployment | +| controlSystemKafkaTopicReplicationFactor | int | `3` | Kafka topic replication factor for control system topics | | controlSystemS3EndpointUrl | string | None, must be set: "" | S3 endpoint (LFA) for the control system deployment | | controlSystemSchemaRegistryUrl | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL for the control system deployment | | controlSystemSiteTag | string | None, must be set | Site tag for the control system deployment | diff --git 
a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml index 1f8e02c511..c19ef441ed 100644 --- a/environments/templates/auxtel-application.yaml +++ b/environments/templates/auxtel-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: "global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml index 80919b72e6..6867f95448 100644 --- a/environments/templates/calsys-application.yaml +++ b/environments/templates/calsys-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: "global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml index c9d0f8485b..385c6d6db1 100644 --- a/environments/templates/control-system-test-application.yaml +++ b/environments/templates/control-system-test-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: "global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ 
.Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/templates/eas-application.yaml b/environments/templates/eas-application.yaml index 5849dc0aac..3e2cbe1732 100644 --- a/environments/templates/eas-application.yaml +++ b/environments/templates/eas-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: "global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/templates/love-application.yaml b/environments/templates/love-application.yaml index d66c60fa58..e56ecf7fac 100644 --- a/environments/templates/love-application.yaml +++ b/environments/templates/love-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: "global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml index 8fd38eebc2..ed0f2bdc33 100644 --- a/environments/templates/obssys-application.yaml +++ b/environments/templates/obssys-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: 
"global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml index 354c885fb4..1c186a39d6 100644 --- a/environments/templates/simonyitel-application.yaml +++ b/environments/templates/simonyitel-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: "global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/templates/uws-application.yaml b/environments/templates/uws-application.yaml index 83d73b8601..3030dae0d3 100644 --- a/environments/templates/uws-application.yaml +++ b/environments/templates/uws-application.yaml @@ -41,6 +41,8 @@ spec: value: {{ .Values.controlSystemTopicName | quote }} - name: "global.controlSystemKafkaBrokerAddress" value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} + - name: "global.controlSystemKafkaTopicReplicationFactor" + value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - name: "global.controlSystemSchemaRegistryUrl" value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - name: "global.controlSystemS3EndpointUrl" diff --git a/environments/values.yaml b/environments/values.yaml index 4a8149473c..022929161e 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ 
-233,6 +233,9 @@ controlSystemTopicName: sal # -- Kafka broker address for the control system deployment controlSystemKafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 +# -- Kafka topic replication factor for control system topics +controlSystemKafkaTopicReplicationFactor: 3 + # -- Schema registry URL for the control system deployment controlSystemSchemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index f15afbabc2..ea4320d422 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -254,6 +254,9 @@ class EnvironmentConfig(EnvironmentBaseConfig): control_system_kafka_broker_address: str | None = None """The Kafka broker address for the control system components.""" + control_system_kafka_topic_replication_factor: int | None = None + """The Kafka topic replication factor for control system components.""" + control_system_schema_registry_url: str | None = None """The Schema Registry URL for the control system components.""" From ab8d73485d4c7c734605bbcbbd99efc0034eeb8e Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 23 Oct 2023 12:25:32 -0700 Subject: [PATCH 535/588] sasquatch: Set min isr based on disruption tolerance. --- applications/sasquatch/README.md | 1 + applications/sasquatch/charts/strimzi-kafka/README.md | 1 + .../sasquatch/charts/strimzi-kafka/templates/kafka.yaml | 4 ++-- applications/sasquatch/charts/strimzi-kafka/values.yaml | 2 ++ applications/sasquatch/values-tucson-teststand.yaml | 1 + 5 files changed, 7 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index c7d14f71e9..f40b5ab39a 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -397,6 +397,7 @@ Rubin Observatory's telemetry service.
| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be retained. | | strimzi-kafka.kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. | | strimzi-kafka.kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. | +| strimzi-kafka.kafka.disruption_tolerance | int | `0` | Number of down brokers that the system can tolerate. | | strimzi-kafka.kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. | | strimzi-kafka.kafka.externalListener.bootstrap.host | string | `""` | Name used for TLS hostname verification. | | strimzi-kafka.kafka.externalListener.bootstrap.loadBalancerIP | string | `""` | The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the loadBalancerIP when a load balancer is created. This field is ignored if the cloud provider does not support the feature. Once the IP address is provisioned this option make it possible to pin the IP address. We can request the same IP next time it is provisioned. This is important because it lets us configure a DNS record, associating a hostname with that pinned IP address. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 329d30a030..eb1f3741fb 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -19,6 +19,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be retained. 
| | kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. | | kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. | +| kafka.disruption_tolerance | int | `0` | Number of down brokers that the system can tolerate. | | kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. | | kafka.externalListener.bootstrap.host | string | `""` | Name used for TLS hostname verification. | | kafka.externalListener.bootstrap.loadBalancerIP | string | `""` | The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the loadBalancerIP when a load balancer is created. This field is ignored if the cloud provider does not support the feature. Once the IP address is provisioned this option make it possible to pin the IP address. We can request the same IP next time it is provisioned. This is important because it lets us configure a DNS record, associating a hostname with that pinned IP address. 
| diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index a5dbdbfa1a..c56d58aed9 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -92,9 +92,9 @@ spec: config: offsets.topic.replication.factor: {{ .Values.kafka.replicas }} transaction.state.log.replication.factor: {{ .Values.kafka.replicas }} - transaction.state.log.min.isr: {{ .Values.kafka.replicas }} + transaction.state.log.min.isr: {{ sub .Values.kafka.replicas .Values.kafka.disruption_tolerance }} default.replication.factor: {{ .Values.kafka.replicas }} - min.insync.replicas: {{ .Values.kafka.replicas }} + min.insync.replicas: {{ sub .Values.kafka.replicas .Values.kafka.disruption_tolerance }} {{- range $key, $value := .Values.kafka.config }} {{ $key }}: {{ $value }} {{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index c7e9205b2c..ec975126d9 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -8,6 +8,8 @@ kafka: version: "3.5.1" # -- Number of Kafka broker replicas to run. replicas: 3 + # -- Number of down brokers that the system can tolerate. + disruption_tolerance: 0 storage: # -- Size of the backing storage disk for each of the Kafka brokers. 
size: 500Gi diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 7373d1bc9e..6e32137d95 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -1,5 +1,6 @@ strimzi-kafka: kafka: + disruption_tolerance: 1 config: auto.create.topics.enable: false storage: From d1d0840f048aa8b03df6d7c8e4df5c56ac9f355f Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 23 Oct 2023 13:22:07 -0700 Subject: [PATCH 536/588] Reduce kafka retention period. --- applications/sasquatch/values-tucson-teststand.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 6e32137d95..cc6ccc5a61 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -3,6 +3,8 @@ strimzi-kafka: disruption_tolerance: 1 config: auto.create.topics.enable: false + log.retention.hours: 24 + offsets.retention.minutes: 1440 storage: storageClassName: rook-ceph-block externalListener: From 3d6fb6defc40eedf94a3142ca08bae49a39d7b8a Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 24 Oct 2023 16:35:08 -0700 Subject: [PATCH 537/588] Updates for integration testing chart. 
--- .../templates/cleanup-reports-workflow.yaml | 2 +- .../integration-testing/templates/imaging-workflow.yaml | 6 +++--- .../integration-testing/templates/testing-workflow.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml index 3c7ca95b65..0bfface1f7 100644 --- a/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml @@ -23,7 +23,7 @@ spec: container: image: alpine:latest command: [sh, -c] - args: ["rm /pvc/*.*"] + args: ["rm -f /pvc/*.* /pvc/STATE_FAILED"] volumeMounts: - name: testreports mountPath: /pvc diff --git a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml index d950967bc8..bd691ca6b1 100644 --- a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml @@ -80,7 +80,7 @@ spec: value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" - name: jobname value: auxtel-telescope-dome-daytime-checkout - - name: auxtel-slew-take-image-daytime-checkout + - name: auxtel-telescope-slew-take-image-daytime-checkout depends: auxtel-telescope-dome-daytime-checkout templateRef: name: integration-test-job-template @@ -88,11 +88,11 @@ spec: arguments: parameters: - name: integrationtest - value: "-A Test_Report_AuxTel_Telescope_Slew_and_Take_Image_Checkout.list" + value: "-A Test_Report_AuxTel_Slew_and_Take_Image_Checkout.list" - name: jobname value: auxtel-telescope-slew-take-image-daytime-checkout - name: auxtel-prep-flat - 
depends: auxtel-slew-take-image-daytime-checkout + depends: auxtel-telescope-slew-take-image-daytime-checkout templateRef: name: integration-test-job-template template: inttest-template diff --git a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml index b4d2c98a9b..8db41e3941 100644 --- a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml @@ -125,7 +125,7 @@ spec: value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" - name: jobname value: auxtel-telescope-dome-daytime-checkout - - name: auxtel-slew-take-image-daytime-checkout + - name: auxtel-telescope-slew-take-image-daytime-checkout depends: auxtel-telescope-dome-daytime-checkout templateRef: name: integration-test-job-template @@ -133,7 +133,7 @@ spec: arguments: parameters: - name: integrationtest - value: "-A Test_Report_AuxTel_Telescope_Slew_and_Take_Image_Checkout.list" + value: "-A Test_Report_AuxTel_Slew_and_Take_Image_Checkout.list" - name: jobname value: auxtel-telescope-slew-take-image-daytime-checkout - name: auxtel-prep-flat From 204a7481eac1ea75db09e8c21b062de34f600f3e Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 25 Oct 2023 14:19:58 -0700 Subject: [PATCH 538/588] Add simple workflow to test new constructs. 
--- applications/control-system-test/README.md | 1 + .../charts/integration-testing/README.md | 1 + .../templates/cleanup-reports-workflow.yaml | 5 +++ .../templates/controller-configmap.yaml | 2 +- .../templates/job-workflow-template.yaml | 20 ++++++--- .../templates/simple-workflow.yaml | 45 +++++++++++++++++++ .../charts/integration-testing/values.yaml | 2 + 7 files changed, 69 insertions(+), 7 deletions(-) create mode 100644 applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md index 76c57582e9..8ed044eba5 100644 --- a/applications/control-system-test/README.md +++ b/applications/control-system-test/README.md @@ -20,6 +20,7 @@ Deployment for the Test CSCs and Integration Testing Workflows | integration-testing.enabled | bool | `false` | Enable the integration testing system | | integration-testing.envEfd | string | `nil` | The Name of the EFD instance. 
| | integration-testing.image.tag | string | `nil` | The image tag for the Integration Test runner container | +| integration-testing.jobLabelName | string | `"control-system-test"` | Label for jobs to get them to appear in application | | integration-testing.persistentVolume.claimName | string | `"saved-reports"` | PVC name for saving the reports | | integration-testing.persistentVolume.storage | string | `"1Gi"` | Storage size request for the PVC | | integration-testing.reportLocation | string | `"/home/saluser/robotframework_EFD/Reports"` | Container location of the RobotFramework reports | diff --git a/applications/control-system-test/charts/integration-testing/README.md b/applications/control-system-test/charts/integration-testing/README.md index cee995e935..311c7d112f 100644 --- a/applications/control-system-test/charts/integration-testing/README.md +++ b/applications/control-system-test/charts/integration-testing/README.md @@ -8,6 +8,7 @@ Helm chart for Integration Testing Workflows. |-----|------|---------|-------------| | envEfd | string | `nil` | The Name of the EFD instance. 
| | image.tag | string | `nil` | The image tag for the Integration Test runner container | +| jobLabelName | string | `"control-system-test"` | Label for jobs to get them to appear in application | | persistentVolume.claimName | string | `"saved-reports"` | PVC name for saving the reports | | persistentVolume.storage | string | `"1Gi"` | Storage size request for the PVC | | reportLocation | string | `"/home/saluser/robotframework_EFD/Reports"` | Container location of the RobotFramework reports | diff --git a/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml index 0bfface1f7..d9f801111f 100644 --- a/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/cleanup-reports-workflow.yaml @@ -10,6 +10,8 @@ spec: artifactRepositoryRef: configMap: integration-test-controller-configmap key: artifactRepository + ttlStrategy: + secondsAfterCompletion: 1800 volumes: - name: testreports persistentVolumeClaim: @@ -20,6 +22,9 @@ spec: entrypoint: cleanup-reports templates: - name: cleanup-reports + metadata: + labels: + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} container: image: alpine:latest command: [sh, -c] diff --git a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml index bac2f013b0..901d1be0f6 100644 --- a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml @@ -8,7 +8,7 @@ metadata: data: artifactRepository: | # However, all nested maps must be strings s3: - endpoint: {{ 
$.Values.global.controlSystemS3EndpointUrl }} + endpoint: {{ $.Values.global.controlSystemS3EndpointUrl | trimPrefix "https://" }} bucket: {{ .Values.s3Bucket }} insecure: false accessKeySecret: diff --git a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml index 8e064db527..14b6ff793c 100644 --- a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml @@ -8,6 +8,8 @@ spec: configMap: integration-test-controller-configmap key: artifactRepository entrypoint: inttest-template + ttlStrategy: + secondsAfterCompletion: 1800 imagePullSecrets: - name: nexus3-docker volumes: @@ -22,25 +24,31 @@ spec: value: "-A Run-Robot.list" - name: jobname value: "myjob" + - name: reportname + value: "report.xml" outputs: - parameters: - - name: job-name - valueFrom: - jsonPath: '{.metadata.name}' + artifacts: + - name: integration-test-reports + archive: + none: {} + path: {{ .Values.reportLocation }}/{{ printf "{{inputs.parameters.reportname}}" }} + s3: + key: IntegrationTests/{{ printf "{{workflow.parameters.date-key}}" }}/{{ printf "{{inputs.parameters.reportname}}" }} metadata: labels: - argocd.argoproj.io/instance: integration-testing + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} securityContext: runAsUser: 73006 runAsGroup: 73006 fsGroup: 73006 container: + command: [/home/saluser/.startup.sh] name: test-{{ printf "{{inputs.parameters.jobname}}" }} {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystemImageTag }} image: "ts-dockerhub.lsst.org/integrationtests:{{ $imageTag }}" imagePullPolicy: Always envFrom: - - configMapRef: + - configMapRef: name: csc-env-config env: - name: ENV_EFD diff --git 
a/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml new file mode 100644 index 0000000000..d9df3378d2 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml @@ -0,0 +1,45 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: simple-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: nexus3-docker + podMetadata: + labels: + argocd.argoproj.io/instance: {{ .Values.jobLabelName }} + arguments: + parameters: + - name: date-key + value: "20230601" + entrypoint: run-tests + templates: + - name: run-tests + steps: + - - name: cleanup + templateRef: + name: cleanup-reports-workflow + template: cleanup-reports + - - name: standby + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_Standby.list" + - name: jobname + value: simple-standby + - name: reportname + value: standby.xml diff --git a/applications/control-system-test/charts/integration-testing/values.yaml b/applications/control-system-test/charts/integration-testing/values.yaml index 67083d7190..d88872a8d0 100644 --- a/applications/control-system-test/charts/integration-testing/values.yaml +++ b/applications/control-system-test/charts/integration-testing/values.yaml @@ -16,3 +16,5 @@ persistentVolume: claimName: saved-reports # -- Storage size request for the PVC storage: 1Gi +# -- Label for jobs to get them to appear in application 
+jobLabelName: control-system-test From 44deb83d0006f2a67ce752ec4dea0f2ffadcbdfb Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 26 Oct 2023 14:09:12 -0700 Subject: [PATCH 539/588] Switch to CNPG for LOVE DB. --- applications/love/README.md | 24 ++------ .../love/charts/love-manager/README.md | 24 ++------ .../templates/database-service.yaml | 11 ---- .../templates/database-statefulset.yaml | 58 ------------------- .../templates/view-backup-cronjob.yaml | 8 +-- .../love/charts/love-manager/values.yaml | 45 +------------- .../love/values-tucson-teststand.yaml | 12 +--- 7 files changed, 15 insertions(+), 167 deletions(-) delete mode 100644 applications/love/charts/love-manager/templates/database-service.yaml delete mode 100644 applications/love/charts/love-manager/templates/database-statefulset.yaml diff --git a/applications/love/README.md b/applications/love/README.md index 3466ba895a..e02e44c958 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -23,22 +23,6 @@ Deployment for the LSST Operators Visualization Environment | love-manager.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | | love-manager.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | | love-manager.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| love-manager.database.affinity | object | `{}` | Affinity rules for the LOVE database pods | -| love-manager.database.env.POSTGRES_DB | string | `"postgres"` | Define the database type | -| love-manager.database.env.POSTGRES_USER | string | `"postgres"` | Define the database user | -| love-manager.database.envSecrets.POSTGRES_PASSWORD | string | `"db-pass"` | The database password secret key name | -| love-manager.database.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the database image | -| 
love-manager.database.image.repository | string | `"postgres"` | The database image to use | -| love-manager.database.image.tag | string | `"12.0"` | The tag to use for the database image | -| love-manager.database.nodeSelector | object | `{}` | Node selection rules for the LOVE database pods | -| love-manager.database.port | int | `5432` | The database port number | -| love-manager.database.resources | object | `{}` | Resource specifications for the LOVE database pods | -| love-manager.database.storage.accessMode | string | `"ReadWriteMany"` | The access mode for the database storage | -| love-manager.database.storage.claimSize | string | `"2Gi"` | The size of the database storage request | -| love-manager.database.storage.name | string | `"love-manager-database"` | Label for the database storage point | -| love-manager.database.storage.path | string | `"/var/lib/postgresql/data"` | Path within the running container | -| love-manager.database.storage.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | -| love-manager.database.tolerations | list | `[]` | Toleration specifications for the LOVE database pods | | love-manager.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | | love-manager.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | | love-manager.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | @@ -46,9 +30,9 @@ Deployment for the LSST Operators Visualization Environment | love-manager.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | | love-manager.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager | | love-manager.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| love-manager.env.DB_NAME | string | `"postgres"` | The name of the database being used for the LOVE manager. Must match `database.env.POSTGRES_DB` | -| love-manager.env.DB_PORT | int | `5432` | The port for the database Must match `database.port` | -| love-manager.env.DB_USER | string | `"postgres"` | The database user needed for access from the LOVE manager. Must match `database.env.POSTGRES_USER` | +| love-manager.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager. | +| love-manager.env.DB_PORT | int | `5432` | The port for the database | +| love-manager.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager. | | love-manager.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | | love-manager.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | | love-manager.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | @@ -61,7 +45,7 @@ Deployment for the LSST Operators Visualization Environment | love-manager.envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | | love-manager.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager admin user password secret key name | | love-manager.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager cmd_user user password secret key name | -| love-manager.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. 
Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. | | love-manager.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager process connection password secret key name | | love-manager.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | | love-manager.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager secret secret key name | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index b24102d153..51c48f9c86 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -12,22 +12,6 @@ Helm chart for the LOVE manager service. | autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | | autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | | autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| database.affinity | object | `{}` | Affinity rules for the LOVE database pods | -| database.env.POSTGRES_DB | string | `"postgres"` | Define the database type | -| database.env.POSTGRES_USER | string | `"postgres"` | Define the database user | -| database.envSecrets.POSTGRES_PASSWORD | string | `"db-pass"` | The database password secret key name | -| database.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the database image | -| database.image.repository | string | `"postgres"` | The database image to use | -| database.image.tag | string | `"12.0"` | The tag to use for the database image | -| database.nodeSelector | object | `{}` | Node selection rules for the LOVE database pods | -| database.port | int | `5432` | The database port number | -| 
database.resources | object | `{}` | Resource specifications for the LOVE database pods | -| database.storage.accessMode | string | `"ReadWriteMany"` | The access mode for the database storage | -| database.storage.claimSize | string | `"2Gi"` | The size of the database storage request | -| database.storage.name | string | `"love-manager-database"` | Label for the database storage point | -| database.storage.path | string | `"/var/lib/postgresql/data"` | Path within the running container | -| database.storage.storageClass | string | `"local-store"` | The storage class to request the disk allocation from | -| database.tolerations | list | `[]` | Toleration specifications for the LOVE database pods | | env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | | env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | | env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | @@ -35,9 +19,9 @@ Helm chart for the LOVE manager service. | env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one spcified in the LOVE commander chart | | env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager | | env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| env.DB_NAME | string | `"postgres"` | The name of the database being used for the LOVE manager. Must match `database.env.POSTGRES_DB` | -| env.DB_PORT | int | `5432` | The port for the database Must match `database.port` | -| env.DB_USER | string | `"postgres"` | The database user needed for access from the LOVE manager. Must match `database.env.POSTGRES_USER` | +| env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager. 
| +| env.DB_PORT | int | `5432` | The port for the database | +| env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager. | | env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | | env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | | env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | @@ -50,7 +34,7 @@ Helm chart for the LOVE manager service. | envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | | envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager admin user password secret key name | | envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager cmd_user user password secret key name | -| envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. | | envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager process connection password secret key name | | envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | | envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager secret secret key name | diff --git a/applications/love/charts/love-manager/templates/database-service.yaml b/applications/love/charts/love-manager/templates/database-service.yaml deleted file mode 100644 index 520d57000d..0000000000 --- a/applications/love/charts/love-manager/templates/database-service.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "love-manager.database.fullname" . 
}}-service - namespace: {{ $.Values.global.controlSystemAppNamespace }} -spec: - selector: - app.kubernetes.io/instance: {{ include "love-manager.database.fullname" . }} - ports: - - port: {{ .Values.database.port }} - diff --git a/applications/love/charts/love-manager/templates/database-statefulset.yaml b/applications/love/charts/love-manager/templates/database-statefulset.yaml deleted file mode 100644 index 010aa93358..0000000000 --- a/applications/love/charts/love-manager/templates/database-statefulset.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "love-manager.database.fullname" . }} - namespace: {{ $.Values.global.controlSystemAppNamespace }} - labels: - {{- include "love-manager.database.labels" . | nindent 4 }} -spec: - serviceName: {{ include "love-manager.database.fullname" . }}-service - selector: - matchLabels: - {{- include "love-manager.database.selectorLabels" . | nindent 6 }} - replicas: {{ .Values.database.replicas | default 1 }} - template: - metadata: - labels: - {{- include "love-manager.database.selectorLabels" . | nindent 8 }} - spec: - containers: - - name: {{ include "love-manager.database.fullname" . 
}} - image: "{{ .Values.database.image.repository }}:{{ .Values.database.image.tag }}" - imagePullPolicy: {{ .Values.database.image.pullPolicy }} - ports: - - containerPort: {{ .Values.database.port }} - volumeMounts: - - mountPath: {{ .Values.database.storage.path }} - name: {{ .Values.database.storage.name }}-pvc - env: - {{- $data := dict "env" .Values.database.env "secretName" "" }} - {{- include "helpers.envFromList" $data | indent 10 }} - {{- $data := dict "env" .Values.database.envSecrets "secretName" .Values.envSecretKeyName }} - {{- include "helpers.envFromList" $data | indent 10 }} - {{- with $.Values.database.resources }} - resources: - {{- toYaml $.Values.database.resources | nindent 10 }} - {{- end }} - {{- with $.Values.database.nodeSelector }} - nodeSelector: - {{- toYaml $ | nindent 8 }} - {{- end }} - {{- with $.Values.database.affinity }} - affinity: - {{- toYaml $ | nindent 8 }} - {{- end }} - {{- with $.Values.database.tolerations }} - tolerations: - {{- toYaml $ | nindent 8 }} - {{- end }} - volumeClaimTemplates: - - metadata: - name: {{ .Values.database.storage.name }}-pvc - spec: - accessModes: - - {{ .Values.database.storage.accessMode | quote }} - storageClassName: {{ .Values.database.storage.storageClass }} - resources: - requests: - storage: {{ .Values.database.storage.claimSize }} diff --git a/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml index 14e6185b2b..c3e6b53051 100644 --- a/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml +++ b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml @@ -35,11 +35,11 @@ spec: - name: PGHOST value: {{ .Values.env.DB_HOST | quote }} - name: PGPORT - value: {{ .Values.database.port | quote }} + value: {{ .Values.env.DB_PORT | quote }} - name: PGDATABASE - value: {{ .Values.database.env.POSTGRES_DB | quote }} + value: {{ .Values.env.DB_NAME | quote }} - name: 
PGUSER - value: {{ .Values.database.env.POSTGRES_USER | quote }} + value: {{ .Values.env.DB_USER | quote }} - name: LOVE_SITE value: {{ .Values.env.LOVE_SITE | quote }} {{- range $env_var, $env_value := .Values.viewBackup.env }} @@ -50,7 +50,7 @@ spec: valueFrom: secretKeyRef: name: love-secrets - key: {{ .Values.database.envSecrets.POSTGRES_PASSWORD }} + key: {{ .Values.envSecrets.DB_PASS }} - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index 18f501c873..c0ae216c4b 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -44,15 +44,12 @@ env: # -- The type of database engine being used for the LOVE manager DB_ENGINE: postgresql # -- The name of the database being used for the LOVE manager. - # Must match `database.env.POSTGRES_DB` - DB_NAME: postgres + DB_NAME: love # -- The database user needed for access from the LOVE manager. - # Must match `database.env.POSTGRES_USER` - DB_USER: postgres + DB_USER: love # -- The name of the database service DB_HOST: love-manager-database-service # -- The port for the database - # Must match `database.port` DB_PORT: 5432 # -- The name of the redis service REDIS_HOST: love-manager-redis-service @@ -68,7 +65,6 @@ envSecrets: # -- The LOVE manager cmd_user user password secret key name CMD_USER_PASS: cmd-user-pass # -- The database password secret key name. - # Must match `database.envSecrets.POSTGRES_PASSWORD` DB_PASS: db-pass # -- The redis password secret key name. 
# Must match `redis.envSecrets.REDIS_PASS` @@ -96,43 +92,6 @@ tolerations: [] affinity: {} # -- Configuration for the LOVE manager pods readiness probe readinessProbe: {} -database: - image: - # -- The database image to use - repository: postgres - # -- The tag to use for the database image - tag: "12.0" - # -- The pull policy for the database image - pullPolicy: IfNotPresent - # -- The database port number - port: 5432 - storage: - # -- Label for the database storage point - name: love-manager-database - # -- Path within the running container - path: /var/lib/postgresql/data - # -- The storage class to request the disk allocation from - storageClass: local-store - # -- The access mode for the database storage - accessMode: ReadWriteMany - # -- The size of the database storage request - claimSize: 2Gi - # -- Resource specifications for the LOVE database pods - resources: {} - # -- Node selection rules for the LOVE database pods - nodeSelector: {} - # -- Toleration specifications for the LOVE database pods - tolerations: [] - # -- Affinity rules for the LOVE database pods - affinity: {} - env: - # -- Define the database type - POSTGRES_DB: postgres - # -- Define the database user - POSTGRES_USER: postgres - envSecrets: - # -- The database password secret key name - POSTGRES_PASSWORD: db-pass redis: image: # -- The redis image to use diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index 2987dde0cd..26fc1d4cec 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -42,6 +42,7 @@ love-manager: AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org AUTH_LDAP_3_SERVER_URI: ldap://ipa3.tu.lsst.org + DB_HOST: postgresdb01.tu.lsst.org REDIS_CONFIG_EXPIRY: 5 REDIS_CONFIG_CAPACITY: 5000 LOVE_SITE: tucson @@ -66,17 +67,6 @@ love-manager: port: 8000 initialDelaySeconds: 20 periodSeconds: 10 - database: - image: - 
repository: postgres - tag: '15.0' - pullPolicy: IfNotPresent - storage: - name: love-manager-database - path: /var/lib/postgresql - storageClass: rook-ceph-block - accessMode: ReadWriteOnce - claimSize: 2Gi redis: image: repository: redis From e45ba4b073987f7a4fc7d27becfd1c6449b4446b Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 26 Oct 2023 14:17:40 -0700 Subject: [PATCH 540/588] Add global parameter to all application values.yaml files. --- applications/auxtel/README.md | 1 + applications/auxtel/values.yaml | 4 ++++ applications/calsys/README.md | 1 + applications/calsys/values.yaml | 4 ++++ applications/control-system-test/README.md | 1 + applications/control-system-test/values.yaml | 4 ++++ applications/eas/README.md | 1 + applications/eas/values.yaml | 4 ++++ applications/love/README.md | 1 + applications/love/values.yaml | 4 ++++ applications/obssys/README.md | 1 + applications/obssys/values.yaml | 4 ++++ applications/simonyitel/README.md | 1 + applications/simonyitel/values.yaml | 4 ++++ applications/uws/README.md | 1 + applications/uws/values.yaml | 4 ++++ 16 files changed, 40 insertions(+) diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index 4ba653fd10..bcdd744d87 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -10,6 +10,7 @@ Deployment for the Auxiliary Telescope CSCs | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | 
global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml index 7eaf54f067..12826c7ca9 100644 --- a/applications/auxtel/values.yaml +++ b/applications/auxtel/values.yaml @@ -85,6 +85,10 @@ global: # @default -- Set by ArgoCD controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" diff --git a/applications/calsys/README.md b/applications/calsys/README.md index d6973f6cc9..7d49bd4cee 100644 --- a/applications/calsys/README.md +++ b/applications/calsys/README.md @@ -12,6 +12,7 @@ Deployment for the Calibration System CSCs | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml index 0de6fb8210..73936c5445 100644 --- a/applications/calsys/values.yaml +++ 
b/applications/calsys/values.yaml @@ -49,6 +49,10 @@ global: # @default -- Set by ArgoCD controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md index 8ed044eba5..43bda22046 100644 --- a/applications/control-system-test/README.md +++ b/applications/control-system-test/README.md @@ -10,6 +10,7 @@ Deployment for the Test CSCs and Integration Testing Workflows | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/control-system-test/values.yaml b/applications/control-system-test/values.yaml index 0062c56078..4982dea29c 100644 --- a/applications/control-system-test/values.yaml +++ b/applications/control-system-test/values.yaml @@ -45,6 +45,10 @@ global: # @default -- Set by ArgoCD controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + 
controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" diff --git a/applications/eas/README.md b/applications/eas/README.md index c0421d5d55..5a88c50f26 100644 --- a/applications/eas/README.md +++ b/applications/eas/README.md @@ -29,6 +29,7 @@ Deployment for the Environmental Awareness Systems CSCs | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/eas/values.yaml b/applications/eas/values.yaml index bf4a033685..1357ed868a 100644 --- a/applications/eas/values.yaml +++ b/applications/eas/values.yaml @@ -169,6 +169,10 @@ global: # @default -- Set by ArgoCD controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" diff --git a/applications/love/README.md b/applications/love/README.md index e02e44c958..db3f7d95d1 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -10,6 +10,7 @@ Deployment for the LSST 
Operators Visualization Environment | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/love/values.yaml b/applications/love/values.yaml index 70e29b7d0b..6bffc81c51 100644 --- a/applications/love/values.yaml +++ b/applications/love/values.yaml @@ -41,6 +41,10 @@ global: # @default -- Set by ArgoCD controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" diff --git a/applications/obssys/README.md b/applications/obssys/README.md index bb31ba2946..46f08d1677 100644 --- a/applications/obssys/README.md +++ b/applications/obssys/README.md @@ -12,6 +12,7 @@ Deployment for the Observatory System CSCs | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | 
+| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/obssys/values.yaml b/applications/obssys/values.yaml index 18e5ad4e86..036671a714 100644 --- a/applications/obssys/values.yaml +++ b/applications/obssys/values.yaml @@ -45,6 +45,10 @@ global: # @default -- Set by ArgoCD controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md index 4084da4050..11dd5cc977 100644 --- a/applications/simonyitel/README.md +++ b/applications/simonyitel/README.md @@ -13,6 +13,7 @@ Deployment for the Simonyi Survey Telescope CSCs | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control 
system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml index 2d35fe23ce..331e11e0f0 100644 --- a/applications/simonyitel/values.yaml +++ b/applications/simonyitel/values.yaml @@ -133,6 +133,10 @@ global: # @default -- Set by ArgoCD controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" diff --git a/applications/uws/README.md b/applications/uws/README.md index a2cb92764e..e8cea935c1 100644 --- a/applications/uws/README.md +++ b/applications/uws/README.md @@ -10,6 +10,7 @@ Deployment for the UWS and DM OCPS CSCs | global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | | global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | | global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | | global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | | global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | | global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | diff --git a/applications/uws/values.yaml b/applications/uws/values.yaml index d53832c26c..e41e0b7994 100644 --- a/applications/uws/values.yaml +++ b/applications/uws/values.yaml @@ -53,6 +53,10 @@ global: # @default -- Set by ArgoCD 
controlSystemKafkaBrokerAddress: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + controlSystemKafkaTopicReplicationFactor: "" + # -- Schema registry URL for the control system deployment # @default -- Set by ArgoCD controlSystemSchemaRegistryUrl: "" From 075b883539190ca59c5e7c1753bfba0976b90365 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 26 Oct 2023 15:44:25 -0700 Subject: [PATCH 541/588] Increase minimum log compaction time for kafka. --- applications/sasquatch/values-tucson-teststand.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index cc6ccc5a61..623bc8ec96 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -5,6 +5,7 @@ strimzi-kafka: auto.create.topics.enable: false log.retention.hours: 24 offsets.retention.minutes: 1440 + min.compaction.lag.ms: 86400000 storage: storageClassName: rook-ceph-block externalListener: From e73f456d16394961b171e83874032920f31b16b4 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 26 Oct 2023 17:05:57 -0700 Subject: [PATCH 542/588] Change kafka retention period. 
--- applications/sasquatch/values-tucson-teststand.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 623bc8ec96..31a92dc2b0 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -3,9 +3,9 @@ strimzi-kafka: disruption_tolerance: 1 config: auto.create.topics.enable: false - log.retention.hours: 24 - offsets.retention.minutes: 1440 - min.compaction.lag.ms: 86400000 + log.retention.hours: 12 + offsets.retention.minutes: 720 + log.cleaner.min.compaction.lag.ms: 43200000 storage: storageClassName: rook-ceph-block externalListener: From b9f263072571ef82f6420fcd6b52d0e01b410c07 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 27 Oct 2023 08:29:31 -0700 Subject: [PATCH 543/588] Revert to one CSC per LOVE producer. --- applications/love/README.md | 2 +- .../love/charts/love-producer/README.md | 2 +- .../love-producer/templates/deployment.yaml | 6 +- .../love/charts/love-producer/values.yaml | 7 +- .../love/values-tucson-teststand.yaml | 188 +++++++++++------- 5 files changed, 121 insertions(+), 84 deletions(-) diff --git a/applications/love/README.md b/applications/love/README.md index db3f7d95d1..ba90087c93 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -121,7 +121,7 @@ Deployment for the LSST Operators Visualization Environment | love-producer.image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | | love-producer.image.tag | string | `nil` | | | love-producer.nodeSelector | object | `{}` | Node selection rules applied to all LOVE producer pods | -| love-producer.producers | obj | `[]` | This sections sets the list of producers to use. The producers are collected into producer groups and a CSC producers will be assigned to a given container. 
The producers should be specified like: _name_: The top-level name for the producer group. _cscs_: Map of _CSC name:index_ Example: ataos: ATAOS:0 The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | +| love-producer.producers | obj | `[]` | This sections sets the list of producers to use. The producers should be specified like: _name_: The identifying name for the CSC producer _csc_: _CSC name:index_ The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | | love-producer.replicaCount | int | `1` | Set the replica count for the LOVE producers | | love-producer.resources | object | `{}` | Resource specifications applied to all LOVE producer pods | | love-producer.tolerations | list | `[]` | Toleration specifications applied to all LOVE producer pods | diff --git a/applications/love/charts/love-producer/README.md b/applications/love/charts/love-producer/README.md index 6c10b9b910..7857e17d30 100644 --- a/applications/love/charts/love-producer/README.md +++ b/applications/love/charts/love-producer/README.md @@ -14,7 +14,7 @@ Helm chart for the LOVE producers. | image.repository | string | `"lsstts/love-producer"` | The LOVE producer image to use | | image.tag | string | `nil` | | | nodeSelector | object | `{}` | Node selection rules applied to all LOVE producer pods | -| producers | obj | `[]` | This sections sets the list of producers to use. The producers are collected into producer groups and a CSC producers will be assigned to a given container. The producers should be specified like: _name_: The top-level name for the producer group. 
_cscs_: Map of _CSC name:index_ Example: ataos: ATAOS:0 The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | +| producers | obj | `[]` | This sections sets the list of producers to use. The producers should be specified like: _name_: The identifying name for the CSC producer _csc_: _CSC name:index_ The following attributes are optional _resources_ (A resource object specification) _nodeSelector_ (A node selector object specification) _tolerations_ (A list of tolerations) _affinity_ (An affinity object specification) | | replicaCount | int | `1` | Set the replica count for the LOVE producers | | resources | object | `{}` | Resource specifications applied to all LOVE producer pods | | tolerations | list | `[]` | Toleration specifications applied to all LOVE producer pods | diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml index 5721fc772d..0dc4653832 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -24,8 +24,7 @@ spec: app.kubernetes.io/instance: {{ $appName }} spec: containers: - {{- range $cName, $csc := $producer.cscs }} - - name: {{ $cName }} + - name: {{ $producer.name }} {{- $imageTag := $.Values.image.tag | default $.Values.global.controlSystemImageTag }} image: "{{ $.Values.image.repository }}:{{ $imageTag }}" imagePullPolicy: {{ $.Values.image.pullPolicy }} @@ -34,7 +33,7 @@ spec: name: csc-env-config env: - name: LOVE_CSC_PRODUCER - value: {{ $csc | quote }} + value: {{ $producer.csc | quote }} - name: LSST_KAFKA_SECURITY_PASSWORD valueFrom: secretKeyRef: @@ -61,7 +60,6 @@ spec: resources: {{- toYaml $resources | nindent 12 }} {{- end }} - {{- end }} imagePullSecrets: - name: nexus3-docker {{- 
if or $.Values.nodeSelector $producer.nodeSelector }} diff --git a/applications/love/charts/love-producer/values.yaml b/applications/love/charts/love-producer/values.yaml index 161c42fe9f..ca39d63d95 100644 --- a/applications/love/charts/love-producer/values.yaml +++ b/applications/love/charts/love-producer/values.yaml @@ -14,12 +14,9 @@ env: envSecrets: PROCESS_CONNECTION_PASS: process-connection-pass # -- (obj) This sections sets the list of producers to use. -# The producers are collected into producer groups and a CSC producers -# will be assigned to a given container. # The producers should be specified like: -# _name_: The top-level name for the producer group. -# _cscs_: Map of _CSC name:index_ -# Example: ataos: ATAOS:0 +# _name_: The identifying name for the CSC producer +# _csc_: _CSC name:index_ # The following attributes are optional # _resources_ (A resource object specification) # _nodeSelector_ (A node selector object specification) diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index 26fc1d4cec..70310fa82f 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -161,79 +161,121 @@ love-producer: pullPolicy: Always env: WEBSOCKET_HOST: love-nginx-service/love/manager/ws/subscription + resources: + requests: + cpu: 10m + memory: 100Mi + limits: + cpu: 100m + memory: 300Mi producers: - - name: auxtel - cscs: - ataos: ATAOS:0 - atdome: ATDome:0 - atdometrajectory: ATDomeTrajectory:0 - athexapod: ATHexapod:0 - atmcs: ATMCS:0 - atpneumatics: ATPneumatics:0 - atptg: ATPtg:0 - - name: comcam - cscs: - cccamera: CCCamera:0 - ccheaderservice: CCHeaderService:0 - ccocps: OCPS:2 - ccoods: CCOODS:0 - - name: eas - cscs: - auxteless01: ESS:201 - auxteless02: ESS:202 - auxteless03: ESS:203 - auxteless04: ESS:204 - calibhilless01: ESS:301 - dimm1: DIMM:1 - dimm2: DIMM:2 - dsm1: DSM:1 - dsm2: DSM:2 - m2ess106: ESS:106 - mtdomeess01: ESS:101 - 
mtdomeess02: ESS:102 - mtdomeess03: ESS:103 - tmaess01: ESS:1 - tmaess104: ESS:104 - tmaess105: ESS:105 - weatherforecast: WeatherForecast:0 - - name: genericcamera - cscs: - gcheaderservice1: GCHeaderService:1 - genericcamera1: GenericCamera:1 - - name: latiss - cscs: - atcamera: ATCamera:0 - atheaderservice: ATHeaderService:0 - atocps: OCPS:1 - atoods: ATOODS:0 - atspectrograph: ATSpectrograph:0 + - name: ataos + csc: ATAOS:0 + - name: atcamera + csc: ATCamera:0 + - name: atdome + csc: ATDome:0 + - name: atdometrajectory + csc: ATDomeTrajectory:0 + - name: atheaderservice + csc: ATHeaderService:0 + - name: athexapod + csc: ATHexapod:0 + - name: atmcs + csc: ATMCS:0 + - name: atocps + csc: OCPS:1 + - name: atoods + csc: ATOODS:0 + - name: atpneumatics + csc: ATPneumatics:0 + - name: atptg + csc: ATPtg:0 + - name: atscheduler + csc: Scheduler:2 + - name: atscriptqueue + csc: ScriptQueue:2 + - name: atspectrograph + csc: ATSpectrograph:0 + - name: authorize + csc: Authorize:0 + - name: auxteless01 + csc: ESS:201 + - name: auxteless02 + csc: ESS:202 + - name: auxteless03 + csc: ESS:203 + - name: auxteless04 + csc: ESS:204 + - name: calibhilless01 + csc: ESS:301 + - name: camerahexapod + csc: MTHexapod:1 + - name: cccamera + csc: CCCamera:0 + - name: ccheaderservice + csc: CCHeaderService:0 + - name: ccoods + csc: CCOODS:0 + - name: ccocps + csc: OCPS:2 + - name: dimm1 + csc: DIMM:1 + - name: dimm2 + csc: DIMM:2 + - name: dsm1 + csc: DSM:1 + - name: dsm2 + csc: DSM:2 + - name: gcheaderservice1 + csc: GCHeaderService:1 + - name: genericcamera1 + csc: GenericCamera:1 + - name: lasertracker1 + csc: LaserTracker:1 + - name: love + csc: LOVE:0 + - name: m2ess106 + csc: ESS:106 + - name: m2hexapod + csc: MTHexapod:2 + - name: mtaircompressor1 + csc: MTAirCompressor:1 + - name: mtaircompressor2 + csc: MTAirCompressor:2 + - name: mtaos + csc: MTAOS:0 + - name: mtdome + csc: MTDome:0 + - name: mtdomeess01 + csc: ESS:101 + - name: mtdomeess02 + csc: ESS:102 + - name: 
mtdomeess03 + csc: ESS:103 + - name: mtdometrajectory + csc: MTDomeTrajectory:0 - name: mtm1m3 - cscs: - mtm1m3: MTM1M3:0 + csc: MTM1M3:0 - name: mtm2 - cscs: - mtm2: MTM2:0 - - name: obssys - cscs: - atscheduler: Scheduler:2 - atscriptqueue: ScriptQueue:2 - authorize: Authorize:0 - love: LOVE:0 - mtscheduler: Scheduler:1 - mtscriptqueue: ScriptQueue:1 - watcher: Watcher:0 - - name: simonyitel - cscs: - camerahexapod: MTHexapod:1 - m2hexapod: MTHexapod:2 - mtaos: MTAOS:0 - mtdome: MTDome:0 - mtdometrajectory: MTDomeTrajectory:0 - mtmount: MTMount:0 - mtptg: MTPtg:0 - mtrotator: MTRotator:0 - - name: simonyitel-support - cscs: - lasertracker1: LaserTracker:1 - mtaircompressor1: MTAirCompressor:1 - mtaircompressor2: MTAirCompressor:2 + csc: MTM2:0 + - name: mtmount + csc: MTMount:0 + - name: mtptg + csc: MTPtg:0 + - name: mtrotator + csc: MTRotator:0 + - name: mtscheduler + csc: Scheduler:1 + - name: mtscriptqueue + csc: ScriptQueue:1 + - name: tmaess01 + csc: ESS:1 + - name: tmaess104 + csc: ESS:104 + - name: tmaess105 + csc: ESS:105 + - name: watcher + csc: Watcher:0 + - name: weatherforecast + csc: WeatherForecast:0 From 0e6afe8627e8f166d05c1d0f3b5bf3bccf885dd2 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 27 Oct 2023 15:32:46 -0700 Subject: [PATCH 544/588] Turn on log archiving for workflow jobs. 
--- .../integration-testing/templates/controller-configmap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml index 901d1be0f6..531a9a60d2 100644 --- a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml @@ -7,6 +7,7 @@ metadata: workflows.argoproj.io/configmap-type: Parameter data: artifactRepository: | # However, all nested maps must be strings + archiveLogs: true s3: endpoint: {{ $.Values.global.controlSystemS3EndpointUrl | trimPrefix "https://" }} bucket: {{ .Values.s3Bucket }} From 7b11ea1ea456c588cd2920c016acb6f9ccbe5ddf Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 7 Nov 2023 08:11:48 -0700 Subject: [PATCH 545/588] Add obs-env to schedulers. 
--- applications/obssys/values-tucson-teststand.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml index ef83dd67ad..61ce3a28da 100644 --- a/applications/obssys/values-tucson-teststand.yaml +++ b/applications/obssys/values-tucson-teststand.yaml @@ -91,6 +91,11 @@ atscheduler: readOnly: false server: nfs-scratch.tu.lsst.org serverPath: /scratch/scheduler + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env authorize: enabled: true @@ -181,6 +186,11 @@ mtscheduler: readOnly: false server: nfs-scratch.tu.lsst.org serverPath: /scratch/scheduler + - name: obs-env + containerPath: /net/obs-env + readOnly: true + server: nfs-obsenv.tu.lsst.org + serverPath: /obs-env watcher: namespace: *ns From 98d9c9bfa925ebc4e6c4743c2ffaf802ff65469a Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 8 Nov 2023 10:51:30 -0700 Subject: [PATCH 546/588] Add scaling policies to love-manager HPA. 
--- applications/love/README.md | 2 ++ applications/love/charts/love-manager/README.md | 2 ++ .../love-manager/templates/manager-hpa.yaml | 15 +++++++++++++++ applications/love/charts/love-manager/values.yaml | 4 ++++ environments/README.md | 2 +- 5 files changed, 24 insertions(+), 1 deletion(-) diff --git a/applications/love/README.md b/applications/love/README.md index ba90087c93..aff93c7125 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -22,6 +22,8 @@ Deployment for the LSST Operators Visualization Environment | love-manager.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | love-manager.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | | love-manager.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| love-manager.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | | love-manager.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | | love-manager.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | | love-manager.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index 51c48f9c86..2280f3d9ff 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -10,6 +10,8 @@ Helm chart for the LOVE manager service. 
| autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | | autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | | autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | | autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | | env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | diff --git a/applications/love/charts/love-manager/templates/manager-hpa.yaml b/applications/love/charts/love-manager/templates/manager-hpa.yaml index f7e1b8c2ea..4202c2824d 100644 --- a/applications/love/charts/love-manager/templates/manager-hpa.yaml +++ b/applications/love/charts/love-manager/templates/manager-hpa.yaml @@ -29,4 +29,19 @@ spec: type: Utilization averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} {{- end }} + {{- if or .Values.autoscaling.scaleUpPolicy .Values.autoscaling.scaleDownPolicy }} + behavior: + {{- if .Values.autoscaling.scaleUpPolicy }} + scaleUp: + {{- with .Values.autoscaling.scaleUpPolicy }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.scaleDownPolicy }} + scaleDown: + {{- with .Values.autoscaling.scaleDownPolicy }} + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} + {{- end }} {{- end }} diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index c0ae216c4b..3e8ef3c541 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -82,6 +82,10 @@ autoscaling: targetCPUUtilizationPercentage: 80 # -- (int) The percentage of memory utilization that will trigger the scaling targetMemoryUtilizationPercentage: "" + # -- Policy for scaling up manager pods + scaleUpPolicy: {} + # -- Policy for scaling down manager pods + scaleDownPolicy: {} # -- Resource specifications for the LOVE manager pods resources: {} # -- Node selection rules for the LOVE manager pods diff --git a/environments/README.md b/environments/README.md index 81a1ca1d52..731c9b922f 100644 --- a/environments/README.md +++ b/environments/README.md @@ -49,7 +49,7 @@ | applications.semaphore | bool | `false` | Enable the semaphore application | | applications.sherlock | bool | `false` | Enable the sherlock application | | applications.siav2 | bool | `false` | Enable the siav2 application | - applications.simonyitel | bool | `false` | Enable the simonyitel control system application | +| applications.simonyitel | bool | `false` | Enable the simonyitel control system application | | applications.sqlproxy-cross-project | bool | `false` | Enable the sqlproxy-cross-project application | | applications.squarebot | bool | `false` | Enable the squarebot application | | applications.squareone | bool | `false` | Enable the squareone application | From 8db804d6146bac178572c82b62c67e6b50802725 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 13 Nov 2023 10:31:31 -0700 Subject: [PATCH 547/588] Revert back to 72 hour retention period. 
--- applications/sasquatch/values-tucson-teststand.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 31a92dc2b0..41430433f8 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -3,9 +3,7 @@ strimzi-kafka: disruption_tolerance: 1 config: auto.create.topics.enable: false - log.retention.hours: 12 - offsets.retention.minutes: 720 - log.cleaner.min.compaction.lag.ms: 43200000 + log.cleaner.min.compaction.lag.ms: 259200000 storage: storageClassName: rook-ceph-block externalListener: From b34db4291916e1891fb4c985a13dd07a7d85287b Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 16 Nov 2023 15:40:52 -0700 Subject: [PATCH 548/588] Turn off LOVE alarm sounds. --- applications/love/values-tucson-teststand.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index 70310fa82f..4b62b08b24 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -144,8 +144,8 @@ love-nginx: loveConfig: | { "alarms": { - "minSeveritySound": "serious", - "minSeverityNotification": "warning" + "minSeveritySound": "mute", + "minSeverityNotification": "mute" }, "camFeeds": { "simcam": "/love/simcam" From 960c21d7b3b4fc594bd5efd6009ffdbc1cb9c88b Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 1 Dec 2023 14:33:23 -0700 Subject: [PATCH 549/588] Fix merge issue with nublado config. 
--- applications/nublado/values-tucson-teststand.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index f9bf0805fb..b6cb138f83 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -11,7 +11,6 @@ controller: cycle: null recommended_tag: "recommended_k0001" lab: - pullSecret: "pull-secret" extraAnnotations: k8s.v1.cni.cncf.io/networks: "kube-system/dds" env: @@ -25,6 +24,12 @@ controller: LSST_SCHEMA_REGISTRY_URL: http://sasquatch-schema-registry.sasquatch:8081 PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" + pullSecret: "pull-secret" + secrets: + - secretName: "kafka-secret" + secretKey: "kafka_credentials.txt" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" initContainers: - name: "inithome" image: @@ -34,11 +39,6 @@ controller: volumeMounts: - containerPath: "/home" volumeName: "home" - secrets: - - secretName: "kafka-secret" - secretKey: "ts-salkafka-password" - - secretName: "nublado-lab-secret" - secretKey: "postgres-credentials.txt" volumes: - name: "home" source: From 6d1694696f3c79ca11175771de2c8c92e591471d Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 4 Dec 2023 16:49:09 -0700 Subject: [PATCH 550/588] Update love chart to split managers. 
--- applications/love/README.md | 145 +++++--- .../love/charts/love-manager/README.md | 145 +++++--- .../love-manager/templates/_helpers.tpl | 50 ++- .../templates/manager-deployment.yaml | 53 --- .../manager-frontend-deployment.yaml | 55 +++ .../templates/manager-frontend-hpa.yaml | 47 +++ .../templates/manager-frontend-service.yaml | 11 + .../love-manager/templates/manager-hpa.yaml | 47 --- .../manager-producers-deployment.yaml | 55 +++ .../templates/manager-producers-hpa.yaml | 47 +++ .../templates/manager-producers-service.yaml | 10 + .../templates/manager-service.yaml | 11 - .../templates/view-backup-cronjob.yaml | 30 +- .../love/charts/love-manager/values.yaml | 317 ++++++++++++------ .../love/values-tucson-teststand.yaml | 131 ++++++-- 15 files changed, 801 insertions(+), 353 deletions(-) delete mode 100644 applications/love/charts/love-manager/templates/manager-deployment.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-frontend-hpa.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-frontend-service.yaml delete mode 100644 applications/love/charts/love-manager/templates/manager-hpa.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-producers-deployment.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-producers-hpa.yaml create mode 100644 applications/love/charts/love-manager/templates/manager-producers-service.yaml delete mode 100644 applications/love/charts/love-manager/templates/manager-service.yaml diff --git a/applications/love/README.md b/applications/love/README.md index aff93c7125..e824acb45e 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -18,48 +18,104 @@ Deployment for the LSST Operators Visualization Environment | global.host | string | Set by Argo CD | Host name for ingress | | 
global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | -| love-manager.affinity | object | `{}` | Affinity rules for the LOVE manager pods | -| love-manager.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | -| love-manager.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | -| love-manager.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | -| love-manager.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | -| love-manager.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | -| love-manager.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | -| love-manager.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| love-manager.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | -| love-manager.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | -| love-manager.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | -| love-manager.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | -| love-manager.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | -| love-manager.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager | -| love-manager.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| love-manager.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager. | -| love-manager.env.DB_PORT | int | `5432` | The port for the database | -| love-manager.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager. | -| love-manager.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | -| love-manager.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | -| love-manager.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | -| love-manager.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | -| love-manager.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | -| love-manager.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | -| love-manager.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | -| love-manager.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | -| love-manager.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | | love-manager.envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | -| love-manager.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager admin user password secret key name | -| love-manager.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager cmd_user user password secret key name | -| love-manager.envSecrets.DB_PASS | string | `"db-pass"` | The database password 
secret key name. | -| love-manager.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager process connection password secret key name | -| love-manager.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | -| love-manager.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager secret secret key name | -| love-manager.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager user user password secret key name | -| love-manager.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager image | -| love-manager.image.repository | string | `"lsstts/love-manager"` | The LOVE manager image to use | -| love-manager.image.tag | string | `nil` | | -| love-manager.nodeSelector | object | `{}` | Node selection rules for the LOVE manager pods | -| love-manager.ports.container | int | `8000` | The port on the container for normal communications | -| love-manager.ports.node | int | `30000` | The port on the node for normal communcations | -| love-manager.readinessProbe | object | `{}` | Configuration for the LOVE manager pods readiness probe | +| love-manager.manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | +| love-manager.manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| love-manager.manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| love-manager.manager.frontend.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.manager.frontend.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| love-manager.manager.frontend.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| love-manager.manager.frontend.autoscaling.targetCPUUtilizationPercentage | int | `80` | 
The percentage of CPU utilization that will trigger the scaling | +| love-manager.manager.frontend.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| love-manager.manager.frontend.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| love-manager.manager.frontend.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| love-manager.manager.frontend.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| love-manager.manager.frontend.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| love-manager.manager.frontend.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| love-manager.manager.frontend.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager frontend | +| love-manager.manager.frontend.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| love-manager.manager.frontend.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager frontend | +| love-manager.manager.frontend.env.DB_PORT | int | `5432` | The port for the database service | +| love-manager.manager.frontend.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager frontend | +| love-manager.manager.frontend.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| love-manager.manager.frontend.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| love-manager.manager.frontend.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The 
URL path for the LOVE producer websocket host | +| love-manager.manager.frontend.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.manager.frontend.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| love-manager.manager.frontend.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| love-manager.manager.frontend.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| love-manager.manager.frontend.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| love-manager.manager.frontend.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| love-manager.manager.frontend.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.manager.frontend.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| love-manager.manager.frontend.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager frontend admin user password secret key name | +| love-manager.manager.frontend.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager frontend authlist_user password secret key name | +| love-manager.manager.frontend.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager frontend LDAP binding password secret key name | +| love-manager.manager.frontend.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager frontend cmd_user user password secret key name | +| love-manager.manager.frontend.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. 
Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.manager.frontend.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager frontend process connection password secret key name | +| love-manager.manager.frontend.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | +| love-manager.manager.frontend.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager frontend secret secret key name | +| love-manager.manager.frontend.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager frontend user user password secret key name | +| love-manager.manager.frontend.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| love-manager.manager.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager frontend image | +| love-manager.manager.frontend.image.repository | string | `"lsstts/love-manager"` | The LOVE manager frontend image to use | +| love-manager.manager.frontend.nodeSelector | object | `{}` | Node selection rules for the LOVE manager frontend pods | +| love-manager.manager.frontend.ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.frontend.ports.node | int | `30000` | The port on the node for normal communcations | +| love-manager.manager.frontend.readinessProbe | object | `{}` | Configuration for the LOVE manager frontend pods readiness probe | +| love-manager.manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas | +| love-manager.manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods | +| love-manager.manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods | +| 
love-manager.manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | +| love-manager.manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| love-manager.manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| love-manager.manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| love-manager.manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| love-manager.manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| love-manager.manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| love-manager.manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| love-manager.manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| love-manager.manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| love-manager.manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| love-manager.manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| love-manager.manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | +| love-manager.manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | +| love-manager.manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| love-manager.manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | +| love-manager.manager.producers.env.DB_PORT | int | `5432` | The port for the database service | +| love-manager.manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | +| love-manager.manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | +| love-manager.manager.producers.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| love-manager.manager.producers.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| love-manager.manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| love-manager.manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| love-manager.manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| love-manager.manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| love-manager.manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| love-manager.manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| love-manager.manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| love-manager.manager.producers.env.URL_SUBPATH | string | `"/love"` | The 
Kubernetes sub-path for LOVE | +| love-manager.manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | +| love-manager.manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | +| love-manager.manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | +| love-manager.manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | +| love-manager.manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| love-manager.manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | +| love-manager.manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | +| love-manager.manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | +| love-manager.manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | +| love-manager.manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| love-manager.manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | +| love-manager.manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | +| love-manager.manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | +| love-manager.manager.producers.ports.container | int | `8000` | The port on the container for normal communications | +| love-manager.manager.producers.ports.node | int | `30000` | The port on the node for normal communcations | +| love-manager.manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE manager producers pods readiness probe | +| love-manager.manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | +| love-manager.manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods | +| love-manager.manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| love-manager.namespace | string | `"love"` | The overall namespace for the application | | love-manager.redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | | love-manager.redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | | love-manager.redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis 
password secret key name | @@ -70,15 +126,14 @@ Deployment for the LSST Operators Visualization Environment | love-manager.redis.port | int | `6379` | The redis port number | | love-manager.redis.resources | object | `{}` | Resource specifications for the LOVE redis pods | | love-manager.redis.tolerations | list | `[]` | Toleration specifications for the LOVE redis pods | -| love-manager.replicas | int | `1` | Set the default number of LOVE manager pod replicas | -| love-manager.resources | object | `{}` | Resource specifications for the LOVE manager pods | -| love-manager.tolerations | list | `[]` | Toleration specifications for the LOVE manager pods | +| love-manager.secret_path | string | `"lsst.local"` | The site-specific path to find Vault secrets | | love-manager.viewBackup.affinity | object | `{}` | Affinity rules for the LOVE view backup pods | | love-manager.viewBackup.enabled | bool | `false` | Whether view backup is active | | love-manager.viewBackup.env | object | `{}` | Place to specify additional environment variables for the view backup job | +| love-manager.viewBackup.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | | love-manager.viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image | | love-manager.viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use | -| love-manager.viewBackup.image.tag | string | `nil` | The tag to use for the view backup image | +| love-manager.viewBackup.image.tag | string | `"develop"` | The tag to use for the view backup image | | love-manager.viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods | | love-manager.viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods | | love-manager.viewBackup.restartPolicy | string | `"Never"` | The restart policy type for the view backup 
cronjob | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index 2280f3d9ff..4bb92383ae 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -6,48 +6,104 @@ Helm chart for the LOVE manager service. | Key | Type | Default | Description | |-----|------|---------|-------------| -| affinity | object | `{}` | Affinity rules for the LOVE manager pods | -| autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | -| autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | -| autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | -| autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | -| autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | -| autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | -| autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | -| env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | -| env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | -| env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | -| env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | -| env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | -| env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager | -| env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | -| env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager. | -| env.DB_PORT | int | `5432` | The port for the database | -| env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager. | -| env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | -| env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | -| env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | -| env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | -| env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | -| env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | -| env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | -| env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | -| env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | | envSecretKeyName | string | `"love"` | The top-level secret key name that houses the rest of the secrets | -| envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager admin user password secret key name | -| envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager cmd_user user password secret key name | -| envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. 
| -| envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager process connection password secret key name | -| envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | -| envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager secret secret key name | -| envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager user user password secret key name | -| image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager image | -| image.repository | string | `"lsstts/love-manager"` | The LOVE manager image to use | -| image.tag | string | `nil` | | -| nodeSelector | object | `{}` | Node selection rules for the LOVE manager pods | -| ports.container | int | `8000` | The port on the container for normal communications | -| ports.node | int | `30000` | The port on the node for normal communcations | -| readinessProbe | object | `{}` | Configuration for the LOVE manager pods readiness probe | +| manager.frontend.affinity | object | `{}` | Affinity rules for the LOVE manager frontend pods | +| manager.frontend.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| manager.frontend.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| manager.frontend.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| manager.frontend.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| manager.frontend.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| manager.frontend.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| manager.frontend.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| 
manager.frontend.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| manager.frontend.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| manager.frontend.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| manager.frontend.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| manager.frontend.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| manager.frontend.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager frontend | +| manager.frontend.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| manager.frontend.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager frontend | +| manager.frontend.env.DB_PORT | int | `5432` | The port for the database service | +| manager.frontend.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager frontend | +| manager.frontend.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| manager.frontend.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| manager.frontend.env.LOVE_PRODUCER_WEBSOCKET_HOST | string | `"love-service/manager/ws/subscription"` | The URL path for the LOVE producer websocket host | +| manager.frontend.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| manager.frontend.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| manager.frontend.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| 
manager.frontend.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| manager.frontend.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| manager.frontend.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| manager.frontend.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| manager.frontend.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| manager.frontend.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager frontend admin user password secret key name | +| manager.frontend.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager frontend authlist_user password secret key name | +| manager.frontend.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager frontend LDAP binding password secret key name | +| manager.frontend.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager frontend cmd_user user password secret key name | +| manager.frontend.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| manager.frontend.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager frontend process connection password secret key name | +| manager.frontend.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. 
Must match `redis.envSecrets.REDIS_PASS` | +| manager.frontend.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager frontend secret secret key name | +| manager.frontend.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager frontend user user password secret key name | +| manager.frontend.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| manager.frontend.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager frontend image | +| manager.frontend.image.repository | string | `"lsstts/love-manager"` | The LOVE manager frontend image to use | +| manager.frontend.nodeSelector | object | `{}` | Node selection rules for the LOVE manager frontend pods | +| manager.frontend.ports.container | int | `8000` | The port on the container for normal communications | +| manager.frontend.ports.node | int | `30000` | The port on the node for normal communcations | +| manager.frontend.readinessProbe | object | `{}` | Configuration for the LOVE manager frontend pods readiness probe | +| manager.frontend.replicas | int | `1` | Set the default number of LOVE manager frontend pod replicas | +| manager.frontend.resources | object | `{}` | Resource specifications for the LOVE manager frontend pods | +| manager.frontend.tolerations | list | `[]` | Toleration specifications for the LOVE manager frontend pods | +| manager.producers.affinity | object | `{}` | Affinity rules for the LOVE manager producers pods | +| manager.producers.autoscaling.enabled | bool | `true` | Whether automatic horizontal scaling is active | +| manager.producers.autoscaling.maxReplicas | int | `100` | The allowed maximum number of replicas | +| manager.producers.autoscaling.minReplicas | int | `1` | The allowed minimum number of replicas | +| manager.producers.autoscaling.scaleDownPolicy | object | `{}` | Policy for scaling down manager pods | +| 
manager.producers.autoscaling.scaleUpPolicy | object | `{}` | Policy for scaling up manager pods | +| manager.producers.autoscaling.targetCPUUtilizationPercentage | int | `80` | The percentage of CPU utilization that will trigger the scaling | +| manager.producers.autoscaling.targetMemoryUtilizationPercentage | int | `""` | The percentage of memory utilization that will trigger the scaling | +| manager.producers.env.AUTH_LDAP_1_SERVER_URI | string | `"ldap://ipa1.lsst.local"` | Set the URI for the 1st LDAP server | +| manager.producers.env.AUTH_LDAP_2_SERVER_URI | string | `"ldap://ipa2.lsst.local"` | Set the URI for the 2nd LDAP server | +| manager.producers.env.AUTH_LDAP_3_SERVER_URI | string | `"ldap://ipa3.lsst.local"` | Set the URI for the 3rd LDAP server | +| manager.producers.env.COMMANDER_HOSTNAME | string | `"love-commander-service"` | Label for the LOVE commander service. Must match the one spcified in the LOVE commander chart | +| manager.producers.env.COMMANDER_PORT | int | `5000` | Port number for the LOVE commander service. 
Must match the one spcified in the LOVE commander chart | +| manager.producers.env.DB_ENGINE | string | `"postgresql"` | The type of database engine being used for the LOVE manager producers | +| manager.producers.env.DB_HOST | string | `"love-manager-database-service"` | The name of the database service | +| manager.producers.env.DB_NAME | string | `"love"` | The name of the database being used for the LOVE manager producers | +| manager.producers.env.DB_PORT | int | `5432` | The port for the database service | +| manager.producers.env.DB_USER | string | `"love"` | The database user needed for access from the LOVE manager producers | +| manager.producers.env.HEARTBEAT_QUERY_COMMANDER | bool | `false` | Have the LOVE producer managers not query commander | +| manager.producers.env.JIRA_API_HOSTNAME | string | `"jira.lsstcorp.org"` | Set the hostname for the Jira instance | +| manager.producers.env.JIRA_PROJECT_ID | int | `14601` | Set the Jira project ID | +| manager.producers.env.LOVE_SITE | string | `"local"` | The site tag where LOVE is being run | +| manager.producers.env.OLE_API_HOSTNAME | string | `"site.lsst.local"` | Set the URL for the OLE instance | +| manager.producers.env.REDIS_CONFIG_CAPACITY | int | `5000` | The connection capacity for the redis service | +| manager.producers.env.REDIS_CONFIG_EXPIRY | int | `5` | The expiration time for the redis service | +| manager.producers.env.REDIS_HOST | string | `"love-manager-redis-service"` | The name of the redis service | +| manager.producers.env.REMOTE_STORAGE | bool | `true` | Set the manager to use LFA storage | +| manager.producers.env.SERVER_URL | string | `"love.lsst.local"` | The external URL from the NGINX server for LOVE | +| manager.producers.env.URL_SUBPATH | string | `"/love"` | The Kubernetes sub-path for LOVE | +| manager.producers.envSecrets.ADMIN_USER_PASS | string | `"admin-user-pass"` | The LOVE manager producers admin user password secret key name | +| 
manager.producers.envSecrets.AUTHLIST_USER_PASS | string | `"authlist-user-pass"` | The LOVE manager producers authlist_user password secret key name | +| manager.producers.envSecrets.AUTH_LDAP_BIND_PASSWORD | string | `"auth-ldap-bind-password"` | The LOVE manager producers LDAP binding password secret key name | +| manager.producers.envSecrets.CMD_USER_PASS | string | `"cmd-user-pass"` | The LOVE manager producers cmd_user user password secret key name | +| manager.producers.envSecrets.DB_PASS | string | `"db-pass"` | The database password secret key name. Must match `database.envSecrets.POSTGRES_PASSWORD` | +| manager.producers.envSecrets.PROCESS_CONNECTION_PASS | string | `"process-connection-pass"` | The LOVE manager producers process connection password secret key name | +| manager.producers.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name. Must match `redis.envSecrets.REDIS_PASS` | +| manager.producers.envSecrets.SECRET_KEY | string | `"manager-secret-key"` | The LOVE manager producers secret secret key name | +| manager.producers.envSecrets.USER_USER_PASS | string | `"user-user-pass"` | The LOVE manager producers user user password secret key name | +| manager.producers.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | +| manager.producers.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the LOVE manager producers image | +| manager.producers.image.repository | string | `"lsstts/love-manager"` | The LOVE manager producers image to use | +| manager.producers.nodeSelector | object | `{}` | Node selection rules for the LOVE manager producers pods | +| manager.producers.ports.container | int | `8000` | The port on the container for normal communications | +| manager.producers.ports.node | int | `30000` | The port on the node for normal communcations | +| manager.producers.readinessProbe | object | `{}` | Configuration for the LOVE 
manager producers pods readiness probe | +| manager.producers.replicas | int | `1` | Set the default number of LOVE manager producers pod replicas | +| manager.producers.resources | object | `{}` | Resource specifications for the LOVE manager producers pods | +| manager.producers.tolerations | list | `[]` | Toleration specifications for the LOVE manager producers pods | +| namespace | string | `"love"` | The overall namespace for the application | | redis.affinity | object | `{}` | Affinity rules for the LOVE redis pods | | redis.config | string | `"timeout 60\n"` | Configuration specification for the redis service | | redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name | @@ -58,15 +114,14 @@ Helm chart for the LOVE manager service. | redis.port | int | `6379` | The redis port number | | redis.resources | object | `{}` | Resource specifications for the LOVE redis pods | | redis.tolerations | list | `[]` | Toleration specifications for the LOVE redis pods | -| replicas | int | `1` | Set the default number of LOVE manager pod replicas | -| resources | object | `{}` | Resource specifications for the LOVE manager pods | -| tolerations | list | `[]` | Toleration specifications for the LOVE manager pods | +| secret_path | string | `"lsst.local"` | The site-specific path to find Vault secrets | | viewBackup.affinity | object | `{}` | Affinity rules for the LOVE view backup pods | | viewBackup.enabled | bool | `false` | Whether view backup is active | | viewBackup.env | object | `{}` | Place to specify additional environment variables for the view backup job | +| viewBackup.image.nexus3 | string | `""` | The tag name for the Nexus3 Docker repository secrets if private images need to be pulled | | viewBackup.image.pullPolicy | string | `"IfNotPresent"` | The pull policy to use for the view backup image | | viewBackup.image.repository | string | `"lsstts/love-view-backup"` | The view backup image to use | -| viewBackup.image.tag | 
string | `nil` | The tag to use for the view backup image | +| viewBackup.image.tag | string | `"develop"` | The tag to use for the view backup image | | viewBackup.nodeSelector | object | `{}` | Node selection rules for the LOVE view backup pods | | viewBackup.resources | object | `{}` | Resource specifications for the LOVE view backup pods | | viewBackup.restartPolicy | string | `"Never"` | The restart policy type for the view backup cronjob | diff --git a/applications/love/charts/love-manager/templates/_helpers.tpl b/applications/love/charts/love-manager/templates/_helpers.tpl index e5165d7e20..df3787c60b 100644 --- a/applications/love/charts/love-manager/templates/_helpers.tpl +++ b/applications/love/charts/love-manager/templates/_helpers.tpl @@ -23,6 +23,20 @@ If release name contains chart name it will be used as a full name. {{- end }} {{- end }} +{{/* +Manager frontend fullname +*/}} +{{- define "love-manager-frontend.fullname" -}} +{{ include "love-manager.fullname" . }}-frontend +{{- end }} + +{{/* +Manager producers fullname +*/}} +{{- define "love-manager-producers.fullname" -}} +{{ include "love-manager.fullname" . }}-producers +{{- end }} + {{/* Create chart name and version as used by the chart label. */}} @@ -39,11 +53,43 @@ helm.sh/chart: {{ include "love-manager.chart" . }} {{- end }} {{/* -Selector labels +Manager Frontend Common labels +*/}} +{{- define "love-manager-frontend.labels" -}} +helm.sh/chart: {{ include "love-manager.chart" . }} +{{ include "love-manager-frontend.selectorLabels" . }} +{{- end }} + +{{/* +Manager Producers Common labels +*/}} +{{- define "love-manager-producers.labels" -}} +helm.sh/chart: {{ include "love-manager.chart" . }} +{{ include "love-manager-producers.selectorLabels" . }} +{{- end }} + +{{/* +Common Selector labels */}} {{- define "love-manager.selectorLabels" -}} app.kubernetes.io/name: {{ include "love-manager.name" . }} -app.kubernetes.io/instance: {{ include "love-manager.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Manager Frontend Selector labels +*/}} +{{- define "love-manager-frontend.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-manager.name" . }} +app.kubernetes.io/instance: {{ include "love-manager.name" . }}-frontend +{{- end }} + +{{/* +Manager Producers Selector labels +*/}} +{{- define "love-manager-producers.selectorLabels" -}} +app.kubernetes.io/name: {{ include "love-manager.name" . }} +app.kubernetes.io/instance: {{ include "love-manager.name" . }}-producers {{- end }} {{/* diff --git a/applications/love/charts/love-manager/templates/manager-deployment.yaml b/applications/love/charts/love-manager/templates/manager-deployment.yaml deleted file mode 100644 index f48f0591f3..0000000000 --- a/applications/love/charts/love-manager/templates/manager-deployment.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "love-manager.fullname" . }} - namespace: {{ $.Values.global.controlSystemAppNamespace }} - labels: - {{- include "love-manager.labels" . | nindent 4 }} -spec: - selector: - matchLabels: - {{- include "love-manager.selectorLabels" . | nindent 6 }} - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicas }} - {{- end }} - template: - metadata: - labels: - {{- include "love-manager.selectorLabels" . | nindent 8 }} - spec: - containers: - - name: {{ include "love-manager.fullname" . 
}} - {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystemImageTag }} - image: "{{ .Values.image.repository }}:{{ $imageTag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - containerPort: {{ .Values.ports.container }} - env: - {{- $data := dict "env" .Values.env "secretName" "" }} - {{- include "helpers.envFromList" $data | indent 10 }} - {{- $data := dict "secretName" .Values.envSecretKeyName "env" .Values.envSecrets }} - {{- include "helpers.envFromList" $data | indent 10 }} - {{- with $.Values.resources }} - resources: - {{- toYaml $.Values.resources | nindent 10 }} - {{- end }} - {{- with $.Values.readinessProbe }} - readinessProbe: - {{- toYaml $.Values.readinessProbe | nindent 10 }} - {{- end }} - imagePullSecrets: - - name: nexus3-docker - {{- with $.Values.nodeSelector }} - nodeSelector: - {{- toYaml $ | nindent 8 }} - {{- end }} - {{- with $.Values.affinity }} - affinity: - {{- toYaml $ | nindent 8 }} - {{- end }} - {{- with $.Values.tolerations }} - tolerations: - {{- toYaml $ | nindent 8 }} - {{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml b/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml new file mode 100644 index 0000000000..cb07fa7208 --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "love-manager-frontend.fullname" . }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} + labels: + {{- include "love-manager-frontend.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "love-manager-frontend.selectorLabels" . | nindent 6 }} + {{- if not .Values.manager.frontend.autoscaling.enabled }} + replicas: {{ .Values.manager.frontend.replicas }} + {{- end }} + template: + metadata: + labels: + {{- include "love-manager-frontend.selectorLabels" . 
| nindent 8 }} + spec: + containers: + - name: {{ include "love-manager-frontend.fullname" . }} + {{- $imageTag := .Values.manager.frontend.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ .Values.manager.frontend.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ .Values.manager.frontend.image.pullPolicy }} + ports: + - containerPort: {{ .Values.manager.frontend.ports.container }} + env: + {{- $data := dict "env" .Values.manager.frontend.env "secretName" "" }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- if .Values.manager.frontend.envSecrets }} + {{- $data := dict "secretName" .Values.envSecretKeyName "env" .Values.manager.frontend.envSecrets }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- end }} + {{- with $.Values.manager.frontend.resources }} + resources: + {{- toYaml $.Values.manager.frontend.resources | nindent 10 }} + {{- end }} + {{- with $.Values.manager.frontend.readinessProbe }} + readinessProbe: + {{- toYaml $.Values.manager.frontend.readinessProbe | nindent 10 }} + {{- end }} + imagePullSecrets: + - name: nexus3-docker + {{- with $.Values.manager.frontend.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.manager.frontend.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.manager.frontend.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-frontend-hpa.yaml b/applications/love/charts/love-manager/templates/manager-frontend-hpa.yaml new file mode 100644 index 0000000000..12153a2d64 --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-frontend-hpa.yaml @@ -0,0 +1,47 @@ +{{- if .Values.manager.frontend.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "love-manager-frontend.fullname" . 
}} + labels: + {{- include "love-manager-frontend.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "love-manager-frontend.fullname" . }} + minReplicas: {{ .Values.manager.frontend.autoscaling.minReplicas }} + maxReplicas: {{ .Values.manager.frontend.autoscaling.maxReplicas }} + metrics: + {{- if .Values.manager.frontend.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.manager.frontend.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.manager.frontend.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.manager.frontend.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- if or .Values.manager.frontend.autoscaling.scaleUpPolicy .Values.manager.frontend.autoscaling.scaleDownPolicy }} + behavior: + {{- if .Values.manager.frontend.autoscaling.scaleUpPolicy }} + scaleUp: + {{- with .Values.manager.frontend.autoscaling.scaleUpPolicy }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + {{- if .Values.manager.frontend.autoscaling.scaleDownPolicy }} + scaleDown: + {{- with .Values.manager.frontend.autoscaling.scaleDownPolicy }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-frontend-service.yaml b/applications/love/charts/love-manager/templates/manager-frontend-service.yaml new file mode 100644 index 0000000000..6d9e2028f5 --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-frontend-service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "love-manager-frontend.fullname" . 
}}-service + namespace: {{ .Values.namespace }} +spec: + selector: + app.kubernetes.io/instance: {{ include "love-manager-frontend.fullname" . }} + ports: + - port: {{ .Values.manager.frontend.ports.container }} + diff --git a/applications/love/charts/love-manager/templates/manager-hpa.yaml b/applications/love/charts/love-manager/templates/manager-hpa.yaml deleted file mode 100644 index 4202c2824d..0000000000 --- a/applications/love/charts/love-manager/templates/manager-hpa.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "love-manager.fullname" . }} - labels: - {{- include "love-manager.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "love-manager.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} - {{- if or .Values.autoscaling.scaleUpPolicy .Values.autoscaling.scaleDownPolicy }} - behavior: - {{- if .Values.autoscaling.scaleUpPolicy }} - scaleUp: - {{- with .Values.autoscaling.scaleUpPolicy }} - {{- toYaml . | nindent 6 }} - {{- end }} - {{- end }} - {{- if .Values.autoscaling.scaleDownPolicy }} - scaleDown: - {{- with .Values.autoscaling.scaleDownPolicy }} - {{- toYaml . 
| nindent 6 }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml new file mode 100644 index 0000000000..d3a7990210 --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "love-manager-producers.fullname" . }} + namespace: {{ $.Values.global.controlSystemAppNamespace }} + labels: + {{- include "love-manager-producers.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "love-manager-producers.selectorLabels" . | nindent 6 }} + {{- if not .Values.manager.producers.autoscaling.enabled }} + replicas: {{ .Values.manager.producers.replicas }} + {{- end }} + template: + metadata: + labels: + {{- include "love-manager-producers.selectorLabels" . | nindent 8 }} + spec: + containers: + - name: {{ include "love-manager-producers.fullname" . 
}} + {{- $imageTag := .Values.manager.producers.image.tag | default $.Values.global.controlSystemImageTag }} + image: "{{ .Values.manager.producers.image.repository }}:{{ $imageTag }}" + imagePullPolicy: {{ .Values.manager.producers.image.pullPolicy }} + ports: + - containerPort: {{ .Values.manager.producers.ports.container }} + env: + {{- $data := dict "env" .Values.manager.producers.env "secretName" "" }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- if .Values.manager.producers.envSecrets }} + {{- $data := dict "secretName" .Values.envSecretKeyName "env" .Values.manager.producers.envSecrets }} + {{- include "helpers.envFromList" $data | indent 10 }} + {{- end }} + {{- with $.Values.manager.producers.resources }} + resources: + {{- toYaml $.Values.manager.producers.resources | nindent 10 }} + {{- end }} + {{- with $.Values.manager.producers.readinessProbe }} + readinessProbe: + {{- toYaml $.Values.manager.producers.readinessProbe | nindent 10 }} + {{- end }} + imagePullSecrets: + - name: nexus3-docker + {{- with $.Values.manager.producers.nodeSelector }} + nodeSelector: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.manager.producers.affinity }} + affinity: + {{- toYaml $ | nindent 8 }} + {{- end }} + {{- with $.Values.manager.producers.tolerations }} + tolerations: + {{- toYaml $ | nindent 8 }} + {{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml new file mode 100644 index 0000000000..a44422835b --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-producers-hpa.yaml @@ -0,0 +1,47 @@ +{{- if .Values.manager.producers.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "love-manager-producers.fullname" . }} + labels: + {{- include "love-manager-producers.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "love-manager-producers.fullname" . }} + minReplicas: {{ .Values.manager.producers.autoscaling.minReplicas }} + maxReplicas: {{ .Values.manager.producers.autoscaling.maxReplicas }} + metrics: + {{- if .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.manager.producers.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.manager.producers.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- if or .Values.manager.producers.autoscaling.scaleUpPolicy .Values.manager.producers.autoscaling.scaleDownPolicy }} + behavior: + {{- if .Values.manager.producers.autoscaling.scaleUpPolicy }} + scaleUp: + {{- with .Values.manager.producers.autoscaling.scaleUpPolicy }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + {{- if .Values.manager.producers.autoscaling.scaleDownPolicy }} + scaleDown: + {{- with .Values.manager.producers.autoscaling.scaleDownPolicy }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/applications/love/charts/love-manager/templates/manager-producers-service.yaml b/applications/love/charts/love-manager/templates/manager-producers-service.yaml new file mode 100644 index 0000000000..bf90a53f9b --- /dev/null +++ b/applications/love/charts/love-manager/templates/manager-producers-service.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "love-manager-producers.fullname" . }}-service + namespace: {{ .Values.namespace }} +spec: + selector: + app.kubernetes.io/instance: {{ include "love-manager-producers.fullname" . 
}} + ports: + - port: {{ .Values.manager.producers.ports.container }} diff --git a/applications/love/charts/love-manager/templates/manager-service.yaml b/applications/love/charts/love-manager/templates/manager-service.yaml deleted file mode 100644 index 0f9fc95e34..0000000000 --- a/applications/love/charts/love-manager/templates/manager-service.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "love-manager.fullname" . }}-service - namespace: {{ .Values.namespace }} -spec: - selector: - app.kubernetes.io/instance: {{ include "love-manager.fullname" . }} - ports: - - port: {{ .Values.ports.container }} - diff --git a/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml index c3e6b53051..e153bcdd70 100644 --- a/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml +++ b/applications/love/charts/love-manager/templates/view-backup-cronjob.yaml @@ -3,7 +3,7 @@ apiVersion: batch/v1 kind: CronJob metadata: name: {{ include "love-manager.view-backup.fullname" . }} - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ .Values.namespace }} labels: {{- include "love-manager.view-backup.labels" . | nindent 4 }} spec: @@ -25,23 +25,19 @@ spec: spec: containers: - name: {{ include "love-manager.view-backup.fullname" . 
}} - {{- $imageTag := .Values.viewBackup.image.tag | default $.Values.global.controlSystemImageTag }} - image: "{{ .Values.viewBackup.image.repository }}:{{ $imageTag }}" + image: "{{ .Values.viewBackup.image.repository }}:{{ .Values.viewBackup.image.tag }}" imagePullPolicy: {{ .Values.viewBackup.image.pullPolicy }} - envFrom: - - configMapRef: - name: csc-env-config env: - name: PGHOST - value: {{ .Values.env.DB_HOST | quote }} + value: {{ .Values.manager.frontend.env.DB_HOST | quote }} - name: PGPORT - value: {{ .Values.env.DB_PORT | quote }} + value: {{ .Values.manager.frontend.env.DB_PORT | quote }} - name: PGDATABASE - value: {{ .Values.env.DB_NAME | quote }} + value: {{ .Values.manager.frontend.env.DB_NAME | quote }} - name: PGUSER - value: {{ .Values.env.DB_USER | quote }} + value: {{ .Values.manager.frontend.env.DB_USER | quote }} - name: LOVE_SITE - value: {{ .Values.env.LOVE_SITE | quote }} + value: {{ .Values.manager.frontend.env.LOVE_SITE | quote }} {{- range $env_var, $env_value := .Values.viewBackup.env }} - name: {{ $env_var }} value: {{ $env_value | quote }} @@ -49,25 +45,27 @@ spec: - name: PGPASSWORD valueFrom: secretKeyRef: - name: love-secrets - key: {{ .Values.envSecrets.DB_PASS }} + name: {{ .Values.namespace }}-secrets + key: {{ .Values.manager.frontend.envSecrets.DB_PASS }} - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: - name: lfa + name: {{ .Values.namespace }}-lfa key: aws-access-key-id - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: - name: lfa + name: {{ .Values.namespace }}-lfa key: aws-secret-access-key {{- with $.Values.viewBackup.resources }} resources: {{- toYaml $.Values.viewBackup.resources | nindent 16 }} {{- end }} restartPolicy: {{ .Values.viewBackup.restartPolicy }} + {{- if $.Values.viewBackup.image.nexus3 }} imagePullSecrets: - - name: nexus3-docker + - name: {{ $.Values.namespace }}-{{ $.Values.viewBackup.image.nexus3 }} + {{- end }} {{- with $.Values.nodeSelector }} nodeSelector: {{- toYaml $ | nindent 12 }} 
diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index 3e8ef3c541..2b6040b4c9 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -1,101 +1,222 @@ -image: - # -- The LOVE manager image to use - repository: lsstts/love-manager - # str -- The tag to use for the LOVE manager image - tag: - # -- The pull policy on the LOVE manager image - pullPolicy: IfNotPresent -ports: - # -- The port on the container for normal communications - container: 8000 - # -- The port on the node for normal communcations - node: 30000 +# -- The overall namespace for the application +namespace: love +# -- The site-specific path to find Vault secrets +secret_path: lsst.local # -- The top-level secret key name that houses the rest of the secrets envSecretKeyName: love -env: - # -- The site tag where LOVE is being run - LOVE_SITE: local - # -- The external URL from the NGINX server for LOVE - SERVER_URL: love.lsst.local - # -- The Kubernetes sub-path for LOVE - URL_SUBPATH: /love - # -- Set the manager to use LFA storage - REMOTE_STORAGE: true - # -- Set the hostname for the Jira instance - JIRA_API_HOSTNAME: jira.lsstcorp.org - # -- Set the Jira project ID - JIRA_PROJECT_ID: 14601 - # -- Set the URL for the OLE instance - OLE_API_HOSTNAME: site.lsst.local - # -- Set the URI for the 1st LDAP server - AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local - # -- Set the URI for the 2nd LDAP server - AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local - # -- Set the URI for the 3rd LDAP server - AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local - # -- The URL path for the LOVE producer websocket host - LOVE_PRODUCER_WEBSOCKET_HOST: love-service/manager/ws/subscription - # -- Label for the LOVE commander service. - # Must match the one spcified in the LOVE commander chart - COMMANDER_HOSTNAME: love-commander-service - # -- Port number for the LOVE commander service. 
- # Must match the one spcified in the LOVE commander chart - COMMANDER_PORT: 5000 - # -- The type of database engine being used for the LOVE manager - DB_ENGINE: postgresql - # -- The name of the database being used for the LOVE manager. - DB_NAME: love - # -- The database user needed for access from the LOVE manager. - DB_USER: love - # -- The name of the database service - DB_HOST: love-manager-database-service - # -- The port for the database - DB_PORT: 5432 - # -- The name of the redis service - REDIS_HOST: love-manager-redis-service -envSecrets: - # -- The LOVE manager secret secret key name - SECRET_KEY: manager-secret-key - # -- The LOVE manager process connection password secret key name - PROCESS_CONNECTION_PASS: process-connection-pass - # -- The LOVE manager admin user password secret key name - ADMIN_USER_PASS: admin-user-pass - # -- The LOVE manager user user password secret key name - USER_USER_PASS: user-user-pass - # -- The LOVE manager cmd_user user password secret key name - CMD_USER_PASS: cmd-user-pass - # -- The database password secret key name. - DB_PASS: db-pass - # -- The redis password secret key name. 
- # Must match `redis.envSecrets.REDIS_PASS` - REDIS_PASS: redis-pass -# -- Set the default number of LOVE manager pod replicas -replicas: 1 -autoscaling: - # -- Whether automatic horizontal scaling is active - enabled: true - # -- The allowed minimum number of replicas - minReplicas: 1 - # -- The allowed maximum number of replicas - maxReplicas: 100 - # -- The percentage of CPU utilization that will trigger the scaling - targetCPUUtilizationPercentage: 80 - # -- (int) The percentage of memory utilization that will trigger the scaling - targetMemoryUtilizationPercentage: "" - # -- Policy for scaling up manager pods - scaleUpPolicy: {} - # -- Policy for scaling down manager pods - scaleDownPolicy: {} -# -- Resource specifications for the LOVE manager pods -resources: {} -# -- Node selection rules for the LOVE manager pods -nodeSelector: {} -# -- Toleration specifications for the LOVE manager pods -tolerations: [] -# -- Affinity rules for the LOVE manager pods -affinity: {} -# -- Configuration for the LOVE manager pods readiness probe -readinessProbe: {} +manager: + frontend: + image: + # -- The LOVE manager frontend image to use + repository: lsstts/love-manager + # -- The pull policy on the LOVE manager frontend image + pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" + ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 + env: + # -- The site tag where LOVE is being run + LOVE_SITE: local + # -- The external URL from the NGINX server for LOVE + SERVER_URL: love.lsst.local + # -- The Kubernetes sub-path for LOVE + URL_SUBPATH: /love + # -- Set the manager to use LFA storage + REMOTE_STORAGE: true + # -- Set the hostname for the Jira instance + JIRA_API_HOSTNAME: jira.lsstcorp.org + # -- Set the Jira project ID + JIRA_PROJECT_ID: 14601 + # -- Set the URL for the OLE instance + 
OLE_API_HOSTNAME: site.lsst.local + # -- Set the URI for the 1st LDAP server + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local + # -- Set the URI for the 2nd LDAP server + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local + # -- Set the URI for the 3rd LDAP server + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local + # -- The URL path for the LOVE producer websocket host + LOVE_PRODUCER_WEBSOCKET_HOST: love-service/manager/ws/subscription + # -- Label for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_HOSTNAME: love-commander-service + # -- Port number for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_PORT: 5000 + # -- The type of database engine being used for the LOVE manager frontend + DB_ENGINE: postgresql + # -- The name of the database being used for the LOVE manager frontend + DB_NAME: love + # -- The database user needed for access from the LOVE manager frontend + DB_USER: love + # -- The name of the database service + DB_HOST: love-manager-database-service + # -- The port for the database service + DB_PORT: 5432 + # -- The name of the redis service + REDIS_HOST: love-manager-redis-service + # -- The expiration time for the redis service + REDIS_CONFIG_EXPIRY: 5 + # -- The connection capacity for the redis service + REDIS_CONFIG_CAPACITY: 5000 + envSecrets: + # -- The LOVE manager frontend secret secret key name + SECRET_KEY: manager-secret-key + # -- The LOVE manager frontend process connection password secret key name + PROCESS_CONNECTION_PASS: process-connection-pass + # -- The LOVE manager frontend admin user password secret key name + ADMIN_USER_PASS: admin-user-pass + # -- The LOVE manager frontend user user password secret key name + USER_USER_PASS: user-user-pass + # -- The LOVE manager frontend cmd_user user password secret key name + CMD_USER_PASS: cmd-user-pass + # -- The LOVE manager frontend authlist_user password secret key name + 
AUTHLIST_USER_PASS: authlist-user-pass + # -- The LOVE manager frontend LDAP binding password secret key name + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + # -- The database password secret key name. + # Must match `database.envSecrets.POSTGRES_PASSWORD` + DB_PASS: db-pass + # -- The redis password secret key name. + # Must match `redis.envSecrets.REDIS_PASS` + REDIS_PASS: redis-pass + # -- Set the default number of LOVE manager frontend pod replicas + replicas: 1 + autoscaling: + # -- Whether automatic horizontal scaling is active + enabled: true + # -- The allowed minimum number of replicas + minReplicas: 1 + # -- The allowed maximum number of replicas + maxReplicas: 100 + # -- The percentage of CPU utilization that will trigger the scaling + targetCPUUtilizationPercentage: 80 + # -- (int) The percentage of memory utilization that will trigger the scaling + targetMemoryUtilizationPercentage: "" + # -- Policy for scaling up manager pods + scaleUpPolicy: {} + # -- Policy for scaling down manager pods + scaleDownPolicy: {} + # -- Resource specifications for the LOVE manager frontend pods + resources: {} + # -- Node selection rules for the LOVE manager frontend pods + nodeSelector: {} + # -- Toleration specifications for the LOVE manager frontend pods + tolerations: [] + # -- Affinity rules for the LOVE manager frontend pods + affinity: {} + # -- Configuration for the LOVE manager frontend pods readiness probe + readinessProbe: {} + producers: + image: + # -- The LOVE manager producers image to use + repository: lsstts/love-manager + # -- The pull policy on the LOVE manager producers image + pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" + ports: + # -- The port on the container for normal communications + container: 8000 + # -- The port on the node for normal communcations + node: 30000 + env: + # -- The site tag where LOVE is being run + LOVE_SITE: local + # -- The 
external URL from the NGINX server for LOVE + SERVER_URL: love.lsst.local + # -- The Kubernetes sub-path for LOVE + URL_SUBPATH: /love + # -- Set the manager to use LFA storage + REMOTE_STORAGE: true + # -- Set the hostname for the Jira instance + JIRA_API_HOSTNAME: jira.lsstcorp.org + # -- Set the Jira project ID + JIRA_PROJECT_ID: 14601 + # -- Set the URL for the OLE instance + OLE_API_HOSTNAME: site.lsst.local + # -- Set the URI for the 1st LDAP server + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.lsst.local + # -- Set the URI for the 2nd LDAP server + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.lsst.local + # -- Set the URI for the 3rd LDAP server + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.lsst.local + # -- Have the LOVE producer managers not query commander + HEARTBEAT_QUERY_COMMANDER: false + # -- Label for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_HOSTNAME: love-commander-service + # -- Port number for the LOVE commander service. + # Must match the one spcified in the LOVE commander chart + COMMANDER_PORT: 5000 + # -- The type of database engine being used for the LOVE manager producers + DB_ENGINE: postgresql + # -- The name of the database being used for the LOVE manager producers + DB_NAME: love + # -- The database user needed for access from the LOVE manager producers + DB_USER: love + # -- The name of the database service + DB_HOST: love-manager-database-service + # -- The port for the database service + DB_PORT: 5432 + # -- The name of the redis service + REDIS_HOST: love-manager-redis-service + # -- The expiration time for the redis service + REDIS_CONFIG_EXPIRY: 5 + # -- The connection capacity for the redis service + REDIS_CONFIG_CAPACITY: 5000 + envSecrets: + # -- The LOVE manager producers secret secret key name + SECRET_KEY: manager-secret-key + # -- The LOVE manager producers process connection password secret key name + PROCESS_CONNECTION_PASS: process-connection-pass + # -- The LOVE manager producers admin 
user password secret key name + ADMIN_USER_PASS: admin-user-pass + # -- The LOVE manager producers user user password secret key name + USER_USER_PASS: user-user-pass + # -- The LOVE manager producers cmd_user user password secret key name + CMD_USER_PASS: cmd-user-pass + # -- The LOVE manager producers authlist_user password secret key name + AUTHLIST_USER_PASS: authlist-user-pass + # -- The LOVE manager producers LDAP binding password secret key name + AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password + # -- The database password secret key name. + # Must match `database.envSecrets.POSTGRES_PASSWORD` + DB_PASS: db-pass + # -- The redis password secret key name. + # Must match `redis.envSecrets.REDIS_PASS` + REDIS_PASS: redis-pass + # -- Set the default number of LOVE manager producers pod replicas + replicas: 1 + autoscaling: + # -- Whether automatic horizontal scaling is active + enabled: true + # -- The allowed minimum number of replicas + minReplicas: 1 + # -- The allowed maximum number of replicas + maxReplicas: 100 + # -- The percentage of CPU utilization that will trigger the scaling + targetCPUUtilizationPercentage: 80 + # -- (int) The percentage of memory utilization that will trigger the scaling + targetMemoryUtilizationPercentage: "" + # -- Policy for scaling up manager pods + scaleUpPolicy: {} + # -- Policy for scaling down manager pods + scaleDownPolicy: {} + # -- Resource specifications for the LOVE manager producers pods + resources: {} + # -- Node selection rules for the LOVE manager producers pods + nodeSelector: {} + # -- Toleration specifications for the LOVE manager producers pods + tolerations: [] + # -- Affinity rules for the LOVE manager producers pods + affinity: {} + # -- Configuration for the LOVE manager producers pods readiness probe + readinessProbe: {} redis: image: # -- The redis image to use @@ -127,9 +248,11 @@ viewBackup: # -- The view backup image to use repository: lsstts/love-view-backup # -- The tag to use for the view backup 
image - tag: + tag: develop # -- The pull policy to use for the view backup image pullPolicy: IfNotPresent + # -- The tag name for the Nexus3 Docker repository secrets if private images need to be pulled + nexus3: "" # -- Place to specify additional environment variables for the view backup job env: {} # -- The view backup job schedule in cron format diff --git a/applications/love/values-tucson-teststand.yaml b/applications/love/values-tucson-teststand.yaml index 4b62b08b24..858f7a6e3a 100644 --- a/applications/love/values-tucson-teststand.yaml +++ b/applications/love/values-tucson-teststand.yaml @@ -33,40 +33,83 @@ love-commander: type: ClusterIP love-manager: - image: - repository: ts-dockerhub.lsst.org/love-manager - pullPolicy: Always - env: - SERVER_URL: tucson-teststand.lsst.codes - OLE_API_HOSTNAME: tucson-teststand.lsst.codes - AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org - AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org - AUTH_LDAP_3_SERVER_URI: ldap://ipa3.tu.lsst.org - DB_HOST: postgresdb01.tu.lsst.org - REDIS_CONFIG_EXPIRY: 5 - REDIS_CONFIG_CAPACITY: 5000 - LOVE_SITE: tucson - LOVE_PRODUCER_WEBSOCKET_HOST: love-nginx-service/love/manager/ws/subscription - envSecrets: - AUTHLIST_USER_PASS: authlist-user-pass - AUTH_LDAP_BIND_PASSWORD: auth-ldap-bind-password - autoscaling: - enabled: true - minReplicas: 2 - maxReplicas: 25 - targetCPUUtilizationPercentage: 75 - resources: - requests: - cpu: 250m - memory: 500Mi - limits: - cpu: 750m - memory: 1000Mi - readinessProbe: - tcpSocket: - port: 8000 - initialDelaySeconds: 20 - periodSeconds: 10 + manager: + frontend: + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + SERVER_URL: tucson-teststand.lsst.codes + OLE_API_HOSTNAME: tucson-teststand.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.tu.lsst.org + DB_HOST: postgresdb01.tu.lsst.org + LOVE_SITE: tucson + autoscaling: + 
enabled: true + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 + producers: + image: + repository: ts-dockerhub.lsst.org/love-manager + pullPolicy: Always + env: + SERVER_URL: tucson-teststand.lsst.codes + OLE_API_HOSTNAME: tucson-teststand.lsst.codes + AUTH_LDAP_1_SERVER_URI: ldap://ipa1.tu.lsst.org + AUTH_LDAP_2_SERVER_URI: ldap://ipa2.tu.lsst.org + AUTH_LDAP_3_SERVER_URI: ldap://ipa3.tu.lsst.org + DB_HOST: postgresdb01.tu.lsst.org + LOVE_SITE: tucson + autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 25 + targetCPUUtilizationPercentage: 50 + scaleDownPolicy: + policies: + - type: Pods + value: 2 + periodSeconds: 120 + - type: Percent + value: 10 + periodSeconds: 120 + selectPolicy: Min + resources: + requests: + cpu: 150m + memory: 200Mi + limits: + cpu: 1000m + memory: 1500Mi + readinessProbe: + tcpSocket: + port: 8000 + initialDelaySeconds: 20 + periodSeconds: 10 redis: image: repository: redis @@ -120,7 +163,16 @@ love-nginx: try_files $uri$args $uri$args/ $uri/ /love/index.html; } location /love/manager { - proxy_pass http://love-manager-service:8000; + client_max_body_size 5M; + proxy_pass http://love-manager-frontend-service:8000; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header Host $host; + proxy_redirect off; + } + location /love/manager/producers { + proxy_pass http://love-manager-producers-service:8000; proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; @@ -151,7 +203,12 @@ love-nginx: "simcam": "/love/simcam" }, "efd": { - "defaultEfdInstance": 
"tucson_teststand_efd" + "defaultEfdInstance": "tucson_teststand_efd", + "urlStatus": "https://tucson-teststand.lsst.codes/influxdb/health" + }, + "sal": { + "urlStatus": "https://tucson-teststand.lsst.codes/sasquatch-rest-proxy/brokers", + "expectedBrokerList": [0, 1, 2] } } @@ -160,7 +217,7 @@ love-producer: repository: ts-dockerhub.lsst.org/love-producer pullPolicy: Always env: - WEBSOCKET_HOST: love-nginx-service/love/manager/ws/subscription + WEBSOCKET_HOST: love-nginx-service/love/manager/producers/ws/subscription resources: requests: cpu: 10m From ed252f72b0f5858fb7eb8c6484fece2f6392af59 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 6 Dec 2023 11:32:33 -0700 Subject: [PATCH 551/588] Add metrics for kafka and zookeeper. --- applications/sasquatch/README.md | 2 + .../sasquatch/charts/strimzi-kafka/README.md | 2 + .../templates/kafka-metrics-configmap.yaml | 154 ++++++++++++++++++ .../charts/strimzi-kafka/templates/kafka.yaml | 16 ++ .../zookeeper-metrics-configmap.yaml | 40 +++++ .../charts/strimzi-kafka/values.yaml | 8 + .../sasquatch/values-tucson-teststand.yaml | 4 + 7 files changed, 226 insertions(+) create mode 100644 applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml create mode 100644 applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index f40b5ab39a..bef057d55b 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -407,6 +407,7 @@ Rubin Observatory's telemetry service. | strimzi-kafka.kafka.listeners.external.enabled | bool | `false` | Whether external listener is enabled. | | strimzi-kafka.kafka.listeners.plain.enabled | bool | `false` | Whether internal plaintext listener is enabled. | | strimzi-kafka.kafka.listeners.tls.enabled | bool | `false` | Whether internal TLS listener is enabled. 
| +| strimzi-kafka.kafka.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | strimzi-kafka.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | @@ -432,6 +433,7 @@ Rubin Observatory's telemetry service. | strimzi-kafka.users.telegraf.enabled | bool | `false` | Enable user telegraf (deployed by parent Sasquatch chart) | | strimzi-kafka.users.tsSalKafka.enabled | bool | `false` | Enable user ts-salkafka, used at the telescope environments | | strimzi-kafka.zookeeper.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["zookeeper"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Zookeeper pod assignment. | +| strimzi-kafka.zookeeper.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | strimzi-kafka.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | strimzi-kafka.zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | | strimzi-kafka.zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index eb1f3741fb..fe578b276a 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -29,6 +29,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.listeners.external.enabled | bool | `false` | Whether external listener is enabled. 
| | kafka.listeners.plain.enabled | bool | `false` | Whether internal plaintext listener is enabled. | | kafka.listeners.tls.enabled | bool | `false` | Whether internal TLS listener is enabled. | +| kafka.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | @@ -54,6 +55,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | users.telegraf.enabled | bool | `false` | Enable user telegraf (deployed by parent Sasquatch chart) | | users.tsSalKafka.enabled | bool | `false` | Enable user ts-salkafka, used at the telescope environments | | zookeeper.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["zookeeper"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Zookeeper pod assignment. | +| zookeeper.metricsConfig.enabled | bool | `false` | Whether metric configuration is enabled. | | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | | zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. 
| diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml new file mode 100644 index 0000000000..9ce28f5752 --- /dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml @@ -0,0 +1,154 @@ +{{- if .Values.kafka.metricsConfig.enabled }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: kafka-metrics + labels: + app: sasquatch-kafka-metrics +data: + kafka-metrics-config.yml: | + # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics + lowercaseOutputName: true + rules: + # Special cases and very specific rules + - pattern: kafka.server<>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + topic: "$4" + partition: "$5" + - pattern: kafka.server<>Value + name: kafka_server_$1_$2 + type: GAUGE + labels: + clientId: "$3" + broker: "$4:$5" + - pattern: kafka.server<>connections + name: kafka_server_$1_connections_tls_info + type: GAUGE + labels: + cipher: "$2" + protocol: "$3" + listener: "$4" + networkProcessor: "$5" + - pattern: kafka.server<>connections + name: kafka_server_$1_connections_software + type: GAUGE + labels: + clientSoftwareName: "$2" + clientSoftwareVersion: "$3" + listener: "$4" + networkProcessor: "$5" + - pattern: "kafka.server<>(.+):" + name: kafka_server_$1_$4 + type: GAUGE + labels: + listener: "$2" + networkProcessor: "$3" + - pattern: kafka.server<>(.+) + name: kafka_server_$1_$4 + type: GAUGE + labels: + listener: "$2" + networkProcessor: "$3" + # Some percent metrics use MeanRate attribute + # Ex) kafka.server<>MeanRate + - pattern: kafka.(\w+)<>MeanRate + name: kafka_$1_$2_$3_percent + type: GAUGE + # Generic gauges for percents + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3_percent + type: GAUGE + labels: + "$4": "$5" + # 
Generic per-second counters with 0-2 key/value pairs + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_total + type: COUNTER + # Generic gauges with 0-2 key/value pairs + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>Value + name: kafka_$1_$2_$3 + type: GAUGE + # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's. + # Note that these are missing the '_sum' metric! + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + "$6": "$7" + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + "$6": "$7" + quantile: "0.$8" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + labels: + "$4": "$5" + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + "$4": "$5" + quantile: "0.$6" + - pattern: kafka.(\w+)<>Count + name: kafka_$1_$2_$3_count + type: COUNTER + - pattern: kafka.(\w+)<>(\d+)thPercentile + name: kafka_$1_$2_$3 + type: GAUGE + labels: + quantile: "0.$4" + # KRaft mode: uncomment the following lines to export KRaft related metrics + # KRaft overall related metrics + # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics + #- pattern: "kafka.server<>(.+-total|.+-max):" + # name: kafka_server_raftmetrics_$1 + # type: COUNTER + #- pattern: "kafka.server<>(.+):" + # name: kafka_server_raftmetrics_$1 + # type: GAUGE + # KRaft "low level" channels related metrics + # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) 
metrics + #- pattern: "kafka.server<>(.+-total|.+-max):" + # name: kafka_server_raftchannelmetrics_$1 + # type: COUNTER + #- pattern: "kafka.server<>(.+):" + # name: kafka_server_raftchannelmetrics_$1 + # type: GAUGE + # Broker metrics related to fetching metadata topic records in KRaft mode + #- pattern: "kafka.server<>(.+):" + # name: kafka_server_brokermetadatametrics_$1 + # type: GAUGE +{{- end}} \ No newline at end of file diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index c56d58aed9..6572e25c17 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -98,6 +98,14 @@ spec: {{- range $key, $value := .Values.kafka.config }} {{ $key }}: {{ $value }} {{- end }} + {{- if .Values.kafka.metricsConfig.enabled }} + metricsConfig: + type: jmxPrometheusExporter + valueFrom: + configMapKeyRef: + name: kafka-metrics + key: kafka-metrics-config.yml + {{- end }} storage: type: jbod volumes: @@ -113,6 +121,14 @@ spec: {{- end}} deleteClaim: false zookeeper: + {{- if .Values.zookeeper.metricsConfig.enabled }} + metricsConfig: + type: jmxPrometheusExporter + valueFrom: + configMapKeyRef: + name: zookeeper-metrics + key: zookeeper-metrics-config.yml + {{- end }} template: persistentVolumeClaim: metadata: diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml new file mode 100644 index 0000000000..d80545f226 --- /dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml @@ -0,0 +1,40 @@ +{{- if .Values.zookeeper.metricsConfig.enabled }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: zookeeper-metrics + labels: + app: sasquatch-zookeeper-metrics +data: + zookeeper-metrics-config.yml: | + # See 
https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics + lowercaseOutputName: true + rules: + # replicated Zookeeper + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$2" + type: GAUGE + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$3" + type: GAUGE + labels: + replicaId: "$2" + - pattern: "org.apache.ZooKeeperService<>(Packets\\w+)" + name: "zookeeper_$4" + type: COUNTER + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" + - pattern: "org.apache.ZooKeeperService<>(\\w+)" + name: "zookeeper_$4_$5" + type: GAUGE + labels: + replicaId: "$2" + memberType: "$3" +{{- end}} \ No newline at end of file diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index ec975126d9..bb6ced02f5 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -29,6 +29,10 @@ kafka: # -- Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. replica.lag.time.max.ms: 120000 + metricsConfig: + # -- Whether metric configuration is enabled. + enabled: false + listeners: plain: # -- Whether internal plaintext listener is enabled. @@ -100,6 +104,10 @@ zookeeper: # -- Name of a StorageClass to use when requesting persistent volumes. storageClassName: "" + metricsConfig: + # -- Whether metric configuration is enabled. + enabled: false + # -- Affinity for Zookeeper pod assignment. 
affinity: podAntiAffinity: diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 41430433f8..0a0f2ff06e 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -19,9 +19,13 @@ strimzi-kafka: host: sasquatch-tts-kafka-1.lsst.codes - loadBalancerIP: "140.252.146.47" host: sasquatch-tts-kafka-2.lsst.codes + metricsConfig: + enabled: true zookeeper: storage: storageClassName: rook-ceph-block + metricsConfig: + enabled: true users: tsSalKafka: enabled: true From 86b03706d7d8ec091c0ea49aef05d639aaecdf29 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 14 Dec 2023 11:02:20 -0700 Subject: [PATCH 552/588] Fix cluster attributes in csc chart. --- charts/csc/templates/job.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml index c053c1019f..73b53732c8 100644 --- a/charts/csc/templates/job.yaml +++ b/charts/csc/templates/job.yaml @@ -85,9 +85,9 @@ spec: readOnly: {{ $values.readOnly }} {{- end}} {{- end}} - {{- with $.Values.resources }} + {{- with .Values.resources }} resources: - {{- toYaml $.Values.resources | nindent 12 }} + {{- toYaml .Values.resources | nindent 12 }} {{- end }} {{- if .Values.secretPermFixer }} initContainers: @@ -171,13 +171,13 @@ spec: - name: nexus3-docker {{- with $.Values.nodeSelector }} nodeSelector: - {{- toYaml $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with $.Values.affinity }} affinity: - {{- toYaml $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with $.Values.tolerations }} tolerations: - {{- toYaml $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} From 68c9e21f7a2917098033e5c806bd3aeb75c35179 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 8 Jan 2024 08:30:08 -0700 Subject: [PATCH 553/588] Fix nubaldo README. 
--- applications/nublado/README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index a6053d5e8f..ad9e5c1359 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -53,10 +53,14 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.env | object | See `values.yaml` | Environment variables to set for every user lab | | controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | -| controller.config.lab.initcontainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image reference), and `privileged`, and may contain `volumes` (similar to the main `volumes` configuration). If `privileged` is true, the container will run as root with `allowPrivilegeEscalation` true. Otherwise it will, run as UID 1000. | +| controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumeMounts` (similar to the main `volumeMountss` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | +| controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. 
| +| controller.config.lab.nodeSelector | object | `{}` | Node selector rules for user lab pods | +| controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | +| controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | -| controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | +| controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | | controller.config.lab.spawnTimeout | int | `600` | How long to wait for Kubernetes to spawn a lab in seconds. This should generally be shorter than the spawn timeout set in JupyterHub. 
| | controller.config.lab.tolerations | list | `[]` | Tolerations for user lab pods | | controller.config.lab.volumeMounts | list | `[]` | Volumes that should be mounted in lab pods. | From 012989a306445730cde8345525af7adc3409b1aa Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 11 Jan 2024 10:43:08 -0700 Subject: [PATCH 554/588] Add pod anti-affinity for SQs and Schedulers. --- .../obssys/values-tucson-teststand.yaml | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/applications/obssys/values-tucson-teststand.yaml b/applications/obssys/values-tucson-teststand.yaml index 61ce3a28da..7be607d4ae 100644 --- a/applications/obssys/values-tucson-teststand.yaml +++ b/applications/obssys/values-tucson-teststand.yaml @@ -63,6 +63,13 @@ atqueue: readOnly: true server: nfs-obsenv.tu.lsst.org serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scriptqueue + topologyKey: "kubernetes.io/hostname" atscheduler: namespace: *ns @@ -96,6 +103,13 @@ atscheduler: readOnly: true server: nfs-obsenv.tu.lsst.org serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scheduler + topologyKey: "kubernetes.io/hostname" authorize: enabled: true @@ -158,6 +172,13 @@ mtqueue: readOnly: true server: nfs-obsenv.tu.lsst.org serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scriptqueue + topologyKey: "kubernetes.io/hostname" mtscheduler: namespace: *ns @@ -191,6 +212,13 @@ mtscheduler: readOnly: true server: nfs-obsenv.tu.lsst.org serverPath: /obs-env + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + csc-class: scheduler + topologyKey: "kubernetes.io/hostname" watcher: namespace: *ns From 
5c9f3e01ad5f57537bb0b40c9ad8ce7c9f227292 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 11 Jan 2024 11:50:43 -0700 Subject: [PATCH 555/588] Add pod monitors for kafka metrics. --- applications/sasquatch/README.md | 1 + .../sasquatch/charts/strimzi-kafka/README.md | 1 + .../strimzi-kafka/templates/podmonitors.yaml | 108 ++++++++++++++++++ .../charts/strimzi-kafka/values.yaml | 2 + .../sasquatch/values-tucson-teststand.yaml | 2 + 5 files changed, 114 insertions(+) create mode 100644 applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index bef057d55b..bfe484acd3 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -385,6 +385,7 @@ Rubin Observatory's telemetry service. | source-kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | | square-events.cluster.name | string | `"sasquatch"` | | | strimzi-kafka.cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | +| strimzi-kafka.cluster.releaseLabel | string | `"site-prom"` | Site wide label required for gathering Prometheus metrics if they are enabled. | | strimzi-kafka.connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | | strimzi-kafka.connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | | strimzi-kafka.connect.enabled | bool | `true` | Enable Kafka Connect. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index fe578b276a..d07670e5d6 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -7,6 +7,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. 
| Key | Type | Default | Description | |-----|------|---------|-------------| | cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | +| cluster.releaseLabel | string | `"site-prom"` | Site wide label required for gathering Prometheus metrics if they are enabled. | | connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | | connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | | connect.enabled | bool | `true` | Enable Kafka Connect. | diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml new file mode 100644 index 0000000000..10984d13c2 --- /dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml @@ -0,0 +1,108 @@ +{{- if .Values.kafka.metricsConfig.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: cluster-operator-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchLabels: + strimzi.io/kind: cluster-operator + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: http +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: entity-operator-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: entity-operator + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: healthcheck +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: bridge-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchLabels: + strimzi.io/kind: 
KafkaBridge + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: rest-api +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: kafka-resources-metrics + namespace: sasquatch + labels: + app: strimzi + release: {{ .Values.cluster.releaseLabel }} +spec: + selector: + matchExpressions: + - key: "strimzi.io/kind" + operator: In + values: ["Kafka", "KafkaConnect", "KafkaMirrorMaker", "KafkaMirrorMaker2"] + namespaceSelector: + matchNames: + - sasquatch + podMetricsEndpoints: + - path: /metrics + port: tcp-prometheus + relabelings: + - separator: ; + regex: __meta_kubernetes_pod_label_(strimzi_io_.+) + replacement: $1 + action: labelmap + - sourceLabels: [__meta_kubernetes_namespace] + separator: ; + regex: (.*) + targetLabel: namespace + replacement: $1 + action: replace + - sourceLabels: [__meta_kubernetes_pod_name] + separator: ; + regex: (.*) + targetLabel: kubernetes_pod_name + replacement: $1 + action: replace + - sourceLabels: [__meta_kubernetes_pod_node_name] + separator: ; + regex: (.*) + targetLabel: node_name + replacement: $1 + action: replace + - sourceLabels: [__meta_kubernetes_pod_host_ip] + separator: ; + regex: (.*) + targetLabel: node_ip + replacement: $1 + action: replace +{{- end }} \ No newline at end of file diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index bb6ced02f5..38d6138a5d 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -2,6 +2,8 @@ cluster: # -- Name used for the Kafka cluster, and used by Strimzi for many annotations. name: sasquatch + # -- Site wide label required for gathering Prometheus metrics if they are enabled. + releaseLabel: "site-prom" kafka: # -- Version of Kafka to deploy. 
diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 0a0f2ff06e..434a2753b0 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -1,4 +1,6 @@ strimzi-kafka: + cluster: + releaseLabel: pillan-prom kafka: disruption_tolerance: 1 config: From bab3862bfa248806b4887546f2cec4133c99c00e Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 12 Jan 2024 14:30:54 -0700 Subject: [PATCH 556/588] Add kafkaExporter to Kafka. --- applications/sasquatch/README.md | 3 +++ applications/sasquatch/charts/strimzi-kafka/README.md | 3 +++ .../sasquatch/charts/strimzi-kafka/templates/kafka.yaml | 5 +++++ applications/sasquatch/charts/strimzi-kafka/values.yaml | 8 ++++++++ applications/sasquatch/values-tucson-teststand.yaml | 2 ++ 5 files changed, 21 insertions(+) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index bfe484acd3..0523deceab 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -414,6 +414,9 @@ Rubin Observatory's telemetry service. | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | | strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | | strimzi-kafka.kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. | +| strimzi-kafka.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter | +| strimzi-kafka.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | +| strimzi-kafka.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | | strimzi-kafka.mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | | strimzi-kafka.mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. 
| | strimzi-kafka.mirrormaker2.replication.policy.separator | string | "" | Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index d07670e5d6..dfc431910b 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -36,6 +36,9 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | | kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. | +| kafkaExporter.enabled | bool | `false` | Enable Kafka exporter | +| kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | +| kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | | mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | | mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. | | mirrormaker2.replication.policy.separator | string | "" | Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. 
| diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index 6572e25c17..8a2ec0d56e 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -155,3 +155,8 @@ spec: entityOperator: topicOperator: {} userOperator: {} + {{- if .Values.kafkaExporter.enabled }} + kafkaExporter: + topicRegex: {{ .Values.kafkaExporter.topicRegex }} + groupRegex: {{ .Values.kafkaExporter.groupRegex }} + {{- end }} \ No newline at end of file diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 38d6138a5d..2d70b5c0ef 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -97,6 +97,14 @@ kafka: # -- Tolerations for Kafka broker pod assignment. tolerations: [] +kafkaExporter: + # -- Enable Kafka exporter + enabled: false + # -- Kafka topics to monitor + topicRegex: ".*" + # -- Consumer groups to monitor + groupRegex: ".*" + zookeeper: # -- Number of Zookeeper replicas to run. replicas: 3 diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 434a2753b0..b92c9d45bc 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -23,6 +23,8 @@ strimzi-kafka: host: sasquatch-tts-kafka-2.lsst.codes metricsConfig: enabled: true + kafkaExporter: + enabled: true zookeeper: storage: storageClassName: rook-ceph-block From 9fa9d17d7857ab1e368051571460543aa9c3afad Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 17 Jan 2024 11:21:13 -0700 Subject: [PATCH 557/588] Fixup docs after rebase. 
--- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 0523deceab..2a611c3fb3 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -388,7 +388,7 @@ Rubin Observatory's telemetry service. | strimzi-kafka.cluster.releaseLabel | string | `"site-prom"` | Site wide label required for gathering Prometheus metrics if they are enabled. | | strimzi-kafka.connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | | strimzi-kafka.connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | -| strimzi-kafka.connect.enabled | bool | `true` | Enable Kafka Connect. | +| strimzi-kafka.connect.enabled | bool | `false` | Enable Kafka Connect. | | strimzi-kafka.connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | strimzi-kafka.kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index dfc431910b..cc23f7c7b0 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -10,7 +10,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. 
| cluster.releaseLabel | string | `"site-prom"` | Site wide label required for gathering Prometheus metrics if they are enabled. | | connect.config."key.converter" | string | `"io.confluent.connect.avro.AvroConverter"` | Set the converter for the message key | | connect.config."key.converter.schemas.enable" | bool | `true` | Enable converted schemas for the message key | -| connect.enabled | bool | `true` | Enable Kafka Connect. | +| connect.enabled | bool | `false` | Enable Kafka Connect. | | connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.36.1-kafka-3.5.1:tickets-dm-40655"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | kafka.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/name","operator":"In","values":["kafka"]}]},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for Kafka pod assignment. | From 2fad2a2fed6809966ed515e92529c55d198fec4c Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 19 Jan 2024 15:16:04 -0700 Subject: [PATCH 558/588] Add logging to kafkaExporter. 
--- .../sasquatch/charts/strimzi-kafka/templates/kafka.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index 8a2ec0d56e..5199399a89 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -159,4 +159,6 @@ spec: kafkaExporter: topicRegex: {{ .Values.kafkaExporter.topicRegex }} groupRegex: {{ .Values.kafkaExporter.groupRegex }} + logging: debug + enableSaramaLogging: true {{- end }} \ No newline at end of file From c22a19a8e7ecff92812f8ee2698d7fb5624bc903 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 19 Jan 2024 16:29:25 -0700 Subject: [PATCH 559/588] Make kafkaExporter more configurable. --- applications/sasquatch/README.md | 3 +++ applications/sasquatch/charts/strimzi-kafka/README.md | 3 +++ .../sasquatch/charts/strimzi-kafka/templates/kafka.yaml | 8 ++++++-- applications/sasquatch/charts/strimzi-kafka/values.yaml | 6 ++++++ 4 files changed, 18 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 2a611c3fb3..ac8f9b783a 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -414,8 +414,11 @@ Rubin Observatory's telemetry service. | strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | | strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | | strimzi-kafka.kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. 
| +| strimzi-kafka.kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging for pod | | strimzi-kafka.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter | | strimzi-kafka.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | +| strimzi-kafka.kafkaExporter.logging | string | `"info"` | Logging level | +| strimzi-kafka.kafkaExporter.resources | object | `{}` | Resource specification for Kafka exporter | | strimzi-kafka.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | | strimzi-kafka.mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | | strimzi-kafka.mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index cc23f7c7b0..c05c81afd5 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -36,8 +36,11 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | | kafka.version | string | `"3.5.1"` | Version of Kafka to deploy. | +| kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging for pod | | kafkaExporter.enabled | bool | `false` | Enable Kafka exporter | | kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor | +| kafkaExporter.logging | string | `"info"` | Logging level | +| kafkaExporter.resources | object | `{}` | Resource specification for Kafka exporter | | kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor | | mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. 
| | mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. | diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index 5199399a89..467b104f8d 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -159,6 +159,10 @@ spec: kafkaExporter: topicRegex: {{ .Values.kafkaExporter.topicRegex }} groupRegex: {{ .Values.kafkaExporter.groupRegex }} - logging: debug - enableSaramaLogging: true + logging: {{ .Values.kafkaExporter.logging }} + enableSaramaLogging: {{ .Values.kafkaExporter.enableSaramaLogging }} + {{- with .Values.kafkaExporter.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} {{- end }} \ No newline at end of file diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 2d70b5c0ef..062a868242 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -104,6 +104,12 @@ kafkaExporter: topicRegex: ".*" # -- Consumer groups to monitor groupRegex: ".*" + # -- Logging level + logging: info + # -- Enable Sarama logging for pod + enableSaramaLogging: false + # -- Resource specification for Kafka exporter + resources: {} zookeeper: # -- Number of Zookeeper replicas to run. From 411bdd9caf63028c06bba4738db424c3386e73d9 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 19 Jan 2024 16:29:43 -0700 Subject: [PATCH 560/588] Update TTS kafkaExporter config. 
--- applications/sasquatch/values-tucson-teststand.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index b92c9d45bc..6f15453958 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -25,6 +25,14 @@ strimzi-kafka: enabled: true kafkaExporter: enabled: true + enableSaramaLogging: true + resources: + requests: + cpu: 200m + memory: 64Mi + limits: + cpu: 500m + memory: 128Mi zookeeper: storage: storageClassName: rook-ceph-block From 439964ce290134fd014e00caddbb6d2aee5c4971 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 19 Jan 2024 16:59:15 -0700 Subject: [PATCH 561/588] Add blank line at end of file. --- .../charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml | 2 +- .../sasquatch/charts/strimzi-kafka/templates/kafka.yaml | 2 +- .../sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml | 2 +- .../strimzi-kafka/templates/zookeeper-metrics-configmap.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml index 9ce28f5752..e1090ccdf6 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka-metrics-configmap.yaml @@ -151,4 +151,4 @@ data: #- pattern: "kafka.server<>(.+):" # name: kafka_server_brokermetadatametrics_$1 # type: GAUGE -{{- end}} \ No newline at end of file +{{- end}} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index 467b104f8d..fab4289e5a 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ 
b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -165,4 +165,4 @@ spec: resources: {{- toYaml . | nindent 6 }} {{- end }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml index 10984d13c2..51d87d0aa6 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/podmonitors.yaml @@ -105,4 +105,4 @@ spec: targetLabel: node_ip replacement: $1 action: replace -{{- end }} \ No newline at end of file +{{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml index d80545f226..8fb8f8a17a 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/zookeeper-metrics-configmap.yaml @@ -37,4 +37,4 @@ data: labels: replicaId: "$2" memberType: "$3" -{{- end}} \ No newline at end of file +{{- end}} From 0310f2934453fb681aea28fb673eb30f59579ce4 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 22 Jan 2024 08:16:07 -0700 Subject: [PATCH 562/588] Change yamlint back to what's on main. 
--- .yamllint.yml | 4 +- .../nublado/values-tucson-teststand.yaml | 142 +++++++++--------- 2 files changed, 72 insertions(+), 74 deletions(-) diff --git a/.yamllint.yml b/.yamllint.yml index 3994a4d6b9..64842f4b49 100644 --- a/.yamllint.yml +++ b/.yamllint.yml @@ -1,8 +1,6 @@ extends: default -ignore: - - templates - - shared/charts +ignore: templates rules: line-length: disable diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index b6cb138f83..6ecf4aabd7 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -31,79 +31,79 @@ controller: - secretName: "nublado-lab-secret" secretKey: "postgres-credentials.txt" initContainers: - - name: "inithome" - image: - repository: "ghcr.io/lsst-sqre/nublado-inithome" - tag: "4.0.2" - privileged: true - volumeMounts: - - containerPath: "/home" - volumeName: "home" - volumes: - - name: "home" - source: - type: "nfs" - serverPath: "/jhome" - server: "nfs-jhome.tu.lsst.org" - - name: "project" - source: - type: "nfs" - serverPath: "/project" - server: "nfs-project.tu.lsst.org" - - name: "scratch" - source: - type: "nfs" - serverPath: "/scratch" - server: "nfs-scratch.tu.lsst.org" - - name: "datasets" - source: - type: "nfs" - serverPath: "/lsstdata" - server: "nfs-lsstdata.tu.lsst.org" - - name: "latiss" - source: - type: "nfs" - serverPath: "/auxtel/repo/LATISS" - server: "nfs-auxtel.tu.lsst.org" - - name: "obs-env" - source: - type: "nfs" - serverPath: "/obs-env" - server: "nfs-obsenv.tu.lsst.org" - - name: "lsstcomcan" - source: - type: "nfs" - serverPath: "/repo/LSSTComCam" - server: "comcam-archiver.tu.lsst.org" - - name: "auxtel" - source: - type: "nfs" - serverPath: "/auxtel/lsstdata/TTS/auxtel" - server: "nfs-auxtel.tu.lsst.org" - - name: "comcam" - source: - type: "nfs" - serverPath: "/lsstdata/TTS/comcam" - server: "comcam-archiver.tu.lsst.org" - volumeMounts: + - name: "inithome" + image: + 
repository: "ghcr.io/lsst-sqre/nublado-inithome" + tag: "4.0.2" + privileged: true + volumeMounts: - containerPath: "/home" volumeName: "home" - - containerPath: "/project" - volumeName: "project" - - containerPath: "/scratch" - volumeName: "scratch" - - containerPath: "/datasets" - volumeName: "datasets" - - containerPath: "/repo/LATISS" - volumeName: "latiss" - - containerPath: "/net/obs-env" - volumeName: "obs-env" - - containerPath: "/repo/LSSTComCam" - volumeName: "lsstcomcam" - - containerPath: "/data/lsstdata/TTS/auxtel" - volumeName: "auxtel" - - containerPath: "/data/lsstdata/TTS/comcam" - volumeName: "comcam" + volumes: + - name: "home" + source: + type: "nfs" + serverPath: "/jhome" + server: "nfs-jhome.tu.lsst.org" + - name: "project" + source: + type: "nfs" + serverPath: "/project" + server: "nfs-project.tu.lsst.org" + - name: "scratch" + source: + type: "nfs" + serverPath: "/scratch" + server: "nfs-scratch.tu.lsst.org" + - name: "datasets" + source: + type: "nfs" + serverPath: "/lsstdata" + server: "nfs-lsstdata.tu.lsst.org" + - name: "latiss" + source: + type: "nfs" + serverPath: "/auxtel/repo/LATISS" + server: "nfs-auxtel.tu.lsst.org" + - name: "obs-env" + source: + type: "nfs" + serverPath: "/obs-env" + server: "nfs-obsenv.tu.lsst.org" + - name: "lsstcomcan" + source: + type: "nfs" + serverPath: "/repo/LSSTComCam" + server: "comcam-archiver.tu.lsst.org" + - name: "auxtel" + source: + type: "nfs" + serverPath: "/auxtel/lsstdata/TTS/auxtel" + server: "nfs-auxtel.tu.lsst.org" + - name: "comcam" + source: + type: "nfs" + serverPath: "/lsstdata/TTS/comcam" + server: "comcam-archiver.tu.lsst.org" + volumeMounts: + - containerPath: "/home" + volumeName: "home" + - containerPath: "/project" + volumeName: "project" + - containerPath: "/scratch" + volumeName: "scratch" + - containerPath: "/datasets" + volumeName: "datasets" + - containerPath: "/repo/LATISS" + volumeName: "latiss" + - containerPath: "/net/obs-env" + volumeName: "obs-env" + - containerPath: 
"/repo/LSSTComCam" + volumeName: "lsstcomcam" + - containerPath: "/data/lsstdata/TTS/auxtel" + volumeName: "auxtel" + - containerPath: "/data/lsstdata/TTS/comcam" + volumeName: "comcam" jupyterhub: cull: From ce050c2749a89e5c6011cc88350cde631f685018 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 22 Jan 2024 08:21:21 -0700 Subject: [PATCH 563/588] Fix nubaldo values.yaml docs. --- applications/nublado/README.md | 2 +- applications/nublado/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index ad9e5c1359..fb4bd65db2 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -60,7 +60,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.nss.basePasswd | string | See `values.yaml` | Base `/etc/passwd` file for lab containers | | controller.config.lab.pullSecret | string | Do not use a pull secret | Pull secret to use for labs. Set to the string `pull-secret` to use the normal pull secret from Vault. | | controller.config.lab.secrets | list | `[]` | Secrets to set in the user pods. Each should have a `secretKey` key pointing to a secret in the same namespace as the controller (generally `nublado-secret`) and `secretRef` pointing to a field in that key. | -| controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. Names must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | +| controller.config.lab.sizes | list | See `values.yaml` (specifies `small`, `medium`, and | Available lab sizes. 
Sizes must be chosen from `fine`, `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, `gargantuan`, and `colossal` in that order. Each should specify the maximum CPU equivalents and memory. SI suffixes for memory are supported. Sizes will be shown in the order defined here, and the first defined size will be the default. `large` with `small` as the default) | | controller.config.lab.spawnTimeout | int | `600` | How long to wait for Kubernetes to spawn a lab in seconds. This should generally be shorter than the spawn timeout set in JupyterHub. | | controller.config.lab.tolerations | list | `[]` | Tolerations for user lab pods | | controller.config.lab.volumeMounts | list | `[]` | Volumes that should be mounted in lab pods. | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index f19b4d5603..3ca8799eed 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -294,7 +294,7 @@ controller: # that key. secrets: [] - # -- Available lab sizes. Names must be chosen from `fine`, + # -- Available lab sizes. Sizes must be chosen from `fine`, # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, # `gargantuan`, and `colossal` in that order. Each should specify the # maximum CPU equivalents and memory. SI suffixes for memory are From 05a5630fcbcef58f0382b55cbbabeb9d493a6c4c Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 22 Jan 2024 08:38:42 -0700 Subject: [PATCH 564/588] Fix test. 
--- tests/docs/applications_test.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/docs/applications_test.py b/tests/docs/applications_test.py index d5821e837c..c2153b4c08 100644 --- a/tests/docs/applications_test.py +++ b/tests/docs/applications_test.py @@ -37,7 +37,11 @@ def test_applications_index() -> None: for application in root_path.iterdir(): if not application.is_dir(): continue - if application.name in ("nublado-fileservers", "nublado-users"): + if application.name in ( + "nublado-fileservers", + "nublado-users", + "ocps-uws-job", + ): continue assert ( application.name in seen From 6a129d2e0073f2b0dc59d6f6c267056eddfa03fc Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 22 Jan 2024 14:54:59 -0700 Subject: [PATCH 565/588] Create control system configuration model. --- applications/auxtel/README.md | 16 +- applications/auxtel/values.yaml | 63 +++--- applications/calsys/README.md | 16 +- applications/calsys/values.yaml | 63 +++--- applications/control-system-test/README.md | 16 +- .../templates/controller-configmap.yaml | 2 +- applications/control-system-test/values.yaml | 63 +++--- applications/eas/README.md | 16 +- applications/eas/values.yaml | 63 +++--- applications/love/README.md | 16 +- applications/love/values.yaml | 63 +++--- applications/obssys/README.md | 16 +- applications/obssys/values.yaml | 63 +++--- applications/simonyitel/README.md | 16 +- applications/simonyitel/values.yaml | 63 +++--- applications/uws/README.md | 16 +- applications/uws/values.yaml | 49 ++--- charts/csc/README.md | 2 +- .../csc/templates/configfile-configmap.yaml | 2 +- .../csc/templates/entrypoint-configmap.yaml | 2 +- charts/csc/templates/job.yaml | 4 +- charts/csc/templates/mountpoint-pvc.yaml | 2 +- charts/csc/templates/service.yaml | 2 +- charts/csc/values.yaml | 2 +- .../templates/configmap-env.yaml | 12 +- .../csc_collector/templates/vault-secret.yaml | 2 +- docs/extras/schemas/environment.json | 202 ++++++++++-------- 
environments/README.md | 9 +- .../templates/auxtel-application.yaml | 30 +-- .../templates/calsys-application.yaml | 30 +-- .../control-system-test-application.yaml | 30 +-- environments/templates/eas-application.yaml | 30 +-- environments/templates/love-application.yaml | 30 +-- .../templates/obssys-application.yaml | 30 +-- .../templates/simonyitel-application.yaml | 30 +-- environments/templates/uws-application.yaml | 30 +-- environments/values-tucson-teststand.yaml | 12 +- environments/values.yaml | 40 +--- src/phalanx/models/environments.py | 93 +++++--- 39 files changed, 649 insertions(+), 597 deletions(-) diff --git a/applications/auxtel/README.md b/applications/auxtel/README.md index bcdd744d87..eb239ab869 100644 --- a/applications/auxtel/README.md +++ b/applications/auxtel/README.md @@ -7,14 +7,14 @@ Deployment for the Auxiliary Telescope CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| 
global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | atdome-sim.enabled | bool | `false` | Enable the ATDome simulator CSC | diff --git a/applications/auxtel/values.yaml b/applications/auxtel/values.yaml index 12826c7ca9..e4cd333773 100644 --- a/applications/auxtel/values.yaml +++ b/applications/auxtel/values.yaml @@ -65,34 +65,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD 
- controlSystemKafkaBrokerAddress: "" - - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/calsys/README.md b/applications/calsys/README.md index 7d49bd4cee..056ed61adc 100644 --- a/applications/calsys/README.md +++ b/applications/calsys/README.md @@ -9,14 +9,14 @@ Deployment for the Calibration System CSCs | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| | gcheaderservice1.enabled | bool | `false` | Enable the GCHeaderService:1 CSC | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by 
ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | simulation-gencam.enabled | bool | `false` | Enabled the GenericCamera:1 CSC | diff --git a/applications/calsys/values.yaml b/applications/calsys/values.yaml index 73936c5445..71ded1b43b 100644 --- a/applications/calsys/values.yaml +++ b/applications/calsys/values.yaml @@ -29,34 +29,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - controlSystemKafkaBrokerAddress: "" - - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka 
broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/control-system-test/README.md b/applications/control-system-test/README.md index 43bda22046..f13374b774 100644 --- a/applications/control-system-test/README.md +++ b/applications/control-system-test/README.md @@ -7,14 +7,14 @@ Deployment for the Test CSCs and Integration Testing Workflows | Key | Type | Default | Description | |-----|------|---------|-------------| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | 
+| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| diff --git a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml index 531a9a60d2..78c4fb11e5 100644 --- a/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/controller-configmap.yaml @@ -9,7 +9,7 @@ data: artifactRepository: | # However, all nested maps must be strings archiveLogs: true s3: - endpoint: {{ $.Values.global.controlSystemS3EndpointUrl | trimPrefix "https://" }} + endpoint: {{ $.Values.global.controlSystem.s3EndpointUrl | trimPrefix "https://" }} bucket: {{ .Values.s3Bucket }} insecure: false accessKeySecret: diff --git a/applications/control-system-test/values.yaml b/applications/control-system-test/values.yaml index 4982dea29c..be5d0e15cf 100644 --- a/applications/control-system-test/values.yaml +++ b/applications/control-system-test/values.yaml @@ -25,34 +25,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - controlSystemKafkaBrokerAddress: "" - - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 
endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/eas/README.md b/applications/eas/README.md index 5a88c50f26..11c38edd72 100644 --- a/applications/eas/README.md +++ b/applications/eas/README.md @@ -26,14 +26,14 @@ Deployment for the Environmental Awareness Systems CSCs | dsm2-sim.enabled | bool | `false` | Enable the DSM:2 simulator CSC | | dsm2.enabled | bool | `false` | Enable the DSM:2 CSC | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | 
string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | m2-ess106-sim.enabled | bool | `false` | Enable the ESS:106 simulator CSC | diff --git a/applications/eas/values.yaml b/applications/eas/values.yaml index 1357ed868a..14b4035f93 100644 --- a/applications/eas/values.yaml +++ b/applications/eas/values.yaml @@ -149,34 +149,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image 
tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - controlSystemKafkaBrokerAddress: "" - - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/love/README.md b/applications/love/README.md index e824acb45e..e875e93406 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -7,14 +7,14 @@ Deployment for the LSST Operators Visualization 
Environment | Key | Type | Default | Description | |-----|------|---------|-------------| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | 
Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | diff --git a/applications/love/values.yaml b/applications/love/values.yaml index 6bffc81c51..346ac1d5b0 100644 --- a/applications/love/values.yaml +++ b/applications/love/values.yaml @@ -21,34 +21,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - controlSystemKafkaBrokerAddress: "" - - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + 
imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/obssys/README.md b/applications/obssys/README.md index 46f08d1677..6890f6ad30 100644 --- a/applications/obssys/README.md +++ b/applications/obssys/README.md @@ -9,13 +9,13 @@ Deployment for the Observatory System CSCs | authorize.enabled | bool | `false` | Enable the Authorize CSC | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) 
| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string 
| Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/obssys/values.yaml b/applications/obssys/values.yaml index 036671a714..aa900f48bb 100644 --- a/applications/obssys/values.yaml +++ b/applications/obssys/values.yaml @@ -25,34 +25,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - controlSystemKafkaBrokerAddress: "" - - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics 
+ # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/simonyitel/README.md b/applications/simonyitel/README.md index 11dd5cc977..2247abeba4 100644 --- a/applications/simonyitel/README.md +++ b/applications/simonyitel/README.md @@ -10,14 +10,14 @@ Deployment for the Simonyi Survey Telescope CSCs | ccoods.enabled | bool | `false` | Enable the CCOODS CSC | | csc_collector.secrets | list | `[]` | This section holds secret specifications. Each object listed can have the following attributes defined: _name_ (The name used by pods to access the secret) _key_ (The key in the vault store where the secret resides) _type_ (OPTIONAL: The secret type. Defaults to Opaque.) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.controlSystem.appNamespace | 
string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | lasertracker1-sim.enabled | bool | `false` | Enable the LaserTracker:1 simulator CSC | diff --git a/applications/simonyitel/values.yaml b/applications/simonyitel/values.yaml index 331e11e0f0..d1872c646f 100644 --- a/applications/simonyitel/values.yaml +++ b/applications/simonyitel/values.yaml @@ -113,34 +113,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" - - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" - - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" - - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" - - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - 
controlSystemKafkaBrokerAddress: "" - - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" - - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" - - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" + + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" + + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" + + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" + + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git a/applications/uws/README.md b/applications/uws/README.md index e8cea935c1..d85d0198db 100644 --- a/applications/uws/README.md +++ b/applications/uws/README.md @@ -7,14 +7,14 @@ Deployment for the UWS and DM OCPS CSCs | Key | Type | Default | Description | |-----|------|---------|-------------| | global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.controlSystemAppNamespace | string | Set by ArgoCD | Application namespacce for the control system deployment | -| global.controlSystemImageTag | string | Set by ArgoCD | Image tag for the control system deployment | -| global.controlSystemKafkaBrokerAddress | string | 
Set by ArgoCD | Kafka broker address for the control system deployment | -| global.controlSystemKafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | -| global.controlSystemS3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | -| global.controlSystemSchemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | -| global.controlSystemSiteTag | string | Set by ArgoCD | Site tag for the control system deployment | -| global.controlSystemTopicName | string | Set by ArgoCD | Topic name tag for the control system deployment | +| global.controlSystem.appNamespace | string | Set by ArgoCD | Application namespace for the control system deployment | +| global.controlSystem.imageTag | string | Set by ArgoCD | Image tag for the control system deployment | +| global.controlSystem.kafkaBrokerAddress | string | Set by ArgoCD | Kafka broker address for the control system deployment | +| global.controlSystem.kafkaTopicReplicationFactor | string | Set by ArgoCD | Kafka topic replication factor for control system topics | +| global.controlSystem.s3EndpointUrl | string | Set by ArgoCD | S3 endpoint (LFA) for the control system deployment | +| global.controlSystem.schemaRegistryUrl | string | Set by ArgoCD | Schema registry URL for the control system deployment | +| global.controlSystem.siteTag | string | Set by ArgoCD | Site tag for the control system deployment | +| global.controlSystem.topicName | string | Set by ArgoCD | Topic name tag for the control system deployment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | atocps.enabled | bool | `false` | Enable the OCPS:1 CSC | diff --git a/applications/uws/values.yaml b/applications/uws/values.yaml index e41e0b7994..13b3083d23 100644 --- a/applications/uws/values.yaml +++ 
b/applications/uws/values.yaml @@ -33,34 +33,35 @@ global: # @default -- Set by Argo CD vaultSecretsPath: "" - # -- Application namespacce for the control system deployment - # @default -- Set by ArgoCD - controlSystemAppNamespace: "" + controlSystem: + # -- Application namespace for the control system deployment + # @default -- Set by ArgoCD + appNamespace: "" - # -- Image tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemImageTag: "" + # -- Image tag for the control system deployment + # @default -- Set by ArgoCD + imageTag: "" - # -- Site tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemSiteTag: "" + # -- Site tag for the control system deployment + # @default -- Set by ArgoCD + siteTag: "" - # -- Topic name tag for the control system deployment - # @default -- Set by ArgoCD - controlSystemTopicName: "" + # -- Topic name tag for the control system deployment + # @default -- Set by ArgoCD + topicName: "" - # -- Kafka broker address for the control system deployment - # @default -- Set by ArgoCD - controlSystemKafkaBrokerAddress: "" + # -- Kafka broker address for the control system deployment + # @default -- Set by ArgoCD + kafkaBrokerAddress: "" - # -- Kafka topic replication factor for control system topics - # @default -- Set by ArgoCD - controlSystemKafkaTopicReplicationFactor: "" + # -- Kafka topic replication factor for control system topics + # @default -- Set by ArgoCD + kafkaTopicReplicationFactor: "" - # -- Schema registry URL for the control system deployment - # @default -- Set by ArgoCD - controlSystemSchemaRegistryUrl: "" + # -- Schema registry URL for the control system deployment + # @default -- Set by ArgoCD + schemaRegistryUrl: "" - # -- S3 endpoint (LFA) for the control system deployment - # @default -- Set by ArgoCD - controlSystemS3EndpointUrl: "" + # -- S3 endpoint (LFA) for the control system deployment + # @default -- Set by ArgoCD + s3EndpointUrl: "" diff --git 
a/charts/csc/README.md b/charts/csc/README.md index 86ce313979..fb4504e095 100644 --- a/charts/csc/README.md +++ b/charts/csc/README.md @@ -24,7 +24,7 @@ A Helm chart for deploying the Control System CSCs. | nodeSelector | object | `{}` | This allows the specification of using specific nodes to run the pod | | pvcMountpoint | list | `[]` | This section holds the information necessary to create a volume mount for the container. If this section is used, each object listed can have the following attributes defined: _name_ (A label identifier for the mountpoint), _path_ (The path inside the container to mount), _accessMode_ (This sets the required access mode for the volume mount), _claimSize_ (The requested physical disk space size for the volume mount), _storageClass_ (The Kubernetes provided storage class), _ids.uid_ (OPTIONAL: An alternative UID for mounting), _ids.gid_ (OPTIONAL: An alternative GID for mounting) | | resources | object | `{}` | This allows the specification of resources (CPU, memory) requires to run the container | -| secretPermFixer | object | `{}` | This section sets the optional use of an init container for fixing permissions on secret files. If this section is used, each object listed can have the necessary attributes specified: _name_ (The label used for the init container) _containerPath_ (The path in the container where the secret files will be stored) _secretName_ (OPTIONAL: The secret name if different from _name_) _specialInstructions_ (OPTIONAL: This allows for optional instructions to be used when fixing permissions) | +| secretPermFixer | list | `[]` | This section sets the optional use of an init container for fixing permissions on secret files. 
If this section is used, each object listed can have the necessary attributes specified: _name_ (The label used for the init container) _containerPath_ (The path in the container where the secret files will be stored) _secretName_ (OPTIONAL: The secret name if different from _name_) _specialInstructions_ (OPTIONAL: This allows for optional instructions to be used when fixing permissions) | | securityContext | object | `{}` | This key allows for the specification of a pod security context for volumes. If this section is used, it must contain the following attributes: _user_ (The user id for the volumes) _group_ (The group id for the volumes) _fsGroup_ (OPTIONAL: A special supplemental group that applies to all containers in a pod) | | service.port | int | `nil` | The port number to use for the Service. | | service.type | string | `nil` | The Service type for the application. This is either ClusterIP (internal access) or LoadBalancer (external access) | diff --git a/charts/csc/templates/configfile-configmap.yaml b/charts/csc/templates/configfile-configmap.yaml index 3bab1bfbdf..b0fab92aa6 100644 --- a/charts/csc/templates/configfile-configmap.yaml +++ b/charts/csc/templates/configfile-configmap.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "chart.name" . }}-configfile - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} data: {{ .Values.configfile.filename }}: {{ .Values.configfile.content | toYaml | indent 4 }} diff --git a/charts/csc/templates/entrypoint-configmap.yaml b/charts/csc/templates/entrypoint-configmap.yaml index e9f347899b..bbd37daad2 100644 --- a/charts/csc/templates/entrypoint-configmap.yaml +++ b/charts/csc/templates/entrypoint-configmap.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "chart.name" . 
}}-entrypoint - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} data: .startup.sh: {{ .Values.entrypoint | toYaml | indent 4 }} diff --git a/charts/csc/templates/job.yaml b/charts/csc/templates/job.yaml index 73b53732c8..1ea87b9a9b 100644 --- a/charts/csc/templates/job.yaml +++ b/charts/csc/templates/job.yaml @@ -2,7 +2,7 @@ apiVersion: batch/v1 kind: Job metadata: name: {{ include "chart.name" . }} - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} labels: {{- include "csc.labels" . | nindent 4 }} {{- with .Values.annotations }} @@ -22,7 +22,7 @@ spec: spec: containers: - name: {{ include "csc.class" . }} - {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystemImageTag }} + {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystem.imageTag }} image: "{{ .Values.image.repository }}:{{ $imageTag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} stdin: true diff --git a/charts/csc/templates/mountpoint-pvc.yaml b/charts/csc/templates/mountpoint-pvc.yaml index 63d69fd88f..4e6be7427f 100644 --- a/charts/csc/templates/mountpoint-pvc.yaml +++ b/charts/csc/templates/mountpoint-pvc.yaml @@ -5,7 +5,7 @@ kind: PersistentVolumeClaim apiVersion: v1 metadata: name: {{ include "chart.name" . }}-{{ $values.name }}-pvc - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} {{- if $values.ids }} annotations: {{- if $values.ids.uid }} diff --git a/charts/csc/templates/service.yaml b/charts/csc/templates/service.yaml index 584813660d..9e79ab0a13 100644 --- a/charts/csc/templates/service.yaml +++ b/charts/csc/templates/service.yaml @@ -5,7 +5,7 @@ metadata: labels: csc: {{ include "csc.name" . }} name: {{ include "chart.name" . 
}}-service - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} spec: {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} loadBalancerIP: {{ .Values.service.loadBalancerIP }} diff --git a/charts/csc/values.yaml b/charts/csc/values.yaml index 5a190bcf50..6916aa3ec5 100644 --- a/charts/csc/values.yaml +++ b/charts/csc/values.yaml @@ -72,7 +72,7 @@ annotations: {} # _containerPath_ (The path in the container where the secret files will be stored) # _secretName_ (OPTIONAL: The secret name if different from _name_) # _specialInstructions_ (OPTIONAL: This allows for optional instructions to be used when fixing permissions) -secretPermFixer: {} +secretPermFixer: [] service: # -- (bool) This sets the use of a Service API for the application use: false diff --git a/charts/csc_collector/templates/configmap-env.yaml b/charts/csc_collector/templates/configmap-env.yaml index 5e6eec6629..fa9324a5ea 100644 --- a/charts/csc_collector/templates/configmap-env.yaml +++ b/charts/csc_collector/templates/configmap-env.yaml @@ -3,10 +3,10 @@ kind: ConfigMap metadata: name: csc-env-config data: - LSST_SITE: {{ $.Values.global.controlSystemSiteTag }} - LSST_TOPIC_SUBNAME: {{ $.Values.global.controlSystemTopicName }} - LSST_KAFKA_BROKER_ADDR: {{ $.Values.global.controlSystemKafkaBrokerAddress }} - LSST_KAFKA_REPLICATION_FACTOR: {{ $.Values.global.controlSystemKafkaTopicReplicationFactor | quote }} + LSST_SITE: {{ $.Values.global.controlSystem.siteTag }} + LSST_TOPIC_SUBNAME: {{ $.Values.global.controlSystem.topicName }} + LSST_KAFKA_BROKER_ADDR: {{ $.Values.global.controlSystem.kafkaBrokerAddress }} + LSST_KAFKA_REPLICATION_FACTOR: {{ $.Values.global.controlSystem.kafkaTopicReplicationFactor | quote }} LSST_KAFKA_SECURITY_USERNAME: ts-salkafka - LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystemSchemaRegistryUrl }} - S3_ENDPOINT_URL: {{ $.Values.global.controlSystemS3EndpointUrl 
}} + LSST_SCHEMA_REGISTRY_URL: {{ $.Values.global.controlSystem.schemaRegistryUrl }} + S3_ENDPOINT_URL: {{ $.Values.global.controlSystem.s3EndpointUrl }} diff --git a/charts/csc_collector/templates/vault-secret.yaml b/charts/csc_collector/templates/vault-secret.yaml index 9f5b7ac80c..949a3016a8 100644 --- a/charts/csc_collector/templates/vault-secret.yaml +++ b/charts/csc_collector/templates/vault-secret.yaml @@ -4,7 +4,7 @@ apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: name: {{ $secret.name }} - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} spec: path: {{ $.Values.global.vaultSecretsPath }}/{{ $secret.key }} type: {{ default "Opaque" $secret.type }} diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index 992ed5f30f..80817cc2f6 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -1,5 +1,116 @@ { "$defs": { + "ControlSystemConfig": { + "description": "Configuration for the Control System.", + "properties": { + "appNamespace": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Set the namespace for the control system components. 
Each control system application consists of many components that need to know what namespace to which they belong.", + "title": "Application Namespace" + }, + "imageTag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The image tag to use for control system images.", + "title": "Image Tag" + }, + "siteTag": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The tag that tells the control system component where it is running.", + "title": "Site Tag" + }, + "topicName": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Kafka identifier for control system topics.", + "title": "Topic Identifier" + }, + "kafkaBrokerAddress": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Kafka broker address for the control system components.", + "title": "Kafka Broker Address" + }, + "kafkaTopicReplicationFactor": { + "anyOf": [ + { + "type": "integer" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Kafka topic replication factor for control system components.", + "title": "Kafka Topic Replication Factor" + }, + "schemaRegistryUrl": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The Schema Registry URL for the control system components.", + "title": "Schema Registry URL" + }, + "s3EndpointUrl": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "The S3 URL for the environment specific LFA.", + "title": "S3 Endpoint URL" + } + }, + "title": "ControlSystemConfig", + "type": "object" + }, "GCPMetadata": { "description": "Google Cloud Platform hosting metadata.\n\nHolds information about where in Google Cloud Platform this Phalanx\nenvironment is hosted. 
This supports generating documentation that\nincludes this metadata, making it easier for administrators to know what\noptions to pass to :command:`gcloud` to do things such as get Kubernetes\ncredentials.", "properties": { @@ -158,101 +269,16 @@ "description": "Branch of the Git repository holding Argo CD configuration. This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", "title": "Git repository branch" }, - "controlSystemAppNamespace": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Controlsystemappnamespace" - }, - "controlSystemImageTag": { + "controlSystem": { "anyOf": [ { - "type": "string" + "$ref": "#/$defs/ControlSystemConfig" }, { "type": "null" } ], - "default": null, - "title": "Controlsystemimagetag" - }, - "controlSystemSiteTag": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Controlsystemsitetag" - }, - "controlSystemTopicName": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Controlsystemtopicname" - }, - "controlSystemKafkaBrokerAddress": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Controlsystemkafkabrokeraddress" - }, - "controlSystemKafkaTopicReplicationFactor": { - "anyOf": [ - { - "type": "integer" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Controlsystemkafkatopicreplicationfactor" - }, - "controlSystemSchemaRegistryUrl": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": "Controlsystemschemaregistryurl" - }, - "controlSystemS3EndpointUrl": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "title": 
"Controlsystems3Endpointurl" + "default": null } }, "required": [ diff --git a/environments/README.md b/environments/README.md index 731c9b922f..9a98e25ca4 100644 --- a/environments/README.md +++ b/environments/README.md @@ -64,14 +64,7 @@ | applications.vault-secrets-operator | bool | `true` | Enable the vault-secrets-operator application. This is required for all environments. | | applications.vo-cutouts | bool | `false` | Enable the vo-cutouts application | | butlerRepositoryIndex | string | None, must be set | Butler repository index to use for this environment | -| controlSystemAppNamespace | string | None, must be set | Application namespacce for the control system deployment | -| controlSystemImageTag | string | None, must be set | Image tag for the control system deployment | -| controlSystemKafkaBrokerAddress | string | `"sasquatch-kafka-brokers.sasquatch:9092"` | Kafka broker address for the control system deployment | -| controlSystemKafkaTopicReplicationFactor | int | `3` | Kafka topic replication factor for control system topics | -| controlSystemS3EndpointUrl | string | None, must be set: "" | S3 endpoint (LFA) for the control system deployment | -| controlSystemSchemaRegistryUrl | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL for the control system deployment | -| controlSystemSiteTag | string | None, must be set | Site tag for the control system deployment | -| controlSystemTopicName | string | `"sal"` | Topic name tag for the control system deployment | +| controlSystem | string | `nil` | Configuration particular to the control system If this section is used, it can have the following parameters: _appNamespace_ (Set the namespace for the control system components) _imageTag_ (The image tag to use for control system images) _siteTag_ (The tag that tells the control system component where it is running) _topicName_ (The Kafka identifier for control system topics) _kafkaBrokerAddress_ (The Kafka broker address) 
_kafkaTopicReplicationFactor_ (The Kafka topic replication factor) _schemaRegistryUrl_ (The Schema Registry URL) _s3EndpointUrl_ (The S3 URL for the environment specific LFA) | | fqdn | string | None, must be set | Fully-qualified domain name where the environment is running | | name | string | None, must be set | Name of the environment | | repoUrl | string | `"https://github.com/lsst-sqre/phalanx.git"` | URL of the repository for all applications | diff --git a/environments/templates/auxtel-application.yaml b/environments/templates/auxtel-application.yaml index c19ef441ed..ef77800313 100644 --- a/environments/templates/auxtel-application.yaml +++ b/environments/templates/auxtel-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "auxtel" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: "global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: "global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl | quote }} + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: 
"global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/templates/calsys-application.yaml b/environments/templates/calsys-application.yaml index 6867f95448..fb70b3256d 100644 --- a/environments/templates/calsys-application.yaml +++ b/environments/templates/calsys-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "calsys" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: "global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: "global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl | quote }} + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" 
+ value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/templates/control-system-test-application.yaml b/environments/templates/control-system-test-application.yaml index 385c6d6db1..62a7799c3f 100644 --- a/environments/templates/control-system-test-application.yaml +++ b/environments/templates/control-system-test-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "control-system-test" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: "global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: "global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl | quote }} + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - 
name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/templates/eas-application.yaml b/environments/templates/eas-application.yaml index 3e2cbe1732..15fe4ba6fd 100644 --- a/environments/templates/eas-application.yaml +++ b/environments/templates/eas-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "eas" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: "global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: "global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl | quote }} + - name: "global.controlSystem.imageTag" + value: 
{{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/templates/love-application.yaml b/environments/templates/love-application.yaml index e56ecf7fac..716c4dcfc6 100644 --- a/environments/templates/love-application.yaml +++ b/environments/templates/love-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "love" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: "global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: "global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl |
quote }} + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/templates/obssys-application.yaml b/environments/templates/obssys-application.yaml index ed0f2bdc33..68c0717261 100644 --- a/environments/templates/obssys-application.yaml +++ b/environments/templates/obssys-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "obssys" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: "global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: 
"global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl | quote }} + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/templates/simonyitel-application.yaml b/environments/templates/simonyitel-application.yaml index 1c186a39d6..0288bf9c86 100644 --- a/environments/templates/simonyitel-application.yaml +++ b/environments/templates/simonyitel-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "simonyitel" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ .Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: 
"global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: "global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl | quote }} + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/templates/uws-application.yaml b/environments/templates/uws-application.yaml index 3030dae0d3..8c12169502 100644 --- a/environments/templates/uws-application.yaml +++ b/environments/templates/uws-application.yaml @@ -31,22 +31,22 @@ spec: value: "https://{{ .Values.fqdn }}" - name: "global.vaultSecretsPath" value: {{ .Values.vaultPathPrefix | quote }} - - name: "global.controlSystemAppNamespace" + - name: "global.controlSystem.appNamespace" value: "uws" - - name: "global.controlSystemImageTag" - value: {{ .Values.controlSystemImageTag | quote }} - - name: "global.controlSystemSiteTag" - value: {{ .Values.controlSystemSiteTag | quote }} - - name: "global.controlSystemTopicName" - value: {{ .Values.controlSystemTopicName | quote }} - - name: "global.controlSystemKafkaBrokerAddress" - value: {{ .Values.controlSystemKafkaBrokerAddress | quote }} - - name: "global.controlSystemKafkaTopicReplicationFactor" - value: {{ 
.Values.controlSystemKafkaTopicReplicationFactor | quote }} - - name: "global.controlSystemSchemaRegistryUrl" - value: {{ .Values.controlSystemSchemaRegistryUrl | quote }} - - name: "global.controlSystemS3EndpointUrl" - value: {{ .Values.controlSystemS3EndpointUrl | quote }} + - name: "global.controlSystem.imageTag" + value: {{ .Values.controlSystem.imageTag | quote }} + - name: "global.controlSystem.siteTag" + value: {{ .Values.controlSystem.siteTag | quote }} + - name: "global.controlSystem.topicName" + value: {{ .Values.controlSystem.topicName | quote }} + - name: "global.controlSystem.kafkaBrokerAddress" + value: {{ .Values.controlSystem.kafkaBrokerAddress | quote }} + - name: "global.controlSystem.kafkaTopicReplicationFactor" + value: {{ .Values.controlSystem.kafkaTopicReplicationFactor | quote }} + - name: "global.controlSystem.schemaRegistryUrl" + value: {{ .Values.controlSystem.schemaRegistryUrl | quote }} + - name: "global.controlSystem.s3EndpointUrl" + value: {{ .Values.controlSystem.s3EndpointUrl | quote }} valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index e2485b6f22..71461f8eb3 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -22,7 +22,11 @@ applications: telegraf: true telegraf-ds: true -controlSystemImageTag: k0001 -controlSystemKafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 -controlSystemSiteTag: tucson -controlSystemS3EndpointUrl: https://s3.tu.lsst.org +controlSystem: + imageTag: k0001 + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + kafkaTopicReplicationFactor: 3 + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + siteTag: tucson + s3EndpointUrl: https://s3.tu.lsst.org + topicName: sal diff --git a/environments/values.yaml b/environments/values.yaml index 022929161e..4b8aa52267 100644 --- a/environments/values.yaml +++ 
b/environments/values.yaml @@ -213,32 +213,14 @@ applications: # -- Enable the vo-cutouts application vo-cutouts: false -# The following settings are used for the control system - -# -- Application namespacce for the control system deployment -# @default -- None, must be set -controlSystemAppNamespace: "" - -# -- Image tag for the control system deployment -# @default -- None, must be set -controlSystemImageTag: "" - -# -- Site tag for the control system deployment -# @default -- None, must be set -controlSystemSiteTag: "" - -# -- Topic name tag for the control system deployment -controlSystemTopicName: sal - -# -- Kafka broker address for the control system deployment -controlSystemKafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - -# -- Kafka topic replication factor for control system topics -controlSystemKafkaTopicReplicationFactor: 3 - -# -- Schema registry URL for the control system deployment -controlSystemSchemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 - -# -- S3 endpoint (LFA) for the control system deployment -# @default -- None, must be set: "" -controlSystemS3EndpointUrl: "" +# -- Configuration particular to the control system +# If this section is used, it can have the following parameters: +# _appNamespace_ (Set the namespace for the control system components) +# _imageTag_ (The image tag to use for control system images) +# _siteTag_ (The tag that tells the control system component where it is running) +# _topicName_ (The Kafka identifier for control system topics) +# _kafkaBrokerAddress_ (The Kafka broker address) +# _kafkaTopicReplicationFactor_ (The Kafka topic replication factor) +# _schemaRegistryUrl_ (The Schema Registry URL) +# _s3EndpointUrl_ (The S3 URL for the environment specific LFA) +controlSystem: null diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index ea4320d422..1c38460829 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ 
-192,6 +192,71 @@ def vault_write_policy(self) -> str: return f"{self.vault_path}/write" +class ControlSystemConfig(CamelCaseModel): + """Configuration for the Control System.""" + + app_namespace: str | None = Field( + None, + title="Application Namespace", + description=( + "Set the namespace for the control system components. Each control" + " system application consists of many components that need to know" + " what namespace to which they belong." + ), + ) + + image_tag: str | None = Field( + None, + title="Image Tag", + description=("The image tag to use for control system images."), + ) + + site_tag: str | None = Field( + None, + title="Site Tag", + description=( + "The tag that tells the control system component where it is" + " running." + ), + ) + + topic_name: str | None = Field( + None, + title="Topic Identifier", + description="The Kafka identifier for control system topics.", + ) + + kafka_broker_address: str | None = Field( + None, + title="Kafka Broker Address", + description=( + "The Kafka broker address for the control system components." + ), + ) + + kafka_topic_replication_factor: int | None = Field( + None, + title="Kafka Topic Replication Factor", + description=( + "The Kafka topic replication factor for control system components." + ), + ) + + schema_registry_url: str | None = Field( + None, + title="Schema Registry URL", + description=( + "The Schema Registry URL for the control system components." + ), + ) + + s3_endpoint_url: str | None = Field( + None, + title="S3 Endpoint URL", + description="The S3 URL for the environment specific LFA.", + ) + + class EnvironmentConfig(EnvironmentBaseConfig): """Configuration for a Phalanx environment. @@ -235,33 +300,7 @@ class EnvironmentConfig(EnvironmentBaseConfig): ), ) - control_system_app_namespace: str | None = None - """Set the namespace for the control system components. - - Each control system application consists of many components that need to - know what namespace to which they belong. 
- """ - - control_system_image_tag: str | None = None - """The image tag to use for control system containers.""" - - control_system_site_tag: str | None = None - """The tag that tells the control system component where it is running.""" - - control_system_topic_name: str | None = None - """The Kafka identifier for control system topics.""" - - control_system_kafka_broker_address: str | None = None - """The Kafka broker address for the control system components.""" - - control_system_kafka_topic_replication_factor: int | None = None - """The Kafka topic replication factor for control system components.""" - - control_system_schema_registry_url: str | None = None - """The Schema Registry URL for the control system components.""" - - control_system_s3_endpoint_url: str | None = None - """The S3 URL for the environment specific LFA.""" + control_system: ControlSystemConfig | None = None model_config = ConfigDict(extra="forbid") From 4ffb1f3cedb8f6cdebc352acce7d295977a04d46 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 22 Jan 2024 16:45:46 -0700 Subject: [PATCH 566/588] Trap nublado T&S SAL Kafka VaultSecret generation. --- applications/nublado/README.md | 1 + applications/nublado/templates/vault-secrets.yaml | 2 ++ applications/nublado/values-tucson-teststand.yaml | 1 + applications/nublado/values.yaml | 3 +++ 4 files changed, 7 insertions(+) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index fb4bd65db2..90a08c8779 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -54,6 +54,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. 
| | controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumeMounts` (similar to the main `volumeMountss` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | +| controller.config.lab.installTsSalKafkaSecret | bool | `false` | Flag to put T&S SAL Kafka secrets into pod. | | controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. | | controller.config.lab.nodeSelector | object | `{}` | Node selector rules for user lab pods | | controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | diff --git a/applications/nublado/templates/vault-secrets.yaml b/applications/nublado/templates/vault-secrets.yaml index 134653ec4f..abd9b87344 100644 --- a/applications/nublado/templates/vault-secrets.yaml +++ b/applications/nublado/templates/vault-secrets.yaml @@ -59,6 +59,7 @@ spec: path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson {{- end }} +{{- if .Values.controller.config.lab.installTsSalKafkaSecret }} --- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret @@ -69,3 +70,4 @@ metadata: spec: path: "{{- .Values.global.vaultSecretsPath }}/ts/software/ts-salkafka" type: Opaque +{{- end }} diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index 6ecf4aabd7..aaf9afe3e9 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -24,6 +24,7 @@ controller: LSST_SCHEMA_REGISTRY_URL: http://sasquatch-schema-registry.sasquatch:8081 PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: 
"oods" + installTsSalKafkaSecret: true pullSecret: "pull-secret" secrets: - secretName: "kafka-secret" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 3ca8799eed..7d99f87501 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -216,6 +216,9 @@ controller: # user. initContainers: [] + # -- Flag to put T&S SAL Kafka secrets into pod. + installTsSalKafkaSecret: false + # -- Prefix for namespaces for user labs. To this will be added a dash # (`-`) and the user's username. namespacePrefix: "nublado" From ae69d400ce56d7cc0a6b9319e31e02c002811b7a Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Mon, 22 Jan 2024 17:07:17 -0700 Subject: [PATCH 567/588] Redo control system config parameter docs. --- environments/README.md | 9 ++++- environments/values-tucson-teststand.yaml | 4 --- environments/values.yaml | 40 ++++++++++++++++------- 3 files changed, 37 insertions(+), 16 deletions(-) diff --git a/environments/README.md b/environments/README.md index 9a98e25ca4..f9cbb3f36e 100644 --- a/environments/README.md +++ b/environments/README.md @@ -64,7 +64,14 @@ | applications.vault-secrets-operator | bool | `true` | Enable the vault-secrets-operator application. This is required for all environments. 
| | applications.vo-cutouts | bool | `false` | Enable the vo-cutouts application | | butlerRepositoryIndex | string | None, must be set | Butler repository index to use for this environment | -| controlSystem | string | `nil` | Configuration particular to the control system If this section is used, it can have the following parameters: _appNamespace_ (Set the namespace for the control system components) _imageTag_ (The image tag to use for control system images) _siteTag_ (The tag that tells the control system component where it is running) _topicName_ (The Kafka identifier for control system topics) _kafkaBrokerAddress_ (The Kafka broker address) _kafkaTopicReplicationFactor_ (The Kafka topic replication factor) _schemaRegistryUrl_ (The Schema Registry URL) _s3EndpointUrl_ (The S3 URL for the environment specific LFA) | +| controlSystem.appNamespace | string | None, must be set | Application namespace for the control system deployment | +| controlSystem.imageTag | string | None, must be set | Image tag for the control system deployment | +| controlSystem.kafkaBrokerAddress | string | `"sasquatch-kafka-brokers.sasquatch:9092"` | Kafka broker address for the control system deployment | +| controlSystem.kafkaTopicReplicationFactor | int | `3` | Kafka topic replication factor for control system topics | +| controlSystem.s3EndpointUrl | string | None, must be set: "" | S3 endpoint (LFA) for the control system deployment | +| controlSystem.schemaRegistryUrl | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL for the control system deployment | +| controlSystem.siteTag | string | None, must be set | Site tag for the control system deployment | +| controlSystem.topicName | string | `"sal"` | Topic name tag for the control system deployment | | fqdn | string | None, must be set | Fully-qualified domain name where the environment is running | | name | string | None, must be set | Name of the environment | | repoUrl | string |
`"https://github.com/lsst-sqre/phalanx.git"` | URL of the repository for all applications | diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 71461f8eb3..ff7484903b 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -24,9 +24,5 @@ applications: controlSystem: imageTag: k0001 - kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 - kafkaTopicReplicationFactor: 3 - schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 siteTag: tucson s3EndpointUrl: https://s3.tu.lsst.org - topicName: sal diff --git a/environments/values.yaml b/environments/values.yaml index 4b8aa52267..c61c03778f 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -213,14 +213,32 @@ applications: # -- Enable the vo-cutouts application vo-cutouts: false -# -- Configuration particular to the control system -# If this section is used, it can have the following parameters: -# _appNamespace_ (Set the namespace for the control system components) -# _imageTag_ (The image tag to use for control system images) -# _siteTag_ (The tag that tells the control system component where it is running) -# _topicName_ (The Kafka identifier for control system topics) -# _kafkaBrokerAddress_ (The Kafka broker address) -# _kafkaTopicReplicationFactor_ (The Kafka topic replication factor) -# _schemaRegistryUrl_ (The Schema Registry URL) -# _s3EndpointUrl_ (The S3 URL for the environment specific LFA) -controlSystem: null +# The following settings are used for the control system +controlSystem: + # -- Application namespacce for the control system deployment + # @default -- None, must be set + appNamespace: "" + + # -- Image tag for the control system deployment + # @default -- None, must be set + imageTag: "" + + # -- Site tag for the control system deployment + # @default -- None, must be set + siteTag: "" + + # -- Topic name tag for the control system deployment + topicName: 
sal + + # -- Kafka broker address for the control system deployment + kafkaBrokerAddress: sasquatch-kafka-brokers.sasquatch:9092 + + # -- Kafka topic replication factor for control system topics + kafkaTopicReplicationFactor: 3 + + # -- Schema registry URL for the control system deployment + schemaRegistryUrl: http://sasquatch-schema-registry.sasquatch:8081 + + # -- S3 endpoint (LFA) for the control system deployment + # @default -- None, must be set: "" + s3EndpointUrl: "" From c9fed7e1acbd58280bc7a2c8435bd4847b68cefc Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 23 Jan 2024 11:18:15 -0500 Subject: [PATCH 568/588] Deploy version 1.0.0 --- applications/jira-data-proxy/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/jira-data-proxy/Chart.yaml b/applications/jira-data-proxy/Chart.yaml index 90059d55fc..bf316d6622 100644 --- a/applications/jira-data-proxy/Chart.yaml +++ b/applications/jira-data-proxy/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: "tickets-DM-42460" +appVersion: "1.0.0" description: Jira API read-only proxy for Times Square users. 
name: jira-data-proxy sources: From cd0cb2cff9269154ced953c23f9e15fb3c095dc5 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 23 Jan 2024 11:19:04 -0500 Subject: [PATCH 569/588] Use prod settings for usdfdev --- applications/jira-data-proxy/values-usdfdev.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/applications/jira-data-proxy/values-usdfdev.yaml b/applications/jira-data-proxy/values-usdfdev.yaml index d31626eed3..e69de29bb2 100644 --- a/applications/jira-data-proxy/values-usdfdev.yaml +++ b/applications/jira-data-proxy/values-usdfdev.yaml @@ -1,4 +0,0 @@ -image: - pullPolicy: Always -config: - logLevel: "DEBUG" From eb185fe0a4651eb450ee503b44f848f459b94695 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 23 Jan 2024 11:19:34 -0500 Subject: [PATCH 570/588] Default to 2 replicas for higher availability --- applications/jira-data-proxy/README.md | 2 +- applications/jira-data-proxy/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/jira-data-proxy/README.md b/applications/jira-data-proxy/README.md index 5c7967a639..925984cc76 100644 --- a/applications/jira-data-proxy/README.md +++ b/applications/jira-data-proxy/README.md @@ -27,6 +27,6 @@ Jira API read-only proxy for Times Square users. 
| ingress.path | string | `"/jira-data-proxy"` | Path prefix where jira-data-proxy is served | | nodeSelector | object | `{}` | Node selection rules for the jira-data-proxy deployment pod | | podAnnotations | object | `{}` | Annotations for the jira-data-proxy deployment pod | -| replicaCount | int | `1` | Number of web deployment pods to start | +| replicaCount | int | `2` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the jira-data-proxy deployment pod | | tolerations | list | `[]` | Tolerations for the jira-data-proxy deployment pod | diff --git a/applications/jira-data-proxy/values.yaml b/applications/jira-data-proxy/values.yaml index acee883907..4002a96f0e 100644 --- a/applications/jira-data-proxy/values.yaml +++ b/applications/jira-data-proxy/values.yaml @@ -10,7 +10,7 @@ config: jiraUrl: "https://jira.lsstcorp.org/" # -- Number of web deployment pods to start -replicaCount: 1 +replicaCount: 2 image: # -- Image to use in the jira-data-proxy deployment From 356153865dfe41b312577e0ebea1be357da996c4 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 22 Jan 2024 17:01:24 -0700 Subject: [PATCH 571/588] Upgrade Strimzi Kafka Operator to version 0.39.0 --- applications/strimzi/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index ef2f43b9bc..6acbfae9af 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -4,8 +4,8 @@ type: application version: 1.0.0 description: Strimzi Kafka Operator home: https://strimzi.io -appVersion: "0.26.0" +appVersion: "0.39.0" dependencies: - name: strimzi-kafka-operator - version: "0.38.0" + version: "0.39.0" repository: https://strimzi.io/charts/ From fcb76ee9616676cc4109ee29b07e645dcc1fdec8 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 23 Jan 2024 11:25:57 -0700 Subject: [PATCH 572/588] Remove quotes from chart paths. 
--- applications/auxtel/Chart.yaml | 32 +++++----- applications/calsys/Chart.yaml | 6 +- applications/control-system-test/Chart.yaml | 4 +- applications/eas/Chart.yaml | 68 ++++++++++----------- applications/love/Chart.yaml | 4 +- applications/obssys/Chart.yaml | 14 ++--- applications/simonyitel/Chart.yaml | 56 ++++++++--------- applications/uws/Chart.yaml | 8 +-- 8 files changed, 96 insertions(+), 96 deletions(-) diff --git a/applications/auxtel/Chart.yaml b/applications/auxtel/Chart.yaml index 87ef2753b8..00b6f524f8 100644 --- a/applications/auxtel/Chart.yaml +++ b/applications/auxtel/Chart.yaml @@ -5,77 +5,77 @@ description: Deployment for the Auxiliary Telescope CSCs dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: hexapod-sim version: 1.0.0 condition: hexapod-sim.enabled - name: csc alias: ataos version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atdome version: 1.0.0 condition: atdome.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atdome-sim version: 1.0.0 condition: atdome-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atdometrajectory version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atheaderservice version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: athexapod version: 1.0.0 condition: athexapod.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: athexapod-sim version: 1.0.0 condition: athexapod-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atmcs version: 1.0.0 condition: atmcs.enabled - repository: "file://../../charts/csc" + repository: 
file://../../charts/csc - name: csc alias: atmcs-sim version: 1.0.0 condition: atmcs-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atoods version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atpneumatics version: 1.0.0 condition: atpneumatics.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atpneumatics-sim version: 1.0.0 condition: atpneumatics-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atptg version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atspectrograph version: 1.0.0 condition: atspectrograph.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atspectrograph-sim version: 1.0.0 condition: atspectrograph-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc diff --git a/applications/calsys/Chart.yaml b/applications/calsys/Chart.yaml index cc151abdad..74d89a62b7 100644 --- a/applications/calsys/Chart.yaml +++ b/applications/calsys/Chart.yaml @@ -5,14 +5,14 @@ description: Deployment for the Calibration System CSCs dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: csc alias: gcheaderservice1 version: 1.0.0 condition: gcheaderservice1.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: simulation-gencam version: 1.0.0 condition: simulation-gencam.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc diff --git a/applications/control-system-test/Chart.yaml b/applications/control-system-test/Chart.yaml index e7b199e393..5d4cbb6cf2 100644 --- a/applications/control-system-test/Chart.yaml +++ 
b/applications/control-system-test/Chart.yaml @@ -5,11 +5,11 @@ description: Deployment for the Test CSCs and Integration Testing Workflows dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: csc alias: test42 version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: integration-testing version: 1.0.0 condition: integration-testing.enabled diff --git a/applications/eas/Chart.yaml b/applications/eas/Chart.yaml index 0fcd82fdea..73a46f3723 100644 --- a/applications/eas/Chart.yaml +++ b/applications/eas/Chart.yaml @@ -5,168 +5,168 @@ description: Deployment for the Environmental Awareness Systems CSCs dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: csc alias: auxtel-ess01 version: 1.0.0 condition: auxtel-ess01.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: auxtel-ess01-sim version: 1.0.0 condition: auxtel-ess01-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: auxtel-ess02 version: 1.0.0 condition: auxtel-ess02.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: auxtel-ess02-sim version: 1.0.0 condition: auxtel-ess02-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: auxtel-ess03 version: 1.0.0 condition: auxtel-ess03.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: auxtel-ess03-sim version: 1.0.0 condition: auxtel-ess03-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: auxtel-ess04 version: 1.0.0 condition: auxtel-ess04.enabled - repository: "file://../../charts/csc" + 
repository: file://../../charts/csc - name: csc alias: auxtel-ess04-sim version: 1.0.0 condition: auxtel-ess04-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: calibhill-ess01 version: 1.0.0 condition: calibhill-ess01.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: calibhill-ess01-sim version: 1.0.0 condition: calibhill-ess01-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dimm1 version: 1.0.0 condition: dimm1.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dimm1-sim version: 1.0.0 condition: dimm1-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dimm2 version: 1.0.0 condition: dimm2.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dimm2-sim version: 1.0.0 condition: dimm2-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dsm1 version: 1.0.0 condition: dsm1.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dsm1-sim version: 1.0.0 condition: dsm1-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dsm2 version: 1.0.0 condition: dsm2.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: dsm2-sim version: 1.0.0 condition: dsm2-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: m2-ess106 version: 1.0.0 condition: m2-ess106.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: m2-ess106-sim version: 1.0.0 condition: m2-ess106-sim.enabled - repository: "file://../../charts/csc" + repository: 
file://../../charts/csc - name: csc alias: mtdome-ess01 version: 1.0.0 condition: mtdome-ess01.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtdome-ess01-sim version: 1.0.0 condition: mtdome-ess01-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtdome-ess02 version: 1.0.0 condition: mtdome-ess02.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtdome-ess02-sim version: 1.0.0 condition: mtdome-ess02-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtdome-ess03 version: 1.0.0 condition: mtdome-ess03.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtdome-ess03-sim version: 1.0.0 condition: mtdome-ess03-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: tma-ess01 version: 1.0.0 condition: tma-ess01.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: tma-ess01-sim version: 1.0.0 condition: tma-ess01-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: tma-ess104 version: 1.0.0 condition: tma-ess104.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: tma-ess104-sim version: 1.0.0 condition: tma-ess104-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: tma-ess105 version: 1.0.0 condition: tma-ess105.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: tma-ess105-sim version: 1.0.0 condition: tma-ess105-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: weatherforecast version: 1.0.0 - repository: 
"file://../../charts/csc" + repository: file://../../charts/csc diff --git a/applications/love/Chart.yaml b/applications/love/Chart.yaml index 57212fe5f3..246cedd0e5 100644 --- a/applications/love/Chart.yaml +++ b/applications/love/Chart.yaml @@ -5,11 +5,11 @@ description: Deployment for the LSST Operators Visualization Environment dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: csc alias: love-commander version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: love-manager version: 1.0.0 - name: love-nginx diff --git a/applications/obssys/Chart.yaml b/applications/obssys/Chart.yaml index 644cd7bb9a..5ee1f1eafa 100644 --- a/applications/obssys/Chart.yaml +++ b/applications/obssys/Chart.yaml @@ -5,29 +5,29 @@ description: Deployment for the Observatory System CSCs dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: csc alias: atqueue version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: atscheduler version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: authorize version: 1.0.0 condition: authorize.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtqueue version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtscheduler version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: watcher version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc diff --git a/applications/simonyitel/Chart.yaml b/applications/simonyitel/Chart.yaml index b944e4dfda..7bb99438ea 100644 --- a/applications/simonyitel/Chart.yaml +++ 
b/applications/simonyitel/Chart.yaml @@ -5,136 +5,136 @@ description: Deployment for the Simonyi Survey Telescope CSCs dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: csc alias: ccheaderservice version: 1.0.0 condition: ccheaderservice.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: ccoods version: 1.0.0 condition: ccoods.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: lasertracker1 version: 1.0.0 condition: lasertracker1.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: lasertracker1-sim version: 1.0.0 condition: lasertracker1-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtaircompressor1 version: 1.0.0 condition: mtaircompressor1.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtaircompressor1-sim version: 1.0.0 condition: mtaircompressor1-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtaircompressor2 version: 1.0.0 condition: mtaircompressor2.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtaircompressor2-sim version: 1.0.0 condition: mtaircompressor2-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtaos version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtcamhexapod version: 1.0.0 condition: mtcamhexapod.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtcamhexapod-sim version: 1.0.0 condition: mtcamhexapod-sim.enabled - repository: "file://../../charts/csc" + repository: 
file://../../charts/csc - name: csc alias: mtdome version: 1.0.0 condition: mtdome.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtdome-sim version: 1.0.0 condition: mtdome-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtdometrajectory version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtheaderservice version: 1.0.0 condition: mtheaderservice.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtm1m3 version: 1.0.0 condition: mtm1m3.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtm1m3-sim version: 1.0.0 condition: mtm1m3-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtm2 version: 1.0.0 condition: mtm2.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtm2-sim version: 1.0.0 condition: mtm2-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtm2hexapod version: 1.0.0 condition: mtm2hexapod.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtm2hexapod-sim version: 1.0.0 condition: mtm2hexapod-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtmount version: 1.0.0 condition: mtmount.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtmount-sim version: 1.0.0 condition: mtmount-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtoods version: 1.0.0 condition: mtoods.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtptg 
version: 1.0.0 - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtrotator version: 1.0.0 condition: mtrotator.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtrotator-sim version: 1.0.0 condition: mtrotator-sim.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc diff --git a/applications/uws/Chart.yaml b/applications/uws/Chart.yaml index 8124b868bc..67076c7911 100644 --- a/applications/uws/Chart.yaml +++ b/applications/uws/Chart.yaml @@ -5,21 +5,21 @@ description: Deployment for the UWS and DM OCPS CSCs dependencies: - name: csc_collector version: 1.0.0 - repository: "file://../../charts/csc_collector" + repository: file://../../charts/csc_collector - name: csc alias: atocps version: 1.0.0 condition: atocps.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: ccocps version: 1.0.0 condition: ccocps.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: csc alias: mtocps version: 1.0.0 condition: mtocps.enabled - repository: "file://../../charts/csc" + repository: file://../../charts/csc - name: uws-api-server version: 1.5.0 From 0c5b79228ec6579a37e8d11bbf6af4e25f0221e2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 22 Jan 2024 14:04:26 -0700 Subject: [PATCH 573/588] Add secret to push Strimzi Kafka Connect images to ghcr.io - Strimzi requires a secret of type kubernetes.io/dockerconfigjson to push images to ghcr.io. Add a secret to sasquatch with the .dockerconfigjson key and a VaultSecret resource to create the sasquatch-connect-push-secret secret. - Use templated secrets. This way we can have a more human readable name for the secret in Sasquatch while still creating the secret with the required key. 
--- applications/sasquatch/secrets.yaml | 2 +- .../sasquatch/templates/vault-secrets.yaml | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/secrets.yaml b/applications/sasquatch/secrets.yaml index d60cda7df3..cb879f3170 100644 --- a/applications/sasquatch/secrets.yaml +++ b/applications/sasquatch/secrets.yaml @@ -67,5 +67,5 @@ ts-salkafka-password: if: strimzi-kafka.users.ts-salkafka.enabled connect-push-secret: description: >- - Write token for pushing generated kafka-connect image to GitHub container registry. + Write token for pushing generated Strimzi Kafka Connect image to GitHub Container Registry. if: strimzi-kafka.connect.enabled diff --git a/applications/sasquatch/templates/vault-secrets.yaml b/applications/sasquatch/templates/vault-secrets.yaml index d44b29b2dc..aeb2dabbe9 100644 --- a/applications/sasquatch/templates/vault-secrets.yaml +++ b/applications/sasquatch/templates/vault-secrets.yaml @@ -14,3 +14,17 @@ metadata: spec: path: "{{ .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: sasquatch-connect-push-secret + namespace: sasquatch +spec: + path: "{{ .Values.global.vaultSecretsPath }}/sasquatch" + type: kubernetes.io/dockerconfigjson + keys: + - connect-push-secret + templates: + .dockerconfigjson: >- + {% index .Secrets "connect-push-secret" %} From bc58eb88baab94d2eca5e926169d0ea64ec95095 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 23 Jan 2024 13:56:04 -0700 Subject: [PATCH 574/588] Move shared chart updater to phalanx cli. 
--- pyproject.toml | 1 - src/phalanx/cli.py | 25 ++++++ src/phalanx/control_system/__init__.py | 0 .../update_shared_chart_version.py | 87 ------------------- src/phalanx/storage/config.py | 25 ++++++ 5 files changed, 50 insertions(+), 88 deletions(-) delete mode 100644 src/phalanx/control_system/__init__.py delete mode 100644 src/phalanx/control_system/update_shared_chart_version.py diff --git a/pyproject.toml b/pyproject.toml index a74db26743..9b7834e8eb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,6 @@ requires-python = ">=3.11" [project.scripts] phalanx = "phalanx.cli:main" -update-shared-chart-version = "phalanx.control_system.update_shared_chart_version:run" [project.urls] Homepage = "https://phalanx.lsst.io" diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 220a9bf50c..745b591aa7 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -276,6 +276,31 @@ def application_template( sys.stdout.write(application_service.template(name, environment)) +@application.command("update-shared-chart-version") +@click.argument("chart") +@click.argument("version") +@click.option( + "-c", + "--config", + type=click.Path(path_type=Path), + default=None, + help="Path to root of Phalanx configuration.", +) +def application_update_shared_chart_version( + chart: str, version: str, *, config: Path | None +) -> None: + """Update the version for a shared chart. + + This function updates the version of a shared chart in the Chart.yaml + file of all applications that use that shared chart. 
+ """ + if not config: + config = _find_config() + factory = Factory(config) + storage = factory.create_config_storage() + storage.update_shared_chart_version(chart, version) + + @main.group() def environment() -> None: """Commands for Phalanx environment configuration.""" diff --git a/src/phalanx/control_system/__init__.py b/src/phalanx/control_system/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/phalanx/control_system/update_shared_chart_version.py b/src/phalanx/control_system/update_shared_chart_version.py deleted file mode 100644 index 874e4f06e2..0000000000 --- a/src/phalanx/control_system/update_shared_chart_version.py +++ /dev/null @@ -1,87 +0,0 @@ -"""Script for updating shared chart versions.""" - -import argparse -import pathlib - -import yaml - -APPS_DIR = "applications" - -DIR_MAP = {"csc": "csc", "collector": "csc_collector"} - - -def shared_chart(appdir: pathlib.Path, shared_dir: str) -> bool: - """Determine if app directory has templates dir as link. - - Parameters - ---------- - appdir: `pathlib.Path` - The application directory to check. - shared_dir: `str` - The shared directory to make sure the link resolves to. - - Returns - ------- - `bool`: True if the link resolves to the requested shared dir. - """ - try: - chart_dir = appdir / "charts" / shared_dir - return ( - chart_dir.is_symlink() and chart_dir.resolve().name == shared_dir - ) - except OSError: - return False - - -def main(opts: argparse.Namespace) -> None: - """Execute shared chart version update. - - Parameters - ---------- - opts: `argparse.Namespace` - The command-line options. 
- """ - print( - f"Updating {opts.app_type} apps Helm chart " - f"to version {opts.chart_version}" - ) - - apps = pathlib.PosixPath(APPS_DIR) - dirlist = list(apps.iterdir()) - for appdir in dirlist: - if not shared_chart(appdir, DIR_MAP[opts.app_type]): - continue - - chart = appdir / "Chart.yaml" - - with chart.open() as ifile: - values = yaml.safe_load(ifile) - - dependencies = values["dependencies"] - for dependency in dependencies: - if dependency["name"] == DIR_MAP[opts.app_type]: - dependency["version"] = opts.chart_version - - with chart.open("w") as ofile: - yaml.dump(values, ofile, sort_keys=False) - - -def run() -> None: - """Script run function.""" - description = [ - "Update version for apps using the csc or shared Helm chart" - ] - parser = argparse.ArgumentParser( - description=" ".join(description), - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "app_type", - choices=list(DIR_MAP.keys()), - help="Specify the application type to set the chart version for.", - ) - parser.add_argument( - "chart_version", help="The version of the Helm chart to set." - ) - args = parser.parse_args() - main(args) diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index d09c8875eb..685802cf27 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -533,6 +533,31 @@ def load_phalanx_config(self) -> PhalanxConfig: applications=sorted(applications.values(), key=lambda a: a.name), ) + def update_shared_chart_version(self, chart: str, version: str) -> None: + """Update the version of a shared chart across all applications. + + Parameters + ---------- + chart + The name of the chart for the version change. + version + The chart version to update. 
+ """ + for app in self.list_applications(): + app_config = self._load_application_config(app) + is_modified = False + try: + for item in app_config.chart["dependencies"]: + if item["name"] == chart: + item["version"] = version + is_modified = True + except KeyError: + pass + if is_modified: + chart_path = self._path / "applications" / app / "Chart.yaml" + with chart_path.open("w") as fh: + yaml.safe_dump(app_config.chart, fh, sort_keys=False) + def write_application_template(self, name: str, template: str) -> None: """Write the Argo CD application template for a new application. From e85a1ac6a576f21aefc973f81dd37f50119d9992 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 22 Jan 2024 13:49:57 -0700 Subject: [PATCH 575/588] Add filestore-backup --- applications/filestore-backup/Chart.yaml | 7 ++ .../filestore-backup/templates/_helpers.tpl | 53 ++++++++++ .../filestore-backup/templates/cronjobs.yaml | 97 +++++++++++++++++++ .../templates/serviceaccount.yaml | 8 ++ .../filestore-backup/values-idfdev.yaml | 8 ++ applications/filestore-backup/values.yaml | 67 +++++++++++++ environments/README.md | 1 + .../filestore-backup-application.yaml | 38 ++++++++ environments/values-idfdev.yaml | 1 + environments/values.yaml | 3 + requirements/dev.txt | 2 +- requirements/main.txt | 2 +- 12 files changed, 285 insertions(+), 2 deletions(-) create mode 100644 applications/filestore-backup/Chart.yaml create mode 100644 applications/filestore-backup/templates/_helpers.tpl create mode 100644 applications/filestore-backup/templates/cronjobs.yaml create mode 100644 applications/filestore-backup/templates/serviceaccount.yaml create mode 100644 applications/filestore-backup/values-idfdev.yaml create mode 100644 applications/filestore-backup/values.yaml create mode 100644 environments/templates/filestore-backup-application.yaml diff --git a/applications/filestore-backup/Chart.yaml b/applications/filestore-backup/Chart.yaml new file mode 100644 index 0000000000..35d72b94ed --- /dev/null +++ 
b/applications/filestore-backup/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: filestore-backup +version: 1.0.0 +description: Tool to manage Google Filestore backups +sources: + - https://github.com/lsst-sqre/rubin-google-filestore-tool +appVersion: 0.1.2 diff --git a/applications/filestore-backup/templates/_helpers.tpl b/applications/filestore-backup/templates/_helpers.tpl new file mode 100644 index 0000000000..4a25052fc3 --- /dev/null +++ b/applications/filestore-backup/templates/_helpers.tpl @@ -0,0 +1,53 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "filestore-backup.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "filestore-backup.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "filestore-backup.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "filestore-backup.labels" -}} +app.kubernetes.io/name: {{ include "filestore-backup.name" . }} +helm.sh/chart: {{ include "filestore-backup.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "filestore-backup.selectorLabels" -}} +app.kubernetes.io/name: {{ include "filestore-backup.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/filestore-backup/templates/cronjobs.yaml b/applications/filestore-backup/templates/cronjobs.yaml new file mode 100644 index 0000000000..b1da7fec3a --- /dev/null +++ b/applications/filestore-backup/templates/cronjobs.yaml @@ -0,0 +1,97 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: create_backup +spec: + schedule: {{ .Values.tool.backup.schedule | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + automountServiceAccountToken: true + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.tolerations }} + affinity: +{{ toYaml . 
| indent 12 }} + {{- end }} + containers: + - name: create_backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" + value: {{ .Values.global.gcpProjectId | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" + value: {{ .Values.tool.instance | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" + value: {{ .Values.tool.fileShare | quote }} + {{- with .Values.tool.backup.debug }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" + value: "true" + {{- end }} + command: [ "create_backup" ] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: purge_backup +spec: + schedule: {{ .Values.tool.purge.schedule | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + automountServiceAccountToken: true + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.tolerations }} + affinity: +{{ toYaml . 
| indent 12 }} + {{- end }} + containers: + - name: purge_backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" + value: {{ .Values.global.gcpProjectId | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" + value: {{ .Values.tool.instance | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" + value: {{ .Values.tool.fileShare | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_KEEP_BACKUPS" + value: {{ .Values.tool.purge.keep | quote }} + {{- with .Values.tool.purge.debug }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" + value: "true" + {{- end }} + command: [ "purge_backup" ] diff --git a/applications/filestore-backup/templates/serviceaccount.yaml b/applications/filestore-backup/templates/serviceaccount.yaml new file mode 100644 index 0000000000..65f0f1e837 --- /dev/null +++ b/applications/filestore-backup/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "filestore-backup" + labels: + {{- include "filestore-backup.labels" . 
| nindent 4 }} + annotations: + iam.gke.io/gcp-service-account: "filestore-tool@{{ .Values.global.gcpProjectId }}.iam.gserviceaccount.com" diff --git a/applications/filestore-backup/values-idfdev.yaml b/applications/filestore-backup/values-idfdev.yaml new file mode 100644 index 0000000000..860137dccc --- /dev/null +++ b/applications/filestore-backup/values-idfdev.yaml @@ -0,0 +1,8 @@ +tool: + instance: "fshare-instance-dev" + zone: "b" + backup: + debug: true + purge: + debug: true + keep: 3 diff --git a/applications/filestore-backup/values.yaml b/applications/filestore-backup/values.yaml new file mode 100644 index 0000000000..61a45de949 --- /dev/null +++ b/applications/filestore-backup/values.yaml @@ -0,0 +1,67 @@ +# Default values for filestore-backup. + +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "" + +# -- Resource limits and requests for the filestore-backup pods +resources: {} + +# -- Annotations for the filestore-backup pods +podAnnotations: {} + +# -- Node selector rules for the filestore-backup pods +nodeSelector: {} + +# -- Tolerations for the filestore-backup pods +tolerations: [] + +# -- Affinity rules for the filestore-backup pods +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +image: + # -- Filestore-Backup image to use + repository: ghcr.io/lsst-sqre/rubin-google-filestore-tool + # -- Pull policy for the filestore-backup image + pullPolicy: "IfNotPresent" + # -- Tag of filestore-backup image to use + # @default -- The appVersion of the chart + tag: "" + +tool: + # -- Filestore instance (e.g. "fshare-instance-dev") + # @default -- Must be overridden in environment-specific values file + instance: "" + # -- Zone for Filestore instance (e.g. 
"b" from "us-central1-b") + # @default -- Must be overridden in environment-specific values file + zone: "" + # -- File Share name for filestore instance. Always "share1" unless + # storage is on an Enterprise tier + fileShare: "share1" + backup: + # -- Turn on debugging mode + debug: false + # -- Backup schedule + schedule: "0 10 * * *" + purge: + # -- Turn on debugging mode + debug: false + # -- purge schedule + schedule: "45 10 * * *" + # -- Number of backups to keep when purging + keep: 6 + +global: + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + # -- GCP Project ID + # @default -- Set by Argo CD + gcpProjectId: "" + # -- GCP Region + # @default -- Set by Argo CD + gcpRegion: "" diff --git a/environments/README.md b/environments/README.md index 71f5b10c8d..75f99e242c 100644 --- a/environments/README.md +++ b/environments/README.md @@ -11,6 +11,7 @@ | applications.cert-manager | bool | `true` | Enable the cert-manager application, required unless the environment makes separate arrangements to inject a current TLS certificate | | applications.datalinker | bool | `false` | Eanble the datalinker application | | applications.exposurelog | bool | `false` | Enable the exposurelog application | +| applications.filestore-backup | bool | `false` | Enable the filestore-backup application | | applications.gafaelfawr | bool | `true` | Enable the Gafaelfawr application. 
This is required by Phalanx since most other applications use `GafaelfawrIngress` | | applications.giftless | bool | `false` | Enable the giftless application | | applications.hips | bool | `false` | Enable the HiPS application | diff --git a/environments/templates/filestore-backup-application.yaml b/environments/templates/filestore-backup-application.yaml new file mode 100644 index 0000000000..9c0bb8d0e3 --- /dev/null +++ b/environments/templates/filestore-backup-application.yaml @@ -0,0 +1,38 @@ +{{- if .Values.applications.filestore-backup -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "filestore-backup" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "filestore-backup" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "filestore-backup" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/filestore-backup" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + - name: "global.gcpProjectId" + value: {{ .Values.gcp.projectId }} + - name: "global.gcpRegion" + value: {{ .Values.gcp.region }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index d8ce3b6431..e736b9ea04 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -14,6 +14,7 @@ applications: argo-workflows: true butler: true datalinker: true + filestore-backup: true hips: true jira-data-proxy: true mobu: true diff --git a/environments/values.yaml b/environments/values.yaml index 11295fd367..381fff2c8d 100644 --- a/environments/values.yaml +++ 
b/environments/values.yaml @@ -49,6 +49,9 @@ applications: # -- Enable the exposurelog application exposurelog: false + # -- Enable the filestore-backup application + filestore-backup: false + # -- Enable the Gafaelfawr application. This is required by Phalanx since # most other applications use `GafaelfawrIngress` gafaelfawr: true diff --git a/requirements/dev.txt b/requirements/dev.txt index a64a554260..089700b15f 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/dev.txt requirements/dev.in diff --git a/requirements/main.txt b/requirements/main.txt index a9bbe97299..978d4aa3d4 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.12 +# This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --generate-hashes --output-file=requirements/main.txt requirements/main.in From 611d8091f82e0d7f6a2253d377e370f50d8a3cdb Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 22 Jan 2024 15:48:39 -0700 Subject: [PATCH 576/588] Add docs for filestore-backup --- applications/filestore-backup/README.md | 33 +++++++++++++++++++ .../filestore-backup/templates/cronjobs.yaml | 8 ++--- applications/filestore-backup/values.yaml | 2 +- docs/applications/filestore-backup/index.rst | 18 ++++++++++ docs/applications/filestore-backup/values.md | 12 +++++++ docs/applications/index.rst | 1 + .../filestore-backup-application.yaml | 2 +- 7 files changed, 70 insertions(+), 6 deletions(-) create mode 100644 applications/filestore-backup/README.md create mode 100644 docs/applications/filestore-backup/index.rst create mode 100644 docs/applications/filestore-backup/values.md diff --git 
a/applications/filestore-backup/README.md b/applications/filestore-backup/README.md new file mode 100644 index 0000000000..84f3cc6b5f --- /dev/null +++ b/applications/filestore-backup/README.md @@ -0,0 +1,33 @@ +# filestore-backup + +Tool to manage Google Filestore backups + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the filestore-backup pods | +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.gcpProjectId | string | Set by Argo CD | GCP Project ID | +| global.gcpRegion | string | Set by Argo CD | GCP Region | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the filestore-backup image | +| image.repository | string | `"ghcr.io/lsst-sqre/rubin-google-filestore-tools"` | Filestore-Backup image to use | +| image.tag | string | The appVersion of the chart | Tag of filestore-backup image to use | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selector rules for the filestore-backup pods | +| podAnnotations | object | `{}` | Annotations for the filestore-backup pods | +| resources | object | `{}` | Resource limits and requests for the filestore-backup pods | +| tolerations | list | `[]` | Tolerations for the filestore-backup pods | +| tool.backup.debug | bool | `false` | Turn on debugging mode | +| tool.backup.schedule | string | `"0 10 * * *"` | Backup schedule | +| tool.fileShare | string | `"share1"` | File Share name for filestore instance. Always "share1" unless storage is on an Enterprise tier | +| tool.instance | string | Must be overridden in environment-specific values file | Filestore instance (e.g. 
"fshare-instance-dev") | +| tool.purge.debug | bool | `false` | Turn on debugging mode | +| tool.purge.keep | int | `6` | Number of backups to keep when purging | +| tool.purge.schedule | string | `"45 10 * * *"` | purge schedule | +| tool.zone | string | Must be overridden in environment-specific values file | Zone for Filestore instance (e.g. "b" from "us-central1-b") | diff --git a/applications/filestore-backup/templates/cronjobs.yaml b/applications/filestore-backup/templates/cronjobs.yaml index b1da7fec3a..75f6393b3e 100644 --- a/applications/filestore-backup/templates/cronjobs.yaml +++ b/applications/filestore-backup/templates/cronjobs.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: CronJob metadata: - name: create_backup + name: create-backup spec: schedule: {{ .Values.tool.backup.schedule | quote }} successfulJobsHistoryLimit: 1 @@ -20,7 +20,7 @@ spec: {{ toYaml . | indent 12 }} {{- end }} containers: - - name: create_backup + - name: create-backup image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" securityContext: allowPrivilegeEscalation: false @@ -49,7 +49,7 @@ spec: apiVersion: batch/v1 kind: CronJob metadata: - name: purge_backup + name: purge-backup spec: schedule: {{ .Values.tool.purge.schedule | quote }} successfulJobsHistoryLimit: 1 @@ -68,7 +68,7 @@ spec: {{ toYaml . | indent 12 }} {{- end }} containers: - - name: purge_backup + - name: purge-backup image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" securityContext: allowPrivilegeEscalation: false diff --git a/applications/filestore-backup/values.yaml b/applications/filestore-backup/values.yaml index 61a45de949..c1925d0c2e 100644 --- a/applications/filestore-backup/values.yaml +++ b/applications/filestore-backup/values.yaml @@ -25,7 +25,7 @@ affinity: {} # be set in the individual environment values files. 
image: # -- Filestore-Backup image to use - repository: ghcr.io/lsst-sqre/rubin-google-filestore-tool + repository: ghcr.io/lsst-sqre/rubin-google-filestore-tools # -- Pull policy for the filestore-backup image pullPolicy: "IfNotPresent" # -- Tag of filestore-backup image to use diff --git a/docs/applications/filestore-backup/index.rst b/docs/applications/filestore-backup/index.rst new file mode 100644 index 0000000000..b34d94924b --- /dev/null +++ b/docs/applications/filestore-backup/index.rst @@ -0,0 +1,18 @@ +.. px-app:: filestore-backup + +############################################################## +Filestore-backup — Create and purge Google filestore backups +############################################################## + +Filestore-backup manages backing up Google Filestore shares and purging old backups. + +.. jinja:: filestore-backup + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/filestore-backup/values.md b/docs/applications/filestore-backup/values.md new file mode 100644 index 0000000000..5c9df838a4 --- /dev/null +++ b/docs/applications/filestore-backup/values.md @@ -0,0 +1,12 @@ +```{px-app-values} giftless +``` + +# Filestore-backup Helm values reference + +Helm values reference table for the {px-app}`filestore-backup` application. 
+ +```{include} ../../../applications/filestore-backup/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/index.rst b/docs/applications/index.rst index d9c5c08f91..ff6ed23caf 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -25,6 +25,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde butler/index datalinker/index + filestore-backup/index hips/index linters/index livetap/index diff --git a/environments/templates/filestore-backup-application.yaml b/environments/templates/filestore-backup-application.yaml index 9c0bb8d0e3..a848dec6f6 100644 --- a/environments/templates/filestore-backup-application.yaml +++ b/environments/templates/filestore-backup-application.yaml @@ -1,4 +1,4 @@ -{{- if .Values.applications.filestore-backup -}} +{{- if (index .Values "applications" "filestore-backup") -}} apiVersion: v1 kind: Namespace metadata: From 7876b65d349215736218a790764fa577230ed44e Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 23 Jan 2024 11:18:06 -0700 Subject: [PATCH 577/588] pick up better-cli version of tool --- applications/filestore-backup/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/filestore-backup/Chart.yaml b/applications/filestore-backup/Chart.yaml index 35d72b94ed..19abc8ba0d 100644 --- a/applications/filestore-backup/Chart.yaml +++ b/applications/filestore-backup/Chart.yaml @@ -4,4 +4,4 @@ version: 1.0.0 description: Tool to manage Google Filestore backups sources: - https://github.com/lsst-sqre/rubin-google-filestore-tool -appVersion: 0.1.2 +appVersion: 0.1.3 From da930d373e8bb7f6e418c2bc6308b2dbc829d197 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 23 Jan 2024 13:03:42 -0700 Subject: [PATCH 578/588] Break up cronjobs and add correct SA to them --- .../templates/cronjob-create-backup.yaml | 45 +++++++++ .../templates/cronjob-purge-backup.yaml | 47 +++++++++ .../filestore-backup/templates/cronjobs.yaml | 97 
------------------- 3 files changed, 92 insertions(+), 97 deletions(-) create mode 100644 applications/filestore-backup/templates/cronjob-create-backup.yaml create mode 100644 applications/filestore-backup/templates/cronjob-purge-backup.yaml delete mode 100644 applications/filestore-backup/templates/cronjobs.yaml diff --git a/applications/filestore-backup/templates/cronjob-create-backup.yaml b/applications/filestore-backup/templates/cronjob-create-backup.yaml new file mode 100644 index 0000000000..c84ef9224a --- /dev/null +++ b/applications/filestore-backup/templates/cronjob-create-backup.yaml @@ -0,0 +1,45 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: create-backup +spec: + schedule: {{ .Values.tool.backup.schedule | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + serviceAccountName: "fileserver-backup" + {{- with .Values.tolerations }} + tolerations: {{ toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.tolerations }} + affinity: {{ toYaml . 
| nindent 12 }} + {{- end }} + containers: + - name: create-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" + value: {{ .Values.global.gcpProjectId | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" + value: {{ .Values.tool.instance | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" + value: {{ .Values.tool.fileShare | quote }} + {{- with .Values.tool.backup.debug }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" + value: "true" + {{- end }} + command: [ "create_backup" ] diff --git a/applications/filestore-backup/templates/cronjob-purge-backup.yaml b/applications/filestore-backup/templates/cronjob-purge-backup.yaml new file mode 100644 index 0000000000..68198b30b4 --- /dev/null +++ b/applications/filestore-backup/templates/cronjob-purge-backup.yaml @@ -0,0 +1,47 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: purge-backup +spec: + schedule: {{ .Values.tool.purge.schedule | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + serviceAccountName: "fileserver-backup" + {{- with .Values.tolerations }} + tolerations: {{ toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.tolerations }} + affinity: {{ toYaml . 
| nindent 12 }} + {{- end }} + containers: + - name: purge-backup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" + value: {{ .Values.global.gcpProjectId | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" + value: {{ .Values.tool.instance | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" + value: {{ .Values.tool.fileShare | quote }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_KEEP_BACKUPS" + value: {{ .Values.tool.purge.keep | quote }} + {{- with .Values.tool.purge.debug }} + - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" + value: "true" + {{- end }} + command: [ "purge_backup" ] diff --git a/applications/filestore-backup/templates/cronjobs.yaml b/applications/filestore-backup/templates/cronjobs.yaml deleted file mode 100644 index 75f6393b3e..0000000000 --- a/applications/filestore-backup/templates/cronjobs.yaml +++ /dev/null @@ -1,97 +0,0 @@ -apiVersion: batch/v1 -kind: CronJob -metadata: - name: create-backup -spec: - schedule: {{ .Values.tool.backup.schedule | quote }} - successfulJobsHistoryLimit: 1 - jobTemplate: - spec: - template: - spec: - restartPolicy: Never - automountServiceAccountToken: true - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 12 }} - {{- end }} - {{- with .Values.tolerations }} - affinity: -{{ toYaml . 
| indent 12 }} - {{- end }} - containers: - - name: create-backup - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - securityContext: - allowPrivilegeEscalation: false - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 - capabilities: - drop: - - all - readOnlyRootFilesystem: true - env: - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" - value: {{ .Values.global.gcpProjectId | quote }} - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" - value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" - value: {{ .Values.tool.instance | quote }} - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" - value: {{ .Values.tool.fileShare | quote }} - {{- with .Values.tool.backup.debug }} - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" - value: "true" - {{- end }} - command: [ "create_backup" ] ---- -apiVersion: batch/v1 -kind: CronJob -metadata: - name: purge-backup -spec: - schedule: {{ .Values.tool.purge.schedule | quote }} - successfulJobsHistoryLimit: 1 - jobTemplate: - spec: - template: - spec: - restartPolicy: Never - automountServiceAccountToken: true - {{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 12 }} - {{- end }} - {{- with .Values.tolerations }} - affinity: -{{ toYaml . 
| indent 12 }} - {{- end }} - containers: - - name: purge-backup - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - securityContext: - allowPrivilegeEscalation: false - runAsNonRoot: true - runAsUser: 1000 - runAsGroup: 1000 - capabilities: - drop: - - all - readOnlyRootFilesystem: true - env: - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" - value: {{ .Values.global.gcpProjectId | quote }} - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" - value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" - value: {{ .Values.tool.instance | quote }} - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" - value: {{ .Values.tool.fileShare | quote }} - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_KEEP_BACKUPS" - value: {{ .Values.tool.purge.keep | quote }} - {{- with .Values.tool.purge.debug }} - - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" - value: "true" - {{- end }} - command: [ "purge_backup" ] From 1c1656a6c4d079ba3493e35d88f298166d7b19ae Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 23 Jan 2024 13:41:41 -0700 Subject: [PATCH 579/588] Tidy up Helm YAML for filestore-backup --- applications/filestore-backup/README.md | 16 +++++++------- .../templates/cronjob-create-backup.yaml | 22 +++++++++++-------- .../templates/cronjob-purge-backup.yaml | 16 ++++++++------ .../filestore-backup/values-idfdev.yaml | 2 +- applications/filestore-backup/values.yaml | 22 ++++++++++++++----- 5 files changed, 48 insertions(+), 30 deletions(-) diff --git a/applications/filestore-backup/README.md b/applications/filestore-backup/README.md index 84f3cc6b5f..b128ccb5bc 100644 --- a/applications/filestore-backup/README.md +++ b/applications/filestore-backup/README.md @@ -11,6 +11,14 @@ Tool to manage Google Filestore backups | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the filestore-backup pods | +| config.backup.debug | bool | 
`false` | Turn on debugging mode | +| config.backup.schedule | string | fields are minute hour day-of-month month day-of-week | Backup schedule, in Unix cron job format | +| config.fileShare | string | `"share1"` | File Share name for filestore instance. Always "share1" unless storage is on an Enterprise tier | +| config.instance | string | None, must be set | Filestore instance (e.g. "fshare-instance-dev") | +| config.purge.debug | bool | `false` | Turn on debugging mode | +| config.purge.keep | int | `6` | Number of backups to keep when purging | +| config.purge.schedule | string | fields are minute hour day-of-month month day-of-week | Purge schedule, in Unix cron job format: | +| config.zone | string | None, must be set | Zone for Filestore instance (e.g. "b" from "us-central1-b") | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.gcpProjectId | string | Set by Argo CD | GCP Project ID | | global.gcpRegion | string | Set by Argo CD | GCP Region | @@ -23,11 +31,3 @@ Tool to manage Google Filestore backups | podAnnotations | object | `{}` | Annotations for the filestore-backup pods | | resources | object | `{}` | Resource limits and requests for the filestore-backup pods | | tolerations | list | `[]` | Tolerations for the filestore-backup pods | -| tool.backup.debug | bool | `false` | Turn on debugging mode | -| tool.backup.schedule | string | `"0 10 * * *"` | Backup schedule | -| tool.fileShare | string | `"share1"` | File Share name for filestore instance. Always "share1" unless storage is on an Enterprise tier | -| tool.instance | string | Must be overridden in environment-specific values file | Filestore instance (e.g. 
"fshare-instance-dev") | -| tool.purge.debug | bool | `false` | Turn on debugging mode | -| tool.purge.keep | int | `6` | Number of backups to keep when purging | -| tool.purge.schedule | string | `"45 10 * * *"` | purge schedule | -| tool.zone | string | Must be overridden in environment-specific values file | Zone for Filestore instance (e.g. "b" from "us-central1-b") | diff --git a/applications/filestore-backup/templates/cronjob-create-backup.yaml b/applications/filestore-backup/templates/cronjob-create-backup.yaml index c84ef9224a..25192a942f 100644 --- a/applications/filestore-backup/templates/cronjob-create-backup.yaml +++ b/applications/filestore-backup/templates/cronjob-create-backup.yaml @@ -2,20 +2,24 @@ apiVersion: batch/v1 kind: CronJob metadata: name: create-backup + labels: + {{- include "filestore-backup.labels" . | nindent 4 }} spec: - schedule: {{ .Values.tool.backup.schedule | quote }} + schedule: {{ .Values.config.backup.schedule | quote }} successfulJobsHistoryLimit: 1 jobTemplate: spec: template: spec: restartPolicy: Never - serviceAccountName: "fileserver-backup" + serviceAccountName: "filestore-backup" {{- with .Values.tolerations }} - tolerations: {{ toYaml . | nindent 12 }} + tolerations: + {{- toYaml . | nindent 12 }} {{- end }} {{- with .Values.tolerations }} - affinity: {{ toYaml . | nindent 12 }} + affinity: + {{- toYaml . 
| nindent 12 }} {{- end }} containers: - name: create-backup @@ -31,14 +35,14 @@ spec: readOnlyRootFilesystem: true env: - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" - value: {{ .Values.global.gcpProjectId | quote }} + value: {{ required ".Values.global.gcpProjectId must be set to a valid Google Project ID" .Values.global.gcpProjectId | quote }} - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" - value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.config.zone }}" - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" - value: {{ .Values.tool.instance | quote }} + value: {{ .Values.config.instance | quote }} - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" - value: {{ .Values.tool.fileShare | quote }} - {{- with .Values.tool.backup.debug }} + value: {{ .Values.config.fileShare | quote }} + {{- with .Values.config.backup.debug }} - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" value: "true" {{- end }} diff --git a/applications/filestore-backup/templates/cronjob-purge-backup.yaml b/applications/filestore-backup/templates/cronjob-purge-backup.yaml index 68198b30b4..298a7eb2e7 100644 --- a/applications/filestore-backup/templates/cronjob-purge-backup.yaml +++ b/applications/filestore-backup/templates/cronjob-purge-backup.yaml @@ -2,15 +2,17 @@ apiVersion: batch/v1 kind: CronJob metadata: name: purge-backup + labels: + {{- include "filestore-backup.labels" . | nindent 4 }} spec: - schedule: {{ .Values.tool.purge.schedule | quote }} + schedule: {{ .Values.config.purge.schedule | quote }} successfulJobsHistoryLimit: 1 jobTemplate: spec: template: spec: restartPolicy: Never - serviceAccountName: "fileserver-backup" + serviceAccountName: "filestore-backup" {{- with .Values.tolerations }} tolerations: {{ toYaml . 
| nindent 12 }} {{- end }} @@ -33,14 +35,14 @@ spec: - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_PROJECT" value: {{ .Values.global.gcpProjectId | quote }} - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_ZONE" - value: "{{ .Values.global.gcpRegion }}-{{ .Values.tool.zone }}" + value: "{{ .Values.global.gcpRegion }}-{{ .Values.config.zone }}" - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_INSTANCE" - value: {{ .Values.tool.instance | quote }} + value: {{ .Values.config.instance | quote }} - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_SHARE_NAME" - value: {{ .Values.tool.fileShare | quote }} + value: {{ .Values.config.fileShare | quote }} - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_KEEP_BACKUPS" - value: {{ .Values.tool.purge.keep | quote }} - {{- with .Values.tool.purge.debug }} + value: {{ .Values.config.purge.keep | quote }} + {{- with .Values.config.purge.debug }} - name: "RUBIN_GOOGLE_FILESTORE_TOOLS_DEBUG" value: "true" {{- end }} diff --git a/applications/filestore-backup/values-idfdev.yaml b/applications/filestore-backup/values-idfdev.yaml index 860137dccc..4818994c15 100644 --- a/applications/filestore-backup/values-idfdev.yaml +++ b/applications/filestore-backup/values-idfdev.yaml @@ -1,4 +1,4 @@ -tool: +config: instance: "fshare-instance-dev" zone: "b" backup: diff --git a/applications/filestore-backup/values.yaml b/applications/filestore-backup/values.yaml index c1925d0c2e..0dd1d494e5 100644 --- a/applications/filestore-backup/values.yaml +++ b/applications/filestore-backup/values.yaml @@ -26,32 +26,41 @@ affinity: {} image: # -- Filestore-Backup image to use repository: ghcr.io/lsst-sqre/rubin-google-filestore-tools + # -- Pull policy for the filestore-backup image pullPolicy: "IfNotPresent" + # -- Tag of filestore-backup image to use # @default -- The appVersion of the chart tag: "" -tool: +config: # -- Filestore instance (e.g. 
"fshare-instance-dev") - # @default -- Must be overridden in environment-specific values file + # @default -- None, must be set instance: "" + # -- Zone for Filestore instance (e.g. "b" from "us-central1-b") - # @default -- Must be overridden in environment-specific values file + # @default -- None, must be set zone: "" + # -- File Share name for filestore instance. Always "share1" unless # storage is on an Enterprise tier fileShare: "share1" backup: # -- Turn on debugging mode debug: false - # -- Backup schedule + + # -- Backup schedule, in Unix cron job format + # @default -- fields are minute hour day-of-month month day-of-week schedule: "0 10 * * *" purge: # -- Turn on debugging mode debug: false - # -- purge schedule + + # -- Purge schedule, in Unix cron job format: + # @default -- fields are minute hour day-of-month month day-of-week schedule: "45 10 * * *" + # -- Number of backups to keep when purging keep: 6 @@ -59,9 +68,12 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + # -- GCP Project ID # @default -- Set by Argo CD + gcpProjectId: "" + # -- GCP Region # @default -- Set by Argo CD gcpRegion: "" From f42dfc53bdb013ffdea0745134be2122643f43dc Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 23 Jan 2024 13:46:10 -0700 Subject: [PATCH 580/588] Add GCP region and project to injected globals --- src/phalanx/services/application.py | 3 +++ tests/data/output/idfdev/lint-all-calls.json | 10 +++++----- tests/data/output/idfdev/lint-git-calls.json | 6 +++--- tests/data/output/idfdev/lint-set-values.json | 4 +++- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index f07a990063..28849d802c 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -286,6 +286,9 @@ def _build_injected_values( "global.baseUrl": f"https://{environment.fqdn}", "global.vaultSecretsPath": environment.vault_path_prefix, } + 
if environment.gcp: + values["global.gcpProjectId"] = environment.gcp.project_id + values["global.gcpRegion"] = environment.gcp.region if environment.butler_repository_index: butler_index = environment.butler_repository_index values["global.butlerRepositoryIndex"] = butler_index diff --git a/tests/data/output/idfdev/lint-all-calls.json b/tests/data/output/idfdev/lint-all-calls.json index 3fd9997730..1352268773 100644 --- a/tests/data/output/idfdev/lint-all-calls.json +++ b/tests/data/output/idfdev/lint-all-calls.json @@ -35,7 +35,7 @@ "--values", "argocd/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ], [ "lint", @@ -73,7 +73,7 @@ "--values", "gafaelfawr/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ], [ "lint", @@ -100,7 +100,7 @@ "--values", "nublado/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + 
"global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ], [ "dependency", @@ -116,7 +116,7 @@ "--values", "portal/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ], [ "dependency", @@ -132,7 +132,7 @@ "--values", "postgres/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ], [ "lint", diff --git a/tests/data/output/idfdev/lint-git-calls.json b/tests/data/output/idfdev/lint-git-calls.json index 5a67fedca2..64009f4e2e 100644 --- a/tests/data/output/idfdev/lint-git-calls.json +++ b/tests/data/output/idfdev/lint-git-calls.json @@ -29,7 +29,7 @@ "--values", "argocd/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + 
"global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ], [ "dependency", @@ -45,7 +45,7 @@ "--values", "gafaelfawr/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ], [ "lint", @@ -72,6 +72,6 @@ "--values", "portal/values-idfdev.yaml", "--set", - "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev" + "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres,global.host=data-dev.lsst.cloud,global.baseUrl=https://data-dev.lsst.cloud,global.vaultSecretsPath=secret/phalanx/idfdev,global.gcpProjectId=science-platform-dev-7696,global.gcpRegion=us-central1" ] ] diff --git a/tests/data/output/idfdev/lint-set-values.json b/tests/data/output/idfdev/lint-set-values.json index 47d57c3698..7a42ea7ca5 100644 --- a/tests/data/output/idfdev/lint-set-values.json +++ b/tests/data/output/idfdev/lint-set-values.json @@ -2,5 +2,7 @@ "global.enabledServices=@argocd@gafaelfawr@mobu@nublado@portal@postgres", "global.host=data-dev.lsst.cloud", "global.baseUrl=https://data-dev.lsst.cloud", - "global.vaultSecretsPath=secret/phalanx/idfdev" + "global.vaultSecretsPath=secret/phalanx/idfdev", + "global.gcpProjectId=science-platform-dev-7696", + "global.gcpRegion=us-central1" ] From 
b7cd25cfb5469f3aeb592e5c1a295c260a3caccc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 1 Jan 2024 12:35:43 +0000 Subject: [PATCH 581/588] Update confluentinc/cp-kafka-rest Docker tag to v7.5.3 --- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/rest-proxy/README.md | 2 +- applications/sasquatch/charts/rest-proxy/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 9d24b6a36c..1ca2552c96 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -303,7 +303,7 @@ Rubin Observatory's telemetry service. | rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| rest-proxy.image.tag | string | `"7.5.2"` | Kafka REST proxy image tag. | +| rest-proxy.image.tag | string | `"7.5.3"` | Kafka REST proxy image tag. | | rest-proxy.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | rest-proxy.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | rest-proxy.ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index 8286c3294e..6093a92723 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch. | heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. 
| | image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| image.tag | string | `"7.5.2"` | Kafka REST proxy image tag. | +| image.tag | string | `"7.5.3"` | Kafka REST proxy image tag. | | ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml index b3ccbd2f22..11512234fa 100644 --- a/applications/sasquatch/charts/rest-proxy/values.yaml +++ b/applications/sasquatch/charts/rest-proxy/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafka REST proxy image tag. - tag: 7.5.2 + tag: 7.5.3 service: # -- Kafka REST proxy service port From 9821eaa3274336f765e2e2fbcbff0c72e44d395d Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 23 Jan 2024 15:17:16 -0700 Subject: [PATCH 582/588] Add control system config to ApplicationService injection. 
--- docs/extras/schemas/environment.json | 22 ++--- src/phalanx/models/environments.py | 135 ++++++++++++++------------- src/phalanx/services/application.py | 27 ++++++ 3 files changed, 106 insertions(+), 78 deletions(-) diff --git a/docs/extras/schemas/environment.json b/docs/extras/schemas/environment.json index 80817cc2f6..46a5e5fa2c 100644 --- a/docs/extras/schemas/environment.json +++ b/docs/extras/schemas/environment.json @@ -235,6 +235,17 @@ "title": "Vault path prefix", "type": "string" }, + "controlSystem": { + "anyOf": [ + { + "$ref": "#/$defs/ControlSystemConfig" + }, + { + "type": "null" + } + ], + "default": null + }, "applications": { "additionalProperties": { "type": "boolean" @@ -268,17 +279,6 @@ "default": null, "description": "Branch of the Git repository holding Argo CD configuration. This is required in the merged values file that includes environment overrides, but the environment override file doesn't need to set it, so it's marked as optional for schema checking purposes to allow the override file to be schema-checked independently.", "title": "Git repository branch" - }, - "controlSystem": { - "anyOf": [ - { - "$ref": "#/$defs/ControlSystemConfig" - }, - { - "type": "null" - } - ], - "default": null } }, "required": [ diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index 1c38460829..e43640f257 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -20,6 +20,7 @@ from .secrets import Secret __all__ = [ + "ControlSystemConfig", "Environment", "EnvironmentBaseConfig", "EnvironmentConfig", @@ -81,6 +82,71 @@ class OnepasswordConfig(CamelCaseModel): ) +class ControlSystemConfig(CamelCaseModel): + """Configuration for the Control System.""" + + app_namespace: str | None = Field( + None, + title="Application Namespace", + description=( + "Set the namespace for the control system components. 
Each control" + " system application consists of many components that need to know" + " what namespace to which they belong." + ), + ) + + image_tag: str | None = Field( + None, + title="Image Tag", + description=("The image tag to use for control system images."), + ) + + site_tag: str | None = Field( + None, + title="Site Tag", + description=( + "The tag that tells the control system component where it is" + " running." + ), + ) + + topic_name: str | None = Field( + None, + title="Topic Identifier", + description="The Kafka identifier for control system topics.", + ) + + kafka_broker_address: str | None = Field( + None, + title="Kafka Broker Address", + description=( + "The Kafka broker address for the control system components." + ), + ) + + kafka_topic_replication_factor: int | None = Field( + None, + title="Kafka Topic Replication Factor", + description=( + "The Kafka topic replication factor for control system components." + ), + ) + + schema_registry_url: str | None = Field( + None, + title="Schema Registry URL", + description=( + "The Schema Registry URL for the control system components." + ), + ) + + s3_endpoint_url: str | None = Field( + None, + title="S3 Endpoint URL", + description="The S3 URL for the environment specific LFA.", + ) + + class EnvironmentBaseConfig(CamelCaseModel): """Configuration common to `EnviromentConfig` and `Environment`.""" @@ -136,6 +202,8 @@ class EnvironmentBaseConfig(CamelCaseModel): description="Prefix of Vault paths, including the KV v2 mount point", ) + control_system: ControlSystemConfig | None = None + @field_validator("onepassword", mode="before") @classmethod def _validate_onepassword( @@ -192,71 +260,6 @@ def vault_write_policy(self) -> str: return f"{self.vault_path}/write" -class ControlSystemConfig(CamelCaseModel): - """Configuration for the Control System.""" - - app_namespace: str | None = Field( - None, - title="Application Namespace", - description=( - "Set the namespace for the control system components. 
Each control" - " system application consists of many components that need to know" - " what namespace to which they belong." - ), - ) - - image_tag: str | None = Field( - None, - title="Image Tag", - description=("The image tag to use for control system images."), - ) - - site_tag: str | None = Field( - None, - title="Site Tag", - description=( - "The tag that tells the control system component where it is" - " running." - ), - ) - - topic_name: str | None = Field( - None, - title="Topic Identifier", - description="The Kafka identifier for control system topics.", - ) - - kafka_broker_address: str | None = Field( - None, - title="Kafka Broker Address", - description=( - "The Kafka broker address for the control system components." - ), - ) - - kafka_topic_replication_factor: int | None = Field( - None, - title="Kafka Topic Replication Factor", - description=( - "The Kafka topic replication factor for control system components." - ), - ) - - schema_registry_url: str | None = Field( - None, - title="Schema Registry URL", - description=( - "The Schema Registry URL for the control system components." - ), - ) - - s3_endpoint_url: str | None = Field( - None, - title="S3 Endpoint URL", - description="The S3 URL for the environment specific LFA.", - ) - - class EnvironmentConfig(EnvironmentBaseConfig): """Configuration for a Phalanx environment. 
@@ -300,8 +303,6 @@ class EnvironmentConfig(EnvironmentBaseConfig): ), ) - control_system: ControlSystemConfig | None = None - model_config = ConfigDict(extra="forbid") @classmethod diff --git a/src/phalanx/services/application.py b/src/phalanx/services/application.py index f07a990063..97f8836a68 100644 --- a/src/phalanx/services/application.py +++ b/src/phalanx/services/application.py @@ -297,6 +297,33 @@ def _build_injected_values( key = "vault-secrets-operator.vault.address" values[key] = str(environment.vault_url) + if environment.control_system: + extras = { + "appNamespace": environment.control_system.app_namespace, + "imageTag": environment.control_system.image_tag, + "siteTag": environment.control_system.site_tag, + "topicName": environment.control_system.topic_name, + "kafkaBrokerAddress": ( + environment.control_system.kafka_broker_address + ), + "kafkaTopicReplicationFactor": ( + str( + environment.control_system.kafka_topic_replication_factor + ) + ), + "schemaRegistryUrl": ( + environment.control_system.schema_registry_url + ), + "s3EndpointUrl": environment.control_system.s3_endpoint_url, + } + values.update( + { + f"global.controlSystem.{k}": v + for k, v in extras.items() + if v is not None + } + ) + return values def _create_application_template(self, name: str) -> None: From 06c3acb1117acce2bb242dbb6f23b5a814793f3d Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 23 Jan 2024 15:21:53 -0700 Subject: [PATCH 583/588] Fix LOVE charts for config naming change. 
--- .../love-manager/templates/manager-frontend-deployment.yaml | 4 ++-- .../templates/manager-producers-deployment.yaml | 4 ++-- .../love/charts/love-nginx/templates/nginx-deployment.yaml | 6 +++--- .../love/charts/love-producer/templates/deployment.yaml | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml b/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml index cb07fa7208..f57685aebe 100644 --- a/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml +++ b/applications/love/charts/love-manager/templates/manager-frontend-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "love-manager-frontend.fullname" . }} - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} labels: {{- include "love-manager-frontend.labels" . | nindent 4 }} spec: @@ -19,7 +19,7 @@ spec: spec: containers: - name: {{ include "love-manager-frontend.fullname" . }} - {{- $imageTag := .Values.manager.frontend.image.tag | default $.Values.global.controlSystemImageTag }} + {{- $imageTag := .Values.manager.frontend.image.tag | default $.Values.global.controlSystem.imageTag }} image: "{{ .Values.manager.frontend.image.repository }}:{{ $imageTag }}" imagePullPolicy: {{ .Values.manager.frontend.image.pullPolicy }} ports: diff --git a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml index d3a7990210..5bb7e050f1 100644 --- a/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml +++ b/applications/love/charts/love-manager/templates/manager-producers-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "love-manager-producers.fullname" . 
}} - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} labels: {{- include "love-manager-producers.labels" . | nindent 4 }} spec: @@ -19,7 +19,7 @@ spec: spec: containers: - name: {{ include "love-manager-producers.fullname" . }} - {{- $imageTag := .Values.manager.producers.image.tag | default $.Values.global.controlSystemImageTag }} + {{- $imageTag := .Values.manager.producers.image.tag | default $.Values.global.controlSystem.imageTag }} image: "{{ .Values.manager.producers.image.repository }}:{{ $imageTag }}" imagePullPolicy: {{ .Values.manager.producers.image.pullPolicy }} ports: diff --git a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml index 2b7b48fcff..e2ca31535f 100644 --- a/applications/love/charts/love-nginx/templates/nginx-deployment.yaml +++ b/applications/love/charts/love-nginx/templates/nginx-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "love-nginx.name" . }} - namespace: {{ $.Values.global.controlSystemAppNamespace }} + namespace: {{ $.Values.global.controlSystem.appNamespace }} labels: {{- include "love-nginx.labels" . 
| nindent 4 }} spec: @@ -21,7 +21,7 @@ spec: {{- end }} initContainers: - name: love-frontend - {{- $feImageTag := .Values.initContainers.frontend.image.tag | default $.Values.global.controlSystemImageTag }} + {{- $feImageTag := .Values.initContainers.frontend.image.tag | default $.Values.global.controlSystem.imageTag }} image: "{{ .Values.initContainers.frontend.image.repository }}:{{ $feImageTag }}" imagePullPolicy: {{ .Values.initContainers.frontend.image.pullPolicy }} command: ["/bin/sh", "-c", "mkdir -p /usr/src/love-frontend; cp -Rv /usr/src/love/ /usr/src/love-frontend"] @@ -29,7 +29,7 @@ spec: - mountPath: /usr/src name: {{ .Values.staticStore.name }} - name: love-manager-static - {{- $mgImageTag := .Values.initContainers.manager.image.tag | default $.Values.global.controlSystemImageTag }} + {{- $mgImageTag := .Values.initContainers.manager.image.tag | default $.Values.global.controlSystem.imageTag }} image: "{{ .Values.initContainers.manager.image.repository }}:{{ $mgImageTag }}" imagePullPolicy: {{ .Values.initContainers.manager.image.pullPolicy }} {{- with .Values.initContainers.manager.command }} diff --git a/applications/love/charts/love-producer/templates/deployment.yaml b/applications/love/charts/love-producer/templates/deployment.yaml index 0dc4653832..fcc11046a0 100644 --- a/applications/love/charts/love-producer/templates/deployment.yaml +++ b/applications/love/charts/love-producer/templates/deployment.yaml @@ -25,7 +25,7 @@ spec: spec: containers: - name: {{ $producer.name }} - {{- $imageTag := $.Values.image.tag | default $.Values.global.controlSystemImageTag }} + {{- $imageTag := $.Values.image.tag | default $.Values.global.controlSystem.imageTag }} image: "{{ $.Values.image.repository }}:{{ $imageTag }}" imagePullPolicy: {{ $.Values.image.pullPolicy }} envFrom: From 0b9763dceb00092d1a50688fe06e3a017a0fa1ee Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 23 Jan 2024 15:23:27 -0700 Subject: [PATCH 584/588] Fix integration-testing chart 
for config naming change. --- .../integration-testing/templates/job-workflow-template.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml index 14b6ff793c..5c08c7195c 100644 --- a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml @@ -44,7 +44,7 @@ spec: container: command: [/home/saluser/.startup.sh] name: test-{{ printf "{{inputs.parameters.jobname}}" }} - {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystemImageTag }} + {{- $imageTag := .Values.image.tag | default $.Values.global.controlSystem.imageTag }} image: "ts-dockerhub.lsst.org/integrationtests:{{ $imageTag }}" imagePullPolicy: Always envFrom: From d0fcf846a0167b4866630e524cbeec8466a50c9a Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 24 Jan 2024 09:32:52 -0800 Subject: [PATCH 585/588] Fix broken links Fix link to rubin-google-filestore-tools, and link to the French page for CC-IN2P3 since the English one doesn't seem to be at the previous URL. 
--- applications/filestore-backup/Chart.yaml | 2 +- applications/filestore-backup/README.md | 2 +- docs/environments/ccin2p3/index.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/filestore-backup/Chart.yaml b/applications/filestore-backup/Chart.yaml index 19abc8ba0d..49468d7b3d 100644 --- a/applications/filestore-backup/Chart.yaml +++ b/applications/filestore-backup/Chart.yaml @@ -3,5 +3,5 @@ name: filestore-backup version: 1.0.0 description: Tool to manage Google Filestore backups sources: - - https://github.com/lsst-sqre/rubin-google-filestore-tool + - https://github.com/lsst-sqre/rubin-google-filestore-tools appVersion: 0.1.3 diff --git a/applications/filestore-backup/README.md b/applications/filestore-backup/README.md index b128ccb5bc..716863d752 100644 --- a/applications/filestore-backup/README.md +++ b/applications/filestore-backup/README.md @@ -4,7 +4,7 @@ Tool to manage Google Filestore backups ## Source Code -* +* ## Values diff --git a/docs/environments/ccin2p3/index.rst b/docs/environments/ccin2p3/index.rst index 733aa9b0ca..f48999008b 100644 --- a/docs/environments/ccin2p3/index.rst +++ b/docs/environments/ccin2p3/index.rst @@ -4,7 +4,7 @@ ccin2p3 — data-dev.lsst.eu (French Data Facility) ################################################# -``ccin2p3`` is the environment for the Rubin Science Platform at the `CC-IN2P3 `__. +``ccin2p3`` is the environment for the Rubin Science Platform at the `CC-IN2P3 `__. .. 
jinja:: ccin2p3 :file: environments/_summary.rst.jinja From 9937b0adc2f5fddca70de7c11fb657fb12442606 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 24 Jan 2024 20:34:24 +0000 Subject: [PATCH 586/588] Update Helm release argo-cd to v5.53.8 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index f088aacc9a..993b83aaac 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.53.6 + version: 5.53.8 repository: https://argoproj.github.io/argo-helm From d005074e09f54388983ff3557a2f3cfcb7f95f66 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 24 Jan 2024 20:34:27 +0000 Subject: [PATCH 587/588] Update Helm release vault-secrets-operator to v2.5.6 --- applications/vault-secrets-operator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml index 427c4c2139..2dd13545d4 100644 --- a/applications/vault-secrets-operator/Chart.yaml +++ b/applications/vault-secrets-operator/Chart.yaml @@ -5,7 +5,7 @@ sources: - https://github.com/ricoberger/vault-secrets-operator dependencies: - name: vault-secrets-operator - version: 2.5.5 + version: 2.5.6 repository: https://ricoberger.github.io/helm-charts/ annotations: phalanx.lsst.io/docs: | From f081d17b5d64f95da2f1e99ccc6220c5fc6ce49f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 24 Jan 2024 12:59:36 -0800 Subject: [PATCH 588/588] Fix location of T&S Kafka setting in Nublado Putting arbitrary new settings into the controller isn't supported. Move the new setting to secrets like the other secret-related flag. 
--- applications/nublado/README.md | 2 +- applications/nublado/templates/vault-secrets.yaml | 2 +- applications/nublado/values-tucson-teststand.yaml | 4 +++- applications/nublado/values.yaml | 6 +++--- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 90a08c8779..d5cdce7a0e 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -54,7 +54,6 @@ JupyterHub and custom spawner for the Rubin Science Platform | controller.config.lab.extraAnnotations | object | `{}` | Extra annotations to add to user lab pods | | controller.config.lab.files | object | See `values.yaml` | Files to be mounted as ConfigMaps inside the user lab pod. `contents` contains the file contents. Set `modify` to true to make the file writable in the pod. | | controller.config.lab.initContainers | list | `[]` | Containers run as init containers with each user pod. Each should set `name`, `image` (a Docker image and pull policy specification), and `privileged`, and may contain `volumeMounts` (similar to the main `volumeMountss` configuration). If `privileged` is true, the container will run as root with all capabilities. Otherwise it will run as the user. | -| controller.config.lab.installTsSalKafkaSecret | bool | `false` | Flag to put T&S SAL Kafka secrets into pod. | | controller.config.lab.namespacePrefix | string | `"nublado"` | Prefix for namespaces for user labs. To this will be added a dash (`-`) and the user's username. 
| | controller.config.lab.nodeSelector | object | `{}` | Node selector rules for user lab pods | | controller.config.lab.nss.baseGroup | string | See `values.yaml` | Base `/etc/group` file for lab containers | @@ -112,4 +111,5 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.scheduling.userPlaceholder.enabled | bool | `false` | Whether to spawn placeholder pods representing fake users to force autoscaling in advance of running out of resources | | jupyterhub.scheduling.userScheduler.enabled | bool | `false` | Whether the user scheduler should be enabled | | proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | +| secrets.installTsSalKafkaSecret | bool | `false` | Whether to install the T&S SAL Kafka secret. | | secrets.templateSecrets | bool | `false` | Whether to use the new secrets management mechanism. If enabled, the Vault nublado secret will be split into a nublado secret for JupyterHub and a nublado-lab-secret secret used as a source for secret values for the user's lab. 
| diff --git a/applications/nublado/templates/vault-secrets.yaml b/applications/nublado/templates/vault-secrets.yaml index abd9b87344..6b3df719a0 100644 --- a/applications/nublado/templates/vault-secrets.yaml +++ b/applications/nublado/templates/vault-secrets.yaml @@ -59,7 +59,7 @@ spec: path: "{{- .Values.global.vaultSecretsPath }}/pull-secret" type: kubernetes.io/dockerconfigjson {{- end }} -{{- if .Values.controller.config.lab.installTsSalKafkaSecret }} +{{- if .Values.secrets.installTsSalKafkaSecret }} --- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret diff --git a/applications/nublado/values-tucson-teststand.yaml b/applications/nublado/values-tucson-teststand.yaml index aaf9afe3e9..ce7f8b97cb 100644 --- a/applications/nublado/values-tucson-teststand.yaml +++ b/applications/nublado/values-tucson-teststand.yaml @@ -24,7 +24,6 @@ controller: LSST_SCHEMA_REGISTRY_URL: http://sasquatch-schema-registry.sasquatch:8081 PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" PGUSER: "oods" - installTsSalKafkaSecret: true pullSecret: "pull-secret" secrets: - secretName: "kafka-secret" @@ -117,3 +116,6 @@ jupyterhub: db: upgrade: true url: "postgresql://nublado3@postgresdb01.tu.lsst.org/nublado3" + +secrets: + installTsSalKafkaSecret: true diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 7d99f87501..29d1e83e63 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -216,9 +216,6 @@ controller: # user. initContainers: [] - # -- Flag to put T&S SAL Kafka secrets into pod. - installTsSalKafkaSecret: false - # -- Prefix for namespaces for user labs. To this will be added a dash # (`-`) and the user's username. namespacePrefix: "nublado" @@ -367,6 +364,9 @@ proxy: # Configuration for Nublado secrets management. secrets: + # -- Whether to install the T&S SAL Kafka secret. + installTsSalKafkaSecret: false + # -- Whether to use the new secrets management mechanism. 
If enabled, the # Vault nublado secret will be split into a nublado secret for JupyterHub # and a nublado-lab-secret secret used as a source for secret values for the