From 18aa1abfc796ec2c202aeeec98774cc551799b93 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 17 May 2023 11:46:19 -0700 Subject: [PATCH 001/308] add heinrich to argocd rbac, fix typo --- applications/argocd/values-usdfdev.yaml | 1 + applications/argocd/values-usdfprod.yaml | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index ca9f930465..b7512beb4c 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -47,6 +47,7 @@ argo-cd: g, hchiang2@slac.stanford.edu, role:admin g, athor@slac.stanford.edu, role:admin g, jsick@slac.stanford.edu, role:admin + g, reinking@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 4084e0e20f..488cb06d54 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -38,7 +38,7 @@ argo-cd: requestedIDTokenClaims: {"groups": {"essential": true}} rbacConfig: policy.csv: | - g, ytl@slac.stanford.edu, role:adming + g, ytl@slac.stanford.edu, role:admin g, pav@slac.stanford.edu, role:admin g, dspeck@slac.stanford.edu, role:admin g, afausti@slac.stanford.edu, role:admin @@ -46,6 +46,7 @@ argo-cd: g, cbanek@slac.stanford.edu, role:admin g, hchiang2@slac.stanford.edu, role:admin g, athor@slac.stanford.edu, role:admin + g, reinking@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From 27e28c6698af26552b12390cef618de0a921eb9c Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 17 May 2023 13:17:41 -0700 Subject: [PATCH 002/308] add brianna to argocd rbac --- applications/argocd/values-usdfdev.yaml | 1 + applications/argocd/values-usdfprod.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index b7512beb4c..b41db6f8c2 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -48,6 +48,7 @@ argo-cd: g, athor@slac.stanford.edu, role:admin g, jsick@slac.stanford.edu, role:admin g, reinking@slac.stanford.edu, role:admin + g, smart@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 488cb06d54..ea0e22e6d7 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -47,6 +47,7 @@ argo-cd: g, hchiang2@slac.stanford.edu, role:admin g, athor@slac.stanford.edu, role:admin g, reinking@slac.stanford.edu, role:admin + g, smart@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From 83b98057691da9b7cdd2f96113cc360a6a77731c Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 22 May 2023 13:58:33 -0700 Subject: [PATCH 003/308] Turn on fileserver in IDF Int --- applications/nublado/values-idfint.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index 53c51637ae..7f7e712b7c 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -2,6 +2,8 @@ controller: googleServiceAccount: "nublado-controller@science-platform-int-dc5d.iam.gserviceaccount.com" slackAlerts: true config: + fileserver: + enabled: true images: source: type: "google" From 
8594e31a06c3c90e03b0c09bd33ab49140d30934 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 24 May 2023 13:59:42 -0700 Subject: [PATCH 004/308] [DM-39343] Make a seperate tap schema db for ssotap --- applications/ssotap/README.md | 5 +- .../templates/tap-schema-db-deployment.yaml | 55 +++++++++++++++++++ .../templates/tap-schema-db-service.yaml | 14 +++++ applications/ssotap/values-idfdev.yaml | 4 ++ applications/ssotap/values-idfint.yaml | 4 ++ applications/ssotap/values-idfprod.yaml | 4 ++ applications/ssotap/values-usdfdev.yaml | 30 ---------- applications/ssotap/values-usdfprod.yaml | 30 ---------- applications/ssotap/values.yaml | 8 ++- 9 files changed, 92 insertions(+), 62 deletions(-) create mode 100644 applications/ssotap/templates/tap-schema-db-deployment.yaml create mode 100644 applications/ssotap/templates/tap-schema-db-service.yaml delete mode 100644 applications/ssotap/values-usdfdev.yaml delete mode 100644 applications/ssotap/values-usdfprod.yaml diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index ee0b20be14..7a11eb8342 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -18,7 +18,7 @@ IVOA TAP service | config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket | | config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. | -| config.tapSchemaAddress | string | `"tap-schema-db.tap-schema.svc.cluster.local:3306"` | Address to a MySQL database containing TAP schema data | +| config.tapSchemaAddress | string | `"tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data | | fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | @@ -45,6 +45,9 @@ IVOA TAP service | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | +| tap_schema.image.repository | object | `{}` | | +| tap_schema.image.tag | string | `"1.2.11"` | | +| tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | diff --git a/applications/ssotap/templates/tap-schema-db-deployment.yaml b/applications/ssotap/templates/tap-schema-db-deployment.yaml new file mode 100644 index 0000000000..ba534ed314 --- /dev/null +++ b/applications/ssotap/templates/tap-schema-db-deployment.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cadc-tap.fullname" . }}-tap-schema-db + labels: + {{- include "cadc-tap.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "cadc-tap.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "cadc-tap.selectorLabels" . 
| nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: "tap-schema-db" + env: + - name: MYSQL_DATABASE + value: "TAP_SCHEMA" + - name: MYSQL_USER + value: "TAP_SCHEMA" + - name: MYSQL_PASSWORD + value: "TAP_SCHEMA" + - name: MYSQL_ROOT_HOST + value: "%" + image: "{{ .Values.tap_schema.image.repository }}:{{ .Values.tap_schema.image.tag}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: 3306 + protocol: "TCP" + {{- with .Values.tap_schema.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + imagePullSecrets: + - name: "pull-secret" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/ssotap/templates/tap-schema-db-service.yaml b/applications/ssotap/templates/tap-schema-db-service.yaml new file mode 100644 index 0000000000..74db88ed9c --- /dev/null +++ b/applications/ssotap/templates/tap-schema-db-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ template "cadc-tap.fullname" . }}-db + labels: + {{- include "cadc-tap.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - protocol: "TCP" + port: 3306 + targetPort: 3306 + selector: + {{- include "cadc-tap.selectorLabels" . | nindent 4 }} diff --git a/applications/ssotap/values-idfdev.yaml b/applications/ssotap/values-idfdev.yaml index 9516c1dd94..4c05fbd371 100644 --- a/applications/ssotap/values-idfdev.yaml +++ b/applications/ssotap/values-idfdev.yaml @@ -17,3 +17,7 @@ pg: database: "dp03_catalogs" host: "usdf-pg-catalogs.slac.stanford.edu:5432" username: "dp03" + +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfsso" diff --git a/applications/ssotap/values-idfint.yaml b/applications/ssotap/values-idfint.yaml index 1d65e85abf..fbf8d89484 100644 --- a/applications/ssotap/values-idfint.yaml +++ b/applications/ssotap/values-idfint.yaml @@ -19,3 +19,7 @@ pg: database: "dp03_catalogs" host: "usdf-pg-catalogs.slac.stanford.edu:5432" username: "dp03" + +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfsso" diff --git a/applications/ssotap/values-idfprod.yaml b/applications/ssotap/values-idfprod.yaml index edef98d5b7..231f91b621 100644 --- a/applications/ssotap/values-idfprod.yaml +++ b/applications/ssotap/values-idfprod.yaml @@ -28,3 +28,7 @@ uws: limits: cpu: 2.0 memory: "4G" + +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfsso" diff --git a/applications/ssotap/values-usdfdev.yaml b/applications/ssotap/values-usdfdev.yaml deleted file mode 100644 index a8802d4c5e..0000000000 --- a/applications/ssotap/values-usdfdev.yaml +++ /dev/null @@ -1,30 +0,0 @@ -resources: - requests: - cpu: 2.0 - memory: "2G" - limits: - cpu: 8.0 - memory: "32G" - -replicaCount: 2 - -config: - gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" - jvmMaxHeapSize: "31G" - -pg: - mock: - enabled: false - database: "lsstdb1" - host: "usdf-butler.slac.stanford.edu:5432" - username: "rubin" - -uws: - resources: - requests: - cpu: 0.25 - memory: "1G" - limits: - cpu: 2.0 - memory: "4G" diff --git a/applications/ssotap/values-usdfprod.yaml b/applications/ssotap/values-usdfprod.yaml deleted file mode 100644 index a8802d4c5e..0000000000 --- a/applications/ssotap/values-usdfprod.yaml +++ /dev/null @@ -1,30 +0,0 @@ -resources: - requests: - cpu: 2.0 - 
memory: "2G" - limits: - cpu: 8.0 - memory: "32G" - -replicaCount: 2 - -config: - gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" - jvmMaxHeapSize: "31G" - -pg: - mock: - enabled: false - database: "lsstdb1" - host: "usdf-butler.slac.stanford.edu:5432" - username: "rubin" - -uws: - resources: - requests: - cpu: 0.25 - memory: "1G" - limits: - cpu: 2.0 - memory: "4G" diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml index fbf2c73154..051f3c47d1 100644 --- a/applications/ssotap/values.yaml +++ b/applications/ssotap/values.yaml @@ -53,7 +53,7 @@ vaultSecretsPath: "" config: # -- Address to a MySQL database containing TAP schema data - tapSchemaAddress: "tap-schema-db.tap-schema.svc.cluster.local:3306" + tapSchemaAddress: "tap-schema-db:3306" # -- Datalink payload URL datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip" @@ -120,6 +120,12 @@ pg: # -- Affinity rules for the mock postgres pod affinity: {} +tap_schema: + image: + repository: {} + tag: "1.2.11" + resources: {} + uws: image: # -- UWS database image to use From cd81b357578ef006f3fdfb8e916bd280390e4773 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Mon, 22 May 2023 12:40:01 -0700 Subject: [PATCH 005/308] semaphore test config --- applications/semaphore/values-usdfdev.yaml | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/applications/semaphore/values-usdfdev.yaml b/applications/semaphore/values-usdfdev.yaml index 12ae9f3476..f0f9cb7723 100644 --- a/applications/semaphore/values-usdfdev.yaml +++ b/applications/semaphore/values-usdfdev.yaml @@ -1,9 +1,11 @@ -semaphore: - config: - phalanx_env: "usdfdev" - github_app_id: "131502" - enable_github_app: "True" - ingress: - enabled: true - annotations: - kubernetes.io/ingress.class: nginx +image: + pullPolicy: Always +config: + github_app_id: "337324" + enable_github_app: "True" + phalanx_env: usdfdev + log_level: "DEBUG" + +pull-secret: + enabled: true + path: secret/rubin/usdf-rsp-dev/semaphore From 7f43ca3654395c752ffa6ce3e946a5feb7b3d937 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Mon, 22 May 2023 12:51:41 -0700 Subject: [PATCH 006/308] add semaphore configuration --- applications/semaphore/values-usdfdev.yaml | 17 +++++++++-------- applications/semaphore/values-usdfprod.yaml | 10 ++++++++++ 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/applications/semaphore/values-usdfdev.yaml b/applications/semaphore/values-usdfdev.yaml index f0f9cb7723..b8c9dedfea 100644 --- a/applications/semaphore/values-usdfdev.yaml +++ b/applications/semaphore/values-usdfdev.yaml @@ -1,11 +1,12 @@ -image: - pullPolicy: Always -config: - github_app_id: "337324" - enable_github_app: "True" - phalanx_env: usdfdev - log_level: "DEBUG" - +semaphore: + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx pull-secret: enabled: true path: secret/rubin/usdf-rsp-dev/semaphore +config: + phalanx_env: "usdfdev" + github_app_id: "337324" + enable_github_app: "True" diff --git a/applications/semaphore/values-usdfprod.yaml b/applications/semaphore/values-usdfprod.yaml index a4dba3f052..e97f253deb 100644 --- a/applications/semaphore/values-usdfprod.yaml +++ b/applications/semaphore/values-usdfprod.yaml @@ -1,2 +1,12 @@ +semaphore: + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx +pull-secret: + enabled: true + path: 
secret/rubin/usdf-rsp/semaphore config: phalanx_env: "usdfprod" + github_app_id: "337324" + enable_github_app: "True" From 68f71d68e016ac146ef75aa6379bdbdaafe98fa3 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 24 May 2023 15:47:05 -0700 Subject: [PATCH 007/308] Add new lsst.verify namespace --- applications/sasquatch/values-usdfdev.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 1bb60f2d56..d286704ecc 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -108,6 +108,12 @@ kafka-connect-manager: timestamp: "timestamp" connectInfluxDb: "lsst.camera" topicsRegex: "lsst.camera.*" + lsstverify: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.verify" + topicsRegex: "lsst.verify.*" + tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run kafdrop: ingress: @@ -129,6 +135,7 @@ rest-proxy: - lsst.example - lsst.rubintv - lsst.camera + - lsst.verify chronograf: ingress: From a37f51538b38c6666961d28b7e5f47c9e75ad0e6 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 24 May 2023 17:09:31 -0700 Subject: [PATCH 008/308] Add documentation to Argo CD settings Make our documentation a little bit nicer. Remove a few settings that matched the defaults. --- applications/argocd/README.md | 35 +++++++++++++++------------------ applications/argocd/values.yaml | 28 +++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/applications/argocd/README.md b/applications/argocd/README.md index cfe35d5e6b..5f48737563 100644 --- a/applications/argocd/README.md +++ b/applications/argocd/README.md @@ -13,24 +13,21 @@ Kubernetes application manager | Key | Type | Default | Description | |-----|------|---------|-------------| -| argo-cd.configs.secret.createSecret | bool | `false` | | -| argo-cd.controller.metrics.applicationLabels.enabled | bool | `true` | | -| argo-cd.controller.metrics.applicationLabels.labels[0] | string | `"name"` | | -| argo-cd.controller.metrics.applicationLabels.labels[1] | string | `"instance"` | | -| argo-cd.controller.metrics.enabled | bool | `true` | | +| argo-cd.configs.secret.createSecret | bool | `false` | Create the Argo CD secret (we manage this with Vault) | +| argo-cd.controller.metrics.applicationLabels.enabled | bool | `true` | Enable adding additional labels to `argocd_app_labels` metric | +| argo-cd.controller.metrics.applicationLabels.labels | list | `["name","instance"]` | Labels to add to `argocd_app_labels` metric | +| argo-cd.controller.metrics.enabled | bool | `true` | Enable controller metrics service | | argo-cd.global.logging.format | string | `"json"` | Set the global logging format. 
Either: `text` or `json` | -| argo-cd.notifications.metrics.enabled | bool | `true` | | -| argo-cd.redis.enabled | bool | `true` | | -| argo-cd.redis.metrics.enabled | bool | `true` | | -| argo-cd.repoServer.metrics.enabled | bool | `true` | | -| argo-cd.server.config."helm.repositories" | string | `"- url: https://lsst-sqre.github.io/charts/\n name: lsst-sqre\n- url: https://ricoberger.github.io/helm-charts/\n name: ricoberger\n- url: https://kubernetes.github.io/ingress-nginx/\n name: ingress-nginx\n- url: https://charts.helm.sh/stable\n name: stable\n- url: https://strimzi.io/charts/\n name: strimzi\n"` | | -| argo-cd.server.config."resource.compareoptions" | string | `"ignoreAggregatedRoles: true\n"` | | -| argo-cd.server.extraArgs[0] | string | `"--basehref=/argo-cd"` | | -| argo-cd.server.extraArgs[1] | string | `"--insecure=true"` | | -| argo-cd.server.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | -| argo-cd.server.ingress.enabled | bool | `true` | | -| argo-cd.server.ingress.ingressClassName | string | `"nginx"` | | -| argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | | -| argo-cd.server.ingress.paths[0] | string | `"/argo-cd(/|$)(.*)"` | | -| argo-cd.server.metrics.enabled | bool | `true` | | +| argo-cd.notifications.metrics.enabled | bool | `true` | Enable notifications metrics service | +| argo-cd.redis.metrics.enabled | bool | `true` | Enable Redis metrics service | +| argo-cd.repoServer.metrics.enabled | bool | `true` | Enable repo server metrics service | +| argo-cd.server.config."helm.repositories" | string | See `values.yaml` | Additional Helm repositories to use | +| argo-cd.server.config."resource.compareoptions" | string | Ignore aggregated cluster roles | Comparison options for resources | +| argo-cd.server.extraArgs | list | `["--basehref=/argo-cd","--insecure=true"]` | Extra arguments to pass to the Argo CD server | +| argo-cd.server.ingress.annotations | object | Rewrite requests to remove `/argo-cd/` prefix | Additional annotations to add to the Argo CD ingress | +| argo-cd.server.ingress.enabled | bool | `true` | Create an ingress for the Argo CD server | +| argo-cd.server.ingress.ingressClassName | string | `"nginx"` | Ingress class to use for Argo CD ingress | +| argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | Type of path expression for Argo CD ingress | +| argo-cd.server.ingress.paths | list | `["/argo-cd(/|$)(.*)"]` | Paths to route to Argo CD | +| argo-cd.server.metrics.enabled | bool | `true` | Enable server metrics service | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/argocd/values.yaml b/applications/argocd/values.yaml index 05310ab224..7817b8d918 100644 --- a/applications/argocd/values.yaml +++ b/applications/argocd/values.yaml @@ -8,42 +8,64 @@ argo-cd: format: "json" redis: - enabled: true metrics: + # -- Enable Redis metrics service enabled: true controller: metrics: + # -- Enable controller metrics service enabled: true + applicationLabels: + # -- Enable adding additional labels to `argocd_app_labels` metric enabled: true + + # -- Labels to add to `argocd_app_labels` metric labels: ["name", "instance"] repoServer: metrics: + # -- Enable repo server metrics service enabled: true notifications: metrics: + # -- Enable notifications metrics service enabled: true server: metrics: + # -- Enable server metrics service enabled: true + ingress: + # -- Create an ingress for the Argo CD server enabled: true + + 
# -- Additional annotations to add to the Argo CD ingress + # @default -- Rewrite requests to remove `/argo-cd/` prefix annotations: nginx.ingress.kubernetes.io/rewrite-target: "/$2" + + # -- Ingress class to use for Argo CD ingress ingressClassName: "nginx" + + # -- Paths to route to Argo CD paths: - "/argo-cd(/|$)(.*)" + + # -- Type of path expression for Argo CD ingress pathType: "ImplementationSpecific" + # -- Extra arguments to pass to the Argo CD server extraArgs: - "--basehref=/argo-cd" - "--insecure=true" config: + # -- Additional Helm repositories to use + # @default -- See `values.yaml` helm.repositories: | - url: https://lsst-sqre.github.io/charts/ name: lsst-sqre @@ -55,11 +77,15 @@ argo-cd: name: stable - url: https://strimzi.io/charts/ name: strimzi + + # -- Comparison options for resources + # @default -- Ignore aggregated cluster roles resource.compareoptions: | ignoreAggregatedRoles: true configs: secret: + # -- Create the Argo CD secret (we manage this with Vault) createSecret: false # The following will be set by parameters injected by Argo CD and should not From 9f9824be3cabadd9f9245b238dcaa6f2ca13cc54 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 24 May 2023 17:14:30 -0700 Subject: [PATCH 009/308] Simplify Argo CD configuration Set insecure and basehref using configs.params instead of passing command-line flags to the Argo CD server. Remove the Helm repository configuration since apparently it is no longer configured where we were configuring it and doesn't appear to be needed. --- applications/argocd/README.md | 5 ++--- applications/argocd/values.yaml | 32 +++++++------------------------- 2 files changed, 9 insertions(+), 28 deletions(-) diff --git a/applications/argocd/README.md b/applications/argocd/README.md index 5f48737563..29c8b22daf 100644 --- a/applications/argocd/README.md +++ b/applications/argocd/README.md @@ -13,6 +13,8 @@ Kubernetes application manager | Key | Type | Default | Description | |-----|------|---------|-------------| +| argo-cd.configs.params."server.basehref" | string | `"/argo-cd"` | Base href for `index.html` when running under a reverse proxy | +| argo-cd.configs.params."server.insecure" | bool | `true` | Do not use TLS (this is terminated at the ingress) | | argo-cd.configs.secret.createSecret | bool | `false` | Create the Argo CD secret (we manage this with Vault) | | argo-cd.controller.metrics.applicationLabels.enabled | bool | `true` | Enable adding additional labels to `argocd_app_labels` metric | | argo-cd.controller.metrics.applicationLabels.labels | list | `["name","instance"]` | Labels to add to `argocd_app_labels` metric | @@ -21,9 +23,6 @@ Kubernetes application manager | argo-cd.notifications.metrics.enabled | bool | `true` | Enable notifications metrics service | | argo-cd.redis.metrics.enabled | bool | `true` | Enable Redis metrics service | | argo-cd.repoServer.metrics.enabled | bool | `true` | Enable repo server metrics service | -| argo-cd.server.config."helm.repositories" | string | See `values.yaml` | Additional Helm repositories to use | -| argo-cd.server.config."resource.compareoptions" | string | Ignore aggregated cluster roles | Comparison options for resources | -| argo-cd.server.extraArgs | list | `["--basehref=/argo-cd","--insecure=true"]` | Extra arguments to pass to the Argo CD server | | argo-cd.server.ingress.annotations | object | Rewrite requests to remove `/argo-cd/` prefix | Additional annotations to add to the Argo CD ingress | | argo-cd.server.ingress.enabled | bool | `true` | Create an ingress 
for the Argo CD server | | argo-cd.server.ingress.ingressClassName | string | `"nginx"` | Ingress class to use for Argo CD ingress | diff --git a/applications/argocd/values.yaml b/applications/argocd/values.yaml index 7817b8d918..a97350cc57 100644 --- a/applications/argocd/values.yaml +++ b/applications/argocd/values.yaml @@ -58,32 +58,14 @@ argo-cd: # -- Type of path expression for Argo CD ingress pathType: "ImplementationSpecific" - # -- Extra arguments to pass to the Argo CD server - extraArgs: - - "--basehref=/argo-cd" - - "--insecure=true" - - config: - # -- Additional Helm repositories to use - # @default -- See `values.yaml` - helm.repositories: | - - url: https://lsst-sqre.github.io/charts/ - name: lsst-sqre - - url: https://ricoberger.github.io/helm-charts/ - name: ricoberger - - url: https://kubernetes.github.io/ingress-nginx/ - name: ingress-nginx - - url: https://charts.helm.sh/stable - name: stable - - url: https://strimzi.io/charts/ - name: strimzi - - # -- Comparison options for resources - # @default -- Ignore aggregated cluster roles - resource.compareoptions: | - ignoreAggregatedRoles: true - configs: + params: + # -- Do not use TLS (this is terminated at the ingress) + server.insecure: true + + # -- Base href for `index.html` when running under a reverse proxy + server.basehref: "/argo-cd" + secret: # -- Create the Argo CD secret (we manage this with Vault) createSecret: false From 74a61021e3919dee70e48489ee64f4edf1e012e7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 25 May 2023 07:53:58 -0700 Subject: [PATCH 010/308] Re-add Argo CD configuration for aggregated roles The documentation was a bit unclear, but I found the right place to put this in the new configuration layout. So far as I can tell, explicitly registering the Helm repositories shouldn't be required, so those are still omitted. --- applications/argocd/README.md | 1 + applications/argocd/values.yaml | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/applications/argocd/README.md b/applications/argocd/README.md index 29c8b22daf..4c2f167dc5 100644 --- a/applications/argocd/README.md +++ b/applications/argocd/README.md @@ -13,6 +13,7 @@ Kubernetes application manager | Key | Type | Default | Description | |-----|------|---------|-------------| +| argo-cd.configs.cm."resource.compareoptions" | string | `"ignoreAggregatedRoles: true\n"` | Configure resource comparison | | argo-cd.configs.params."server.basehref" | string | `"/argo-cd"` | Base href for `index.html` when running under a reverse proxy | | argo-cd.configs.params."server.insecure" | bool | `true` | Do not use TLS (this is terminated at the ingress) | | argo-cd.configs.secret.createSecret | bool | `false` | Create the Argo CD secret (we manage this with Vault) | diff --git a/applications/argocd/values.yaml b/applications/argocd/values.yaml index a97350cc57..dcb51f1c8c 100644 --- a/applications/argocd/values.yaml +++ b/applications/argocd/values.yaml @@ -59,6 +59,11 @@ argo-cd: pathType: "ImplementationSpecific" configs: + cm: + # -- Configure resource comparison + resource.compareoptions: | + ignoreAggregatedRoles: true + params: # -- Do not use TLS (this is terminated at the ingress) server.insecure: true From 13aa4178a3790e9474c12c9a697cdc9c1651be59 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 17 May 2023 17:38:57 -0700 Subject: [PATCH 011/308] BTS: Add more mounts to nublado2. 
--- applications/nublado2/values-base.yaml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/applications/nublado2/values-base.yaml b/applications/nublado2/values-base.yaml index 1e1b972945..57b7e941ca 100644 --- a/applications/nublado2/values-base.yaml +++ b/applications/nublado2/values-base.yaml @@ -49,6 +49,19 @@ config: nfs: path: /lsstdata server: nfs-lsstdata.ls.lsst.org + - name: auxtel-butler + nfs: + path: /repo/LATISS + server: auxtel-archiver.ls.lsst.org + - name: auxtel-oods + nfs: + path: /lsstdata/BTS/auxtel + server: auxtel-archiver.ls.lsst.org + readOnly: true + - name: obs-env + nfs: + path: /obs-env + server: nfs-obsenv.ls.lsst.org volume_mounts: - name: home mountPath: /home @@ -58,3 +71,10 @@ config: mountPath: /project - name: scratch mountPath: /scratch + - name: auxtel-butler + mountPath: /repo/LATISS + - name: auxtel-oods + mountPath: /data/lsstdata/BTS/auxtel + readOnly: true + - name: obs-env + mountPath: /net/obs-env From c0ef08b4c3b3519b1a344938b58f824903cec5be Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 25 May 2023 13:51:46 -0700 Subject: [PATCH 012/308] [DM-39343] Try to fix up tap-schema-db for ssotap --- applications/ssotap/README.md | 2 +- applications/ssotap/templates/tap-schema-db-deployment.yaml | 1 + applications/ssotap/templates/tap-schema-db-service.yaml | 3 ++- applications/ssotap/values.yaml | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index 7a11eb8342..bcc4be37b7 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -18,7 +18,7 @@ IVOA TAP service | config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket | | config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. | -| config.tapSchemaAddress | string | `"tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data | +| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data | | fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | diff --git a/applications/ssotap/templates/tap-schema-db-deployment.yaml b/applications/ssotap/templates/tap-schema-db-deployment.yaml index ba534ed314..929bdce88f 100644 --- a/applications/ssotap/templates/tap-schema-db-deployment.yaml +++ b/applications/ssotap/templates/tap-schema-db-deployment.yaml @@ -17,6 +17,7 @@ spec: {{- end }} labels: {{- include "cadc-tap.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: "schema-db" spec: automountServiceAccountToken: false containers: diff --git a/applications/ssotap/templates/tap-schema-db-service.yaml b/applications/ssotap/templates/tap-schema-db-service.yaml index 74db88ed9c..e5b9dd0856 100644 --- a/applications/ssotap/templates/tap-schema-db-service.yaml +++ b/applications/ssotap/templates/tap-schema-db-service.yaml @@ -1,7 +1,7 @@ kind: Service apiVersion: v1 metadata: - name: {{ template "cadc-tap.fullname" . }}-db + name: {{ template "cadc-tap.fullname" . }}-schema-db labels: {{- include "cadc-tap.labels" . 
| nindent 4 }} spec: @@ -12,3 +12,4 @@ spec: targetPort: 3306 selector: {{- include "cadc-tap.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: "schema-db" diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml index 051f3c47d1..b2d51618ce 100644 --- a/applications/ssotap/values.yaml +++ b/applications/ssotap/values.yaml @@ -53,7 +53,7 @@ vaultSecretsPath: "" config: # -- Address to a MySQL database containing TAP schema data - tapSchemaAddress: "tap-schema-db:3306" + tapSchemaAddress: "cadc-tap-schema-db:3306" # -- Datalink payload URL datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip" From 95e16eaca89aba9c19ecdbc8b7b8a211509875aa Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 25 May 2023 14:42:27 -0700 Subject: [PATCH 013/308] remove invalid pull-secret config and delete trailing whitespace --- applications/semaphore/values-usdfdev.yaml | 5 +---- applications/semaphore/values-usdfprod.yaml | 5 +---- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/applications/semaphore/values-usdfdev.yaml b/applications/semaphore/values-usdfdev.yaml index b8c9dedfea..1cb6319f9f 100644 --- a/applications/semaphore/values-usdfdev.yaml +++ b/applications/semaphore/values-usdfdev.yaml @@ -2,10 +2,7 @@ semaphore: ingress: enabled: true annotations: - kubernetes.io/ingress.class: nginx -pull-secret: - enabled: true - path: secret/rubin/usdf-rsp-dev/semaphore + kubernetes.io/ingress.class: nginx config: phalanx_env: "usdfdev" github_app_id: "337324" diff --git a/applications/semaphore/values-usdfprod.yaml b/applications/semaphore/values-usdfprod.yaml index e97f253deb..b6473f3f72 100644 --- a/applications/semaphore/values-usdfprod.yaml +++ b/applications/semaphore/values-usdfprod.yaml @@ -2,10 +2,7 @@ semaphore: ingress: enabled: true annotations: - kubernetes.io/ingress.class: nginx -pull-secret: - enabled: true - path: secret/rubin/usdf-rsp/semaphore + kubernetes.io/ingress.class: nginx config: phalanx_env: "usdfprod" github_app_id: "337324" From c429591f772441760446724e942cbfdad0d50be7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 26 May 2023 14:16:01 -0700 Subject: [PATCH 014/308] Use new JupyterHub and Nublado controller images Pick up the 0.3.1 release of the REST spawner and the 0.6.0 release of the Nublado controller. 
--- applications/nublado/Chart.yaml | 2 +- applications/nublado/README.md | 2 +- applications/nublado/values.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 1a76ce6a01..4df80f0668 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -6,7 +6,7 @@ sources: - https://github.com/lsst-sqre/jupyterlab-controller - https://github.com/lsst-sqre/rsp-restspawner home: https://github.com/lsst-sqre/jupyterlab-controller -appVersion: 0.5.0 +appVersion: 0.6.0 dependencies: - name: jupyterhub diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 9d34955f4a..73d6e90b58 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -69,7 +69,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | | jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | | jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"0.3.0"` | Tag of image to use for JupyterHub | +| jupyterhub.hub.image.tag | string | `"0.3.1"` | Tag of image to use for JupyterHub | | jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | | jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | | jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 0cf31756f6..afb5409442 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -288,7 +288,7 @@ jupyterhub: name: ghcr.io/lsst-sqre/rsp-restspawner # -- Tag of image to use for JupyterHub - tag: 0.3.0 + tag: 0.3.1 # -- Resource limits and requests resources: From 8d8e63fd4e744700813a988c9c35f898e58ff753 Mon Sep 17 00:00:00 2001 From: Gregory Dubois-Felsmann Date: Fri, 26 May 2023 13:41:31 -0700 Subject: [PATCH 015/308] Add values-level support for adding SSO TAP to Portal menu Allows turning on the SSO TAP service in the Portal on a per-instance basis, initially for data-int. 
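For any other environment, opting in is a single values-file setting (sketch only; this change turns it on just for data-int via values-idfint.yaml, and the file name below is a hypothetical stand-in):

    # Hypothetical values-<env>.yaml for some other environment
    config:
      # Endpoint under /api/ for the DP0.3 SSO TAP service on the instance
      ssotap: "ssotap"

Leaving config.ssotap at its default empty string keeps the DP0.3 SSO entry out of the Portal's TAP service list.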
--- applications/portal/templates/deployment.yaml | 16 ++++++++++++++-- applications/portal/values-idfint.yaml | 1 + applications/portal/values.yaml | 3 +++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/applications/portal/templates/deployment.yaml b/applications/portal/templates/deployment.yaml index 676a0b74c8..62cb0628f9 100644 --- a/applications/portal/templates/deployment.yaml +++ b/applications/portal/templates/deployment.yaml @@ -61,7 +61,8 @@ spec: }, "tap" : { "additional": { - "services": [ { + "services": [ + { "label": "LSST RSP", "value": "{{ .Values.global.baseUrl }}/api/tap", {{- if .Values.config.hipsUrl }} @@ -71,7 +72,18 @@ spec: {{- end }} "centerWP": "62;-37;EQ_J2000", "fovDeg": 10 - } ] + } + {{- if .Values.config.ssotap }} + , + { + "label": "DP0.3 SSO", + "value": "{{ .Values.global.baseUrl }}/api/{{ .Values.config.ssotap}}", + "hipsUrl": "{{ .Values.global.baseUrl }}/api/hips/images/color_gri", + "centerWP": "0;0;ECL", + "fovDeg": 10 + } + {{- end }} + ] } }, "hips": { diff --git a/applications/portal/values-idfint.yaml b/applications/portal/values-idfint.yaml index bbff39a615..0d736a812a 100644 --- a/applications/portal/values-idfint.yaml +++ b/applications/portal/values-idfint.yaml @@ -5,6 +5,7 @@ config: workareaNfs: path: "/share1/home/firefly/shared-workarea" server: "10.22.240.130" + ssotap: "ssotap" resources: limits: diff --git a/applications/portal/values.yaml b/applications/portal/values.yaml index a369100e8c..ba77eac515 100644 --- a/applications/portal/values.yaml +++ b/applications/portal/values.yaml @@ -61,6 +61,9 @@ config: # -- Search path for FITS files visualizeFitsSearchPath: "/datasets" + # -- Endpoint under `/api/` for the DP0.3 SSO TAP service on the instance, if present + ssotap: "" + volumes: # -- hostPath to mount as a shared work area. Set either this or # `workareaNfs`, not both. From 4f963c578951bd406f7d1c77a0ef174e514c6a4f Mon Sep 17 00:00:00 2001 From: Gregory Dubois-Felsmann Date: Fri, 26 May 2023 14:29:06 -0700 Subject: [PATCH 016/308] Try to satisfy CI --- applications/portal/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/portal/README.md b/applications/portal/README.md index ba46958eba..08d4a6cecd 100644 --- a/applications/portal/README.md +++ b/applications/portal/README.md @@ -15,6 +15,7 @@ Rubin Science Platform Portal Aspect | config.cleanupInterval | string | `"36h"` | How long results should be retained before being deleted | | config.debug | string | `"FALSE"` | Set to `TRUE` to enable service debugging | | config.hipsUrl | string | `/api/hips/images/color_gri` in the local Science Platform | URL for default HiPS service | +| config.ssotap | string | `""` | Endpoint under `/api/` for the DP0.3 SSO TAP service on the instance, if present | | config.visualizeFitsSearchPath | string | `"/datasets"` | Search path for FITS files | | config.volumes.configHostPath | string | Use an `emptyDir` | hostPath to mount as configuration. Set either this of `configNfs`, not both. | | config.volumes.configNfs | object | Use an `emptyDir` | NFS information for a configuration. If set, must have keys for path and server, Set either this of `configHostPath`, not both. 
| From 77edd15c317ff4adc421c43f730942600ce58518 Mon Sep 17 00:00:00 2001 From: Frossie Date: Fri, 26 May 2023 15:05:39 -0700 Subject: [PATCH 017/308] add frossie to argocd --- applications/argocd/values-usdfprod.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index ea0e22e6d7..0cf37a9cc8 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -44,6 +44,7 @@ argo-cd: g, afausti@slac.stanford.edu, role:admin g, mfl@slac.stanford.edu, role:admin g, cbanek@slac.stanford.edu, role:admin + g, frossie@slac.stanford.edu, role:admin g, hchiang2@slac.stanford.edu, role:admin g, athor@slac.stanford.edu, role:admin g, reinking@slac.stanford.edu, role:admin From 91c05f88faa933f485d8837d2adb5c65623e55a9 Mon Sep 17 00:00:00 2001 From: Frossie Date: Fri, 26 May 2023 15:08:29 -0700 Subject: [PATCH 018/308] add frossie to usdfdev argocd --- applications/argocd/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index b41db6f8c2..9f5cc695a3 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -44,6 +44,7 @@ argo-cd: g, afausti@slac.stanford.edu, role:admin g, mfl@slac.stanford.edu, role:admin g, cbanek@slac.stanford.edu, role:admin + g. frossie@slac.stanford.edu, role:admin g, hchiang2@slac.stanford.edu, role:admin g, athor@slac.stanford.edu, role:admin g, jsick@slac.stanford.edu, role:admin From e141a891ce997c4f59c158cd683066b4b11ca246 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 26 May 2023 15:15:31 -0700 Subject: [PATCH 019/308] Add USDF-dev nubladov3 --- .../nublado/templates/controller-serviceaccount.yaml | 1 + applications/nublado/values-usdfdev.yaml | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/applications/nublado/templates/controller-serviceaccount.yaml b/applications/nublado/templates/controller-serviceaccount.yaml index d0c09d9fba..35d540edf0 100644 --- a/applications/nublado/templates/controller-serviceaccount.yaml +++ b/applications/nublado/templates/controller-serviceaccount.yaml @@ -31,6 +31,7 @@ rules: - "pods" - "resourcequotas" - "services" + - "persistentvolumeclaims" verbs: - "create" - "delete" diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 762480d2f2..6d41f46005 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -2,6 +2,9 @@ controller: config: safir: logLevel: "DEBUG" + fileserver: + enabled: true + timeout: 21600 images: source: @@ -16,10 +19,13 @@ controller: lab: pullSecret: "pull-secret" + homedirSchema: "initialThenUsername" + env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" + HUB_ROUTE: "/n3/hub" PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "rubin" S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" @@ -169,6 +175,10 @@ proxy: nginx.ingress.kubernetes.io/proxy-read-timeout: "20" jupyterhub: + hub: + baseUrl: "/n3" + db: + url: "postgresql://nublado3@postgres.postgres/nublado3" cull: timeout: 432000 every: 300 From 8fb10229aca0adc15178f0995e22af4656483d77 Mon Sep 17 00:00:00 2001 From: Frossie Date: Fri, 26 May 2023 
15:19:31 -0700 Subject: [PATCH 020/308] Update values-usdfdev.yaml --- applications/argocd/values-usdfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index 9f5cc695a3..cc40ed24bb 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -44,7 +44,7 @@ argo-cd: g, afausti@slac.stanford.edu, role:admin g, mfl@slac.stanford.edu, role:admin g, cbanek@slac.stanford.edu, role:admin - g. frossie@slac.stanford.edu, role:admin + g, frossie@slac.stanford.edu, role:admin g, hchiang2@slac.stanford.edu, role:admin g, athor@slac.stanford.edu, role:admin g, jsick@slac.stanford.edu, role:admin From 341e9a5f490728103d243b71f56e5dd3fa59d46f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 26 May 2023 15:33:25 -0700 Subject: [PATCH 021/308] Test tutorial notebooks on data-dev and data-int Now that the tutorial notebooks should work on the data-dev and data-int environments, start testing them there with mobu using Nublado v3. --- applications/mobu/values-idfdev.yaml | 19 +++++++++++++++++++ applications/mobu/values-idfint.yaml | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 37bc0563ac..7f2d968b38 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -37,6 +37,25 @@ config: url_prefix: "/n3" use_cachemachine: false restart: true + - name: "tutorial" + count: 1 + users: + - username: "bot-mobu-tutorial" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" + repo_branch: "prod" + max_executions: 1 + working_directory: "notebooks/tutorial-notebooks" + use_cachemachine: false + url_prefix: "/n3" + restart: true - name: "tap" count: 1 users: diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index e8da2c1d59..2349d0456f 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -52,6 +52,25 @@ config: use_cachemachine: false url_prefix: "/n3" restart: true + - name: "tutorial" + count: 1 + users: + - username: "bot-mobu-tutorial" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" + repo_branch: "prod" + max_executions: 1 + working_directory: "notebooks/tutorial-notebooks" + use_cachemachine: false + url_prefix: "/n3" + restart: true - name: "tap" count: 1 users: From b2f9631cce85d5318b32854fb7f30f9b3a8f1e2f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 26 May 2023 15:50:24 -0700 Subject: [PATCH 022/308] Run tutorial notebooks from main on IDF dev and int The assert still hasn't been fixed on prod, but it looks like it's been fixed on main, so run the main branch for now. 
--- applications/mobu/values-idfdev.yaml | 2 +- applications/mobu/values-idfint.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 7f2d968b38..24cc6a9890 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -50,7 +50,7 @@ config: type: "NotebookRunner" options: repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" - repo_branch: "prod" + repo_branch: "main" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" use_cachemachine: false diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 2349d0456f..922fdb690b 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -65,7 +65,7 @@ config: type: "NotebookRunner" options: repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" - repo_branch: "prod" + repo_branch: "main" max_executions: 1 working_directory: "notebooks/tutorial-notebooks" use_cachemachine: false From 749fa424ae3d421623ba68b89ca3ee77def4a86a Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 26 May 2023 16:03:22 -0700 Subject: [PATCH 023/308] Turn the tutorial notebook tests off again The assert that prevents them from running anywhere other than IDF prod has not been removed on any branch or PR that I can find, so turn them off on IDF int and dev for the time being. --- applications/mobu/values-idfdev.yaml | 19 ------------------- applications/mobu/values-idfint.yaml | 19 ------------------- 2 files changed, 38 deletions(-) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 24cc6a9890..37bc0563ac 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -37,25 +37,6 @@ config: url_prefix: "/n3" use_cachemachine: false restart: true - - name: "tutorial" - count: 1 - users: - - username: "bot-mobu-tutorial" - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" - repo_branch: "main" - max_executions: 1 - working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false - url_prefix: "/n3" - restart: true - name: "tap" count: 1 users: diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 922fdb690b..e8da2c1d59 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -52,25 +52,6 @@ config: use_cachemachine: false url_prefix: "/n3" restart: true - - name: "tutorial" - count: 1 - users: - - username: "bot-mobu-tutorial" - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" - repo_branch: "main" - max_executions: 1 - working_directory: "notebooks/tutorial-notebooks" - use_cachemachine: false - url_prefix: "/n3" - restart: true - name: "tap" count: 1 users: From d882ebcbd3a9ff4ba1848913a1bf8b5a6593af07 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 02:15:56 +0000 Subject: [PATCH 024/308] Update Helm release argo-workflows to v0.28.2 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml 
b/applications/argo-workflows/Chart.yaml index 0c17b5eca8..e1ef54ee56 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.28.0 + version: 0.28.2 repository: https://argoproj.github.io/argo-helm From 663ead1e494b24317fd080750907dcc3ebfd0460 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 05:53:51 +0000 Subject: [PATCH 025/308] Update Helm release argo-cd to v5.34.6 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 8f231534e2..13fd0cd6b6 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.34.1 + version: 5.34.6 repository: https://argoproj.github.io/argo-helm From d3f8ff37d908a236790acda3f0e9043eb5a77161 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 29 May 2023 05:53:55 +0000 Subject: [PATCH 026/308] Update Helm release cert-manager to v1.12.1 --- applications/cert-manager/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml index 8f807c7af3..d0bad7bd8a 100644 --- a/applications/cert-manager/Chart.yaml +++ b/applications/cert-manager/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/cert-manager/cert-manager dependencies: - name: cert-manager - version: v1.12.0 + version: v1.12.1 repository: https://charts.jetstack.io From ecd57dd901b903d626d990be731cff31d94c8ed6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 15:15:39 +0000 Subject: [PATCH 027/308] Update Helm release telegraf to v1.8.28 --- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 8a7cd52436..80e4383643 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.27 + version: 1.8.28 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From a0d891ccf67e57bc8b472e2367337456c3328180 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 30 May 2023 15:15:45 +0000 Subject: [PATCH 028/308] Update Helm release telegraf-ds to v1.1.10 --- applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 48e744058b..2a1d70213e 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.9 + version: 1.1.10 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From ddf1a851b3d4103a50a7575ead4abdf64122af6d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 30 May 2023 08:22:50 -0700 Subject: [PATCH 029/308] Revert "Turn the tutorial notebook tests off again" This reverts commit 
749fa424ae3d421623ba68b89ca3ee77def4a86a. The assert preventing tutorial notebooks to run in other environments has reportedly been removed on main. --- applications/mobu/values-idfdev.yaml | 19 +++++++++++++++++++ applications/mobu/values-idfint.yaml | 19 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 37bc0563ac..24cc6a9890 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -37,6 +37,25 @@ config: url_prefix: "/n3" use_cachemachine: false restart: true + - name: "tutorial" + count: 1 + users: + - username: "bot-mobu-tutorial" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" + repo_branch: "main" + max_executions: 1 + working_directory: "notebooks/tutorial-notebooks" + use_cachemachine: false + url_prefix: "/n3" + restart: true - name: "tap" count: 1 users: diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index e8da2c1d59..922fdb690b 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -52,6 +52,25 @@ config: use_cachemachine: false url_prefix: "/n3" restart: true + - name: "tutorial" + count: 1 + users: + - username: "bot-mobu-tutorial" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" + repo_branch: "main" + max_executions: 1 + working_directory: "notebooks/tutorial-notebooks" + use_cachemachine: false + url_prefix: "/n3" + restart: true - name: "tap" count: 1 users: From 2fd5f642fd68dabc406f1c14cdb2296f5b27572c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 30 May 2023 08:53:57 -0700 Subject: [PATCH 030/308] Run tutorial notebooks with latest-weekly On IDF dev and IDF int, run the tutorial notebooks with latest-weekly since they require the new lsst.rsp package. --- applications/mobu/values-idfdev.yaml | 2 ++ applications/mobu/values-idfint.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 24cc6a9890..2497584694 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -49,6 +49,8 @@ config: business: type: "NotebookRunner" options: + image: + image_class: "latest-weekly" repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" repo_branch: "main" max_executions: 1 diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 922fdb690b..767751c08f 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -64,6 +64,8 @@ config: business: type: "NotebookRunner" options: + image: + image_class: "latest-weekly" repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git" repo_branch: "main" max_executions: 1 From 51e275024e50e5e89a7ef08d705e0638b982d244 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 30 May 2023 09:19:00 -0700 Subject: [PATCH 031/308] Summit: Add LaserTracker connector to sasquatch. 
--- applications/sasquatch/values-summit.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 551a6cbd59..fe62ff3a11 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -99,6 +99,10 @@ kafka-connect-manager: enabled: true repairerConnector: false topicsRegex: ".*MTVMS" + lasertracker: + enabled: true + repairerConnector: false + topicsRegex: ".*LaserTracker" kafdrop: ingress: From 725f8eac7511c47a1ec6afe18ea5c90af66b96ea Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 30 May 2023 14:08:29 -0700 Subject: [PATCH 032/308] USDF: Add LaserTracker connector for sasquatch. --- applications/sasquatch/values-usdfprod.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 734f5952a2..ec3293ecfa 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -73,6 +73,9 @@ kafka-connect-manager: gis: enabled: true topicsRegex: ".*GIS" + lasertracker: + enabled: true + topicsRegex: ".*LaserTracker" kafdrop: ingress: From 2e0586a9546b54040d05608ff985b30677a6f07b Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 30 May 2023 14:30:50 -0700 Subject: [PATCH 033/308] Enforce 4GiB:1CPU ratio --- applications/nublado/values-idfint.yaml | 8 ++++---- applications/nublado/values.yaml | 6 +++--- applications/nublado2/README.md | 2 +- applications/nublado2/values-idfint.yaml | 8 ++++---- applications/nublado2/values.yaml | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index a033ccb94c..b4536fe12f 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -37,16 +37,16 @@ controller: sizes: small: cpu: 1.0 - memory: 3Gi + memory: 4Gi medium: cpu: 2.0 - memory: 6Gi + memory: 8Gi large: cpu: 4.0 - memory: 12Gi + memory: 16Gi huge: cpu: 8.0 - memory: 24Gi + memory: 32Gi initContainers: - name: "initdir" image: "ghcr.io/lsst-sqre/initdir:0.0.4" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index afb5409442..f1131894bd 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -139,13 +139,13 @@ controller: sizes: small: cpu: 1.0 - memory: 3Gi + memory: 4Gi medium: cpu: 2.0 - memory: 6Gi + memory: 8Gi large: cpu: 4.0 - memory: 12Gi + memory: 16Gi # -- Volumes that should be mounted in lab pods. This supports NFS, # HostPath, and PVC volume types (differentiated in source.type) diff --git a/applications/nublado2/README.md b/applications/nublado2/README.md index 9caffa00d5..8f4ebdf915 100644 --- a/applications/nublado2/README.md +++ b/applications/nublado2/README.md @@ -19,7 +19,7 @@ JupyterHub for the Rubin Science Platform | config.pinned_images | list | `[]` | images to pin to spawner menu | | config.pull_secret_path | string | `""` | pull_secret_path must also be set here; it specifies resources in the lab namespace | | config.shutdown_on_logout | bool | `true` | shut down user pods on logout. Superfluous, because our LogoutHandler enforces this in any event, but nice to make explicit. 
| -| config.sizes | list | `[{"cpu":1,"name":"Small","ram":"3072M"},{"cpu":2,"name":"Medium","ram":"6144M"},{"cpu":4,"name":"Large","ram":"12288M"}]` | definitions of Lab sizes available in a given instance | +| config.sizes | list | `[{"cpu":1,"name":"Small","ram":"4096M"},{"cpu":2,"name":"Medium","ram":"8192M"},{"cpu":4,"name":"Large","ram":"16384M"}]` | definitions of Lab sizes available in a given instance | | config.user_resources_template | string | See `values.yaml` | Templates for the user resources to create for each lab spawn. This is a string that can be templated and then loaded as YAML to generate a list of Kubernetes objects to create. | | config.volume_mounts | list | `[]` | Where to mount volumes for a particular instance | | config.volumes | list | `[]` | Volumes to use for a particular instance | diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml index a04cfdc465..b411ebddad 100644 --- a/applications/nublado2/values-idfint.yaml +++ b/applications/nublado2/values-idfint.yaml @@ -43,16 +43,16 @@ config: sizes: - name: Small cpu: 1 - ram: 3072M + ram: 4096M - name: Medium cpu: 2 - ram: 6144M + ram: 8192M - name: Large cpu: 4 - ram: 12288M + ram: 16384M - name: Huge cpu: 8 - ram: 24576M + ram: 32768M volumes: - name: home nfs: diff --git a/applications/nublado2/values.yaml b/applications/nublado2/values.yaml index 7582be6e31..35b642c2b8 100644 --- a/applications/nublado2/values.yaml +++ b/applications/nublado2/values.yaml @@ -200,13 +200,13 @@ config: sizes: - name: Small cpu: 1 - ram: 3072M + ram: 4096M - name: Medium cpu: 2 - ram: 6144M + ram: 8192M - name: Large cpu: 4 - ram: 12288M + ram: 16384M # -- Volumes to use for a particular instance volumes: [] # -- Where to mount volumes for a particular instance From d85cb1a97cc496c7924679a506c1e2d0968627ec Mon Sep 17 00:00:00 2001 From: Gregory Dubois-Felsmann Date: Wed, 31 May 2023 12:19:03 -0700 Subject: [PATCH 034/308] Restore DP0.2 TAP as the default service Change order of RSP TAP service entries to keep DP0.2 first, and DP0.3 / SSOTAP as the second. (Workaround for surprising behavior of Firefly configuration language.) --- applications/portal/templates/deployment.yaml | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/applications/portal/templates/deployment.yaml b/applications/portal/templates/deployment.yaml index 62cb0628f9..be692fcc41 100644 --- a/applications/portal/templates/deployment.yaml +++ b/applications/portal/templates/deployment.yaml @@ -62,6 +62,15 @@ spec: "tap" : { "additional": { "services": [ + {{- if .Values.config.ssotap }} + { + "label": "DP0.3 SSO", + "value": "{{ .Values.global.baseUrl }}/api/{{ .Values.config.ssotap}}", + "hipsUrl": "{{ .Values.global.baseUrl }}/api/hips/images/color_gri", + "centerWP": "0;0;ECL", + "fovDeg": 10 + }, + {{- end }} { "label": "LSST RSP", "value": "{{ .Values.global.baseUrl }}/api/tap", @@ -73,16 +82,6 @@ spec: "centerWP": "62;-37;EQ_J2000", "fovDeg": 10 } - {{- if .Values.config.ssotap }} - , - { - "label": "DP0.3 SSO", - "value": "{{ .Values.global.baseUrl }}/api/{{ .Values.config.ssotap}}", - "hipsUrl": "{{ .Values.global.baseUrl }}/api/hips/images/color_gri", - "centerWP": "0;0;ECL", - "fovDeg": 10 - } - {{- end }} ] } }, From 037f9da262545bbdfa1a6b35ce3a7de229d789c9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 31 May 2023 17:37:38 -0700 Subject: [PATCH 035/308] Bump version of mobu 6.1.0 has fixes for timeouts and better error reporting. 
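A quick note on the "Enforce 4GiB:1CPU ratio" change above: both the nublado chart (Gi units) and the nublado2 chart (M units) now follow memory = 4 GiB per CPU. A minimal sketch of the arithmetic, assuming JupyterHub's 1024-based byte suffixes so that 4096M describes the same amount of memory as 4Gi:

```yaml
# 4 GiB : 1 CPU ratio applied to the lab sizes
#   small:  1 CPU ->  4Gi ==  4096M   (1 * 4 * 1024)
#   medium: 2 CPU ->  8Gi ==  8192M   (2 * 4 * 1024)
#   large:  4 CPU -> 16Gi == 16384M   (4 * 4 * 1024)
#   huge:   8 CPU -> 32Gi == 32768M   (8 * 4 * 1024)
```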
--- applications/mobu/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/mobu/Chart.yaml b/applications/mobu/Chart.yaml index 0daea287c2..c900cbdb83 100644 --- a/applications/mobu/Chart.yaml +++ b/applications/mobu/Chart.yaml @@ -4,4 +4,4 @@ version: 1.0.0 description: Continuous integration testing sources: - https://github.com/lsst-sqre/mobu -appVersion: 6.0.0 +appVersion: 6.1.0 From 368100a8111eb8eab8a6bc9646bf5a4665b7cf13 Mon Sep 17 00:00:00 2001 From: William O'Mullane Date: Thu, 1 Jun 2023 17:20:10 +0100 Subject: [PATCH 036/308] acitvate obsloctap --- applications/obsloctap/values-usdfdev.yaml | 1 + environments/values-usdfdev.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/obsloctap/values-usdfdev.yaml b/applications/obsloctap/values-usdfdev.yaml index 5865451aa6..130292d4c8 100644 --- a/applications/obsloctap/values-usdfdev.yaml +++ b/applications/obsloctap/values-usdfdev.yaml @@ -4,6 +4,7 @@ environment: PGUSER: "rubin" AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + EFD: "usdf-efd" config: volumes: diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 614f37169b..386f3b1c59 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -37,7 +37,7 @@ nublado: nublado2: enabled: true obsloctap: - enabled: false + enabled: true obstap: enabled: true plot-navigator: From 8650e16d15a237405f3aec86a9abdc2bc185b961 Mon Sep 17 00:00:00 2001 From: William O'Mullane Date: Thu, 1 Jun 2023 19:16:59 +0100 Subject: [PATCH 037/308] added docs/applications/obsloctap --- docs/applications/index.rst | 1 + docs/applications/obsloctap/index.rst | 23 +++++++++++++++++++++++ docs/applications/obsloctap/values.md | 12 ++++++++++++ docs/developers/add-application.rst | 1 + 4 files changed, 37 insertions(+) create mode 100644 docs/applications/obsloctap/index.rst create mode 100644 docs/applications/obsloctap/values.md diff --git a/docs/applications/index.rst b/docs/applications/index.rst index cb21da0bde..c9713c466a 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -52,6 +52,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde alert-stream-broker/index exposurelog/index narrativelog/index + obsloctap/index plot-navigator/index production-tools/index sasquatch/index diff --git a/docs/applications/obsloctap/index.rst b/docs/applications/obsloctap/index.rst new file mode 100644 index 0000000000..889cf8770c --- /dev/null +++ b/docs/applications/obsloctap/index.rst @@ -0,0 +1,23 @@ +.. px-app:: obsloctap + +#################################### +obsloctap — serve observing schedule +#################################### + +Lookup and reformat ``lsst.sal.Scheduler.logevent_predictedSchedule``. +Return a json file of the future observations. +Todo: Also track which observations were made implement ObsLocTAP_ IVOA standard. + +.. _ObsLocTAP: https://www.ivoa.net/documents/ObsLocTAP/ + + +.. jinja:: obsloctap + :file: applications/_summary.rst.jinja + +Guides +====== + +.. 
toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/obsloctap/values.md b/docs/applications/obsloctap/values.md new file mode 100644 index 0000000000..2630a7302f --- /dev/null +++ b/docs/applications/obsloctap/values.md @@ -0,0 +1,12 @@ +```{px-app-values} obsloctap +``` + +# Helm values reference + +Helm values reference table for the {px-app}`obsloctap` application. + +```{include} ../../../applications/obsloctap/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/developers/add-application.rst b/docs/developers/add-application.rst index b809c1c27d..34f67f3652 100644 --- a/docs/developers/add-application.rst +++ b/docs/developers/add-application.rst @@ -41,6 +41,7 @@ Documentation Phalanx uses `helm-docs`_ to generate documentation for Helm charts. This produces a nice Markdown README file that documents all the chart options, but it requires special formatting of the ``values.yaml`` file that is not present in the default Helm template. + Publication ----------- From 8da41c88c19a796c200934fc7bfed024b144c805 Mon Sep 17 00:00:00 2001 From: William O'Mullane Date: Thu, 1 Jun 2023 20:03:32 +0100 Subject: [PATCH 038/308] Add mandetory docs section --- docs/developers/add-application.rst | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/developers/add-application.rst b/docs/developers/add-application.rst index 34f67f3652..475d2e360b 100644 --- a/docs/developers/add-application.rst +++ b/docs/developers/add-application.rst @@ -41,12 +41,18 @@ Documentation Phalanx uses `helm-docs`_ to generate documentation for Helm charts. This produces a nice Markdown README file that documents all the chart options, but it requires special formatting of the ``values.yaml`` file that is not present in the default Helm template. +Documentation is **NOT Optional** for any new application added to phalanx a +folder must be added under the +`docs directory `__. +This must have at least an ``index.rst`` file. + +For a simple example see: +- `hips docs `__ . Publication ----------- Rubin-developed Helm charts for the Science Platform are stored as part of the `phalanx repository `__. They can be found in the `applications directory `__. - Examples -------- From ddbbe96411b7d709695d23541efa8ef57937803e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 1 Jun 2023 13:34:44 -0700 Subject: [PATCH 039/308] Add cert-manager docs for adding hostnames Document how to add the Route 53 record for new TLS hostnames, and mention it from the developer documentation. Clean up the information about application documentation a bit. --- .../cert-manager/add-new-hostname.rst | 30 +++++++++++++++++++ docs/applications/cert-manager/bootstrap.rst | 3 ++ docs/applications/cert-manager/index.rst | 3 +- docs/developers/add-application.rst | 18 ++++++----- 4 files changed, 46 insertions(+), 8 deletions(-) create mode 100644 docs/applications/cert-manager/add-new-hostname.rst diff --git a/docs/applications/cert-manager/add-new-hostname.rst b/docs/applications/cert-manager/add-new-hostname.rst new file mode 100644 index 0000000000..e8aadf1ccf --- /dev/null +++ b/docs/applications/cert-manager/add-new-hostname.rst @@ -0,0 +1,30 @@ +####################################### +Add TLS certificates for a new hostname +####################################### + +Every hostname served by an instance of the Rubin Science Platform must have its own TLS certificate, generated by cert-manager, unless that environment is configured to use external certificates. 
+When adding a new hostname to an existing environment, one must therefore repeat part of the instructions in :px-app-bootstrap:`cert-manager`. +Here are those abbreviated instructions. + +These instructions are specific to environments that use Route 53. + +The hostname must be in the same domain that is already used as the primary hostname of the environment. +See :doc:`route53-setup` and :px-app-bootstrap:`cert-manager` for the details of that setup. + +Instructions +============ + +In the following, ```` is the new hostname that needs a TLS certificate. +```` is the domain shared by that hostname and the primary fully-qualified domain name for that cluster. +For example, when adding ``alert-stream-int-broker-0.lsst.cloud``, the hostname is ``alert-stream-int-broker-0`` and the domain name is ``lsst.cloud``. + +#. Go to the Route 53 UI for the domain used by this RSP environment. + In the above example, that would be ``lsst.cloud``. + +#. Create a CNAME named ``_acme-challenge.`` whose value is ``_acme-challenge.tls.``. + Do this by adding a new record, selecting :guilabel:`CNAME` from the lower drop-down menu, and then selecting :guilabel:`IP address or other value` from the top drop-down menu. + Then, enter ``_acme-challenge.tls.`` as the CNAME target. + +This should be all that's required to allow cert-manager to create certificates for that hostname. +You will then need to configure the ``tls`` portion of the relevant ``Ingress`` or ``GafaelfawrIngress`` object. +For more information on how to do that, see :px-app-notes:`cert-manager`. diff --git a/docs/applications/cert-manager/bootstrap.rst b/docs/applications/cert-manager/bootstrap.rst index d7dd489a21..e80d4dc530 100644 --- a/docs/applications/cert-manager/bootstrap.rst +++ b/docs/applications/cert-manager/bootstrap.rst @@ -21,6 +21,9 @@ Select **CNAME** from the lower drop-down menu and then **IP address or other va For example, if the cluster name is ``data-dev.lsst.cloud``, create a CNAME record at ``_acme-challenge.data-dev.lsst.cloud`` whose value is ``_acme-challenge.tls.lsst.cloud``. In the Route 53 console, the name of the record you create in the ``lsst.cloud`` hosted zone will be ``_acme-challenge.data-dev`` (yes, including the period). +This will need to be done for each hostname served by this instance of the RSP. +See :doc:`add-new-hostname` for a shorter version of these instructions to follow for each new hostname added. + Add the following to the ``values-*.yaml`` file for an environment: .. code-block:: yaml diff --git a/docs/applications/cert-manager/index.rst b/docs/applications/cert-manager/index.rst index 6bffb9f54b..c9658c6676 100644 --- a/docs/applications/cert-manager/index.rst +++ b/docs/applications/cert-manager/index.rst @@ -17,8 +17,9 @@ Guides .. toctree:: - notes bootstrap + add-new-hostname route53-setup upgrade + notes values diff --git a/docs/developers/add-application.rst b/docs/developers/add-application.rst index 475d2e360b..37d133771f 100644 --- a/docs/developers/add-application.rst +++ b/docs/developers/add-application.rst @@ -33,7 +33,13 @@ You will need to make at least the following changes to the default Helm chart t See `the Gafaelfawr's documentation on Ingress configurations `__ for more information, and see :dmtn:`235` for a guide to what scopes to use to protect the application. -- If your application exposes Prometheus endpoints, you will want to configure these in the `telegraf application's prometheus_config `__. 
+Other Phalanx configuration +--------------------------- + +If the application needs to listen on hostnames other than the normal cluster-wide hostname, you will need to configure :px-app:`cert-manager` so that it can generate a TLS certificate for that hostname. +See :doc:`/applications/cert-manager/add-new-hostname` for more details. + +If your application exposes Prometheus endpoints, you will want to configure these in the `telegraf application's prometheus_config `__. Documentation ------------- @@ -41,13 +47,11 @@ Documentation Phalanx uses `helm-docs`_ to generate documentation for Helm charts. This produces a nice Markdown README file that documents all the chart options, but it requires special formatting of the ``values.yaml`` file that is not present in the default Helm template. -Documentation is **NOT Optional** for any new application added to phalanx a -folder must be added under the -`docs directory `__. -This must have at least an ``index.rst`` file. +Documentation is **REQUIRED**. +Every new application added to Phalanx must have a corresponding folder in the `docs/applications directory `__ containing at least an ``index.rst`` file and a ``values.md`` file. +The ``values.md`` file is boilerplate to incorporate the documentation of the ``values.yaml`` file for the new application. -For a simple example see: -- `hips docs `__ . +For a simple example that you can copy if desired, see the `docs for the HIPS service `__. Publication ----------- From 06d7c4b0221a2f3a6f3c0698cd82e812d456844b Mon Sep 17 00:00:00 2001 From: Gregory Dubois-Felsmann Date: Thu, 1 Jun 2023 15:41:49 -0700 Subject: [PATCH 040/308] Temporary fix to getting SSO second in menu --- applications/portal/templates/deployment.yaml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/applications/portal/templates/deployment.yaml b/applications/portal/templates/deployment.yaml index be692fcc41..b6333bd472 100644 --- a/applications/portal/templates/deployment.yaml +++ b/applications/portal/templates/deployment.yaml @@ -64,13 +64,24 @@ spec: "services": [ {{- if .Values.config.ssotap }} { - "label": "DP0.3 SSO", + "label": "LSST DP0.2 DC2", + "value": "{{ .Values.global.baseUrl }}/api/tap", + {{- if .Values.config.hipsUrl }} + "hipsUrl": "{{ .Values.config.hipsUrl }}", + {{- else }} + "hipsUrl": "{{ .Values.global.baseUrl }}/api/hips/images/color_gri", + {{- end }} + "centerWP": "62;-37;EQ_J2000", + "fovDeg": 10 + }, + { + "label": "LSST DP0.3 SSO", "value": "{{ .Values.global.baseUrl }}/api/{{ .Values.config.ssotap}}", "hipsUrl": "{{ .Values.global.baseUrl }}/api/hips/images/color_gri", "centerWP": "0;0;ECL", "fovDeg": 10 - }, - {{- end }} + } + {{- else }} { "label": "LSST RSP", "value": "{{ .Values.global.baseUrl }}/api/tap", @@ -82,6 +93,7 @@ spec: "centerWP": "62;-37;EQ_J2000", "fovDeg": 10 } + {{- end }} ] } }, From 44ab9da3fff7137120a6e3f5764779c4c811a076 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 1 Jun 2023 16:07:17 -0700 Subject: [PATCH 041/308] Make user fileserver detection more robust --- applications/nublado/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 4df80f0668..5b186050a2 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -6,7 +6,7 @@ sources: - https://github.com/lsst-sqre/jupyterlab-controller - https://github.com/lsst-sqre/rsp-restspawner home: https://github.com/lsst-sqre/jupyterlab-controller -appVersion: 0.6.0 
+appVersion: 0.6.1 dependencies: - name: jupyterhub From 1bd7361f8035c73d45d0897e4a49dd765476d5e8 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 1 Jun 2023 16:36:24 -0700 Subject: [PATCH 042/308] [hotfix] Turn on ssotap for idfdev and idfprod --- environments/values-idfdev.yaml | 2 ++ environments/values-idfprod.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index 79543ea23a..e6cb2e88f5 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -49,6 +49,8 @@ semaphore: enabled: true sherlock: enabled: true +ssotap: + enabled: true squareone: enabled: true squarebot: diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index d05bc8932e..ee89f52121 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -55,6 +55,8 @@ squareone: enabled: true squash-api: enabled: false +ssotap: + enabled: true strimzi: enabled: false strimzi-access-operator: From 5cceaa5abb30624e0cd787e0f5b2ad03033ccde2 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 1 Jun 2023 16:50:48 -0700 Subject: [PATCH 043/308] Enable ssotap on IDF prod in the Portal --- applications/portal/values-idfprod.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/portal/values-idfprod.yaml b/applications/portal/values-idfprod.yaml index d3325ec38f..2abcb44575 100644 --- a/applications/portal/values-idfprod.yaml +++ b/applications/portal/values-idfprod.yaml @@ -5,6 +5,7 @@ config: workareaNfs: path: "/share1/home/firefly/shared-workarea" server: "10.13.105.122" + ssotap: "ssotap" resources: limits: From 023d77bee0e26a144271986a657754292b7c8a94 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 1 Jun 2023 17:04:35 -0700 Subject: [PATCH 044/308] remove fs-nfs from usdf-dev --- applications/nublado/values-usdfdev.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 6d41f46005..9cd0cfec9a 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -131,16 +131,6 @@ controller: resources: requests: storage: "1Gi" - - containerPath: "/fs/nfs" - mode: "rw" - source: - type: "persistentVolumeClaim" - storageClassName: "fs-nfs" - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: "1Gi" - containerPath: "/fs/ddn/sdf/group/rubin" mode: "rw" source: From 9732a1844c14eff71da4783476e5f9d80742532a Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 2 Jun 2023 08:21:54 -0700 Subject: [PATCH 045/308] Update Gafaelfawr to 9.2.2 Hopefully fix the various Redis pool issues. 
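Returning to the documentation requirements spelled out in the add-application guide above (patches 038 and 039): the "special formatting" that helm-docs expects is a `# --` comment immediately above each key in `values.yaml`, optionally followed by an `# @default --` line that overrides the default shown in the generated README table. The sasquatch values file later in this series uses the same convention. A minimal illustrative sketch with made-up keys:

```yaml
# -- Number of frontend pods to run
replicaCount: 1

config:
  # -- Log level for the application (DEBUG, INFO, or WARNING)
  logLevel: "INFO"

  # -- Hostname to serve
  # @default -- None, must be set per environment
  hostname: ""
```

helm-docs then regenerates the chart README table from these comments, so option descriptions only need to be maintained in one place.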
--- applications/gafaelfawr/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index dd8279cca1..6bb95c897d 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,7 +5,7 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.2.1 +appVersion: 9.2.2 dependencies: - name: redis From 73330a98a7ef95c7ab66d102e06fa8f6afa3fe14 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 2 Jun 2023 12:07:58 -0700 Subject: [PATCH 046/308] Remove /fs/nfs from USDF environments --- applications/nublado2/values-usdfdev.yaml | 17 -------------- applications/nublado2/values-usdfprod.yaml | 26 ---------------------- 2 files changed, 43 deletions(-) diff --git a/applications/nublado2/values-usdfdev.yaml b/applications/nublado2/values-usdfdev.yaml index eba1cf3260..5e85e0d328 100644 --- a/applications/nublado2/values-usdfdev.yaml +++ b/applications/nublado2/values-usdfdev.yaml @@ -61,9 +61,6 @@ config: - name: fs-ddn-sdf-group-rubin persistentVolumeClaim: claimName: fs-ddn-sdf-group-rubin - - name: fs-nfs - persistentVolumeClaim: - claimName: fs-nfs - name: sdf-scratch persistentVolumeClaim: claimName: sdf-scratch @@ -85,8 +82,6 @@ config: mountPath: /sdf/data/rubin - name: sdf-scratch mountPath: /scratch - - name: fs-nfs - mountPath: /fs/nfs - name: fs-ddn-sdf-group-rubin mountPath: /fs/ddn/sdf/group/rubin - name: fs-ddn-sdf-group-lsst @@ -406,18 +401,6 @@ config: resources: requests: storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-nfs - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-nfs - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi vault_secret_path: "secret/rubin/usdf-rsp-dev/nublado2" diff --git a/applications/nublado2/values-usdfprod.yaml b/applications/nublado2/values-usdfprod.yaml index 8238d4d396..26723b22d4 100644 --- a/applications/nublado2/values-usdfprod.yaml +++ b/applications/nublado2/values-usdfprod.yaml @@ -61,9 +61,6 @@ config: - name: fs-ddn-sdf-group-rubin persistentVolumeClaim: claimName: fs-ddn-sdf-group-rubin - - name: fs-nfs - persistentVolumeClaim: - claimName: fs-nfs - name: sdf-scratch persistentVolumeClaim: claimName: sdf-scratch @@ -73,9 +70,6 @@ config: volume_mounts: - name: home mountPath: "/home/" -# - name: sdf-group-rubin -# mountPath: /datasets -# subPath: datasets - name: sdf-data-rubin mountPath: /repo subPath: repo @@ -88,14 +82,6 @@ config: mountPath: /sdf/data/rubin - name: sdf-scratch mountPath: /scratch -# - name: fs-ddn-sdf-group-rubin -# mountPath: /teststand -# subPath: lsstdata/offline/teststand -# - name: fs-ddn-sdf-group-rubin -# mountPath: /instrument -# subPath: lsstdata/offline/instrument - - name: fs-nfs - mountPath: /fs/nfs - name: fs-ddn-sdf-group-rubin mountPath: /fs/ddn/sdf/group/rubin - name: fs-ddn-sdf-group-lsst @@ -415,18 +401,6 @@ config: resources: requests: storage: 1Gi - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: fs-nfs - namespace: "{{ user_namespace }}" - spec: - storageClassName: fs-nfs - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Gi vault_secret_path: "secret/rubin/usdf-rsp/nublado2" From 0edb5ed0d1ee0717eef19a4f0a675f838aa2282e Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 1 Jun 2023 13:56:43 -0700 Subject: [PATCH 047/308] Enable repairer connectors at usdfprod --- 
applications/sasquatch/values-usdfprod.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index ec3293ecfa..3b6d27117a 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -27,54 +27,71 @@ kafka-connect-manager: connectors: auxtel: enabled: true + repairerConnector: true topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" maintel: enabled: true + repairerConnector: true topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" mtmount: enabled: true + repairerConnector: true topicsRegex: ".*MTMount" comcam: enabled: true + repairerConnector: true topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" eas: enabled: true + repairerConnector: true topicsRegex: ".*DIMM|.*DSM|.*ESS|.*HVAC|.*WeatherForecast|.*WeatherStation" latiss: enabled: true + repairerConnector: true topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" m1m3: enabled: true + repairerConnector: true topicsRegex: ".*MTM1M3" m2: enabled: true + repairerConnector: true topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" obssys: enabled: true + repairerConnector: true topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" ocps: enabled: true + repairerConnector: true topicsRegex: ".*OCPS" test: enabled: true + repairerConnector: true topicsRegex: ".*Test" pmd: enabled: true + repairerConnector: true topicsRegex: ".*PMD" calsys: enabled: true + repairerConnector: true topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" mtaircompressor: enabled: true + repairerConnector: true topicsRegex: ".*MTAirCompressor" genericcamera: enabled: true + repairerConnector: true topicsRegex: ".*GCHeaderService|.*GenericCamera" gis: enabled: true + repairerConnector: true topicsRegex: ".*GIS" lasertracker: enabled: true + repairerConnector: true topicsRegex: ".*LaserTracker" kafdrop: From 63a4064e81d14be2a2e0ac3caba48e1e7de2b262 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 3 Jun 2023 15:55:23 -0700 Subject: [PATCH 048/308] Disable repairer connectors at usdfprod --- applications/sasquatch/values-usdfprod.yaml | 34 ++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 3b6d27117a..9727a381e1 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -27,71 +27,71 @@ kafka-connect-manager: connectors: auxtel: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" maintel: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" mtmount: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTMount" comcam: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" eas: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*DIMM|.*DSM|.*ESS|.*HVAC|.*WeatherForecast|.*WeatherStation" latiss: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" m1m3: enabled: true - repairerConnector: true + repairerConnector: 
false topicsRegex: ".*MTM1M3" m2: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" obssys: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" ocps: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*OCPS" test: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*Test" pmd: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*PMD" calsys: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" mtaircompressor: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*MTAirCompressor" genericcamera: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*GCHeaderService|.*GenericCamera" gis: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*GIS" lasertracker: enabled: true - repairerConnector: true + repairerConnector: false topicsRegex: ".*LaserTracker" kafdrop: From 9b3c42862fc87a711cef3576e22da2d54a5b9979 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 01:09:42 +0000 Subject: [PATCH 049/308] Update Helm release ingress-nginx to v4.7.0 --- applications/ingress-nginx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index 78fc4689e5..481ec0c0c6 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.6.1 + version: 4.7.0 repository: https://kubernetes.github.io/ingress-nginx From 5f8c1f4ccb606a38e58ca459e920c3a7d05d88c6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 15:06:45 +0000 Subject: [PATCH 050/308] Update Helm release argo-workflows to v0.29.1 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index e1ef54ee56..c37c260d7e 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.28.2 + version: 0.29.1 repository: https://argoproj.github.io/argo-helm From f61142560a6692b32545acdf597a7adb6a4db145 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 3 Jun 2023 17:49:44 -0700 Subject: [PATCH 051/308] Add InfluxDB staging deployment - Enable it on the base environment. 
--- applications/sasquatch/Chart.yaml | 6 +++ applications/sasquatch/README.md | 12 +++++ applications/sasquatch/values-base.yaml | 10 ++++ applications/sasquatch/values.yaml | 63 +++++++++++++++++++++++++ 4 files changed, 91 insertions(+) diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index 2e39bd5300..f80b940616 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -11,9 +11,15 @@ dependencies: version: 2.1.0 repository: https://lsst-sqre.github.io/charts/ - name: influxdb + alias: influxdb condition: influxdb.enabled version: 4.12.1 repository: https://helm.influxdata.com/ + - name: influxdb + alias: influxdb-staging + condition: influxdb-staging.enabled + version: 4.12.1 + repository: https://helm.influxdata.com/ - name: influxdb2 condition: influxdb2.enabled version: 2.1.1 diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 107b2cfdde..213456803a 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -22,6 +22,18 @@ Rubin Observatory's telemetry service. | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | +| influxdb-staging.enabled | bool | `false` | Enable InfluxDB staging deployment. | +| influxdb-staging.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | +| influxdb-staging.ingress | object | disabled | InfluxDB ingress configuration. | +| influxdb-staging.initScripts.enabled | bool | `false` | Enable InfluxDB custom initialization script. | +| influxdb-staging.persistence.enabled | bool | `true` | Enable persistent volume claim. By default storageClass is undefined choosing the default provisioner (standard on GKE). | +| influxdb-staging.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments | +| influxdb-staging.resources.limits.cpu | int | `8` | | +| influxdb-staging.resources.limits.memory | string | `"96Gi"` | | +| influxdb-staging.resources.requests.cpu | int | `1` | | +| influxdb-staging.resources.requests.memory | string | `"1Gi"` | | +| influxdb-staging.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. | | influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb.enabled | bool | `true` | Enable InfluxDB. | | influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. 
| diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 2dfdf2180d..b9a70d9fcd 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -26,6 +26,16 @@ influxdb: enabled: true hostname: base-lsp.lsst.codes +influxdb-staging: + enabled: true + persistence: + storageClass: rook-ceph-block + size: 5Ti + ingress: + enabled: true + hostname: base-lsp.lsst.codes + + kafka-connect-manager: influxdbSink: # Based on the kafka producers configuration for the BTS diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index cffe084c1a..3b4881dc31 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -72,6 +72,69 @@ influxdb: memory: 96Gi cpu: 8 +influxdb-staging: + # -- Enable InfluxDB staging deployment. + enabled: false + # -- InfluxDB image tag. + image: + tag: "1.8.10" + persistence: + # -- Enable persistent volume claim. + # By default storageClass is undefined choosing the default provisioner (standard on GKE). + enabled: true + # -- Persistent volume size. + # @default 1Ti for teststand deployments + size: 1Ti + # -- Default InfluxDB user, use influxb-user and influxdb-password keys from secret. + setDefaultUser: + enabled: true + user: + existingSecret: sasquatch + # -- InfluxDB ingress configuration. + # @default -- disabled + ingress: + enabled: false + tls: false + hostname: "" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 + className: "nginx" + path: /influxdb-staging(/|$)(.*) + # -- Override InfluxDB configuration. + # See https://docs.influxdata.com/influxdb/v1.8/administration/config + config: + data: + cache-max-memory-size: 0 + wal-fsync-delay: "100ms" + trace-logging-enabled: true + http: + enabled: true + flux-enabled: true + auth-enabled: true + max-row-limit: 0 + coordinator: + write-timeout: "1h" + max-concurrent-queries: 0 + query-timeout: "0s" + log-queries-after: "15s" + continuous_queries: + enabled: false + logging: + level: "debug" + initScripts: + # -- Enable InfluxDB custom initialization script. + enabled: false + # scripts: + # # -- InfluxDB custom initialization script. + # init.iql: |+ + resources: + requests: + memory: 1Gi + cpu: 1 + limits: + memory: 96Gi + cpu: 8 + influxdb2: enabled: false adminUser: From 94491b2c6105e111b49b1ef16128fa712dd96bbf Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 7 Jun 2023 10:22:46 -0400 Subject: [PATCH 052/308] Shorten DP0.2 docs description This makes the DP0.2 docs card more concise and fit better with the other cards. --- applications/squareone/values.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml index 60bcfed8fd..01c124f527 100644 --- a/applications/squareone/values.yaml +++ b/applications/squareone/values.yaml @@ -124,12 +124,8 @@ config: ### Data Preview 0.2 (DP0.2) DP0.2 is the second phase of the Data Preview 0 program using - precursor data (simulated images from the DESC DC2 data - challenge). For the first time, all the derived data products - have been generated “in-house” on an early version of the Rubin - processing infrastructure using version 23.0 of the LSST Science - Pipelines. As a result, the data model is significantly - different from the DP0.1 dataset. + simulated images from the DESC DC2 data challenge processed with + version 23.0 of the LSST Science Pipelines. 
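A brief note on the InfluxDB staging ingress enabled for the base environment above (patch 051): with the `nginx.ingress.kubernetes.io/rewrite-target: /$2` annotation and the path regex `/influxdb-staging(/|$)(.*)` inherited from the chart defaults, only the second capture group is forwarded upstream, so the staging instance shares the existing hostname under a path prefix. A sketch of the resulting mapping, using the hostname from `values-base.yaml` and standard InfluxDB 1.8 HTTP API endpoints:

```yaml
# External request                                      -> path seen by InfluxDB
#   https://base-lsp.lsst.codes/influxdb-staging/ping   -> /ping    (health check)
#   https://base-lsp.lsst.codes/influxdb-staging/query  -> /query   (InfluxQL queries)
#   https://base-lsp.lsst.codes/influxdb-staging/write  -> /write   (line-protocol writes)
```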
From 4893e5c65011e80cad032bfae2febcd52fe92cbe Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 7 Jun 2023 10:25:00 -0400 Subject: [PATCH 053/308] Edit docs card descriptions Captitalize Notebook Aspect more consistently. --- applications/squareone/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml index 01c124f527..275f766c45 100644 --- a/applications/squareone/values.yaml +++ b/applications/squareone/values.yaml @@ -149,7 +149,7 @@ config: ### Rubin Science Platform - The Notebook aspect is a powerful data analysis environment with + The Notebook Aspect is a powerful data analysis environment with Jupyter Notebooks and terminals in the browser. Documentation for the Rubin Science Platform, including account set up, portal, notebooks, and API aspects. @@ -171,7 +171,7 @@ config: The Science Pipelines include the Butler for accessing LSST data and a pipeline framework for processing data. The LSST Science - Pipelines Python package is preinstalled in the Notebook aspect. + Pipelines is preinstalled in the Notebook Aspect. From 642691d5dd3339fbb0c44108bcb983488eb4d739 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 7 Jun 2023 10:26:02 -0400 Subject: [PATCH 054/308] Include link to DP0.2 tutorials This direct link was requested by the Users Committee via https://github.com/rubin-dp0/Support/issues/41 --- applications/squareone/values.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml index 275f766c45..627e70e601 100644 --- a/applications/squareone/values.yaml +++ b/applications/squareone/values.yaml @@ -129,6 +129,15 @@ config: + + + ### DP0.2 Tutorials + + Tutorials for exploring the DP0.2 dataset on the Rubin Science + Platform. + + + ### DP0.2 Catalog Schema From 2da1335b76947377b2d35e65b0193a5c09c3dfa0 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 7 Jun 2023 10:41:54 -0400 Subject: [PATCH 055/308] Improve description of rsp.lsst.io docs The previous description was a mash-up of the descrption for nb.lsst.io with the broader scope of rsp.lsst.io; I think this is a more accurate and concise wording. --- applications/squareone/values.yaml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml index 627e70e601..abab9d546a 100644 --- a/applications/squareone/values.yaml +++ b/applications/squareone/values.yaml @@ -158,10 +158,8 @@ config: ### Rubin Science Platform - The Notebook Aspect is a powerful data analysis environment with - Jupyter Notebooks and terminals in the browser. - Documentation for the Rubin Science Platform, including account set up, - portal, notebooks, and API aspects. + Guides for setting up an account on the Rubin Science Platform + and using the Portal, Notebook, and API Aspects. 
From ffea03084ae3cc3692d7e5e9a72e393cddd0fb2f Mon Sep 17 00:00:00 2001 From: Gregory Dubois-Felsmann Date: Wed, 7 Jun 2023 14:33:24 -0700 Subject: [PATCH 056/308] Return to desired "insert SSO" behavior Depends on latest version of Firefly, to be released as release-2023.1.4 --- applications/portal/templates/deployment.yaml | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/applications/portal/templates/deployment.yaml b/applications/portal/templates/deployment.yaml index b6333bd472..37f6d63e46 100644 --- a/applications/portal/templates/deployment.yaml +++ b/applications/portal/templates/deployment.yaml @@ -62,9 +62,8 @@ spec: "tap" : { "additional": { "services": [ - {{- if .Values.config.ssotap }} { - "label": "LSST DP0.2 DC2", + "label": "LSST RSP", "value": "{{ .Values.global.baseUrl }}/api/tap", {{- if .Values.config.hipsUrl }} "hipsUrl": "{{ .Values.config.hipsUrl }}", @@ -73,7 +72,9 @@ spec: {{- end }} "centerWP": "62;-37;EQ_J2000", "fovDeg": 10 - }, + } + {{- if .Values.config.ssotap }} + , { "label": "LSST DP0.3 SSO", "value": "{{ .Values.global.baseUrl }}/api/{{ .Values.config.ssotap}}", @@ -81,18 +82,6 @@ spec: "centerWP": "0;0;ECL", "fovDeg": 10 } - {{- else }} - { - "label": "LSST RSP", - "value": "{{ .Values.global.baseUrl }}/api/tap", - {{- if .Values.config.hipsUrl }} - "hipsUrl": "{{ .Values.config.hipsUrl }}", - {{- else }} - "hipsUrl": "{{ .Values.global.baseUrl }}/api/hips/images/color_gri", - {{- end }} - "centerWP": "62;-37;EQ_J2000", - "fovDeg": 10 - } {{- end }} ] } From 54b696e330fabe9469c2986c6fe99c1fdc9c6995 Mon Sep 17 00:00:00 2001 From: roby Date: Tue, 6 Jun 2023 15:01:03 -0600 Subject: [PATCH 057/308] appVersion: "suit-2023.1.4" --- applications/portal/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index 95a12c7dea..360dad5699 100644 --- a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -5,7 +5,7 @@ description: Rubin Science Platform Portal Aspect sources: - https://github.com/lsst/suit - https://github.com/Caltech-IPAC/firefly -appVersion: "suit-2023.1.3" +appVersion: "suit-2023.1.4" dependencies: - name: redis From 2543d36a5a622329a1500b688881a3d76a0bbc57 Mon Sep 17 00:00:00 2001 From: Gregory Dubois-Felsmann Date: Wed, 7 Jun 2023 16:46:38 -0700 Subject: [PATCH 058/308] Update to match the new `suit` default TAP service label --- applications/portal/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/portal/templates/deployment.yaml b/applications/portal/templates/deployment.yaml index 37f6d63e46..5d48a8cab7 100644 --- a/applications/portal/templates/deployment.yaml +++ b/applications/portal/templates/deployment.yaml @@ -63,7 +63,7 @@ spec: "additional": { "services": [ { - "label": "LSST RSP", + "label": "LSST DP0.2 DC2", "value": "{{ .Values.global.baseUrl }}/api/tap", {{- if .Values.config.hipsUrl }} "hipsUrl": "{{ .Values.config.hipsUrl }}", From 2d5d819a4a37fe4af996399921293fae608ca87c Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 2 May 2023 15:37:44 -0400 Subject: [PATCH 059/308] Prepare timessquare database on usdfdev --- applications/postgres/templates/deployment.yaml | 11 +++++++++++ applications/postgres/values-usdfdev.yaml | 3 +++ 2 files changed, 14 insertions(+) diff --git a/applications/postgres/templates/deployment.yaml b/applications/postgres/templates/deployment.yaml index ebde275f9f..c04846f7b9 100644 --- 
a/applications/postgres/templates/deployment.yaml +++ b/applications/postgres/templates/deployment.yaml @@ -99,6 +99,17 @@ spec: name: "postgres" key: "gafaelfawr_password" {{- end }} + {{- with .Values.timessquare_db }} + - name: "VRO_DB_TIMESSQUARE_USER" + value: {{ .user | quote }} + - name: "VRO_DB_TIMESSQUARE_DB" + value: {{ .db | quote }} + - name: "VRO_DB_TIMESSQUARE_PASSWORD" + valueFrom: + secretKeyRef: + name: "postgres" + key: "timessquare_password" + {{- end }} imagePullPolicy: {{ .Values.image.pullPolicy | quote }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" ports: diff --git a/applications/postgres/values-usdfdev.yaml b/applications/postgres/values-usdfdev.yaml index dbc5324ac3..05f630a0d5 100644 --- a/applications/postgres/values-usdfdev.yaml +++ b/applications/postgres/values-usdfdev.yaml @@ -7,5 +7,8 @@ nublado3_db: gafaelfawr_db: user: 'gafaelfawr' db: 'gafaelfawr' +timessquare_db: + user: "timessquare" + db: "timessquare" postgresStorageClass: 'wekafs--sdf-k8s01' From 6e97c6c77cf12a3031cbf59e6005392ea18a2127 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 2 May 2023 15:40:16 -0400 Subject: [PATCH 060/308] Add times-square values for usdfdev --- applications/times-square/values-usdfdev.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 applications/times-square/values-usdfdev.yaml diff --git a/applications/times-square/values-usdfdev.yaml b/applications/times-square/values-usdfdev.yaml new file mode 100644 index 0000000000..bafe7055cc --- /dev/null +++ b/applications/times-square/values-usdfdev.yaml @@ -0,0 +1,12 @@ +image: + pullPolicy: Always +config: + logLevel: "DEBUG" + databaseUrl: "postgresql://timessquare@postgres.postgres/timessquare" + githubAppId: "327289" + enableGitHubApp: "True" +cloudsql: + enabled: false +redis: + persistence: + storageClass: "wekafs--sdf-k8s01" From c6c081bc101e33153e770c7d5f193796a1817820 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 2 May 2023 16:43:15 -0400 Subject: [PATCH 061/308] Activate times-square on usdfdev --- environments/values-usdfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 386f3b1c59..47b6be1dde 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -75,7 +75,7 @@ telegraf: telegraf-ds: enabled: false times-square: - enabled: false + enabled: true vault-secrets-operator: enabled: true vo-cutouts: From c12bd94d1280a74f914d0bde5be76061488782a8 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 2 May 2023 17:41:28 -0400 Subject: [PATCH 062/308] Configure noteburst on usdfdev --- applications/noteburst/values-usdfdev.yaml | 19 +++++++++++++++++++ environments/values-usdfdev.yaml | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 applications/noteburst/values-usdfdev.yaml diff --git a/applications/noteburst/values-usdfdev.yaml b/applications/noteburst/values-usdfdev.yaml new file mode 100644 index 0000000000..0c562f02fc --- /dev/null +++ b/applications/noteburst/values-usdfdev.yaml @@ -0,0 +1,19 @@ +image: + pullPolicy: Always + +config: + logLevel: "DEBUG" + worker: + workerCount: 1 + identities: + - username: "bot-noteburst90000" + - username: "bot-noteburst90001" + - username: "bot-noteburst90002" + - username: "bot-noteburst90003" + - username: "bot-noteburst90004" + - username: "bot-noteburst90005" + +# Use SSD for Redis storage. 
+redis: + persistence: + storageClass: "wekafs--sdf-k8s01" diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 47b6be1dde..edc9e692b0 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -31,7 +31,7 @@ moneypenny: narrativelog: enabled: false noteburst: - enabled: false + enabled: true nublado: enabled: true nublado2: From a988c76c5d4d981293769f99b11c44d4dc8b7b1c Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Thu, 1 Jun 2023 13:40:30 -0700 Subject: [PATCH 063/308] [DM-39343] Fix up obstap and tap the same way ssotap --- .../templates/tap-schema-db-deployment.yaml | 56 +++++++++++++++++++ .../templates/tap-schema-db-service.yaml | 15 +++++ applications/obstap/values-idfdev.yaml | 17 ------ applications/obstap/values-idfint.yaml | 18 ------ applications/obstap/values-idfprod.yaml | 27 --------- applications/obstap/values-usdfdev.yaml | 4 ++ applications/obstap/values-usdfprod.yaml | 30 ---------- applications/obstap/values.yaml | 8 ++- applications/ssotap/values-idfdev.yaml | 8 +-- applications/ssotap/values-idfint.yaml | 8 +-- applications/ssotap/values-idfprod.yaml | 8 +-- applications/ssotap/values-minikube.yaml | 4 ++ .../templates/tap-schema-db-deployment.yaml | 56 +++++++++++++++++++ .../tap/templates/tap-schema-db-service.yaml | 15 +++++ applications/tap/values-ccin2p3.yaml | 4 ++ applications/tap/values-idfdev.yaml | 4 ++ applications/tap/values-idfint.yaml | 4 ++ applications/tap/values-idfprod.yaml | 4 ++ applications/tap/values-minikube.yaml | 4 ++ applications/tap/values-roe.yaml | 4 ++ applications/tap/values-usdfdev.yaml | 4 ++ applications/tap/values-usdfprod.yaml | 4 ++ applications/tap/values.yaml | 8 ++- 23 files changed, 208 insertions(+), 106 deletions(-) create mode 100644 applications/obstap/templates/tap-schema-db-deployment.yaml create mode 100644 applications/obstap/templates/tap-schema-db-service.yaml delete mode 100644 applications/obstap/values-idfdev.yaml delete mode 100644 applications/obstap/values-idfint.yaml delete mode 100644 applications/obstap/values-idfprod.yaml delete mode 100644 applications/obstap/values-usdfprod.yaml create mode 100644 applications/tap/templates/tap-schema-db-deployment.yaml create mode 100644 applications/tap/templates/tap-schema-db-service.yaml diff --git a/applications/obstap/templates/tap-schema-db-deployment.yaml b/applications/obstap/templates/tap-schema-db-deployment.yaml new file mode 100644 index 0000000000..929bdce88f --- /dev/null +++ b/applications/obstap/templates/tap-schema-db-deployment.yaml @@ -0,0 +1,56 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "cadc-tap.fullname" . }}-tap-schema-db + labels: + {{- include "cadc-tap.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "cadc-tap.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "cadc-tap.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/component: "schema-db" + spec: + automountServiceAccountToken: false + containers: + - name: "tap-schema-db" + env: + - name: MYSQL_DATABASE + value: "TAP_SCHEMA" + - name: MYSQL_USER + value: "TAP_SCHEMA" + - name: MYSQL_PASSWORD + value: "TAP_SCHEMA" + - name: MYSQL_ROOT_HOST + value: "%" + image: "{{ .Values.tap_schema.image.repository }}:{{ .Values.tap_schema.image.tag}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: 3306 + protocol: "TCP" + {{- with .Values.tap_schema.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + imagePullSecrets: + - name: "pull-secret" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/obstap/templates/tap-schema-db-service.yaml b/applications/obstap/templates/tap-schema-db-service.yaml new file mode 100644 index 0000000000..e5b9dd0856 --- /dev/null +++ b/applications/obstap/templates/tap-schema-db-service.yaml @@ -0,0 +1,15 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ template "cadc-tap.fullname" . }}-schema-db + labels: + {{- include "cadc-tap.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - protocol: "TCP" + port: 3306 + targetPort: 3306 + selector: + {{- include "cadc-tap.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: "schema-db" diff --git a/applications/obstap/values-idfdev.yaml b/applications/obstap/values-idfdev.yaml deleted file mode 100644 index b0a7af3d2f..0000000000 --- a/applications/obstap/values-idfdev.yaml +++ /dev/null @@ -1,17 +0,0 @@ -resources: - requests: - cpu: 2.0 - memory: "2G" - limits: - cpu: 8.0 - memory: "32G" - -config: - gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" - jvmMaxHeapSize: "31G" - -qserv: - host: "10.136.1.211:4040" - mock: - enabled: false diff --git a/applications/obstap/values-idfint.yaml b/applications/obstap/values-idfint.yaml deleted file mode 100644 index a7a76f923a..0000000000 --- a/applications/obstap/values-idfint.yaml +++ /dev/null @@ -1,18 +0,0 @@ -resources: - requests: - cpu: 2.0 - memory: "2G" - limits: - cpu: 8.0 - memory: "32G" - -replicaCount: 2 - -config: - gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" - jvmMaxHeapSize: "31G" - -pg: - mock: - enabled: true diff --git a/applications/obstap/values-idfprod.yaml b/applications/obstap/values-idfprod.yaml deleted file mode 100644 index cc8d6515e5..0000000000 --- a/applications/obstap/values-idfprod.yaml +++ /dev/null @@ -1,27 +0,0 @@ -resources: - requests: - cpu: 2.0 - memory: "2G" - limits: - cpu: 8.0 - memory: "32G" - -replicaCount: 2 - -config: - gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" - jvmMaxHeapSize: "31G" - -pg: - mock: - enabled: true - -uws: - resources: - requests: - cpu: 0.25 - memory: "1G" - limits: - cpu: 2.0 - memory: "4G" diff --git a/applications/obstap/values-usdfdev.yaml b/applications/obstap/values-usdfdev.yaml index a8802d4c5e..4bf6319d48 100644 --- a/applications/obstap/values-usdfdev.yaml +++ b/applications/obstap/values-usdfdev.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-usdf-dev-livetap" + resources: requests: cpu: 2.0 diff --git a/applications/obstap/values-usdfprod.yaml 
b/applications/obstap/values-usdfprod.yaml deleted file mode 100644 index a8802d4c5e..0000000000 --- a/applications/obstap/values-usdfprod.yaml +++ /dev/null @@ -1,30 +0,0 @@ -resources: - requests: - cpu: 2.0 - memory: "2G" - limits: - cpu: 8.0 - memory: "32G" - -replicaCount: 2 - -config: - gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" - jvmMaxHeapSize: "31G" - -pg: - mock: - enabled: false - database: "lsstdb1" - host: "usdf-butler.slac.stanford.edu:5432" - username: "rubin" - -uws: - resources: - requests: - cpu: 0.25 - memory: "1G" - limits: - cpu: 2.0 - memory: "4G" diff --git a/applications/obstap/values.yaml b/applications/obstap/values.yaml index fbf2c73154..b2d51618ce 100644 --- a/applications/obstap/values.yaml +++ b/applications/obstap/values.yaml @@ -53,7 +53,7 @@ vaultSecretsPath: "" config: # -- Address to a MySQL database containing TAP schema data - tapSchemaAddress: "tap-schema-db.tap-schema.svc.cluster.local:3306" + tapSchemaAddress: "cadc-tap-schema-db:3306" # -- Datalink payload URL datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip" @@ -120,6 +120,12 @@ pg: # -- Affinity rules for the mock postgres pod affinity: {} +tap_schema: + image: + repository: {} + tag: "1.2.11" + resources: {} + uws: image: # -- UWS database image to use diff --git a/applications/ssotap/values-idfdev.yaml b/applications/ssotap/values-idfdev.yaml index 4c05fbd371..68c1963f8d 100644 --- a/applications/ssotap/values-idfdev.yaml +++ b/applications/ssotap/values-idfdev.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfdev-sso" + resources: requests: cpu: 2.0 @@ -17,7 +21,3 @@ pg: database: "dp03_catalogs" host: "usdf-pg-catalogs.slac.stanford.edu:5432" username: "dp03" - -tap_schema: - image: - repository: "lsstsqre/tap-schema-idfsso" diff --git a/applications/ssotap/values-idfint.yaml b/applications/ssotap/values-idfint.yaml index fbf8d89484..e4aad19e89 100644 --- a/applications/ssotap/values-idfint.yaml +++ b/applications/ssotap/values-idfint.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfint-sso" + resources: requests: cpu: 2.0 @@ -19,7 +23,3 @@ pg: database: "dp03_catalogs" host: "usdf-pg-catalogs.slac.stanford.edu:5432" username: "dp03" - -tap_schema: - image: - repository: "lsstsqre/tap-schema-idfsso" diff --git a/applications/ssotap/values-idfprod.yaml b/applications/ssotap/values-idfprod.yaml index 231f91b621..639e96c320 100644 --- a/applications/ssotap/values-idfprod.yaml +++ b/applications/ssotap/values-idfprod.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfprod-sso" + resources: requests: cpu: 2.0 @@ -28,7 +32,3 @@ uws: limits: cpu: 2.0 memory: "4G" - -tap_schema: - image: - repository: "lsstsqre/tap-schema-idfsso" diff --git a/applications/ssotap/values-minikube.yaml b/applications/ssotap/values-minikube.yaml index 6e3f1aca1e..9819d2da73 100644 --- a/applications/ssotap/values-minikube.yaml +++ b/applications/ssotap/values-minikube.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfprod-sso" + config: gcsBucket: "async-results.lsst.codes" gcsBucketUrl: "http://async-results.lsst.codes" diff --git a/applications/tap/templates/tap-schema-db-deployment.yaml b/applications/tap/templates/tap-schema-db-deployment.yaml new file mode 100644 index 0000000000..929bdce88f --- /dev/null +++ b/applications/tap/templates/tap-schema-db-deployment.yaml @@ -0,0 +1,56 @@ +apiVersion: 
apps/v1 +kind: Deployment +metadata: + name: {{ template "cadc-tap.fullname" . }}-tap-schema-db + labels: + {{- include "cadc-tap.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "cadc-tap.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "cadc-tap.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: "schema-db" + spec: + automountServiceAccountToken: false + containers: + - name: "tap-schema-db" + env: + - name: MYSQL_DATABASE + value: "TAP_SCHEMA" + - name: MYSQL_USER + value: "TAP_SCHEMA" + - name: MYSQL_PASSWORD + value: "TAP_SCHEMA" + - name: MYSQL_ROOT_HOST + value: "%" + image: "{{ .Values.tap_schema.image.repository }}:{{ .Values.tap_schema.image.tag}}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - containerPort: 3306 + protocol: "TCP" + {{- with .Values.tap_schema.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + imagePullSecrets: + - name: "pull-secret" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/tap/templates/tap-schema-db-service.yaml b/applications/tap/templates/tap-schema-db-service.yaml new file mode 100644 index 0000000000..e5b9dd0856 --- /dev/null +++ b/applications/tap/templates/tap-schema-db-service.yaml @@ -0,0 +1,15 @@ +kind: Service +apiVersion: v1 +metadata: + name: {{ template "cadc-tap.fullname" . }}-schema-db + labels: + {{- include "cadc-tap.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - protocol: "TCP" + port: 3306 + targetPort: 3306 + selector: + {{- include "cadc-tap.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: "schema-db" diff --git a/applications/tap/values-ccin2p3.yaml b/applications/tap/values-ccin2p3.yaml index c573f85a6c..49fc426a11 100644 --- a/applications/tap/values-ccin2p3.yaml +++ b/applications/tap/values-ccin2p3.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfprod-tap" + config: gcsBucket: "async-results.lsst.codes" gcsBucketUrl: "https://cccephs3.in2p3.fr:8080" diff --git a/applications/tap/values-idfdev.yaml b/applications/tap/values-idfdev.yaml index b0a7af3d2f..4e431d3bbc 100644 --- a/applications/tap/values-idfdev.yaml +++ b/applications/tap/values-idfdev.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfdev-tap" + resources: requests: cpu: 2.0 diff --git a/applications/tap/values-idfint.yaml b/applications/tap/values-idfint.yaml index 11bab7d2a0..422e0e4ff5 100644 --- a/applications/tap/values-idfint.yaml +++ b/applications/tap/values-idfint.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfint-tap" + resources: requests: cpu: 2.0 diff --git a/applications/tap/values-idfprod.yaml b/applications/tap/values-idfprod.yaml index a96be3b075..92c4a7624f 100644 --- a/applications/tap/values-idfprod.yaml +++ b/applications/tap/values-idfprod.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfprod-tap" + resources: requests: cpu: 2.0 diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml index 6e3f1aca1e..01533a9cda 100644 --- a/applications/tap/values-minikube.yaml +++ b/applications/tap/values-minikube.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfprod-tap" + config: gcsBucket: "async-results.lsst.codes" gcsBucketUrl: "http://async-results.lsst.codes" diff --git a/applications/tap/values-roe.yaml b/applications/tap/values-roe.yaml index 9f302b709c..21202d7c53 100644 --- a/applications/tap/values-roe.yaml +++ b/applications/tap/values-roe.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-idfprod-tap" + config: gcsBucket: "async" gcsBucketUrl: "https://somerville.ed.ac.uk:6780" diff --git a/applications/tap/values-usdfdev.yaml b/applications/tap/values-usdfdev.yaml index 83a2b11731..b269a9317e 100644 --- a/applications/tap/values-usdfdev.yaml +++ b/applications/tap/values-usdfdev.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-usdf-dev-tap" + qserv: host: "172.24.49.51:4040" mock: diff --git a/applications/tap/values-usdfprod.yaml b/applications/tap/values-usdfprod.yaml index 83a2b11731..ee3233c612 100644 --- a/applications/tap/values-usdfprod.yaml +++ b/applications/tap/values-usdfprod.yaml @@ -1,3 +1,7 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-tap" + qserv: host: "172.24.49.51:4040" mock: diff --git a/applications/tap/values.yaml b/applications/tap/values.yaml index 1aec4d4528..5d00a50a10 100644 --- a/applications/tap/values.yaml +++ b/applications/tap/values.yaml @@ -53,7 +53,7 @@ vaultSecretsPath: "" config: # -- Address to a MySQL database containing TAP schema data - tapSchemaAddress: "tap-schema-db.tap-schema.svc.cluster.local:3306" + tapSchemaAddress: "cadc-tap-schema-db:3306" # -- Datalink payload URL datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip" @@ -114,6 +114,12 @@ qserv: # -- Affinity rules for the mock QServ pod affinity: {} +tap_schema: + image: + repository: {} + tag: "1.2.11" + resources: {} + 
uws: image: # -- UWS database image to use From e7197b83fa9d1d7b043514a810121187f3487d9d Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 7 Jun 2023 11:44:40 -0700 Subject: [PATCH 064/308] [DM-39343] Fix url for livetap --- applications/obstap/templates/tap-ingress-anonymous.yaml | 4 ++-- applications/obstap/templates/tap-ingress-authenticated.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/obstap/templates/tap-ingress-anonymous.yaml b/applications/obstap/templates/tap-ingress-anonymous.yaml index 575173975c..c9b5173bd7 100644 --- a/applications/obstap/templates/tap-ingress-anonymous.yaml +++ b/applications/obstap/templates/tap-ingress-anonymous.yaml @@ -17,7 +17,7 @@ template: nginx.ingress.kubernetes.io/proxy-read-timeout: "900" nginx.ingress.kubernetes.io/rewrite-target: "/tap/$1" nginx.ingress.kubernetes.io/proxy-redirect-from: "http://$host/tap/" - nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/obstap/" + nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/live/" nginx.ingress.kubernetes.io/ssl-redirect: "true" nginx.ingress.kubernetes.io/use-regex: "true" {{- with .Values.ingress.anonymousAnnotations }} @@ -28,7 +28,7 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/api/obstap/(availability|capabilities|swagger-ui.*)" + - path: "/api/live/(availability|capabilities|swagger-ui.*)" pathType: "ImplementationSpecific" backend: service: diff --git a/applications/obstap/templates/tap-ingress-authenticated.yaml b/applications/obstap/templates/tap-ingress-authenticated.yaml index 2b38dfeb93..6323445829 100644 --- a/applications/obstap/templates/tap-ingress-authenticated.yaml +++ b/applications/obstap/templates/tap-ingress-authenticated.yaml @@ -24,7 +24,7 @@ template: nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" nginx.ingress.kubernetes.io/rewrite-target: "/tap/$2" nginx.ingress.kubernetes.io/proxy-redirect-from: "http://$host/tap/" - nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/obstap/" + nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/live/" nginx.ingress.kubernetes.io/ssl-redirect: "true" nginx.ingress.kubernetes.io/use-regex: "true" {{- with .Values.ingress.authenticatedAnnotations }} @@ -35,7 +35,7 @@ template: - host: {{ required "global.host must be set" .Values.global.host | quote }} http: paths: - - path: "/api/obstap(/|$)(.*)" + - path: "/api/live(/|$)(.*)" pathType: "ImplementationSpecific" backend: service: From 7b1d10d50a914fd1d6356e8360c86680022702a6 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 7 Jun 2023 11:51:39 -0700 Subject: [PATCH 065/308] [DM-39343] Rename obstap to livetap --- applications/{obstap => livetap}/Chart.yaml | 0 applications/{obstap => livetap}/README.md | 0 .../{obstap => livetap}/templates/_helpers.tpl | 0 .../templates/mock-pg-deployment.yaml | 0 .../templates/mock-pg-networkpolicy.yaml | 0 .../{obstap => livetap}/templates/mock-pg-service.yaml | 0 .../{obstap => livetap}/templates/tap-deployment.yaml | 0 .../templates/tap-ingress-anonymous.yaml | 0 .../templates/tap-ingress-authenticated.yaml | 0 .../templates/tap-networkpolicy.yaml | 0 .../templates/tap-schema-db-deployment.yaml | 0 .../templates/tap-schema-db-service.yaml | 0 .../{obstap => livetap}/templates/tap-service.yaml | 0 .../templates/uws-db-deployment.yaml | 0 .../templates/uws-db-networkpolicy.yaml | 0 .../{obstap => livetap}/templates/uws-db-service.yaml | 0 .../{obstap => 
livetap}/templates/vault-secrets.yaml | 0 applications/{obstap => livetap}/values-minikube.yaml | 0 applications/{obstap => livetap}/values-usdfdev.yaml | 0 applications/{obstap => livetap}/values.yaml | 0 environments/README.md | 2 +- ...bstap-application.yaml => livetap-application.yaml} | 10 +++++----- environments/values-idfint.yaml | 2 -- environments/values-usdfdev.yaml | 2 ++ environments/values-usdfprod.yaml | 2 -- environments/values.yaml | 4 ++-- 26 files changed, 10 insertions(+), 12 deletions(-) rename applications/{obstap => livetap}/Chart.yaml (100%) rename applications/{obstap => livetap}/README.md (100%) rename applications/{obstap => livetap}/templates/_helpers.tpl (100%) rename applications/{obstap => livetap}/templates/mock-pg-deployment.yaml (100%) rename applications/{obstap => livetap}/templates/mock-pg-networkpolicy.yaml (100%) rename applications/{obstap => livetap}/templates/mock-pg-service.yaml (100%) rename applications/{obstap => livetap}/templates/tap-deployment.yaml (100%) rename applications/{obstap => livetap}/templates/tap-ingress-anonymous.yaml (100%) rename applications/{obstap => livetap}/templates/tap-ingress-authenticated.yaml (100%) rename applications/{obstap => livetap}/templates/tap-networkpolicy.yaml (100%) rename applications/{obstap => livetap}/templates/tap-schema-db-deployment.yaml (100%) rename applications/{obstap => livetap}/templates/tap-schema-db-service.yaml (100%) rename applications/{obstap => livetap}/templates/tap-service.yaml (100%) rename applications/{obstap => livetap}/templates/uws-db-deployment.yaml (100%) rename applications/{obstap => livetap}/templates/uws-db-networkpolicy.yaml (100%) rename applications/{obstap => livetap}/templates/uws-db-service.yaml (100%) rename applications/{obstap => livetap}/templates/vault-secrets.yaml (100%) rename applications/{obstap => livetap}/values-minikube.yaml (100%) rename applications/{obstap => livetap}/values-usdfdev.yaml (100%) rename applications/{obstap => livetap}/values.yaml (100%) rename environments/templates/{obstap-application.yaml => livetap-application.yaml} (86%) diff --git a/applications/obstap/Chart.yaml b/applications/livetap/Chart.yaml similarity index 100% rename from applications/obstap/Chart.yaml rename to applications/livetap/Chart.yaml diff --git a/applications/obstap/README.md b/applications/livetap/README.md similarity index 100% rename from applications/obstap/README.md rename to applications/livetap/README.md diff --git a/applications/obstap/templates/_helpers.tpl b/applications/livetap/templates/_helpers.tpl similarity index 100% rename from applications/obstap/templates/_helpers.tpl rename to applications/livetap/templates/_helpers.tpl diff --git a/applications/obstap/templates/mock-pg-deployment.yaml b/applications/livetap/templates/mock-pg-deployment.yaml similarity index 100% rename from applications/obstap/templates/mock-pg-deployment.yaml rename to applications/livetap/templates/mock-pg-deployment.yaml diff --git a/applications/obstap/templates/mock-pg-networkpolicy.yaml b/applications/livetap/templates/mock-pg-networkpolicy.yaml similarity index 100% rename from applications/obstap/templates/mock-pg-networkpolicy.yaml rename to applications/livetap/templates/mock-pg-networkpolicy.yaml diff --git a/applications/obstap/templates/mock-pg-service.yaml b/applications/livetap/templates/mock-pg-service.yaml similarity index 100% rename from applications/obstap/templates/mock-pg-service.yaml rename to applications/livetap/templates/mock-pg-service.yaml diff 
--git a/applications/obstap/templates/tap-deployment.yaml b/applications/livetap/templates/tap-deployment.yaml similarity index 100% rename from applications/obstap/templates/tap-deployment.yaml rename to applications/livetap/templates/tap-deployment.yaml diff --git a/applications/obstap/templates/tap-ingress-anonymous.yaml b/applications/livetap/templates/tap-ingress-anonymous.yaml similarity index 100% rename from applications/obstap/templates/tap-ingress-anonymous.yaml rename to applications/livetap/templates/tap-ingress-anonymous.yaml diff --git a/applications/obstap/templates/tap-ingress-authenticated.yaml b/applications/livetap/templates/tap-ingress-authenticated.yaml similarity index 100% rename from applications/obstap/templates/tap-ingress-authenticated.yaml rename to applications/livetap/templates/tap-ingress-authenticated.yaml diff --git a/applications/obstap/templates/tap-networkpolicy.yaml b/applications/livetap/templates/tap-networkpolicy.yaml similarity index 100% rename from applications/obstap/templates/tap-networkpolicy.yaml rename to applications/livetap/templates/tap-networkpolicy.yaml diff --git a/applications/obstap/templates/tap-schema-db-deployment.yaml b/applications/livetap/templates/tap-schema-db-deployment.yaml similarity index 100% rename from applications/obstap/templates/tap-schema-db-deployment.yaml rename to applications/livetap/templates/tap-schema-db-deployment.yaml diff --git a/applications/obstap/templates/tap-schema-db-service.yaml b/applications/livetap/templates/tap-schema-db-service.yaml similarity index 100% rename from applications/obstap/templates/tap-schema-db-service.yaml rename to applications/livetap/templates/tap-schema-db-service.yaml diff --git a/applications/obstap/templates/tap-service.yaml b/applications/livetap/templates/tap-service.yaml similarity index 100% rename from applications/obstap/templates/tap-service.yaml rename to applications/livetap/templates/tap-service.yaml diff --git a/applications/obstap/templates/uws-db-deployment.yaml b/applications/livetap/templates/uws-db-deployment.yaml similarity index 100% rename from applications/obstap/templates/uws-db-deployment.yaml rename to applications/livetap/templates/uws-db-deployment.yaml diff --git a/applications/obstap/templates/uws-db-networkpolicy.yaml b/applications/livetap/templates/uws-db-networkpolicy.yaml similarity index 100% rename from applications/obstap/templates/uws-db-networkpolicy.yaml rename to applications/livetap/templates/uws-db-networkpolicy.yaml diff --git a/applications/obstap/templates/uws-db-service.yaml b/applications/livetap/templates/uws-db-service.yaml similarity index 100% rename from applications/obstap/templates/uws-db-service.yaml rename to applications/livetap/templates/uws-db-service.yaml diff --git a/applications/obstap/templates/vault-secrets.yaml b/applications/livetap/templates/vault-secrets.yaml similarity index 100% rename from applications/obstap/templates/vault-secrets.yaml rename to applications/livetap/templates/vault-secrets.yaml diff --git a/applications/obstap/values-minikube.yaml b/applications/livetap/values-minikube.yaml similarity index 100% rename from applications/obstap/values-minikube.yaml rename to applications/livetap/values-minikube.yaml diff --git a/applications/obstap/values-usdfdev.yaml b/applications/livetap/values-usdfdev.yaml similarity index 100% rename from applications/obstap/values-usdfdev.yaml rename to applications/livetap/values-usdfdev.yaml diff --git a/applications/obstap/values.yaml 
b/applications/livetap/values.yaml similarity index 100% rename from applications/obstap/values.yaml rename to applications/livetap/values.yaml diff --git a/environments/README.md b/environments/README.md index c9e91660fb..945d0d6799 100644 --- a/environments/README.md +++ b/environments/README.md @@ -25,7 +25,7 @@ | nublado.enabled | bool | `false` | | | nublado2.enabled | bool | `false` | | | obsloctap.enabled | bool | `false` | | -| obstap.enabled | bool | `false` | | +| livetap.enabled | bool | `false` | | | onepasswordUuid | string | `"dg5afgiadsffeklfr6jykqymeu"` | UUID of the 1Password item in which to find Vault tokens | | plot-navigator.enabled | bool | `false` | | | portal.enabled | bool | `false` | | diff --git a/environments/templates/obstap-application.yaml b/environments/templates/livetap-application.yaml similarity index 86% rename from environments/templates/obstap-application.yaml rename to environments/templates/livetap-application.yaml index 0816365292..b9b4161242 100644 --- a/environments/templates/obstap-application.yaml +++ b/environments/templates/livetap-application.yaml @@ -1,8 +1,8 @@ -{{- if .Values.obstap.enabled -}} +{{- if .Values.livetap.enabled -}} apiVersion: v1 kind: Namespace metadata: - name: obstap + name: livetap spec: finalizers: - kubernetes @@ -10,17 +10,17 @@ spec: apiVersion: argoproj.io/v1alpha1 kind: Application metadata: - name: obstap + name: livetap namespace: argocd finalizers: - resources-finalizer.argocd.argoproj.io spec: destination: - namespace: obstap + namespace: livetap server: https://kubernetes.default.svc project: default source: - path: applications/obstap + path: applications/livetap repoURL: {{ .Values.repoURL }} targetRevision: {{ .Values.targetRevision }} helm: diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 3b419a2bf8..78ee5ccde3 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -37,8 +37,6 @@ nublado: enabled: true nublado2: enabled: true -obstap: - enabled: true plot-navigator: enabled: true portal: diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index edc9e692b0..73a2128475 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -24,6 +24,8 @@ ingress-nginx: enabled: false kubernetes-replicator: enabled: false +livetap: + enabled: true mobu: enabled: true moneypenny: diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index 6142490c7f..2d9d56d7d1 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -36,8 +36,6 @@ nublado: enabled: false nublado2: enabled: true -obstap: - enabled: true plot-navigator: enabled: true portal: diff --git a/environments/values.yaml b/environments/values.yaml index 3946a1256f..f217d5d736 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -38,6 +38,8 @@ kubernetes-replicator: enabled: false linters: enabled: false +livetap: + enabled: false mobu: enabled: false moneypenny: @@ -52,8 +54,6 @@ nublado2: enabled: false obsloctap: enabled: false -obstap: - enabled: false plot-navigator: enabled: false portal: From 281e86d5acb18de6eedfaa2e80eb9241a11f2b12 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 7 Jun 2023 12:04:18 -0700 Subject: [PATCH 066/308] [DM-39343] Fix linter errors --- applications/livetap/README.md | 5 ++++- applications/tap/README.md | 5 ++++- docs/applications/index.rst | 2 +- docs/applications/{obstap => livetap}/index.rst | 12 ++++++------ 
.../applications/{obstap => livetap}/notebook-tap.py | 0 docs/applications/livetap/notes.rst | 11 +++++++++++ docs/applications/{obstap => livetap}/portal-tap.py | 0 docs/applications/livetap/values.md | 12 ++++++++++++ docs/applications/obstap/notes.rst | 11 ----------- docs/applications/obstap/values.md | 12 ------------ environments/README.md | 2 +- 11 files changed, 39 insertions(+), 33 deletions(-) rename docs/applications/{obstap => livetap}/index.rst (56%) rename docs/applications/{obstap => livetap}/notebook-tap.py (100%) create mode 100644 docs/applications/livetap/notes.rst rename docs/applications/{obstap => livetap}/portal-tap.py (100%) create mode 100644 docs/applications/livetap/values.md delete mode 100644 docs/applications/obstap/notes.rst delete mode 100644 docs/applications/obstap/values.md diff --git a/applications/livetap/README.md b/applications/livetap/README.md index ee0b20be14..bcc4be37b7 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -18,7 +18,7 @@ IVOA TAP service | config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket | | config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. | -| config.tapSchemaAddress | string | `"tap-schema-db.tap-schema.svc.cluster.local:3306"` | Address to a MySQL database containing TAP schema data | +| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data | | fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | @@ -45,6 +45,9 @@ IVOA TAP service | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | +| tap_schema.image.repository | object | `{}` | | +| tap_schema.image.tag | string | `"1.2.11"` | | +| tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | diff --git a/applications/tap/README.md b/applications/tap/README.md index f709fc5a44..372ab024d3 100644 --- a/applications/tap/README.md +++ b/applications/tap/README.md @@ -18,7 +18,7 @@ IVOA TAP service | config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket | | config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. 
| -| config.tapSchemaAddress | string | `"tap-schema-db.tap-schema.svc.cluster.local:3306"` | Address to a MySQL database containing TAP schema data | +| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data | | fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | @@ -43,6 +43,9 @@ IVOA TAP service | qserv.mock.tolerations | list | `[]` | Tolerations for the mock QServ pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | +| tap_schema.image.repository | object | `{}` | | +| tap_schema.image.tag | string | `"1.2.11"` | | +| tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | diff --git a/docs/applications/index.rst b/docs/applications/index.rst index c9713c466a..d4cccbc9d1 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -28,12 +28,12 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde datalinker/index hips/index linters/index + livetap/index mobu/index moneypenny/index noteburst/index nublado/index nublado2/index - obstap/index portal/index semaphore/index sherlock/index diff --git a/docs/applications/obstap/index.rst b/docs/applications/livetap/index.rst similarity index 56% rename from docs/applications/obstap/index.rst rename to docs/applications/livetap/index.rst index 86c9be1e34..ef6c41b229 100644 --- a/docs/applications/obstap/index.rst +++ b/docs/applications/livetap/index.rst @@ -1,12 +1,12 @@ -.. px-app:: obstap +.. px-app:: livetap -########################################### -obstap — IVOA OBSCore Table Access Protocol -########################################### +############################################ +livetap — IVOA livetap Table Access Protocol +############################################ -OBSTAP_ (OBSCore Table Access Protocol) is an IVOA_ service that provides access to the ObsCore table which is hosted on postgres. +LIVETAP (Live Obscore Table Access Protocol) is an IVOA_ service that provides access to the live obscore table which is hosted on postgres. On the Rubin Science Platform, it is provided by `tap-postgres `__, which is derived from the `CADC TAP service `__. -This service provides access to the ObsTAP tables that are created and served by the butler. +This service provides access to the ObsCore tables that are created and served by the butler and updated live. The TAP data itself, apart from schema queries, comes from Postgres. The TAP schema is provided by the separate :px-app:`tap-schema` application. diff --git a/docs/applications/obstap/notebook-tap.py b/docs/applications/livetap/notebook-tap.py similarity index 100% rename from docs/applications/obstap/notebook-tap.py rename to docs/applications/livetap/notebook-tap.py diff --git a/docs/applications/livetap/notes.rst b/docs/applications/livetap/notes.rst new file mode 100644 index 0000000000..74b82e3a41 --- /dev/null +++ b/docs/applications/livetap/notes.rst @@ -0,0 +1,11 @@ +.. 
px-app-notes:: livetap + +############################## +LiveTAP architecture and notes +############################## + +The ``livetap`` application consists of the TAP Java web application, a PostgreSQL database used to track user job submissions (the backing store for the UWS_ protocol), and (on development deployments) a mock version of postgres. There is a table that is updated by the butler to keep a live version of the ObsCore table. + +.. diagrams:: notebook-tap.py + +.. diagrams:: portal-tap.py diff --git a/docs/applications/obstap/portal-tap.py b/docs/applications/livetap/portal-tap.py similarity index 100% rename from docs/applications/obstap/portal-tap.py rename to docs/applications/livetap/portal-tap.py diff --git a/docs/applications/livetap/values.md b/docs/applications/livetap/values.md new file mode 100644 index 0000000000..0f4d697630 --- /dev/null +++ b/docs/applications/livetap/values.md @@ -0,0 +1,12 @@ +```{px-app-values} livetap +``` + +# livetap Helm values reference + +Helm values reference table for the {px-app}`livetap` application. + +```{include} ../../../applications/livetap/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/obstap/notes.rst b/docs/applications/obstap/notes.rst deleted file mode 100644 index 56fe36427a..0000000000 --- a/docs/applications/obstap/notes.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. px-app-notes:: obstap - -############################# -OBSTAP architecture and notes -############################# - -The ``obstap`` application consists of the TAP Java web application, a PostgreSQL database used to track user job submissions (the backing store for the UWS_ protocol), and (on development deployments) a mock version of postgres. - -.. diagrams:: notebook-tap.py - -.. diagrams:: portal-tap.py diff --git a/docs/applications/obstap/values.md b/docs/applications/obstap/values.md deleted file mode 100644 index ca5d90c35e..0000000000 --- a/docs/applications/obstap/values.md +++ /dev/null @@ -1,12 +0,0 @@ -```{px-app-values} obstap -``` - -# obstap Helm values reference - -Helm values reference table for the {px-app}`obstap` application. 
- -```{include} ../../../applications/obstap/README.md ---- -start-after: "## Values" ---- -``` diff --git a/environments/README.md b/environments/README.md index 945d0d6799..69d1323750 100644 --- a/environments/README.md +++ b/environments/README.md @@ -18,6 +18,7 @@ | ingress-nginx.enabled | bool | `false` | | | kubernetes-replicator.enabled | bool | `false` | | | linters.enabled | bool | `false` | | +| livetap.enabled | bool | `false` | | | mobu.enabled | bool | `false` | | | moneypenny.enabled | bool | `false` | | | narrativelog.enabled | bool | `false` | | @@ -25,7 +26,6 @@ | nublado.enabled | bool | `false` | | | nublado2.enabled | bool | `false` | | | obsloctap.enabled | bool | `false` | | -| livetap.enabled | bool | `false` | | | onepasswordUuid | string | `"dg5afgiadsffeklfr6jykqymeu"` | UUID of the 1Password item in which to find Vault tokens | | plot-navigator.enabled | bool | `false` | | | portal.enabled | bool | `false` | | From b4049f99bb52ea674147a642bf2fc2cf5263c578 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 7 Jun 2023 13:23:16 -0700 Subject: [PATCH 067/308] [DM-39343] Add ssotap deployments for usdfdev and usdfprod --- applications/ssotap/values-usdfdev.yaml | 23 ++++++++++++++++ applications/ssotap/values-usdfprod.yaml | 34 ++++++++++++++++++++++++ environments/values-usdfdev.yaml | 2 ++ environments/values-usdfprod.yaml | 2 ++ 4 files changed, 61 insertions(+) create mode 100644 applications/ssotap/values-usdfdev.yaml create mode 100644 applications/ssotap/values-usdfprod.yaml diff --git a/applications/ssotap/values-usdfdev.yaml b/applications/ssotap/values-usdfdev.yaml new file mode 100644 index 0000000000..728f4eb94a --- /dev/null +++ b/applications/ssotap/values-usdfdev.yaml @@ -0,0 +1,23 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-usdf-dev-sso" + +resources: + requests: + cpu: 2.0 + memory: "2G" + limits: + cpu: 8.0 + memory: "32G" + +config: + gcsBucket: "async-results.lsst.codes" + gcsBucketUrl: "http://async-results.lsst.codes" + jvmMaxHeapSize: "31G" + +pg: + mock: + enabled: false + database: "dp03_catalogs" + host: "usdf-pg-catalogs.slac.stanford.edu:5432" + username: "dp03" diff --git a/applications/ssotap/values-usdfprod.yaml b/applications/ssotap/values-usdfprod.yaml new file mode 100644 index 0000000000..1f3fed008b --- /dev/null +++ b/applications/ssotap/values-usdfprod.yaml @@ -0,0 +1,34 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-sso" + +resources: + requests: + cpu: 2.0 + memory: "2G" + limits: + cpu: 8.0 + memory: "32G" + +replicaCount: 2 + +config: + gcsBucket: "async-results.lsst.codes" + gcsBucketUrl: "http://async-results.lsst.codes" + jvmMaxHeapSize: "31G" + +pg: + mock: + enabled: false + database: "dp03_catalogs" + host: "usdf-pg-catalogs.slac.stanford.edu:5432" + username: "dp03" + +uws: + resources: + requests: + cpu: 0.25 + memory: "1G" + limits: + cpu: 2.0 + memory: "4G" diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 73a2128475..26349b35f8 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -56,6 +56,8 @@ semaphore: enabled: true sherlock: enabled: false +ssotap: + enabled: true squarebot: enabled: false squareone: diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index 2d9d56d7d1..5191e65f66 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -50,6 +50,8 @@ semaphore: enabled: true sherlock: enabled: false +ssotap: + enabled: true squarebot: 
enabled: false squareone: From 51e0799c3107503eff2caef4feab24a35529bef0 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 7 Jun 2023 15:14:16 -0700 Subject: [PATCH 068/308] [DM-39343] tap schema databases to 2.0.0 --- applications/livetap/README.md | 2 +- applications/livetap/values.yaml | 2 +- applications/ssotap/README.md | 2 +- applications/ssotap/values.yaml | 2 +- applications/tap/README.md | 2 +- applications/tap/values.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/livetap/README.md b/applications/livetap/README.md index bcc4be37b7..ef85874da7 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -46,7 +46,7 @@ IVOA TAP service | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"1.2.11"` | | +| tap_schema.image.tag | string | `"2.0.0"` | | | tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | diff --git a/applications/livetap/values.yaml b/applications/livetap/values.yaml index b2d51618ce..109e7d20b7 100644 --- a/applications/livetap/values.yaml +++ b/applications/livetap/values.yaml @@ -123,7 +123,7 @@ pg: tap_schema: image: repository: {} - tag: "1.2.11" + tag: "2.0.0" resources: {} uws: diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index bcc4be37b7..ef85874da7 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -46,7 +46,7 @@ IVOA TAP service | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"1.2.11"` | | +| tap_schema.image.tag | string | `"2.0.0"` | | | tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml index b2d51618ce..109e7d20b7 100644 --- a/applications/ssotap/values.yaml +++ b/applications/ssotap/values.yaml @@ -123,7 +123,7 @@ pg: tap_schema: image: repository: {} - tag: "1.2.11" + tag: "2.0.0" resources: {} uws: diff --git a/applications/tap/README.md b/applications/tap/README.md index 372ab024d3..dbf9c302eb 100644 --- a/applications/tap/README.md +++ b/applications/tap/README.md @@ -44,7 +44,7 @@ IVOA TAP service | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"1.2.11"` | | +| tap_schema.image.tag | string | `"2.0.0"` | | | tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | diff --git a/applications/tap/values.yaml b/applications/tap/values.yaml index 5d00a50a10..e8766c9aa7 100644 --- a/applications/tap/values.yaml +++ b/applications/tap/values.yaml @@ -117,7 +117,7 @@ qserv: tap_schema: image: repository: {} - tag: "1.2.11" + tag: "2.0.0" resources: {} uws: From 
afa0f78016122d059ab946deddf4c49760e99e47 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 7 Jun 2023 15:59:30 -0700 Subject: [PATCH 069/308] [DM-39343] Try to fix test deploy --- applications/livetap/README.md | 2 +- applications/livetap/values.yaml | 2 +- applications/ssotap/README.md | 2 +- applications/ssotap/values.yaml | 2 +- applications/tap/README.md | 2 +- applications/tap/values.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/livetap/README.md b/applications/livetap/README.md index ef85874da7..42fa922a58 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -46,7 +46,7 @@ IVOA TAP service | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"2.0.0"` | | +| tap_schema.image.tag | string | `"2.0.1"` | | | tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | diff --git a/applications/livetap/values.yaml b/applications/livetap/values.yaml index 109e7d20b7..d54066f272 100644 --- a/applications/livetap/values.yaml +++ b/applications/livetap/values.yaml @@ -123,7 +123,7 @@ pg: tap_schema: image: repository: {} - tag: "2.0.0" + tag: "2.0.1" resources: {} uws: diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index ef85874da7..42fa922a58 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -46,7 +46,7 @@ IVOA TAP service | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"2.0.0"` | | +| tap_schema.image.tag | string | `"2.0.1"` | | | tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml index 109e7d20b7..d54066f272 100644 --- a/applications/ssotap/values.yaml +++ b/applications/ssotap/values.yaml @@ -123,7 +123,7 @@ pg: tap_schema: image: repository: {} - tag: "2.0.0" + tag: "2.0.1" resources: {} uws: diff --git a/applications/tap/README.md b/applications/tap/README.md index dbf9c302eb..0bb1d5c330 100644 --- a/applications/tap/README.md +++ b/applications/tap/README.md @@ -44,7 +44,7 @@ IVOA TAP service | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"2.0.0"` | | +| tap_schema.image.tag | string | `"2.0.1"` | | | tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | diff --git a/applications/tap/values.yaml b/applications/tap/values.yaml index e8766c9aa7..52723a0999 100644 --- a/applications/tap/values.yaml +++ b/applications/tap/values.yaml @@ -117,7 +117,7 @@ qserv: tap_schema: image: repository: {} - tag: "2.0.0" + tag: "2.0.1" resources: {} uws: From 526949df76c3fd11b2796416de27bd7d0461a1c2 Mon Sep 17 00:00:00 2001 From: Jonathan 
Sick Date: Thu, 8 Jun 2023 18:31:23 -0400 Subject: [PATCH 070/308] Fix typo in deploy-from-a-branch.rst --- docs/developers/deploy-from-a-branch.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/developers/deploy-from-a-branch.rst b/docs/developers/deploy-from-a-branch.rst index 1eb2fc813c..50de6bcef9 100644 --- a/docs/developers/deploy-from-a-branch.rst +++ b/docs/developers/deploy-from-a-branch.rst @@ -21,7 +21,7 @@ Through this process it is possible to develop an application in a fairly tight Preparing and pushing a branch ============================== -Start by creating a branch of the `phalanx repository`_ and editing your appliation. +Start by creating a branch of the `phalanx repository`_ and editing your application. You can make many types of edits to the application. The most straightforward changes are updates to your application's Docker images or the Helm sub-charts the application depends on. From 19d7ee22f5e3d303e0641b5ea3def7f793ad8e81 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 8 Jun 2023 19:12:46 -0400 Subject: [PATCH 071/308] Reorganize argo-workflows docs --- docs/applications/argo-workflows/index.rst | 6 +++--- docs/applications/index.rst | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/applications/argo-workflows/index.rst b/docs/applications/argo-workflows/index.rst index cde4b808bb..da6c4b6846 100644 --- a/docs/applications/argo-workflows/index.rst +++ b/docs/applications/argo-workflows/index.rst @@ -1,8 +1,8 @@ .. px-app:: argo-workflows -############################################# -production-tools — Data Production monitoring -############################################# +############################### +argo-workflows — Argo workflows +############################### Argo Workflows is a workflow engine for job parallelization on Kubernetes. 
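For readers who have not used the engine described above, a minimal Workflow manifest gives a feel for what Argo Workflows actually runs. This is an illustrative sketch only — the generateName, entrypoint, and image are invented for the example and do not correspond to anything deployed by this patch series:

apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-world-    # illustrative name, not used by Phalanx
spec:
  entrypoint: main
  templates:
    - name: main
      container:
        image: busybox           # placeholder image for the example
        command: ["echo", "hello from an Argo Workflow"]

Submitting a manifest like this produces a single pod whose logs contain the echoed message; real workflows chain multiple templates as a DAG or steps list.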
diff --git a/docs/applications/index.rst b/docs/applications/index.rst index d4cccbc9d1..c3b9c13cdb 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -13,7 +13,6 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde :caption: Cluster infrastructure argo-cd/index - argo-workflows/index cert-manager/index ingress-nginx/index gafaelfawr/index @@ -49,6 +48,7 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde :maxdepth: 1 :caption: RSP+ + argo-workflows/index alert-stream-broker/index exposurelog/index narrativelog/index From b97a1f37f868aaabb5a37ff614ce6b7f851aa725 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 7 Jun 2023 11:33:21 -0700 Subject: [PATCH 072/308] Turn on telegraf consumer in sasquatch idf-dev, point to AJT dev image --- applications/sasquatch/values-idfdev.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 86412958b5..63c22a232b 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -26,7 +26,9 @@ influxdb2: hostname: data-dev.lsst.cloud telegraf-kafka-consumer: - enabled: false + enabled: true + image: + tag: "refreshregex" kafkaConsumers: test: enabled: true From f0b90e51b5210060378b5e993c566f540b223c9f Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 7 Jun 2023 11:38:15 -0700 Subject: [PATCH 073/308] keep same version of chronograf as is running in idf-dev --- applications/sasquatch/values-idfdev.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 63c22a232b..f4e40fd31b 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -62,6 +62,8 @@ chronograf: ingress: enabled: true hostname: data-dev.lsst.cloud + image: + tag: "1.10.1" env: GENERIC_NAME: "OIDC" GENERIC_AUTH_URL: https://data-dev.lsst.cloud/auth/openid/login From 77a9ff9477d9e3536b15f2e2730af04f35bf44b2 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 7 Jun 2023 13:53:52 -0700 Subject: [PATCH 074/308] enable influxdb2 in data-dev --- applications/sasquatch/values-idfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index f4e40fd31b..2ba42d98f1 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -20,7 +20,7 @@ influxdb: hostname: data-dev.lsst.cloud influxdb2: - enabled: false + enabled: true ingress: enabled: true hostname: data-dev.lsst.cloud From f3c3866352c5199de4d547c64b5ad3b56c470720 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 8 Jun 2023 12:42:12 -0700 Subject: [PATCH 075/308] Change default telegraf-kafka image, parameterize topicRefreshInterval --- .../sasquatch/charts/telegraf-kafka-consumer/README.md | 3 ++- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 2 +- .../sasquatch/charts/telegraf-kafka-consumer/values.yaml | 5 ++++- applications/sasquatch/values-idfdev.yaml | 2 ++ 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index 6e50cc4eae..a8005a8350 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ 
-18,12 +18,13 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | | image.pullPolicy | string | IfNotPresent | Image pull policy. | | image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. | -| image.tag | string | `"kafka-regexp"` | Telegraf image tag. | +| image.tag | string | `"refreshregex"` | Telegraf image tag. | | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | | influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. | | kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | | kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. | | kafkaConsumers.test.interval | string | `"1s"` | Data collection interval for the Kafka consumer. | +| kafkaConsumers.test.topicRefreshInterval | string | `"60s"` | Default interval for refreshing topics to check for new or removed regexp matches | | kafkaConsumers.test.topicRegexps | string | `"[ \".*Test\" ]\n"` | List of regular expressions to specify the Kafka topics consumed by this agent. | | nodeSelector | object | `{}` | Node labels for pod assignment. | | podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 4d8f32c7eb..ba0a71dba5 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -44,7 +44,7 @@ data: sasl_mechanism = "SCRAM-SHA-512" sasl_password = "$TELEGRAF_PASSWORD" sasl_username = "telegraf" - topic_refresh_interval = "60s" + topic_refresh_interval = {{ default "60s" $value.topicRefreshInterval | quote }} topic_regexps = {{ $value.topicRegexps }} offset = "newest" consumer_fetch_default = "20MB" diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index f0bf7f8e31..aeeb3a5701 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -8,7 +8,7 @@ image: # -- Telegraf image repository. repo: "lsstsqre/telegraf" # -- Telegraf image tag. - tag: "kafka-regexp" + tag: "refreshregex" # -- Image pull policy. # @default -- IfNotPresent pullPolicy: "Always" @@ -55,6 +55,9 @@ kafkaConsumers: # -- List of regular expressions to specify the Kafka topics consumed by this agent. topicRegexps: | [ ".*Test" ] + # -- Default interval for refreshing topics to check for new or + # removed regexp matches + topicRefreshInterval: "60s" influxdb2: # -- Name of the InfluxDB v2 bucket to write to. 
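Putting the chart keys documented above together, a per-environment values override for one consumer could look like the following sketch. The consumer name "example" and the topic regular expression are placeholders; the keys themselves (enabled, replicaCount, topicRefreshInterval, topicRegexps) are the ones defined by the telegraf-kafka-consumer chart:

telegraf-kafka-consumer:
  enabled: true
  kafkaConsumers:
    example:                      # hypothetical consumer name
      enabled: true
      replicaCount: 1
      # read by the ConfigMap template as $value.topicRefreshInterval
      topicRefreshInterval: "60s"
      # Kafka topics consumed by this agent, as a regexp list
      topicRegexps: |
        [ ".*Example" ]

Note that the ConfigMap template shown above falls back to "60s" when topicRefreshInterval is omitted for a consumer.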
diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 2ba42d98f1..88d6cafbd8 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -33,11 +33,13 @@ telegraf-kafka-consumer: test: enabled: true replicaCount: 1 + refresh_interval: "60s" topicRegexps: | [ ".*Test" ] atmcs: enabled: true replicaCount: 1 + refresh_interval: "60s" topicRegexps: | [ ".*ATMCS" ] From a3192635413d6d5e0cfc15e568547b7019c003cb Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 8 Jun 2023 16:29:37 -0700 Subject: [PATCH 076/308] don't override chronograf in data-dev --- applications/sasquatch/values-idfdev.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 88d6cafbd8..a4af8d80e9 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -64,8 +64,6 @@ chronograf: ingress: enabled: true hostname: data-dev.lsst.cloud - image: - tag: "1.10.1" env: GENERIC_NAME: "OIDC" GENERIC_AUTH_URL: https://data-dev.lsst.cloud/auth/openid/login From 83e1b6f8a87876764ec9f35c1ec81254fc6a62f2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 23:35:11 +0000 Subject: [PATCH 077/308] Update Helm release strimzi-kafka-operator to v0.35.1 --- applications/strimzi/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index c2a93f8082..24139cc212 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -7,5 +7,5 @@ home: https://strimzi.io appVersion: "0.26.0" dependencies: - name: strimzi-kafka-operator - version: "0.34.0" + version: "0.35.1" repository: https://strimzi.io/charts/ From 9bd0d85fb40b233e33e62a7ec9e9fa70049dd5bd Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 9 Jun 2023 09:10:11 -0700 Subject: [PATCH 078/308] Update Renovate configuration Enable automated config migration. Disable rebasing of non-conflicted PRs; we use the merge queue, which will rebase them anyway, so these extra rebases are wasted effort and testing cycles. Set the time zone to push weekly updates a bit later into the middle of the night on Monday instead of starting to show up Sunday evening. 
--- renovate.json | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/renovate.json b/renovate.json index 76bb9056af..42cf0d169a 100644 --- a/renovate.json +++ b/renovate.json @@ -2,8 +2,10 @@ "extends": [ "config:base" ], - "prConcurrentLimit": 5, + "configMigration": true, + "rebaseWhen": "conflicted", "schedule": [ "before 6am on Monday" - ] + ], + "timezone": "America/Los_Angeles" } From fac2cb75490e4b88d556d30c5513cb3d0bf0e1d3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 11:59:05 +0000 Subject: [PATCH 079/308] Update Helm release argo-workflows to v0.29.2 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index c37c260d7e..f810c5905d 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.29.1 + version: 0.29.2 repository: https://argoproj.github.io/argo-helm From c79b5c055d369251d4e17a7a6f8211d77ffb310f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 11:59:10 +0000 Subject: [PATCH 080/308] Update Helm release argo-cd to v5.36.1 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 13fd0cd6b6..fce3f090f8 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.34.6 + version: 5.36.1 repository: https://argoproj.github.io/argo-helm From 63194a550d8a4f53300833c63ff4854b993610b5 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 9 Jun 2023 12:49:39 -0700 Subject: [PATCH 081/308] TTS: Update cachemachine to Cycle 31. 
--- applications/cachemachine/values-tucson-teststand.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/cachemachine/values-tucson-teststand.yaml b/applications/cachemachine/values-tucson-teststand.yaml index 8fbd910a19..73dde3cb51 100644 --- a/applications/cachemachine/values-tucson-teststand.yaml +++ b/applications/cachemachine/values-tucson-teststand.yaml @@ -8,11 +8,11 @@ autostart: "type": "RubinRepoMan", "registry_url": "ts-dockerhub.lsst.org", "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0030", + "recommended_tag": "recommended_c0031", "num_releases": 1, "num_weeklies": 3, "num_dailies": 2, - "cycle": 30, + "cycle": 31, "alias_tags": [ "latest", "latest_daily", From a3574a95b0790462a2290b1089cb1adbfe26c0c6 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Mon, 12 Jun 2023 12:12:58 -0700 Subject: [PATCH 082/308] add patrick to argocd rbac --- applications/argocd/values-usdfdev.yaml | 1 + applications/argocd/values-usdfprod.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index cc40ed24bb..d458cdb822 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -39,6 +39,7 @@ argo-cd: rbacConfig: policy.csv: | g, ytl@slac.stanford.edu, role:admin + g, ppascual@slac.stanford.edu, role:admin g, pav@slac.stanford.edu, role:admin g, dspeck@slac.stanford.edu, role:admin g, afausti@slac.stanford.edu, role:admin diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 0cf37a9cc8..628ae13d50 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -39,6 +39,7 @@ argo-cd: rbacConfig: policy.csv: | g, ytl@slac.stanford.edu, role:admin + g, ppascual@slac.stanford.edu, role:admin g, pav@slac.stanford.edu, role:admin g, dspeck@slac.stanford.edu, role:admin g, afausti@slac.stanford.edu, role:admin From 000be3b8bfb08088c192d9e7827d54848b5c2873 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 12 Jun 2023 18:03:33 -0700 Subject: [PATCH 083/308] Upgrade Sasquatch Kafka to version 3.4.0 --- applications/sasquatch/charts/strimzi-kafka/README.md | 4 ++-- applications/sasquatch/charts/strimzi-kafka/values.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 3147c450a1..c0c0e2de2d 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -8,7 +8,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. |-----|------|---------|-------------| | cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. | | connect.enabled | bool | `true` | Enable Kafka Connect. | -| connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.34.0-kafka-3.3.1:1.1.0"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | +| connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.35.1-kafka-3.4.0:1.2.0"` | Custom strimzi-kafka image with connector plugins used by sasquatch. | | connect.replicas | int | `3` | Number of Kafka Connect replicas to run. | | kafka.affinity | object | `{}` | Node affinity for Kafka broker pod assignment. 
| | kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. | @@ -30,7 +30,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | -| kafka.version | string | `"3.3.1"` | Version of Kafka to deploy. | +| kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | | mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | | mirrormaker2.source.bootstrapServer | string | `""` | Source (active) cluster to replicate from. | | mirrormaker2.source.topicsPattern | string | `"registry-schemas, lsst.sal.*"` | Topic replication from the source cluster defined as a comma-separated list or regular expression pattern. | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 4b0347bec0..3ce98f9cee 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -5,7 +5,7 @@ cluster: kafka: # -- Version of Kafka to deploy. - version: "3.3.1" + version: "3.4.0" # -- Number of Kafka broker replicas to run. replicas: 3 storage: @@ -99,7 +99,7 @@ connect: # -- Enable Kafka Connect. enabled: true # -- Custom strimzi-kafka image with connector plugins used by sasquatch. - image: ghcr.io/lsst-sqre/strimzi-0.34.0-kafka-3.3.1:1.1.0 + image: ghcr.io/lsst-sqre/strimzi-0.35.1-kafka-3.4.0:1.2.0 # -- Number of Kafka Connect replicas to run. 
replicas: 3 From a4b51a1ffa56beb99325abc5f9f38f44510f6862 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 13 Jun 2023 14:50:28 -0700 Subject: [PATCH 084/308] auxtel-archiver.cp.lsst.org:/ -> nfs-auxtel.cp.lsst.org:/auxtel --- applications/nublado2/values-summit.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/nublado2/values-summit.yaml b/applications/nublado2/values-summit.yaml index 60af40ddc7..3f8f14c394 100644 --- a/applications/nublado2/values-summit.yaml +++ b/applications/nublado2/values-summit.yaml @@ -40,8 +40,8 @@ config: server: nfs1.cp.lsst.org - name: auxtel nfs: - path: /lsstdata - server: auxtel-archiver.cp.lsst.org + path: /auxtel/lsstdata + server: nfs-auxtel.cp.lsst.org readOnly: true - name: comcam nfs: @@ -55,12 +55,12 @@ config: readOnly: true - name: latiss nfs: - path: /repo/LATISS - server: auxtel-archiver.cp.lsst.org + path: /auxtel/repo/LATISS + server: nfs-auxtel.cp.lsst.org - name: base-auxtel nfs: - path: /lsstdata/base/auxtel - server: auxtel-archiver.cp.lsst.org + path: /auxtel/lsstdata/base/auxtel + server: nfs-auxtel.cp.lsst.org readOnly: true - name: lsstcomcam nfs: From ab889233e15e00c947fb6e33c8a0ab0ee41905aa Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 13 Jun 2023 16:08:21 -0700 Subject: [PATCH 085/308] Increase Chronograf resource requests and limits --- applications/sasquatch/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 3b4881dc31..ae8c794dc3 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -222,10 +222,10 @@ chronograf: envFromSecret: "sasquatch" resources: requests: - memory: 1Gi + memory: 4Gi cpu: 1 limits: - memory: 16Gi + memory: 64Gi cpu: 4 kapacitor: From fa80e18f42d676a1708c22a4c535751462120ff5 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 13 Jun 2023 16:23:04 -0700 Subject: [PATCH 086/308] Enable replicator user on src and target clusters - The replicator user is used by MM2 and it needs to be created at both source and target clusters. --- .../sasquatch/charts/strimzi-kafka/templates/users.yaml | 2 +- applications/sasquatch/charts/strimzi-kafka/values.yaml | 4 ++++ applications/sasquatch/values-base.yaml | 5 +++++ applications/sasquatch/values-idfdev.yaml | 3 +++ applications/sasquatch/values-idfint.yaml | 3 +++ applications/sasquatch/values-summit.yaml | 3 +++ applications/sasquatch/values-usdfdev.yaml | 3 +++ applications/sasquatch/values-usdfprod.yaml | 3 +++ 8 files changed, 25 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml index cab285b05a..13ab31bbe1 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml @@ -30,7 +30,7 @@ spec: requestPercentage: 90 controllerMutationRate: 1000 {{- end }} -{{- if .Values.mirrormaker2.enabled -}} +{{- if .Values.users.replicator.enabled -}} --- apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaUser diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 3ce98f9cee..13935f50d0 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -117,6 +117,10 @@ users: # -- Enable user ts-salkafka. 
enabled: true + replicator: + # -- Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) + enabled: false + kafdrop: # -- Enable user Kafdrop (deployed by parent Sasquatch chart). enabled: true diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index b9a70d9fcd..269991b9f5 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -18,6 +18,11 @@ strimzi-kafka: zookeeper: storage: storageClassName: rook-ceph-block + users: + replicator: + enabled: true + + influxdb: persistence: diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index a4af8d80e9..4309cfd9c5 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -13,6 +13,9 @@ strimzi-kafka: host: sasquatch-dev-kafka-1.lsst.cloud - loadBalancerIP: "35.184.86.132" host: sasquatch-dev-kafka-2.lsst.cloud + users: + replicator: + enabled: true influxdb: ingress: diff --git a/applications/sasquatch/values-idfint.yaml b/applications/sasquatch/values-idfint.yaml index cdfe4b4b60..61ce4c54d4 100644 --- a/applications/sasquatch/values-idfint.yaml +++ b/applications/sasquatch/values-idfint.yaml @@ -26,6 +26,9 @@ strimzi-kafka: limits: cpu: 4 memory: 8Gi + users: + replicator: + enabled: true influxdb: ingress: diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index fe62ff3a11..4dbd0f0131 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -18,6 +18,9 @@ strimzi-kafka: zookeeper: storage: storageClassName: rook-ceph-block + users: + replicator: + enabled: true influxdb: persistence: diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index d286704ecc..f2e8ffd3de 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -11,6 +11,9 @@ strimzi-kafka: limits: cpu: 4 memory: 8Gi + users: + replicator: + enabled: true influxdb: ingress: diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 9727a381e1..e4de2181cd 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -11,6 +11,9 @@ strimzi-kafka: limits: cpu: 4 memory: 8Gi + users: + replicator: + enabled: true influxdb: ingress: From 3cd0f6b6634440b537aa38cba2bb6cc75873566f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 13 Jun 2023 16:27:03 -0700 Subject: [PATCH 087/308] Update kafka-connect-manager to version 1.2.0 - Add support to Strimzi 0.35.1 and Kafka 3.4.0 --- applications/sasquatch/charts/kafka-connect-manager/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml b/applications/sasquatch/charts/kafka-connect-manager/values.yaml index 7c329843b6..ff3aad79cb 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml @@ -6,7 +6,7 @@ enabled: true image: repository: ghcr.io/lsst-sqre/kafkaconnect - tag: 1.1.0 + tag: 1.2.0 pullPolicy: IfNotPresent influxdbSink: From 6f3bddf77326333b40e50ca0485bdf179c12cbda Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 13 Jun 2023 16:29:51 -0700 Subject: [PATCH 088/308] Update helm docs --- applications/sasquatch/README.md | 4 ++-- 
applications/sasquatch/charts/kafka-connect-manager/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/README.md | 1 + applications/sasquatch/values-base.yaml | 2 -- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 213456803a..6d6616ce9d 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -16,9 +16,9 @@ Rubin Observatory's telemetry service. | chronograf.ingress | object | disabled | Chronograf ingress configuration. | | chronograf.persistence | object | `{"enabled":true,"size":"100Gi"}` | Chronograf data persistence configuration. | | chronograf.resources.limits.cpu | int | `4` | | -| chronograf.resources.limits.memory | string | `"16Gi"` | | +| chronograf.resources.limits.memory | string | `"64Gi"` | | | chronograf.resources.requests.cpu | int | `1` | | -| chronograf.resources.requests.memory | string | `"1Gi"` | | +| chronograf.resources.requests.memory | string | `"4Gi"` | | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index dd8bbc8909..cc830127f9 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -12,7 +12,7 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | | -| image.tag | string | `"1.1.0"` | | +| image.tag | string | `"1.2.0"` | | | influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. | | influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | | influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index c0c0e2de2d..49dde44f9a 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -39,6 +39,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). | | users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager | | users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing | +| users.replicator.enabled | bool | `false` | Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) | | users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) | | users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. | | zookeeper.affinity | object | `{}` | Node affinity for Zookeeper pod assignment. 
| diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 269991b9f5..3455a565ae 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -22,8 +22,6 @@ strimzi-kafka: replicator: enabled: true - - influxdb: persistence: storageClass: rook-ceph-block From 3824298d9a051dd479521e88af64459e606e3751 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 13 Jun 2023 17:16:59 -0700 Subject: [PATCH 089/308] Upgrade REST proxy to version 7.4.0 --- applications/sasquatch/charts/rest-proxy/README.md | 2 +- applications/sasquatch/charts/rest-proxy/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index a23b106a54..b895087f09 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch. | heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | | image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. | -| image.tag | string | `"7.3.3"` | Kafka REST proxy image tag. | +| image.tag | string | `"7.4.0"` | Kafka REST proxy image tag. | | ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. | | ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | | ingress.hostname | string | `""` | Ingress hostname. | diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml index f3f0b92208..2c3054b9cb 100644 --- a/applications/sasquatch/charts/rest-proxy/values.yaml +++ b/applications/sasquatch/charts/rest-proxy/values.yaml @@ -9,7 +9,7 @@ image: # -- Image pull policy. pullPolicy: IfNotPresent # -- Kafka REST proxy image tag. 
- tag: 7.3.3 + tag: 7.4.0 service: # -- Kafka REST proxy service port From 117c32deeb863bc792e8a5b452765f794f1d71f2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 14 Jun 2023 09:35:14 -0700 Subject: [PATCH 090/308] Configure REST proxy to use lsst.Test topic prefix --- applications/sasquatch/values-idfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 4309cfd9c5..12c90c51af 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -62,6 +62,7 @@ rest-proxy: topicPrefixes: - test - lsst.dm + - lsst.Test chronograf: ingress: From 4de0ddcd2a4276b2c172d229a9b48d3feff05165 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 14 Jun 2023 11:29:59 -0700 Subject: [PATCH 091/308] Enable ts-salkafka user --- .../sasquatch/charts/strimzi-kafka/templates/users.yaml | 2 +- applications/sasquatch/charts/strimzi-kafka/values.yaml | 8 ++++---- applications/sasquatch/values-summit.yaml | 2 ++ 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml index 13ab31bbe1..f3bcddfa85 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml @@ -30,7 +30,7 @@ spec: requestPercentage: 90 controllerMutationRate: 1000 {{- end }} -{{- if .Values.users.replicator.enabled -}} +{{- if .Values.users.replicator.enabled }} --- apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaUser diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 13935f50d0..55a031c55f 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -113,14 +113,14 @@ superusers: - kafka-admin users: - tsSalKafka: - # -- Enable user ts-salkafka. - enabled: true - replicator: # -- Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) enabled: false + tsSalKafka: + # -- Enable user ts-salkafka. + enabled: true + kafdrop: # -- Enable user Kafdrop (deployed by parent Sasquatch chart). 
enabled: true diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 4dbd0f0131..db8717ae3e 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -19,6 +19,8 @@ strimzi-kafka: storage: storageClassName: rook-ceph-block users: + tsSalKafka: + enabled: true replicator: enabled: true From 353398787a3bc79f1f5851424c0828bea85b5c2a Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 15 Jun 2023 09:26:28 -0700 Subject: [PATCH 092/308] Add omullan to usdf argocd rbac --- applications/argocd/values-usdfdev.yaml | 1 + applications/argocd/values-usdfprod.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index d458cdb822..342c3294f9 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -51,6 +51,7 @@ argo-cd: g, jsick@slac.stanford.edu, role:admin g, reinking@slac.stanford.edu, role:admin g, smart@slac.stanford.edu, role:admin + g, omullan@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 628ae13d50..0fd2ed162c 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -50,6 +50,7 @@ argo-cd: g, athor@slac.stanford.edu, role:admin g, reinking@slac.stanford.edu, role:admin g, smart@slac.stanford.edu, role:admin + g, omullan@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From 1f6bdd6f6ea5830c004515561650f6ca336a935f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Thu, 15 Jun 2023 10:11:25 -0700 Subject: [PATCH 093/308] Enable topic creation/deletion in Kafdrop --- applications/sasquatch/values-idfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 12c90c51af..c60e9f96ad 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -47,6 +47,7 @@ telegraf-kafka-consumer: [ ".*ATMCS" ] kafdrop: + cmdArgs: "--message.format=AVRO --topic.deleteEnabled=true --topic.createEnabled=true" ingress: enabled: true hostname: data-dev.lsst.cloud From 5f8d88e89f2a089a3f3f7c7c1a349f921ae09ff2 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Wed, 14 Jun 2023 12:40:29 -0700 Subject: [PATCH 094/308] Add git index corruption troubleshooting for argocd. --- docs/applications/argo-cd/index.rst | 1 + docs/applications/argo-cd/troubleshoot.rst | 47 ++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 docs/applications/argo-cd/troubleshoot.rst diff --git a/docs/applications/argo-cd/index.rst b/docs/applications/argo-cd/index.rst index 92ed9114c9..ea807af5a1 100644 --- a/docs/applications/argo-cd/index.rst +++ b/docs/applications/argo-cd/index.rst @@ -20,4 +20,5 @@ Guides bootstrap authentication upgrade + troubleshoot values diff --git a/docs/applications/argo-cd/troubleshoot.rst b/docs/applications/argo-cd/troubleshoot.rst new file mode 100644 index 0000000000..5bd127966d --- /dev/null +++ b/docs/applications/argo-cd/troubleshoot.rst @@ -0,0 +1,47 @@ +.. px-app-troubleshooting:: argocd + +###################### +Troubleshooting argocd +###################### + +.. 
_argocd-fix-corrupt-git-index: + +Fixing a corrupt git index +========================== + +If an ArgoCD app or apps are giving the following error in the UI + +.. code-block:: shell + + rpc error: code = Internal desc = Failed to fetch default: `git fetch origin --tags --force --prune` failed exit status 128: fatal: .git/index: index file smaller than expected + +The git index for the cloned repository that controls the apps is corrupted and needs to be removed. +Do this by the following: + +#. Find the argocd-repo-server pod and grep the logs. + + .. code-block:: shell + + pod=$(kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-repo-server | grep argocd | awk '{print $1}') + kubectl logs -n argocd $pod | grep -B1 "index file smaller than expected" | grep -B1 "execID" + +#. Find the "dir" entry in the line above the one has the phase noted in the previous step. Example output show below. + + .. code-block:: shell + + {"dir":"/tmp/_argocd-repo/35fe76f8-488a-4871-baaa-5f81d81331b1","execID":"a98af","level":"info","msg":"git fetch origin --tags --force --prune","time":"2023-06-13T18:48:12Z"} + {"execID":"a98af","level":"error","msg":"`git fetch origin --tags --force --prune` failed exit status 128: fatal: .git/index: index file smaller than expected","time":"2023-06-13T18:48:12Z"} + +#. Exec into the repo server pod + + .. code-block:: shell + + kubectl exec -it -n argocd $pod -- /bin/bash + +#. Using the directory found from the logs, execute: + + .. code-block:: shell + + rm /path/from/log/.git/index + +The system will refresh itself automatically, so all the needs to be done further is wait and see if the error clears. \ No newline at end of file From 0676a52d1b59f98da357c0bed78ffe34b6c91d80 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Thu, 15 Jun 2023 10:06:42 -0700 Subject: [PATCH 095/308] Address reviewer comments. --- docs/applications/argo-cd/troubleshoot.rst | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/applications/argo-cd/troubleshoot.rst b/docs/applications/argo-cd/troubleshoot.rst index 5bd127966d..e64d1a98db 100644 --- a/docs/applications/argo-cd/troubleshoot.rst +++ b/docs/applications/argo-cd/troubleshoot.rst @@ -9,30 +9,29 @@ Troubleshooting argocd Fixing a corrupt git index ========================== -If an ArgoCD app or apps are giving the following error in the UI +The Git index for the cloned repository that controls the Argo CD apps is corrupted if an Argo CD app shows the following error: .. code-block:: shell rpc error: code = Internal desc = Failed to fetch default: `git fetch origin --tags --force --prune` failed exit status 128: fatal: .git/index: index file smaller than expected -The git index for the cloned repository that controls the apps is corrupted and needs to be removed. -Do this by the following: +The Git repository needs to be removed and re-created by following these steps: -#. Find the argocd-repo-server pod and grep the logs. +#. Find the ``argocd-repo-server`` pod and grep the logs: .. code-block:: shell pod=$(kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-repo-server | grep argocd | awk '{print $1}') kubectl logs -n argocd $pod | grep -B1 "index file smaller than expected" | grep -B1 "execID" -#. Find the "dir" entry in the line above the one has the phase noted in the previous step. Example output show below. +#. In that grep, find the ``dir`` field. For example: .. 
code-block:: shell {"dir":"/tmp/_argocd-repo/35fe76f8-488a-4871-baaa-5f81d81331b1","execID":"a98af","level":"info","msg":"git fetch origin --tags --force --prune","time":"2023-06-13T18:48:12Z"} {"execID":"a98af","level":"error","msg":"`git fetch origin --tags --force --prune` failed exit status 128: fatal: .git/index: index file smaller than expected","time":"2023-06-13T18:48:12Z"} -#. Exec into the repo server pod +#. Exec into the repo server pod: .. code-block:: shell From 22eeb7914358197dbc631768b9f0c457c94d22d0 Mon Sep 17 00:00:00 2001 From: roby Date: Thu, 15 Jun 2023 13:54:42 -0600 Subject: [PATCH 096/308] for portal 2023.1.5 --- applications/portal/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index 360dad5699..eefa25cc21 100644 --- a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -5,7 +5,7 @@ description: Rubin Science Platform Portal Aspect sources: - https://github.com/lsst/suit - https://github.com/Caltech-IPAC/firefly -appVersion: "suit-2023.1.4" +appVersion: "suit-2023.1.5" dependencies: - name: redis From ca9426e3a9026abde38a83bcda04dd0d527e2c95 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 16 Jun 2023 13:36:47 -0700 Subject: [PATCH 097/308] Summit: Update cachemachine to cycle 31. --- applications/cachemachine/values-summit.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/cachemachine/values-summit.yaml b/applications/cachemachine/values-summit.yaml index 92f3503a56..e40d0ee13b 100644 --- a/applications/cachemachine/values-summit.yaml +++ b/applications/cachemachine/values-summit.yaml @@ -8,11 +8,11 @@ autostart: "type": "RubinRepoMan", "registry_url": "ts-dockerhub.lsst.org", "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0030", + "recommended_tag": "recommended_c0031", "num_releases": 0, "num_weeklies": 3, "num_dailies": 2, - "cycle": 30, + "cycle": 31, "alias_tags": [ "latest", "latest_daily", From 5bc3565e827f6006016fce11fd6f5bc88ae786f4 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 16 Jun 2023 14:42:08 -0700 Subject: [PATCH 098/308] Update InfluxDB to version 2.7.1 --- applications/sasquatch/values.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index ae8c794dc3..dbae28fc9f 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -137,6 +137,8 @@ influxdb-staging: influxdb2: enabled: false + image: + tag: 2.7.1-alpine adminUser: # -- Admin default organization. 
organization: "default" From c43572a5d935a7c68036e45f1404b4d3ef49ae98 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 16 Jun 2023 15:49:22 -0700 Subject: [PATCH 099/308] Do not drop large messages --- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index ba0a71dba5..ef8c172552 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -40,7 +40,6 @@ data: ] consumer_group = "telegraf-kafka-consumer-{{ $key }}" data_format = "avro" - max_message_len = 1000000 sasl_mechanism = "SCRAM-SHA-512" sasl_password = "$TELEGRAF_PASSWORD" sasl_username = "telegraf" From 6fc6549d45df0490783c7e8cb7aa71e3acdb8e18 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 16 Jun 2023 15:50:16 -0700 Subject: [PATCH 100/308] Increase max_processing_time This fixes the `'consumer/broker/1 abandoned subscription to lsst.sal.ATPneumatics.m1AirPressure/0 because consuming was taking too long" errors --- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index ef8c172552..094bb1d83d 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -40,6 +40,7 @@ data: ] consumer_group = "telegraf-kafka-consumer-{{ $key }}" data_format = "avro" + max_processing_time = "5s" sasl_mechanism = "SCRAM-SHA-512" sasl_password = "$TELEGRAF_PASSWORD" sasl_username = "telegraf" From 324a494e003f219fa648e54bbcb47918c745da67 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 16 Jun 2023 16:08:41 -0700 Subject: [PATCH 101/308] Set Initial offset position to oldest Start consuming the oldest offsets in Kafka --- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 094bb1d83d..b713817408 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -46,7 +46,7 @@ data: sasl_username = "telegraf" topic_refresh_interval = {{ default "60s" $value.topicRefreshInterval | quote }} topic_regexps = {{ $value.topicRegexps }} - offset = "newest" + offset = "oldest" consumer_fetch_default = "20MB" [[inputs.internal]] From 574b5383d24e79ddeb910c2587e60be262d7406c Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 16 Jun 2023 16:33:04 -0700 Subject: [PATCH 102/308] Fine tune M1M3 data collection and flush intervals Try to get rid of the "did not complete within its flush interval" message. 
--- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 2 +- applications/sasquatch/values-tucson-teststand.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index b713817408..094bb1d83d 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -46,7 +46,7 @@ data: sasl_username = "telegraf" topic_refresh_interval = {{ default "60s" $value.topicRefreshInterval | quote }} topic_regexps = {{ $value.topicRegexps }} - offset = "oldest" + offset = "newest" consumer_fetch_default = "20MB" [[inputs.internal]] diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 56126adfa6..7a061c2d32 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -64,8 +64,8 @@ telegraf-kafka-consumer: [ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ] m1m3: enabled: true - flush_interval: "0.1s" - interval: "0.1s" + flush_interval: "0.5s" + interval: "0.5s" topicRegexps: | [ ".*MTM1M3" ] m2: From a12c4b5adc8e7ea938187369353cd70a99b2f2ae Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 17 Jun 2023 06:55:43 -0700 Subject: [PATCH 103/308] Update helm docs --- applications/sasquatch/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 6d6616ce9d..3a1bf3335d 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -58,6 +58,7 @@ Rubin Observatory's telemetry service. 
| influxdb2.env[2].value | string | `"true"` | | | influxdb2.env[3].name | string | `"INFLUXD_LOG_LEVEL"` | | | influxdb2.env[3].value | string | `"debug"` | | +| influxdb2.image.tag | string | `"2.7.1-alpine"` | | | influxdb2.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/api/v2/$2"` | | | influxdb2.ingress.className | string | `"nginx"` | | | influxdb2.ingress.enabled | bool | `false` | InfluxDB2 ingress configuration | From 41fb60fc92c42a2aeed9b2f054998dd49f663790 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 17 Jun 2023 13:00:24 -0700 Subject: [PATCH 104/308] Make metric_batch_size configurable --- .../charts/telegraf-kafka-consumer/templates/configmap.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml index 094bb1d83d..55bfef6591 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml @@ -16,7 +16,7 @@ data: flush_jitter = "0s" interval = {{ default "1s" $value.interval | quote }} logfile = "" - metric_batch_size = 1000 + metric_batch_size = {{ default 1000 $value.metric_batch_size }} metric_buffer_limit = 10000 omit_hostname = true precision = "" From bbcbcf048161aa860fca010843bc52130c638f25 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 17 Jun 2023 13:01:08 -0700 Subject: [PATCH 105/308] Increase metric_batch_size for M1M3 Telegraf will trigger a write when either metric_batch_size new metrics are collected or after flush_interval, whichever comes first. --- applications/sasquatch/values-tucson-teststand.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 7a061c2d32..c6971d73b9 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -64,8 +64,9 @@ telegraf-kafka-consumer: [ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ] m1m3: enabled: true - flush_interval: "0.5s" - interval: "0.5s" + flush_interval: "0.1s" + metric_batch_size: 5000 + interval: "1s" topicRegexps: | [ ".*MTM1M3" ] m2: From e8e133b24cf3a792bd050c156a8a9a7a899c0aff Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 17 Jun 2023 13:12:18 -0700 Subject: [PATCH 106/308] Not clear if changing interval makes any difference --- applications/sasquatch/values-tucson-teststand.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index c6971d73b9..3ae497685d 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -66,7 +66,7 @@ telegraf-kafka-consumer: enabled: true flush_interval: "0.1s" metric_batch_size: 5000 - interval: "1s" + interval: "0.1s" topicRegexps: | [ ".*MTM1M3" ] m2: From 1b6f86696e684c917aab5f99e073219a959e4e71 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 17 Jun 2023 14:51:03 -0700 Subject: [PATCH 107/308] Increase flush_interval back to 1s --- applications/sasquatch/values-tucson-teststand.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-tucson-teststand.yaml 
b/applications/sasquatch/values-tucson-teststand.yaml index 3ae497685d..e3f34eb553 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -64,7 +64,7 @@ telegraf-kafka-consumer: [ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ] m1m3: enabled: true - flush_interval: "0.1s" + flush_interval: "1s" metric_batch_size: 5000 interval: "0.1s" topicRegexps: | From 0c33ea4daf5ee6ae45c99b5c5c006970af570dd1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 08:17:12 +0000 Subject: [PATCH 108/308] Update Helm release cert-manager to v1.12.2 --- applications/cert-manager/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml index d0bad7bd8a..115fb6d946 100644 --- a/applications/cert-manager/Chart.yaml +++ b/applications/cert-manager/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/cert-manager/cert-manager dependencies: - name: cert-manager - version: v1.12.1 + version: v1.12.2 repository: https://charts.jetstack.io From 468c89a83999146bc1863b1761e1b7e450b70d4a Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 20 Jun 2023 11:38:19 -0700 Subject: [PATCH 109/308] Add lsst.lf namespace - Configure the REST Proxy to expose topics with the lsst.lf prefix - An an InfluxDB connector to write these topics to the lsst.lf database --- applications/sasquatch/values-usdfdev.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index f2e8ffd3de..909359c677 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -117,6 +117,11 @@ kafka-connect-manager: connectInfluxDb: "lsst.verify" topicsRegex: "lsst.verify.*" tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run + lsstlf: + enabled: true + timestamp: "timestamp" + connectInfluxDb: "lsst.lf" + topicsRegex: "lsst.lf.*" kafdrop: ingress: @@ -139,6 +144,7 @@ rest-proxy: - lsst.rubintv - lsst.camera - lsst.verify + - lsst.lf chronograf: ingress: From 39d0944a4a7238e875a2947475e1957f8336ee28 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 20 Jun 2023 16:03:14 -0400 Subject: [PATCH 110/308] Update bot names for usdf-dev bot-noteburst 01 to 20 were created at usdfdev; for now only configure the first two to run on dev. --- applications/noteburst/values-usdfdev.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/applications/noteburst/values-usdfdev.yaml b/applications/noteburst/values-usdfdev.yaml index 0c562f02fc..dae96ed465 100644 --- a/applications/noteburst/values-usdfdev.yaml +++ b/applications/noteburst/values-usdfdev.yaml @@ -6,12 +6,8 @@ config: worker: workerCount: 1 identities: - - username: "bot-noteburst90000" - - username: "bot-noteburst90001" - - username: "bot-noteburst90002" - - username: "bot-noteburst90003" - - username: "bot-noteburst90004" - - username: "bot-noteburst90005" + - username: "bot-noteburst-01" + - username: "bot-noteburst-02" # Use SSD for Redis storage. redis: From 7f7eb1bdc961fb98896d773b3efebea384307c49 Mon Sep 17 00:00:00 2001 From: Drew Oldag Date: Tue, 20 Jun 2023 16:33:59 -0700 Subject: [PATCH 111/308] Adding tags for lsst.lf influxDB. 
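The one-line change below only adds InfluxDB tag keys; combined with the lsst.lf connector introduced two commits earlier, the resulting lsstlf entry in values-usdfdev.yaml reads roughly as follows (a consolidation of the two hunks, shown with relative indentation):

    lsstlf:
      enabled: true
      timestamp: "timestamp"
      connectInfluxDb: "lsst.lf"
      topicsRegex: "lsst.lf.*"
      tags: benchmark_env,module,benchmark_type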
--- applications/sasquatch/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml index 909359c677..fcd3053fbf 100644 --- a/applications/sasquatch/values-usdfdev.yaml +++ b/applications/sasquatch/values-usdfdev.yaml @@ -122,6 +122,7 @@ kafka-connect-manager: timestamp: "timestamp" connectInfluxDb: "lsst.lf" topicsRegex: "lsst.lf.*" + tags: benchmark_env,module,benchmark_type kafdrop: ingress: From b8506abfe99f14c7de681119c43e52b57388f280 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Tue, 20 Jun 2023 14:16:35 -0700 Subject: [PATCH 112/308] BTS: Update cachemachine to Cycle 31. --- applications/cachemachine/values-base.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/cachemachine/values-base.yaml b/applications/cachemachine/values-base.yaml index a4e00aff39..738f0af25c 100644 --- a/applications/cachemachine/values-base.yaml +++ b/applications/cachemachine/values-base.yaml @@ -10,11 +10,11 @@ autostart: "type": "RubinRepoMan", "registry_url": "ts-dockerhub.lsst.org", "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0030", + "recommended_tag": "recommended_c0031", "num_releases": 0, "num_weeklies": 3, "num_dailies": 2, - "cycle": 30, + "cycle": 31, "alias_tags": [ "latest", "latest_daily", From 59912e11597b72354e4f48b3e71719c10e15edb9 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 21 Jun 2023 11:45:40 -0700 Subject: [PATCH 113/308] Update Auxtel mount points for T&S sites --- applications/exposurelog/values-base.yaml | 4 ++-- applications/exposurelog/values-summit.yaml | 4 ++-- applications/exposurelog/values-tucson-teststand.yaml | 4 ++-- applications/nublado2/values-base.yaml | 8 ++++---- applications/nublado2/values-tucson-teststand.yaml | 8 ++++---- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/applications/exposurelog/values-base.yaml b/applications/exposurelog/values-base.yaml index 1c7834eacd..c3ff786c6e 100644 --- a/applications/exposurelog/values-base.yaml +++ b/applications/exposurelog/values-base.yaml @@ -1,7 +1,7 @@ config: site_id: base - nfs_path_1: /repo/LATISS # Mounted as /volume_1 - nfs_server_1: auxtel-archiver.ls.lsst.org + nfs_path_1: /auxtel/repo/LATISS # Mounted as /volume_1 + nfs_server_1: nfs-auxtel.ls.lsst.org butler_uri_1: /volume_1 db: diff --git a/applications/exposurelog/values-summit.yaml b/applications/exposurelog/values-summit.yaml index 991b8e96a1..636150ebec 100644 --- a/applications/exposurelog/values-summit.yaml +++ b/applications/exposurelog/values-summit.yaml @@ -4,8 +4,8 @@ config: nfs_server_1: comcam-archiver.cp.lsst.org butler_uri_1: /volume_1 - nfs_path_2: /repo/LATISS # Mounted as /volume_2 - nfs_server_2: auxtel-archiver.cp.lsst.org + nfs_path_2: /auxtel/repo/LATISS # Mounted as /volume_2 + nfs_server_2: nfs-auxtel.cp.lsst.org butler_uri_2: /volume_2 db: host: postgresdb01.cp.lsst.org diff --git a/applications/exposurelog/values-tucson-teststand.yaml b/applications/exposurelog/values-tucson-teststand.yaml index c634947b61..94a3159b2f 100644 --- a/applications/exposurelog/values-tucson-teststand.yaml +++ b/applications/exposurelog/values-tucson-teststand.yaml @@ -4,8 +4,8 @@ config: nfs_server_1: comcam-archiver.tu.lsst.org butler_uri_1: /volume_1 - nfs_path_2: /repo/LATISS # Mounted as /volume_2 - nfs_server_2: auxtel-archiver.tu.lsst.org + nfs_path_2: /auxtel/repo/LATISS # Mounted as /volume_2 + nfs_server_2: nfs-auxtel.tu.lsst.org butler_uri_2: /volume_2 db: host: 
postgresdb01.tu.lsst.org diff --git a/applications/nublado2/values-base.yaml b/applications/nublado2/values-base.yaml index 57b7e941ca..6d677fe081 100644 --- a/applications/nublado2/values-base.yaml +++ b/applications/nublado2/values-base.yaml @@ -51,12 +51,12 @@ config: server: nfs-lsstdata.ls.lsst.org - name: auxtel-butler nfs: - path: /repo/LATISS - server: auxtel-archiver.ls.lsst.org + path: /auxtel/repo/LATISS + server: nfs-auxtel.ls.lsst.org - name: auxtel-oods nfs: - path: /lsstdata/BTS/auxtel - server: auxtel-archiver.ls.lsst.org + path: /auxtel/lsstdata/BTS/auxtel + server: nfs-auxtel.ls.lsst.org readOnly: true - name: obs-env nfs: diff --git a/applications/nublado2/values-tucson-teststand.yaml b/applications/nublado2/values-tucson-teststand.yaml index 99898d00b2..4739d07de4 100644 --- a/applications/nublado2/values-tucson-teststand.yaml +++ b/applications/nublado2/values-tucson-teststand.yaml @@ -51,12 +51,12 @@ config: server: nfs-lsstdata.tu.lsst.org - name: auxtel-butler nfs: - path: /repo/LATISS - server: auxtel-archiver.tu.lsst.org + path: /auxtel/repo/LATISS + server: nfs-auxtel.tu.lsst.org - name: auxtel-oods nfs: - path: /lsstdata/TTS/auxtel - server: auxtel-archiver.tu.lsst.org + path: /auxtel/lsstdata/TTS/auxtel + server: nfs-auxtel.tu.lsst.org readOnly: true - name: comcam-butler nfs: From 3154b6f19f4c8ed850891afd50e6ef78bcdd1098 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 21 Jun 2023 17:25:02 -0700 Subject: [PATCH 114/308] [DM-39754] Enable livetap for usdfprod --- applications/livetap/README.md | 2 +- applications/livetap/values-usdfprod.yaml | 34 +++++++++++++++++++++++ applications/livetap/values.yaml | 2 +- environments/values-usdfprod.yaml | 2 ++ 4 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 applications/livetap/values-usdfprod.yaml diff --git a/applications/livetap/README.md b/applications/livetap/README.md index 42fa922a58..992ef3bf97 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -46,7 +46,7 @@ IVOA TAP service | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"2.0.1"` | | +| tap_schema.image.tag | string | `"2.0.2"` | | | tap_schema.resources | object | `{}` | | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | diff --git a/applications/livetap/values-usdfprod.yaml b/applications/livetap/values-usdfprod.yaml new file mode 100644 index 0000000000..52a661a7d3 --- /dev/null +++ b/applications/livetap/values-usdfprod.yaml @@ -0,0 +1,34 @@ +tap_schema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-livetap" + +resources: + requests: + cpu: 2.0 + memory: "2G" + limits: + cpu: 8.0 + memory: "32G" + +replicaCount: 2 + +config: + gcsBucket: "async-results.lsst.codes" + gcsBucketUrl: "http://async-results.lsst.codes" + jvmMaxHeapSize: "31G" + +pg: + mock: + enabled: false + database: "lsstdb1" + host: "usdf-butler.slac.stanford.edu:5432" + username: "rubin" + +uws: + resources: + requests: + cpu: 0.25 + memory: "1G" + limits: + cpu: 2.0 + memory: "4G" diff --git a/applications/livetap/values.yaml b/applications/livetap/values.yaml index d54066f272..14f1cfef49 100644 --- a/applications/livetap/values.yaml +++ b/applications/livetap/values.yaml @@ -123,7 +123,7 @@ pg: tap_schema: image: repository: {} - tag: 
"2.0.1" + tag: "2.0.2" resources: {} uws: diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index 5191e65f66..1cfcabdefa 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -24,6 +24,8 @@ ingress-nginx: enabled: false kubernetes-replicator: enabled: false +livetap: + enabled: true mobu: enabled: true moneypenny: From 0bde64b83adfbb71515454a46f063565e5edc4ee Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Wed, 21 Jun 2023 18:05:44 -0700 Subject: [PATCH 115/308] [DM-39754] Fix vault secrets path --- applications/livetap/templates/vault-secrets.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/livetap/templates/vault-secrets.yaml b/applications/livetap/templates/vault-secrets.yaml index ab07ba33d2..66a0f5d6e1 100644 --- a/applications/livetap/templates/vault-secrets.yaml +++ b/applications/livetap/templates/vault-secrets.yaml @@ -6,7 +6,7 @@ metadata: app: {{ template "cadc-tap.fullname" . }} {{ include "cadc-tap.labels" . | indent 4 }} spec: - path: "{{ .Values.global.vaultSecretsPath }}/obstap" + path: "{{ .Values.global.vaultSecretsPath }}/livetap" type: Opaque --- apiVersion: ricoberger.de/v1alpha1 From 9caa040a8494baa0b8c5bb97138110d65580d5a2 Mon Sep 17 00:00:00 2001 From: Fritz Mueller Date: Thu, 22 Jun 2023 14:30:32 -0700 Subject: [PATCH 116/308] Redirect IDF ssotap services to interim cloud Postgres for USDF outage --- applications/ssotap/values-idfdev.yaml | 4 +++- applications/ssotap/values-idfint.yaml | 4 +++- applications/ssotap/values-idfprod.yaml | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/applications/ssotap/values-idfdev.yaml b/applications/ssotap/values-idfdev.yaml index 68c1963f8d..9f20438b3b 100644 --- a/applications/ssotap/values-idfdev.yaml +++ b/applications/ssotap/values-idfdev.yaml @@ -19,5 +19,7 @@ pg: mock: enabled: false database: "dp03_catalogs" - host: "usdf-pg-catalogs.slac.stanford.edu:5432" + # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23 + # host: "usdf-pg-catalogs.slac.stanford.edu:5432" + host: 104.197.78.194:5432 username: "dp03" diff --git a/applications/ssotap/values-idfint.yaml b/applications/ssotap/values-idfint.yaml index e4aad19e89..bf6f0b52d9 100644 --- a/applications/ssotap/values-idfint.yaml +++ b/applications/ssotap/values-idfint.yaml @@ -21,5 +21,7 @@ pg: mock: enabled: false database: "dp03_catalogs" - host: "usdf-pg-catalogs.slac.stanford.edu:5432" + # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23 + # host: "usdf-pg-catalogs.slac.stanford.edu:5432" + host: 104.197.78.194:5432 username: "dp03" diff --git a/applications/ssotap/values-idfprod.yaml b/applications/ssotap/values-idfprod.yaml index 639e96c320..643b677f44 100644 --- a/applications/ssotap/values-idfprod.yaml +++ b/applications/ssotap/values-idfprod.yaml @@ -21,7 +21,9 @@ pg: mock: enabled: false database: "dp03_catalogs" - host: "usdf-pg-catalogs.slac.stanford.edu:5432" + # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23 + # host: "usdf-pg-catalogs.slac.stanford.edu:5432" + host: 104.197.78.194:5432 username: "dp03" uws: From e5fb657ebaba5cb929ecc6a240b0d1b52d3814fa Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 20 Jun 2023 16:27:31 -0400 Subject: [PATCH 117/308] Use nublado at /n3 on usdf-dev --- applications/noteburst/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/noteburst/values-usdfdev.yaml b/applications/noteburst/values-usdfdev.yaml 
index dae96ed465..963bab1e0c 100644 --- a/applications/noteburst/values-usdfdev.yaml +++ b/applications/noteburst/values-usdfdev.yaml @@ -3,6 +3,7 @@ image: config: logLevel: "DEBUG" + hubPathPrefix: "/n3" worker: workerCount: 1 identities: From 1034302fdd6cb18f327f3b596c15276b7efb9689 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 20 Jun 2023 16:45:21 -0400 Subject: [PATCH 118/308] Deploy noteburst 0.7.1 --- applications/noteburst/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index c25e51b424..6e6c626fc4 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: noteburst version: 1.0.0 -appVersion: "0.7.0" +appVersion: "0.7.1" description: Noteburst is a notebook execution service for the Rubin Science Platform. type: application home: https://noteburst.lsst.io/ From c13fa035c51c817682a6a951b249061333a4a105 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 22 Jun 2023 17:45:06 -0400 Subject: [PATCH 119/308] Fix names of bot users on usdf The usdf usernames for noteburst don't use a dash before the bot number. --- applications/noteburst/values-usdfdev.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/noteburst/values-usdfdev.yaml b/applications/noteburst/values-usdfdev.yaml index 963bab1e0c..4ab6827768 100644 --- a/applications/noteburst/values-usdfdev.yaml +++ b/applications/noteburst/values-usdfdev.yaml @@ -7,8 +7,8 @@ config: worker: workerCount: 1 identities: - - username: "bot-noteburst-01" - - username: "bot-noteburst-02" + - username: "bot-noteburst01" + - username: "bot-noteburst02" # Use SSD for Redis storage. redis: From bfcac0b2affe8ebbd7ad3c511cb68a8ac59eae14 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 22 Jun 2023 19:09:45 -0400 Subject: [PATCH 120/308] Enable Times Square in usdf-dev Squareone --- applications/squareone/values-usdfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/squareone/values-usdfdev.yaml b/applications/squareone/values-usdfdev.yaml index b9ae4d7f26..c0d3946711 100644 --- a/applications/squareone/values-usdfdev.yaml +++ b/applications/squareone/values-usdfdev.yaml @@ -2,3 +2,4 @@ replicaCount: 3 config: siteName: "Rubin Science Platform" semaphoreUrl: "https://usdf-rsp-dev.slac.stanford.edu/semaphore" + timesSquareUrl: "https://usdf-rsp-dev.slac.stanford.edu/times-square/api" From adff6ffc7b6f1173ce948a2155785ae5f6f90708 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 23 Jun 2023 13:35:55 -0700 Subject: [PATCH 121/308] Enable mirrormaker2 at base - Configure mirrormaker2 at base (target) to replicate topics from summit (source) using the default replication policy to rename topics at the target cluster. This avoids topic name collision with topics in the target cluster. - Set up another schema registry configured to hold schemas from the source cluster. This avoids schema id collision with schemas in the target cluster. 
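To make the renaming concrete: with DefaultReplicationPolicy and a "." separator, MirrorMaker 2 publishes each replicated topic under the source cluster alias as a prefix, which is why the replicated schema topic is configured as source.registry-schemas. A small sketch of the effect at base, assuming the source alias is "source" (as that schemaTopic value suggests) and using test.example as a made-up topic matching the test.* pattern:

  mirrormaker2:
    enabled: true
    source:
      topicsPattern: "test.*, registry-schemas"
    replication:
      policy:
        separator: "."
        class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy"
    sourceRegistry:
      enabled: true
      # replicated topics arrive as <alias><separator><topic>, e.g.
      #   registry-schemas -> source.registry-schemas
      #   test.example     -> source.test.example
      schemaTopic: source.registry-schemas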
--- .../sasquatch/charts/strimzi-kafka/README.md | 4 ++ .../strimzi-kafka/templates/mirrormaker2.yaml | 12 ++-- .../templates/source-schema-registry.yaml | 60 +++++++++++++++++++ .../charts/strimzi-kafka/values.yaml | 13 ++++ applications/sasquatch/values-base.yaml | 19 ++++++ 5 files changed, 101 insertions(+), 7 deletions(-) create mode 100644 applications/sasquatch/charts/strimzi-kafka/templates/source-schema-registry.yaml diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 49dde44f9a..52d8752bcf 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -32,8 +32,12 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | | kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | | mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | +| mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. | +| mirrormaker2.replication.policy.separator | string | "" | Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. | | mirrormaker2.source.bootstrapServer | string | `""` | Source (active) cluster to replicate from. | | mirrormaker2.source.topicsPattern | string | `"registry-schemas, lsst.sal.*"` | Topic replication from the source cluster defined as a comma-separated list or regular expression pattern. | +| mirrormaker2.sourceRegistry.enabled | bool | `false` | Whether to deploy another Schema Registry for the schemas replicated from the source cluster. | +| mirrormaker2.sourceRegistry.schemaTopic | string | `"source.registry-schemas"` | Name of the topic Schema Registry topic replicated from the source cluster | | registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | | users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). | diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml index ea6d63ffd5..39ae427957 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml @@ -55,9 +55,10 @@ spec: # The frequency to check for new topics. refresh.topics.interval.seconds: 60 # Policy to define the remote topic naming convention. - # This setting will preserve topic names in the target cluster. - replication.policy.separator: "" - replication.policy.class: "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" + # The default is to preserve topic names in the target cluster. + # To add the source cluster alias as a prefix to the topic name, use replication.policy.separator="." 
and replication.policy.class="org.apache.kafka.connect.mirror.DefaultReplicationPolicy" + replication.policy.separator: {{ default "" .Values.mirrormaker2.replication.policy.separator }} + replication.policy.class: {{ default "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" .Values.mirrormaker2.replication.policy.class }} # Handling high volumes of messages # By increasing the batch size, produce requests are delayed and more messages are # added to the batch and sent to brokers at the same time. @@ -76,7 +77,6 @@ spec: # Increase request timeout producer.request.timeout.ms: 120000 consumer.request.timeout.ms: 120000 - heartbeatConnector: config: heartbeats.topic.replication.factor: 3 @@ -91,9 +91,7 @@ spec: sync.group.offsets.interval.seconds: 60 # The frequency of checks for offset tracking. emit.checkpoints.interval.seconds: 60 - # Policy to define the remote topic naming convention. - # This setting will preserve topic names in the target cluster. - replication.policy.class: "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" + replication.policy.class: {{ default "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" .Values.mirrormaker2.replication.policy.class }} # Topic replication from the source cluster defined as a comma-separated list # or regular expression pattern. topicsPattern: {{ .Values.mirrormaker2.source.topicsPattern }} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/source-schema-registry.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/source-schema-registry.yaml new file mode 100644 index 0000000000..5917abe19b --- /dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/source-schema-registry.yaml @@ -0,0 +1,60 @@ +{{- if .Values.mirrormaker2.sourceRegistry.enabled }} +--- +apiVersion: roundtable.lsst.codes/v1beta1 +kind: StrimziSchemaRegistry +metadata: + name: {{ .Values.cluster.name }}-source-schema-registry +spec: + listener: tls + compatibilityLevel: none +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: {{ .Values.cluster.name }}-source-schema-registry + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + authentication: + type: tls + authorization: + # Official docs on authorizations required for the Schema Registry: + # https://docs.confluent.io/current/schema-registry/security/index.html#authorizing-access-to-the-schemas-topic + type: simple + acls: + # Allow Read, Write and DescribeConfigs operations on the + # schemas topic + - resource: + type: topic + name: {{ .Values.mirrormaker2.sourceRegistry.schemaTopic }} + patternType: literal + operation: Read + type: allow + - resource: + type: topic + name: {{ .Values.mirrormaker2.sourceRegistry.schemaTopic }} + patternType: literal + operation: Write + type: allow + - resource: + type: topic + name: {{ .Values.mirrormaker2.sourceRegistry.schemaTopic }} + patternType: literal + operation: DescribeConfigs + type: allow + # Allow all operations on the schema-registry* group + - resource: + type: group + name: schema-registry + patternType: prefix + operation: All + type: allow + # Allow Describe on the __consumer_offsets topic + # (The official docs also mention DescribeConfigs?) 
+ - resource: + type: topic + name: "__consumer_offsets" + patternType: literal + operation: Describe + type: allow +{{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 55a031c55f..4a12062527 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -145,3 +145,16 @@ mirrormaker2: bootstrapServer: "" # -- Topic replication from the source cluster defined as a comma-separated list or regular expression pattern. topicsPattern: "registry-schemas, lsst.sal.*" + replication: + policy: + # -- Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. + # @default -- "" + separator: "." + # -- Replication policy. + # @default -- IdentityReplicationPolicy + class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy" + sourceRegistry: + # -- Whether to deploy another Schema Registry for the schemas replicated from the source cluster. + enabled: false + # -- Name of the topic Schema Registry topic replicated from the source cluster + schemaTopic: "source.registry-schemas" diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 3455a565ae..d8b3d1efd5 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -1,4 +1,23 @@ strimzi-kafka: + mirrormaker2: + enabled: true + source: + bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 + topicsPattern: "test.*, registry-schemas" + replication: + policy: + separator: "." + class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy" + sourceRegistry: + enabled: true + schemaTopic: source.registry-schemas + resources: + requests: + cpu: 2 + memory: 4Gi + limits: + cpu: 4 + memory: 8Gi kafka: storage: storageClassName: rook-ceph-block From ddb1c1788449a378bc5e75fa52fd67d933ab6b63 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 23 Jun 2023 16:19:52 -0700 Subject: [PATCH 122/308] Deploy sourceConnect - Deploy another Connect resource that is configured to use the sourceRegistry. 
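The net effect at base is two Connect deployments side by side: the existing Connect cluster and a new sasquatch-source one whose Avro converters point at the source Schema Registry service (the http://sasquatch-source-schema-registry.sasquatch:8081 URL in the template below), so it can deserialize topics replicated from the summit. A minimal sketch of the corresponding values-base.yaml toggles, reflecting the dependency noted in the values comment:

  mirrormaker2:
    sourceRegistry:
      enabled: true   # prerequisite for sourceConnect
    sourceConnect:
      enabled: true   # deploys the additional KafkaConnect resource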
--- .../templates/source-connect.yaml | 83 +++++++++++++++++++ .../charts/strimzi-kafka/values.yaml | 5 ++ applications/sasquatch/values-base.yaml | 2 + 3 files changed, 90 insertions(+) create mode 100644 applications/sasquatch/charts/strimzi-kafka/templates/source-connect.yaml diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/source-connect.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/source-connect.yaml new file mode 100644 index 0000000000..c9eb89148a --- /dev/null +++ b/applications/sasquatch/charts/strimzi-kafka/templates/source-connect.yaml @@ -0,0 +1,83 @@ +{{- if .Values.mirrormaker2.sourceConnect.enabled }} +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaConnect +metadata: + name: {{ .Values.cluster.name }}-source + annotations: + # Use Connect REST API to configure connectors + strimzi.io/use-connector-resources: "false" +spec: + image: {{ .Values.connect.image | quote }} + replicas: {{ .Values.connect.replicas }} + bootstrapServers: {{ .Values.cluster.name }}-kafka-bootstrap:9093 + tls: + trustedCertificates: + - secretName: {{ .Values.cluster.name }}-cluster-ca-cert + certificate: ca.crt + authentication: + type: tls + certificateAndKey: + secretName: {{ .Values.cluster.name }}-source-connect + certificate: user.crt + key: user.key + config: + group.id: {{ .Values.cluster.name }}-source-connect + offset.storage.topic: {{ .Values.cluster.name }}-source-connect-offsets + config.storage.topic: {{ .Values.cluster.name }}-source-connect-configs + status.storage.topic: {{ .Values.cluster.name }}-source-connect-status + # -1 means it will use the default replication factor configured in the broker + config.storage.replication.factor: -1 + offset.storage.replication.factor: -1 + status.storage.replication.factor: -1 + key.converter: io.confluent.connect.avro.AvroConverter + key.converter.schemas.enable: true + key.converter.schema.registry.url: http://sasquatch-source-schema-registry.sasquatch:8081 + value.converter: io.confluent.connect.avro.AvroConverter + value.converter.schemas.enable: true + value.converter.schema.registry.url: http://sasquatch-source-schema-registry.sasquatch:8081 + request.timeout.ms: 120000 + resources: + requests: + cpu: "2" + memory: 4Gi + limits: + cpu: "8" + memory: 24Gi + jvmOptions: + "-Xmx": "8g" + "-Xms": "8g" +--- +apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaUser +metadata: + name: {{ .Values.cluster.name }}-source-connect + labels: + strimzi.io/cluster: {{ .Values.cluster.name }} +spec: + authentication: + type: tls + authorization: + type: simple + acls: + - resource: + type: group + name: {{ .Values.cluster.name }}-source-connect + operation: Read + - resource: + type: group + name: "*" + patternType: literal + operation: All + - resource: + type: topic + name: "*" + patternType: literal + type: allow + host: "*" + operation: All + quotas: + producerByteRate: 1073741824 + consumerByteRate: 1073741824 + requestPercentage: 90 + controllerMutationRate: 1000 +{{- end }} diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 4a12062527..7352d608a5 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -158,3 +158,8 @@ mirrormaker2: enabled: false # -- Name of the topic Schema Registry topic replicated from the source cluster schemaTopic: "source.registry-schemas" + sourceConnect: + # -- Whether to deploy another Connect cluster for topics replicated from the 
source cluster. + # Requires the sourceRegistry enabled. + enabled: false + diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index d8b3d1efd5..b188acd218 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -11,6 +11,8 @@ strimzi-kafka: sourceRegistry: enabled: true schemaTopic: source.registry-schemas + sourceConnect: + enabled: true resources: requests: cpu: 2 From 60d9cd9d698ff58da54893ee3beea50cdd2e7d77 Mon Sep 17 00:00:00 2001 From: stvoutsin Date: Sat, 24 Jun 2023 21:07:04 +0300 Subject: [PATCH 123/308] Make vault lease duration & vault_addr optional parameters --- installer/install.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/installer/install.sh b/installer/install.sh index 81c71afe5b..6ad1832ef9 100755 --- a/installer/install.sh +++ b/installer/install.sh @@ -1,8 +1,9 @@ #!/bin/bash -e -USAGE="Usage: ./install.sh ENVIRONMENT VAULT_TOKEN" +USAGE="Usage: ./install.sh ENVIRONMENT VAULT_TOKEN [VAULT_ADDR] [VAULT_TOKEN_LEASE_DURATION]" ENVIRONMENT=${1:?$USAGE} export VAULT_TOKEN=${2:?$USAGE} -export VAULT_ADDR=https://vault.lsst.codes +export VAULT_ADDR=${3:-https://vault.lsst.codes} +export VAULT_TOKEN_LEASE_DURATION=${4:-31536000} VAULT_PATH_PREFIX=`yq -r .vaultPathPrefix ../environments/values-$ENVIRONMENT.yaml` ARGOCD_PASSWORD=`vault kv get --field=argocd.admin.plaintext_password $VAULT_PATH_PREFIX/installer` @@ -18,7 +19,7 @@ kubectl create ns vault-secrets-operator || true kubectl create secret generic vault-secrets-operator \ --namespace vault-secrets-operator \ --from-literal=VAULT_TOKEN=$VAULT_TOKEN \ - --from-literal=VAULT_TOKEN_LEASE_DURATION=31536000 \ + --from-literal=VAULT_TOKEN_LEASE_DURATION=$VAULT_TOKEN_LEASE_DURATION \ --dry-run=client -o yaml | kubectl apply -f - echo "Set up docker pull secret for vault-secrets-operator..." @@ -28,7 +29,6 @@ kubectl create secret generic pull-secret -n vault-secrets-operator \ --type=kubernetes.io/dockerconfigjson \ --dry-run=client -o yaml | kubectl apply -f - - echo "Update / install vault-secrets-operator..." # ArgoCD depends on pull-secret, which depends on vault-secrets-operator. 
helm dependency update ../applications/vault-secrets-operator From f743b36c799f2de6c2e8e4ecbd10bec9358b100f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 26 Jun 2023 10:48:40 +0000 Subject: [PATCH 124/308] Update gcr.io/cloudsql-docker/gce-proxy Docker tag to v1.33.8 --- applications/gafaelfawr/values.yaml | 2 +- applications/sqlproxy-cross-project/values.yaml | 2 +- applications/times-square/values.yaml | 2 +- applications/vo-cutouts/values.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 3aa3614042..72f564c63f 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -287,7 +287,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.7" + tag: "1.33.8" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index b2b3aac43b..0e022d21b9 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.33.7" + tag: "1.33.8" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 080268f60b..45168cb2bb 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -123,7 +123,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.7" + tag: "1.33.8" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index 99bd762f47..f160031c90 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.7" + tag: "1.33.8" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" From 76de62e64b82a8a52c2c06489d146c9900365026 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 30 Jun 2023 16:25:38 +0000 Subject: [PATCH 125/308] Update Helm release influxdb to v4.12.4 --- applications/sasquatch/Chart.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index f80b940616..4685829cf2 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -13,12 +13,12 @@ dependencies: - name: influxdb alias: influxdb condition: influxdb.enabled - version: 4.12.1 + version: 4.12.4 repository: https://helm.influxdata.com/ - name: influxdb alias: influxdb-staging condition: influxdb-staging.enabled - version: 4.12.1 + version: 4.12.4 repository: https://helm.influxdata.com/ - name: influxdb2 condition: influxdb2.enabled From f8957b12b16e6f906f419cf456e54a0874c0d5f1 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 1 Jul 2023 11:12:05 -0400 Subject: [PATCH 126/308] Update topic replication pattern --- applications/sasquatch/values-base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index b188acd218..8f83e1286d 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -3,7 +3,7 @@ strimzi-kafka: enabled: true source: bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 - topicsPattern: "test.*, registry-schemas" + topicsPattern: "lsst.sal.Test.*, registry-schemas" replication: policy: separator: "." From f1ef142303636b86ec2b56f2300f4b2bab373df2 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 1 Jul 2023 12:10:38 -0400 Subject: [PATCH 127/308] Deploy source-kafka-connect-manager Another deployment of kafka-connect-manager configured to use the sourceConnect cluster --- applications/sasquatch/Chart.yaml | 5 +++++ applications/sasquatch/values.yaml | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index f80b940616..497567deef 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -25,8 +25,13 @@ dependencies: version: 2.1.1 repository: https://helm.influxdata.com/ - name: kafka-connect-manager + alias: kafka-connect-manager condition: kafka-connect-manager.enabled version: 1.0.0 + - name: kafka-connect-manager + alias: source-kafka-connect-manager + condition: source-kafka-connect-manager.enabled + version: 1.0.0 - name: chronograf condition: chronograf.enabled version: 1.2.5 diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index dbae28fc9f..5f63bf3d81 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -188,6 +188,12 @@ influxdb2: # -- Override kafka-connect-manager configuration. kafka-connect-manager: {} +# -- Override source-kafka-connect-manager configuration. +source-kafka-connect-manager: + enabled: false + env: + kafkaConnectUrl: "http://sasquatch-source-connect-api.sasquatch:8083" + # -- Override telegraf-kafka-consumer configuration. telegraf-kafka-consumer: enabled: false From 73d9fc240012cd6938139d79d9e41046edafd081 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 1 Jul 2023 12:12:30 -0400 Subject: [PATCH 128/308] Make Connect InfluxDB URL configurable --- .../charts/kafka-connect-manager/templates/influxdb_sink.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml index e176644d26..f309ed5fef 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml @@ -40,7 +40,11 @@ spec: - name: KAFKA_CONNECT_NAME value: influxdb-sink-{{ $key }} - name: KAFKA_CONNECT_INFLUXDB_URL + {{- if $value.connectInfluxUrl }} + value: {{ $value.connectInfluxUrl | quote }} + {{- else }} value: {{ $.Values.influxdbSink.connectInfluxUrl | quote }} + {{- end }} - name: KAFKA_CONNECT_DATABASE {{- if $value.connectInfluxDb }} value: {{ $value.connectInfluxDb | quote }} From b4e5594c3aeb305304a1d14bd6705f71769a98af Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 1 Jul 2023 12:18:46 -0400 Subject: [PATCH 129/308] Configure new InfluxDB connectors at the base - Add new connectors to write topics replicated from the source cluster to the second InfluxDB instance - Topics replicated from the source cluster have the "source." 
prefix so we need to fix the topic regex in the connector configuration elsewhere --- applications/sasquatch/values-base.yaml | 44 +++++++++++++++---------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 8f83e1286d..89c395aec8 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -67,55 +67,65 @@ kafka-connect-manager: connectors: auxtel: enabled: true - topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" + topicsRegex: "lsst.sal.ATAOS|lsst.sal.ATDome|lsst.sal.ATDomeTrajectory|lsst.sal.ATHexapod|lsst.sal.ATPneumatics|lsst.sal.ATPtg|lsst.sal.ATMCS" maintel: enabled: true - topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg" + topicsRegex: "lsst.sal.MTAOS|lsst.sal.MTDome|lsst.sal.MTDomeTrajectory|lsst.sal.MTPtg" mtmount: enabled: true - topicsRegex: ".*MTMount" + topicsRegex: "lsst.sal.MTMount" comcam: enabled: true - topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS" + topicsRegex: "lsst.sal.CCCamera|lsst.sal.CCHeaderService|lsst.sal.CCOODS" eas: enabled: true - topicsRegex: ".*DIMM|.*DSM|.*ESS|.*WeatherForecast" + topicsRegex: "lsst.sal.DIMM|lsst.sal.DSM|lsst.sal.ESS|lsst.sal.WeatherForecast" latiss: enabled: true - topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph" + topicsRegex: "lsst.sal.ATCamera|lsst.sal.ATHeaderService|lsst.sal.ATOODS|lsst.sal.ATSpectrograph" m1m3: enabled: true - topicsRegex: ".*MTM1M3" + topicsRegex: "lsst.sal.MTM1M3" m2: enabled: true - topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator" + topicsRegex: "lsst.sal.MTHexapod|lsst.sal.MTM2|lsst.sal.MTRotator" obssys: enabled: true - topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher" + topicsRegex: "lsst.sal.Scheduler|lsst.sal.Script|lsst.sal.ScriptQueue|lsst.sal.Watcher" ocps: enabled: true - topicsRegex: ".*OCPS" + topicsRegex: "lsst.sal.OCPS" test: enabled: true - topicsRegex: ".*Test" + topicsRegex: "lsst.sal.Test" pmd: enabled: true - topicsRegex: ".*PMD" + topicsRegex: "lsst.sal.PMD" calsys: enabled: true - topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" + topicsRegex: "lsst.sal.ATMonochromator|lsst.sal.ATWhiteLight|lsst.sal.CBP|lsst.sal.Electrometer|lsst.sal.FiberSpectrograph|lsst.sal.LinearStage|lsst.sal.TunableLaser" mtaircompressor: enabled: true - topicsRegex: ".*MTAirCompressor" + topicsRegex: "lsst.sal.MTAirCompressor" authorize: enabled: true - topicsRegex: ".*Authorize" + topicsRegex: "lsst.sal.Authorize" lasertracker: enabled: true - topicsRegex: ".*LaserTracker" + topicsRegex: "lsst.sal.LaserTracker" genericcamera: enabled: true - topicsRegex: ".*GCHeaderService|.*GenericCamera" + topicsRegex: "lsst.sal.GCHeaderService|lsst.sal.GenericCamera" + +source-kafka-connect-manager: + enabled: true + influxdbSink: + connectInfluxUrl: "http://sasquatch-influxdb-staging.sasquatch:8086" + connectInfluxDb: "test" + connectors: + source-test: + enabled: true + topicsRegex: "source.lsst.sal.Test" kafdrop: ingress: From cf865124051db79fa500df734dcc25513293b115 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 13:22:55 -0400 Subject: [PATCH 130/308] Update kafka-connect-manager to version 1.3.1 --- applications/sasquatch/charts/kafka-connect-manager/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml 
b/applications/sasquatch/charts/kafka-connect-manager/values.yaml index ff3aad79cb..51c827f46e 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml @@ -6,7 +6,7 @@ enabled: true image: repository: ghcr.io/lsst-sqre/kafkaconnect - tag: 1.2.0 + tag: 1.3.1 pullPolicy: IfNotPresent influxdbSink: From 854dc4f47cd6058debc481522562bc88ec0f7deb Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 13:33:43 -0400 Subject: [PATCH 131/308] Add support to the removePrefix option in helm --- .../charts/kafka-connect-manager/templates/influxdb_sink.yaml | 4 ++++ .../sasquatch/charts/kafka-connect-manager/values.yaml | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml index f309ed5fef..037b3a58fe 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml @@ -79,6 +79,10 @@ spec: - name: KAFKA_CONNECT_INFLUXDB_TAGS value: {{ $value.tags | quote }} {{- end }} + {{ - if $value.removePrefix }} + - name: KAFKA_CONNECT_INFLUXDB_REMOVE_PREFIX + value: {{ $value.removePrefix | quote }} + {{- end }} - name: KAFKA_CONNECT_ERROR_POLICY value: {{ $.Values.influxdbSink.connectInfluxErrorPolicy | quote }} - name: KAFKA_CONNECT_MAX_RETRIES diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml b/applications/sasquatch/charts/kafka-connect-manager/values.yaml index 51c827f46e..e508350f03 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml @@ -40,9 +40,11 @@ influxdbSink: # -- Whether to deploy a repairer connector in addition to the original connector instance. repairerConnector: false # -- Regex to select topics from Kafka. - topicsRegex: ".*Test" + topicsRegex: "source.lsst.sal.Test" # -- Fields in the Avro payload that are treated as InfluxDB tags. tags: "" + # -- Remove prefix from topic name. + removePrefix: "source." # The s3Sink connector assumes Parquet format with Snappy compression # and a time based partitioner. From 402c6107a8384e4e62584ae70fda401d57d9835d Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 13:34:19 -0400 Subject: [PATCH 132/308] Remove the "source." 
prefix from replicated topics at base --- .../charts/kafka-connect-manager/templates/influxdb_sink.yaml | 2 +- applications/sasquatch/values-base.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml index 037b3a58fe..382794d646 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml @@ -79,7 +79,7 @@ spec: - name: KAFKA_CONNECT_INFLUXDB_TAGS value: {{ $value.tags | quote }} {{- end }} - {{ - if $value.removePrefix }} + {{- if $value.removePrefix }} - name: KAFKA_CONNECT_INFLUXDB_REMOVE_PREFIX value: {{ $value.removePrefix | quote }} {{- end }} diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 89c395aec8..878ba83aef 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -126,6 +126,7 @@ source-kafka-connect-manager: source-test: enabled: true topicsRegex: "source.lsst.sal.Test" + removePrefix: "source." kafdrop: ingress: From b49011474e4474b9ef0c4b5001ad22a7be70a3b0 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 15:38:00 -0400 Subject: [PATCH 133/308] Configure source connectors to use the efd database --- applications/sasquatch/values-base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 878ba83aef..866bb415d5 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -121,7 +121,7 @@ source-kafka-connect-manager: enabled: true influxdbSink: connectInfluxUrl: "http://sasquatch-influxdb-staging.sasquatch:8086" - connectInfluxDb: "test" + connectInfluxDb: "efd" connectors: source-test: enabled: true From 1d399b9d0a045fe0a8558630eea4de241bfc8f4f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 15:39:26 -0400 Subject: [PATCH 134/308] Add additional connectors following the summit configuration - Configure source-kafka-connect-manager connectors following summit configuration --- applications/sasquatch/values-base.yaml | 75 +++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 866bb415d5..037d510f94 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -117,16 +117,91 @@ kafka-connect-manager: enabled: true topicsRegex: "lsst.sal.GCHeaderService|lsst.sal.GenericCamera" +# This needs to follow the kafka-connect-manager configuration for the summit +# environment where data is replicated from. +# We need to remove the "source." prefix from the topic name before writing to InfluxDB. source-kafka-connect-manager: enabled: true influxdbSink: connectInfluxUrl: "http://sasquatch-influxdb-staging.sasquatch:8086" connectInfluxDb: "efd" connectors: + source-auxtel: + enabled: true + topicsRegex: "source.lsst.sal.ATAOS|source.lsst.sal.ATDome|source.lsst.sal.ATDomeTrajectory|source.lsst.sal.ATHexapod|source.lsst.sal.ATPneumatics|source.lsst.sal.ATPtg|source.lsst.sal.ATMCS" + removePrefix: "source." 
+ source-maintel: + enabled: true + topicsRegex: "source.lsst.sal.MTAOS|source.lsst.sal.MTDome|source.lsst.sal.MTDomeTrajectory|source.lsst.sal.MTPtg" + removePrefix: "source." + source-mtmount: + enabled: true + topicsRegex: "source.lsst.sal.MTMount" + removePrefix: "source." + source-comcam: + enabled: true + topicsRegex: "source.lsst.sal.CCCamera|source.lsst.sal.CCHeaderService|source.lsst.sal.CCOODS" + removePrefix: "source." + source-eas: + enabled: true + topicsRegex: "source.lsst.sal.DIMM|source.lsst.sal.DSM|source.lsst.sal.ESS|source.lsst.sal.WeatherForecast" + removePrefix: "source." + source-latiss: + enabled: true + topicsRegex: "source.lsst.sal.ATCamera|source.lsst.sal.ATHeaderService|source.lsst.sal.ATOODS|source.lsst.sal.ATSpectrograph" + removePrefix: "source." + source-m1m3: + enabled: true + topicsRegex: "source.lsst.sal.MTM1M3" + removePrefix: "source." + source-m2: + enabled: true + topicsRegex: "source.lsst.sal.MTHexapod|source.lsst.sal.MTM2|source.lsst.sal.MTRotator" + removePrefix: "source." + source-obssys: + enabled: true + topicsRegex: "source.lsst.sal.Scheduler|source.lsst.sal.Script|source.lsst.sal.ScriptQueue|source.lsst.sal.Watcher" + removePrefix: "source." + source-ocps: + enabled: true + topicsRegex: "source.lsst.sal.OCPS" + removePrefix: "source." source-test: enabled: true topicsRegex: "source.lsst.sal.Test" removePrefix: "source." + source-pmd: + enabled: true + topicsRegex: "source.lsst.sal.PMD" + removePrefix: "source." + source-calsys: + enabled: true + topicsRegex: "source.lsst.sal.ATMonochromator|source.lsst.sal.ATWhiteLight|source.lsst.sal.CBP|source.lsst.sal.Electrometer|source.lsst.sal.FiberSpectrograph|source.lsst.sal.LinearStage|source.lsst.sal.TunableLaser" + removePrefix: "source." + source-mtaircompressor: + enabled: true + topicsRegex: "source.lsst.sal.MTAirCompressor" + removePrefix: "source." + source-authorize: + enabled: true + topicsRegex: "source.lsst.sal.Authorize" + removePrefix: "source." + source-genericcamera: + enabled: true + topicsRegex: "source.lsst.sal.GCHeaderService|source.lsst.sal.GenericCamera" + removePrefix: "source." + source-gis: + enabled: true + topicsRegex: "source.lsst.sal.GIS" + removePrefix: "source." + source-mtvms: + enabled: true + topicsRegex: "source.lsst.sal.MTVMS" + removePrefix: "source." + source-lasertracker: + enabled: true + topicsRegex: "source.lsst.sal.LaserTracker" + removePrefix: "source." kafdrop: ingress: From 354febb1f448f253f078529d017d495a26660ea0 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 15:47:46 -0400 Subject: [PATCH 135/308] Replicate all lsst.sal topics from summit --- applications/sasquatch/values-base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 037d510f94..5929e8cf0e 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -3,7 +3,7 @@ strimzi-kafka: enabled: true source: bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094 - topicsPattern: "lsst.sal.Test.*, registry-schemas" + topicsPattern: "lsst.sal.*, registry-schemas" replication: policy: separator: "." 
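For context, a sketch of how the replication settings in the patches above fit together (an illustration only, assuming the MirrorMaker2 source cluster alias is "source"): with DefaultReplicationPolicy and separator ".", a mirrored topic is renamed to "<alias><separator><topic>", so the summit topic lsst.sal.Test arrives at base as source.lsst.sal.Test, and the removePrefix option on the InfluxDB Sink connector strips that prefix again before writing, so the original topic name is kept in InfluxDB:

# Illustrative values sketch, not an additional patch.
strimzi-kafka:
  mirrormaker2:
    source:
      topicsPattern: "lsst.sal.*, registry-schemas"  # topics mirrored from the summit
    replication:
      policy:
        class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy"
        separator: "."  # lsst.sal.Test is replicated as source.lsst.sal.Test

source-kafka-connect-manager:
  influxdbSink:
    connectors:
      source-test:
        topicsRegex: "source.lsst.sal.Test"  # matches the renamed topic
        removePrefix: "source."  # recorded in InfluxDB as lsst.sal.Test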
From 011fae74fd3e93306270cdf35d44c3569dce0dcc Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 21:34:27 -0400 Subject: [PATCH 136/308] Fix a lint error and add helm docs --- applications/sasquatch/README.md | 1 + .../sasquatch/charts/kafka-connect-manager/README.md | 7 ++++--- applications/sasquatch/charts/strimzi-kafka/README.md | 1 + applications/sasquatch/charts/strimzi-kafka/values.yaml | 1 - 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 3a1bf3335d..9896e2613a 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -85,6 +85,7 @@ Rubin Observatory's telemetry service. | kapacitor.resources.requests.cpu | int | `1` | | | kapacitor.resources.requests.memory | string | `"1Gi"` | | | rest-proxy | object | `{"enabled":false}` | Override rest-proxy configuration. | +| source-kafka-connect-manager | object | `{"enabled":false,"env":{"kafkaConnectUrl":"http://sasquatch-source-connect-api.sasquatch:8083"}}` | Override source-kafka-connect-manager configuration. | | squareEvents.enabled | bool | `false` | Enable the Square Events subchart with topic and user configurations. | | strimzi-kafka | object | `{}` | Override strimzi-kafka configuration. | | strimzi-registry-operator | object | `{"clusterName":"sasquatch","clusterNamespace":"sasquatch","operatorNamespace":"sasquatch"}` | strimzi-registry-operator configuration. | diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index cc830127f9..c9ff922025 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -12,7 +12,7 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | | -| image.tag | string | `"1.2.0"` | | +| image.tag | string | `"1.3.1"` | | | influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. | | influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | | influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. | @@ -21,11 +21,12 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | -| influxdbSink.connectors | object | `{"test":{"enabled":false,"repairerConnector":false,"tags":"","topicsRegex":".*Test"}}` | Connector instances to deploy. | +| influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | | influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | +| influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. 
| | influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | | influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | -| influxdbSink.connectors.test.topicsRegex | string | `".*Test"` | Regex to select topics from Kafka. | +| influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. | | influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | | influxdbSink.tasksMax | int | `1` | Maxium number of tasks to run the connector. | | influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 52d8752bcf..f6cffe6b8d 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -36,6 +36,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | mirrormaker2.replication.policy.separator | string | "" | Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. | | mirrormaker2.source.bootstrapServer | string | `""` | Source (active) cluster to replicate from. | | mirrormaker2.source.topicsPattern | string | `"registry-schemas, lsst.sal.*"` | Topic replication from the source cluster defined as a comma-separated list or regular expression pattern. | +| mirrormaker2.sourceConnect.enabled | bool | `false` | Whether to deploy another Connect cluster for topics replicated from the source cluster. Requires the sourceRegistry enabled. | | mirrormaker2.sourceRegistry.enabled | bool | `false` | Whether to deploy another Schema Registry for the schemas replicated from the source cluster. | | mirrormaker2.sourceRegistry.schemaTopic | string | `"source.registry-schemas"` | Name of the topic Schema Registry topic replicated from the source cluster | | registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 7352d608a5..c926eb09db 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -162,4 +162,3 @@ mirrormaker2: # -- Whether to deploy another Connect cluster for topics replicated from the source cluster. # Requires the sourceRegistry enabled. 
enabled: false - From 0beafa814fd22ac62b430072a20ec8d1468ec1f5 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 3 Jul 2023 21:45:28 -0400 Subject: [PATCH 137/308] Require pydantic<2.0.0 --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 55bca21138..edde5980f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,6 +36,7 @@ dev = [ "documenteer[guide]>=0.7.0b4", "sphinx-diagrams", "sphinx-jinja", + "pydantic<2.0.0", ] [project.scripts] From ccd60e3b1a3c4d19043a88f7284886daf3f8422a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 16:31:45 +0000 Subject: [PATCH 138/308] Update Helm release argo-cd to v5.37.0 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index fce3f090f8..2afc84d617 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.36.1 + version: 5.37.0 repository: https://argoproj.github.io/argo-helm From 6f46f849a38638fbc95b78cfab863400f4540d2c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 5 Jul 2023 08:21:58 -0700 Subject: [PATCH 139/308] Update Helm documentation --- applications/gafaelfawr/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index a1012431f9..ed0125f474 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,7 +17,7 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. 
| | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.7"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 4b132f8b03..7a84cea38a 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.33.7"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.33.8"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 832f05a38a..a2ab3059d3 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.7"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index fad03120b2..eea5d28e20 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.7"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | From 1e74e5cf142a05c1d93f344ed91b72f98f045ae9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 10:59:28 +0000 Subject: [PATCH 140/308] Update Helm release ingress-nginx to v4.7.1 --- applications/ingress-nginx/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index 481ec0c0c6..77127b0d9f 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.7.0 + version: 4.7.1 repository: https://kubernetes.github.io/ingress-nginx From ccb428d6ea86dfdc3006c55018998cfefce69fad Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 5 Jul 2023 09:11:27 -0700 Subject: [PATCH 141/308] Use newer Argo CD client --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e30bb68d02..bbae9b674b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -101,7 +101,7 @@ jobs: unzip /tmp/vault.zip sudo mv vault /usr/local/bin/vault sudo chmod +x /usr/local/bin/vault - sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.6.7/argocd-linux-amd64 + sudo curl -sSL -o 
/usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.6/argocd-linux-amd64 sudo chmod +x /usr/local/bin/argocd sudo apt-get install socat sudo pip install -r installer/requirements.txt From 36f4184b911ed836f7dfac671febe0c74912ecdd Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 3 Jul 2023 10:59:34 +0000 Subject: [PATCH 142/308] Update Helm release argo-workflows to v0.30.0 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index f810c5905d..84811f4992 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.29.2 + version: 0.30.0 repository: https://argoproj.github.io/argo-helm From 9686171fdb7e5b0a14cfd2373a318f12bf6e8f0b Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 5 Jul 2023 14:20:53 -0400 Subject: [PATCH 143/308] Enable InfluxDB2 at summit --- applications/sasquatch/values-summit.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index db8717ae3e..15692a9d77 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -32,6 +32,15 @@ influxdb: enabled: true hostname: summit-lsp.lsst.codes +influxdb2: + enabled: true + persistence: + storageClass: rook-ceph-block + size: 5Ti + ingress: + enabled: true + hostname: summit-lsp.lsst.codes + kafka-connect-manager: influxdbSink: # Based on the kafka producers configuration for the Summit From 5ab942db8121bd346ac5b48cab319fed39abf97e Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 5 Jul 2023 14:21:12 -0400 Subject: [PATCH 144/308] Enable telegraf-kafka-consumer at summit --- applications/sasquatch/values-summit.yaml | 79 +++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 15692a9d77..d9cb8fd8dc 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -118,6 +118,85 @@ kafka-connect-manager: repairerConnector: false topicsRegex: ".*LaserTracker" +telegraf-kafka-consumer: + enabled: true + kafkaConsumers: + auxtel: + enabled: true + topicRegexps: | + [ ".*ATAOS", ".*ATDome", ".*ATDomeTrajectory", ".*ATHexapod", ".*ATPneumatics", ".*ATPtg", ".*ATMCS" ] + maintel: + enabled: true + topicRegexps: | + [ ".*MTAOS", ".*MTDome", ".*MTDomeTrajectory", ".*MTPtg" ] + mtmount: + enabled: true + topicRegexps: | + [ ".*MTMount" ] + comcam: + enabled: true + topicRegexps: | + [ ".*CCCamera", ".*CCHeaderService", ".*CCOODS" ] + eas: + enabled: true + topicRegexps: | + [ ".*DIMM", ".*DSM", ".*ESS", ".*HVAC", ".*WeatherForecast" ] + latiss: + enabled: true + topicRegexps: | + [ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ] + m1m3: + enabled: true + flush_interval: "1s" + metric_batch_size: 5000 + interval: "0.1s" + topicRegexps: | + [ ".*MTM1M3" ] + m2: + enabled: true + topicRegexps: | + [ ".*MTHexapod", ".*MTM2", ".*MTRotator" ] + obssys: + enabled: true + topicRegexps: | + [ ".*Scheduler", ".*Script", ".*ScriptQueue", ".*Watcher" ] + ocps: + enabled: true + topicRegexps: | + [ ".*OCPS" ] + test: + enabled: true + topicRegexps: | + [ "lsst.sal.Test" 
] + pmd: + enabled: true + topicRegexps: | + [ ".*PMD" ] + calsys: + enabled: true + topicRegexps: | + [ ".*ATMonochromator", ".*ATWhiteLight", ".*CBP", ".*Electrometer", ".*FiberSpectrograph", ".*LinearStage", ".*TunableLaser" ] + mtaircompressor: + enabled: true + topicRegexps: | + [ ".*MTAirCompressor" ] + genericcamera: + enabled: true + topicRegexps: | + [ ".*GCHeaderService", ".*GenericCamera" ] + gis: + enabled: true + topicRegexps: | + [ ".*GIS" ] + mtvms: + enabled: true + topicRegexps: | + [ ".*MTVMS" ] + lasertracker: + enabled: true + topicRegexps: | + [ ".*LaserTracker" ] + kafdrop: ingress: enabled: true From e8139cb5a625250851d3ab1eb9eb5c0314b84ef9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 6 Jul 2023 09:08:37 -0700 Subject: [PATCH 145/308] Bump version of REST spawner for Nublado v3 Update to version 0.3.2, which has a timeout fix. --- applications/nublado/README.md | 2 +- applications/nublado/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 73d6e90b58..eb7768219e 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -69,7 +69,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub | | jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub | | jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub | -| jupyterhub.hub.image.tag | string | `"0.3.1"` | Tag of image to use for JupyterHub | +| jupyterhub.hub.image.tag | string | `"0.3.2"` | Tag of image to use for JupyterHub | | jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) | | jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) | | jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index f1131894bd..280302bffb 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -288,7 +288,7 @@ jupyterhub: name: ghcr.io/lsst-sqre/rsp-restspawner # -- Tag of image to use for JupyterHub - tag: 0.3.1 + tag: 0.3.2 # -- Resource limits and requests resources: From 3712031232ac4fdd266957fe5ea1cea419a7c34c Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 6 Jul 2023 08:56:56 -0700 Subject: [PATCH 146/308] idf-dev, idf-int, and usdf-dev -> nublado3 by default --- applications/mobu/values-idfdev.yaml | 4 ++-- applications/mobu/values-idfint.yaml | 6 +++--- applications/noteburst/values-idfdev.yaml | 1 - applications/noteburst/values-usdfdev.yaml | 1 - applications/nublado/values-idfdev.yaml | 1 - applications/nublado/values-idfint.yaml | 1 - applications/nublado/values-usdfdev.yaml | 2 +- applications/nublado2/values-idfdev.yaml | 1 + applications/nublado2/values-idfint.yaml | 1 + applications/nublado2/values-usdfdev.yaml | 1 + 10 files changed, 9 insertions(+), 10 deletions(-) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 2497584694..667dc76f5f 100644 --- 
a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -34,7 +34,7 @@ config: image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - url_prefix: "/n3" + url_prefix: "/nb" use_cachemachine: false restart: true - name: "tutorial" @@ -56,7 +56,7 @@ config: max_executions: 1 working_directory: "notebooks/tutorial-notebooks" use_cachemachine: false - url_prefix: "/n3" + url_prefix: "/nb" restart: true - name: "tap" count: 1 diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 767751c08f..7445f3fad6 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -31,7 +31,7 @@ config: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" use_cachemachine: false - url_prefix: "/n3" + url_prefix: "/nb" restart: true - name: "weekly" count: 1 @@ -50,7 +50,7 @@ config: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" use_cachemachine: false - url_prefix: "/n3" + url_prefix: "/nb" restart: true - name: "tutorial" count: 1 @@ -71,7 +71,7 @@ config: max_executions: 1 working_directory: "notebooks/tutorial-notebooks" use_cachemachine: false - url_prefix: "/n3" + url_prefix: "/nb" restart: true - name: "tap" count: 1 diff --git a/applications/noteburst/values-idfdev.yaml b/applications/noteburst/values-idfdev.yaml index bf6d619d3f..b1f15683d5 100644 --- a/applications/noteburst/values-idfdev.yaml +++ b/applications/noteburst/values-idfdev.yaml @@ -3,7 +3,6 @@ image: config: logLevel: "DEBUG" - hubPathPrefix: "/n3" worker: workerCount: 1 identities: diff --git a/applications/noteburst/values-usdfdev.yaml b/applications/noteburst/values-usdfdev.yaml index 4ab6827768..60e463b6e0 100644 --- a/applications/noteburst/values-usdfdev.yaml +++ b/applications/noteburst/values-usdfdev.yaml @@ -3,7 +3,6 @@ image: config: logLevel: "DEBUG" - hubPathPrefix: "/n3" worker: workerCount: 1 identities: diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml index bfcc0838f9..d886967228 100644 --- a/applications/nublado/values-idfdev.yaml +++ b/applications/nublado/values-idfdev.yaml @@ -68,6 +68,5 @@ controller: server: "10.87.86.26" jupyterhub: hub: - baseUrl: "/n3" db: url: "postgresql://nublado3@postgres.postgres/nublado3" diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml index b4536fe12f..7c794d2cd1 100644 --- a/applications/nublado/values-idfint.yaml +++ b/applications/nublado/values-idfint.yaml @@ -89,7 +89,6 @@ controller: jupyterhub: hub: - baseUrl: "/n3" config: ServerApp: shutdown_no_activity_timeout: 432000 diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 9cd0cfec9a..0cd845af00 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -25,7 +25,7 @@ controller: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" - HUB_ROUTE: "/n3/hub" + HUB_ROUTE: "/nb/hub" PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" PGUSER: "rubin" S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" diff --git a/applications/nublado2/values-idfdev.yaml b/applications/nublado2/values-idfdev.yaml index 
cafa58321f..f63977381b 100644 --- a/applications/nublado2/values-idfdev.yaml +++ b/applications/nublado2/values-idfdev.yaml @@ -1,5 +1,6 @@ jupyterhub: hub: + baseUrl: "/n2" config: ServerApp: shutdown_no_activity_timeout: 432000 diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml index b411ebddad..c5812aedb8 100644 --- a/applications/nublado2/values-idfint.yaml +++ b/applications/nublado2/values-idfint.yaml @@ -1,5 +1,6 @@ jupyterhub: hub: + baseUrl: "/n2" config: ServerApp: shutdown_no_activity_timeout: 432000 diff --git a/applications/nublado2/values-usdfdev.yaml b/applications/nublado2/values-usdfdev.yaml index 5e85e0d328..77d80bc08e 100644 --- a/applications/nublado2/values-usdfdev.yaml +++ b/applications/nublado2/values-usdfdev.yaml @@ -1,6 +1,7 @@ jupyterhub: hub: + baseUrl: "/n2" config: ServerApp: shutdown_no_activity_timeout: 432000 From dcfab577ae8952c53b20fd76cb706c5bb1ad5ebe Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 6 Jul 2023 13:17:28 -0700 Subject: [PATCH 147/308] Update to mobu 6.1.1 Improved error reporting and hopefully a fix for the timeouts when summarizing flocks. --- applications/mobu/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/mobu/Chart.yaml b/applications/mobu/Chart.yaml index c900cbdb83..9d1709f040 100644 --- a/applications/mobu/Chart.yaml +++ b/applications/mobu/Chart.yaml @@ -4,4 +4,4 @@ version: 1.0.0 description: Continuous integration testing sources: - https://github.com/lsst-sqre/mobu -appVersion: 6.1.0 +appVersion: 6.1.1 From fd2e1cb1653920d06d16cf8350e59fa16b8293d1 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 6 Jul 2023 14:17:02 -0700 Subject: [PATCH 148/308] Change TAP schema database pod configuration Make the TAP schema database pod configuration consistent with other deployments and Helm values style: * Use tapSchema rather than tap_schema since Helm uses camelCase * Add and use podAnnotations, nodeSelector, tolerations, and affinity rather than using the ones for the base TAP server * Add image.pullPolicy configuration Also set a default image.repository for tapSchema. It would be nice to have Helm force this to be set, but unfortunately it breaks Renovate discovery of the latest image tag version, which we want as a reminder when the image is out of date. Use the same image name that we use for minikube as the default, but document that it must be set for each environment. 
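As a sketch of what this restructuring means for an environment override (the concrete values follow in the diffs below), each environment now sets only the camelCase tapSchema image repository, for example:

# Illustrative per-environment override; the repository shown is the usdfdev
# value used later in this commit.
tapSchema:
  image:
    repository: "lsstsqre/tap-schema-usdf-dev-livetap"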
--- applications/livetap/README.md | 11 +++++--- .../templates/tap-schema-db-deployment.yaml | 14 +++++----- applications/livetap/values-minikube.yaml | 4 +++ applications/livetap/values-usdfdev.yaml | 2 +- applications/livetap/values-usdfprod.yaml | 2 +- applications/livetap/values.yaml | 27 ++++++++++++++++--- applications/ssotap/README.md | 11 +++++--- .../templates/tap-schema-db-deployment.yaml | 14 +++++----- applications/ssotap/values-idfdev.yaml | 2 +- applications/ssotap/values-idfint.yaml | 2 +- applications/ssotap/values-idfprod.yaml | 2 +- applications/ssotap/values-minikube.yaml | 2 +- applications/ssotap/values-usdfdev.yaml | 2 +- applications/ssotap/values-usdfprod.yaml | 2 +- applications/ssotap/values.yaml | 25 +++++++++++++++-- applications/tap/README.md | 11 +++++--- .../templates/tap-schema-db-deployment.yaml | 14 +++++----- applications/tap/values-ccin2p3.yaml | 2 +- applications/tap/values-idfdev.yaml | 2 +- applications/tap/values-idfint.yaml | 2 +- applications/tap/values-idfprod.yaml | 2 +- applications/tap/values-minikube.yaml | 2 +- applications/tap/values-roe.yaml | 2 +- applications/tap/values-usdfdev.yaml | 2 +- applications/tap/values-usdfprod.yaml | 2 +- applications/tap/values.yaml | 25 +++++++++++++++-- 26 files changed, 135 insertions(+), 53 deletions(-)
diff --git a/applications/livetap/README.md b/applications/livetap/README.md index 992ef3bf97..a2d086d89f 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -45,9 +45,14 @@ IVOA TAP service | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | -| tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"2.0.2"` | | -| tap_schema.resources | object | `{}` | | +| tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | +| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | +| tapSchema.image.repository | string | `"lsstsqre/tap-schema-idfprod-tap"` | TAP schema image to use. This must be overridden by each environment with the TAP schema for that environment. | +| tapSchema.image.tag | string | `"2.0.1"` | Tag of TAP schema image | +| tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | +| tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | +| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | +| tapSchema.tolerations | list | `[]` | Tolerations for the mock QServ pod | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image |
diff --git a/applications/livetap/templates/tap-schema-db-deployment.yaml b/applications/livetap/templates/tap-schema-db-deployment.yaml index 929bdce88f..0623cf1c9a 100644 --- a/applications/livetap/templates/tap-schema-db-deployment.yaml +++ b/applications/livetap/templates/tap-schema-db-deployment.yaml @@ -11,7 +11,7 @@ spec: {{- include "cadc-tap.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} + {{- with .Values.tapSchema.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} @@ -31,26 +31,26 @@ spec: value: "TAP_SCHEMA" - name: MYSQL_ROOT_HOST value: "%" - image: "{{ .Values.tap_schema.image.repository }}:{{ .Values.tap_schema.image.tag}}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + image: "{{ .Values.tapSchema.image.repository }}:{{ .Values.tapSchema.image.tag}}" + imagePullPolicy: {{ .Values.tapSchema.image.pullPolicy | quote }} ports: - containerPort: 3306 protocol: "TCP" - {{- with .Values.tap_schema.resources }} + {{- with .Values.tapSchema.resources }} resources: {{- toYaml . | nindent 12 }} {{- end }} imagePullSecrets: - name: "pull-secret" - {{- with .Values.nodeSelector }} + {{- with .Values.tapSchema.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.affinity }} + {{- with .Values.tapSchema.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.tolerations }} + {{- with .Values.tapSchema.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }}
diff --git a/applications/livetap/values-minikube.yaml b/applications/livetap/values-minikube.yaml index 6e3f1aca1e..34af311b08 100644 --- a/applications/livetap/values-minikube.yaml +++ b/applications/livetap/values-minikube.yaml @@ -1,3 +1,7 @@ +tapSchema: + image: + repository: "lsstsqre/tap-schema-usdf-prod-livetap" + config: gcsBucket: "async-results.lsst.codes" gcsBucketUrl: "http://async-results.lsst.codes"
diff --git a/applications/livetap/values-usdfdev.yaml b/applications/livetap/values-usdfdev.yaml index 4bf6319d48..0ade540ca7 100644 --- a/applications/livetap/values-usdfdev.yaml +++ b/applications/livetap/values-usdfdev.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-usdf-dev-livetap"
diff --git a/applications/livetap/values-usdfprod.yaml b/applications/livetap/values-usdfprod.yaml index 52a661a7d3..1d89fa6bf1 100644 --- a/applications/livetap/values-usdfprod.yaml +++ b/applications/livetap/values-usdfprod.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-usdf-prod-livetap"
diff --git a/applications/livetap/values.yaml b/applications/livetap/values.yaml index 14f1cfef49..af2aa2de93 100644 --- a/applications/livetap/values.yaml +++ b/applications/livetap/values.yaml @@ -120,12 +120,33 @@ pg: # -- Affinity rules for the mock postgres pod affinity: {} -tap_schema: +tapSchema: image: - repository: {} - tag: "2.0.2" + # -- TAP schema image to use. This must be overridden by each environment + # with the TAP schema for that environment. + repository: "lsstsqre/tap-schema-idfprod-tap" + + # -- Pull policy for the TAP schema image + pullPolicy: "IfNotPresent" + + # -- Tag of TAP schema image + tag: "2.0.1" + + # -- Resource limits and requests for the TAP schema database pod resources: {} + # -- Annotations for the mock QServ pod + podAnnotations: {} + + # -- Node selection rules for the mock QServ pod + nodeSelector: {} + + # -- Tolerations for the mock QServ pod + tolerations: [] + + # -- Affinity rules for the mock QServ pod + affinity: {} + uws: image: # -- UWS database image to use
diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index 42fa922a58..a2d086d89f 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -45,9 +45,14 @@ IVOA TAP service | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | -| tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"2.0.1"` | | -| tap_schema.resources | object | `{}` | | +| tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | +| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | +| tapSchema.image.repository | string | `"lsstsqre/tap-schema-idfprod-tap"` | TAP schema image to use. This must be overridden by each environment with the TAP schema for that environment. | +| tapSchema.image.tag | string | `"2.0.1"` | Tag of TAP schema image | +| tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | +| tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | +| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | +| tapSchema.tolerations | list | `[]` | Tolerations for the mock QServ pod | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image |
diff --git a/applications/ssotap/templates/tap-schema-db-deployment.yaml b/applications/ssotap/templates/tap-schema-db-deployment.yaml index 929bdce88f..0623cf1c9a 100644 --- a/applications/ssotap/templates/tap-schema-db-deployment.yaml +++ b/applications/ssotap/templates/tap-schema-db-deployment.yaml @@ -11,7 +11,7 @@ spec: {{- include "cadc-tap.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} + {{- with .Values.tapSchema.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} @@ -31,26 +31,26 @@ spec: value: "TAP_SCHEMA" - name: MYSQL_ROOT_HOST value: "%" - image: "{{ .Values.tap_schema.image.repository }}:{{ .Values.tap_schema.image.tag}}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + image: "{{ .Values.tapSchema.image.repository }}:{{ .Values.tapSchema.image.tag}}" + imagePullPolicy: {{ .Values.tapSchema.image.pullPolicy | quote }} ports: - containerPort: 3306 protocol: "TCP" - {{- with .Values.tap_schema.resources }} + {{- with .Values.tapSchema.resources }} resources: {{- toYaml . | nindent 12 }} {{- end }} imagePullSecrets: - name: "pull-secret" - {{- with .Values.nodeSelector }} + {{- with .Values.tapSchema.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.affinity }} + {{- with .Values.tapSchema.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.tolerations }} + {{- with .Values.tapSchema.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }}
diff --git a/applications/ssotap/values-idfdev.yaml b/applications/ssotap/values-idfdev.yaml index 9f20438b3b..dcc448f905 100644 --- a/applications/ssotap/values-idfdev.yaml +++ b/applications/ssotap/values-idfdev.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfdev-sso"
diff --git a/applications/ssotap/values-idfint.yaml b/applications/ssotap/values-idfint.yaml index bf6f0b52d9..5d0e6f035c 100644 --- a/applications/ssotap/values-idfint.yaml +++ b/applications/ssotap/values-idfint.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfint-sso"
diff --git a/applications/ssotap/values-idfprod.yaml b/applications/ssotap/values-idfprod.yaml index 643b677f44..f84b84aa58 100644 --- a/applications/ssotap/values-idfprod.yaml +++ b/applications/ssotap/values-idfprod.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfprod-sso"
diff --git a/applications/ssotap/values-minikube.yaml b/applications/ssotap/values-minikube.yaml index 9819d2da73..f2056773f3 100644 --- a/applications/ssotap/values-minikube.yaml +++ b/applications/ssotap/values-minikube.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfprod-sso"
diff --git a/applications/ssotap/values-usdfdev.yaml b/applications/ssotap/values-usdfdev.yaml index 728f4eb94a..2cd2a832b4 100644 --- a/applications/ssotap/values-usdfdev.yaml +++ b/applications/ssotap/values-usdfdev.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-usdf-dev-sso"
diff --git a/applications/ssotap/values-usdfprod.yaml b/applications/ssotap/values-usdfprod.yaml index 1f3fed008b..07ec085214 100644 --- a/applications/ssotap/values-usdfprod.yaml +++ b/applications/ssotap/values-usdfprod.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-usdf-prod-sso"
diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml index d54066f272..af2aa2de93 100644 --- a/applications/ssotap/values.yaml +++ b/applications/ssotap/values.yaml @@ -120,12 +120,33 @@ pg: # -- Affinity rules for the mock postgres pod affinity: {} -tap_schema: +tapSchema: image: - repository: {} + # -- TAP schema image to use. This must be overridden by each environment
+ repository: "lsstsqre/tap-schema-idfprod-tap" + + # -- Pull policy for the TAP schema image + pullPolicy: "IfNotPresent" + + # -- Tag of TAP schema image tag: "2.0.1" + + # -- Resource limits and requests for the TAP schema database pod resources: {} + # -- Annotations for the mock QServ pod + podAnnotations: {} + + # -- Node selection rules for the mock QServ pod + nodeSelector: {} + + # -- Tolerations for the mock QServ pod + tolerations: [] + + # -- Affinity rules for the mock QServ pod + affinity: {} + uws: image: # -- UWS database image to use diff --git a/applications/tap/README.md b/applications/tap/README.md index 0bb1d5c330..0bb6632941 100644 --- a/applications/tap/README.md +++ b/applications/tap/README.md @@ -43,9 +43,14 @@ IVOA TAP service | qserv.mock.tolerations | list | `[]` | Tolerations for the mock QServ pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | -| tap_schema.image.repository | object | `{}` | | -| tap_schema.image.tag | string | `"2.0.1"` | | -| tap_schema.resources | object | `{}` | | +| tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | +| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | +| tapSchema.image.repository | string | `"lsstsqre/tap-schema-idfprod-tap"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | +| tapSchema.image.tag | string | `"2.0.1"` | Tag of TAP schema image | +| tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | +| tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | +| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | +| tapSchema.tolerations | list | `[]` | Tolerations for the mock QServ pod | | tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | diff --git a/applications/tap/templates/tap-schema-db-deployment.yaml b/applications/tap/templates/tap-schema-db-deployment.yaml index 929bdce88f..0623cf1c9a 100644 --- a/applications/tap/templates/tap-schema-db-deployment.yaml +++ b/applications/tap/templates/tap-schema-db-deployment.yaml @@ -11,7 +11,7 @@ spec: {{- include "cadc-tap.selectorLabels" . | nindent 6 }} template: metadata: - {{- with .Values.podAnnotations }} + {{- with .Values.tapSchema.podAnnotations }} annotations: {{- toYaml . | nindent 8 }} {{- end }} @@ -31,26 +31,26 @@ spec: value: "TAP_SCHEMA" - name: MYSQL_ROOT_HOST value: "%" - image: "{{ .Values.tap_schema.image.repository }}:{{ .Values.tap_schema.image.tag}}" - imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + image: "{{ .Values.tapSchema.image.repository }}:{{ .Values.tapSchema.image.tag}}" + imagePullPolicy: {{ .Values.tapSchema.image.pullPolicy | quote }} ports: - containerPort: 3306 protocol: "TCP" - {{- with .Values.tap_schema.resources }} + {{- with .Values.tapSchema.resources }} resources: {{- toYaml . | nindent 12 }} {{- end }} imagePullSecrets: - name: "pull-secret" - {{- with .Values.nodeSelector }} + {{- with .Values.tapSchema.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} - {{- with .Values.affinity }} + {{- with .Values.tapSchema.affinity }} affinity: {{- toYaml . 
| nindent 8 }} {{- end }} - {{- with .Values.tolerations }} + {{- with .Values.tapSchema.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/applications/tap/values-ccin2p3.yaml b/applications/tap/values-ccin2p3.yaml index 49fc426a11..23363bbc93 100644 --- a/applications/tap/values-ccin2p3.yaml +++ b/applications/tap/values-ccin2p3.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfprod-tap" diff --git a/applications/tap/values-idfdev.yaml b/applications/tap/values-idfdev.yaml index 4e431d3bbc..6ce3a752dc 100644 --- a/applications/tap/values-idfdev.yaml +++ b/applications/tap/values-idfdev.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfdev-tap" diff --git a/applications/tap/values-idfint.yaml b/applications/tap/values-idfint.yaml index 422e0e4ff5..305da1f001 100644 --- a/applications/tap/values-idfint.yaml +++ b/applications/tap/values-idfint.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfint-tap" diff --git a/applications/tap/values-idfprod.yaml b/applications/tap/values-idfprod.yaml index 92c4a7624f..68b54cb45d 100644 --- a/applications/tap/values-idfprod.yaml +++ b/applications/tap/values-idfprod.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfprod-tap" diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml index 01533a9cda..e8242075b9 100644 --- a/applications/tap/values-minikube.yaml +++ b/applications/tap/values-minikube.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfprod-tap" diff --git a/applications/tap/values-roe.yaml b/applications/tap/values-roe.yaml index 21202d7c53..d8d5b07528 100644 --- a/applications/tap/values-roe.yaml +++ b/applications/tap/values-roe.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-idfprod-tap" diff --git a/applications/tap/values-usdfdev.yaml b/applications/tap/values-usdfdev.yaml index b269a9317e..026cc54a10 100644 --- a/applications/tap/values-usdfdev.yaml +++ b/applications/tap/values-usdfdev.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-usdf-dev-tap" diff --git a/applications/tap/values-usdfprod.yaml b/applications/tap/values-usdfprod.yaml index ee3233c612..e274871f19 100644 --- a/applications/tap/values-usdfprod.yaml +++ b/applications/tap/values-usdfprod.yaml @@ -1,4 +1,4 @@ -tap_schema: +tapSchema: image: repository: "lsstsqre/tap-schema-usdf-prod-tap" diff --git a/applications/tap/values.yaml b/applications/tap/values.yaml index 52723a0999..cde1b74d77 100644 --- a/applications/tap/values.yaml +++ b/applications/tap/values.yaml @@ -114,12 +114,33 @@ qserv: # -- Affinity rules for the mock QServ pod affinity: {} -tap_schema: +tapSchema: image: - repository: {} + # -- TAP schema image to ue. This must be overridden by each environment + # with the TAP schema for that environment. 
+ repository: "lsstsqre/tap-schema-idfprod-tap" + + # -- Pull policy for the TAP schema image + pullPolicy: "IfNotPresent" + + # -- Tag of TAP schema image tag: "2.0.1" + + # -- Resource limits and requests for the TAP schema database pod resources: {} + # -- Annotations for the mock QServ pod + podAnnotations: {} + + # -- Node selection rules for the mock QServ pod + nodeSelector: {} + + # -- Tolerations for the mock QServ pod + tolerations: [] + + # -- Affinity rules for the mock QServ pod + affinity: {} + uws: image: # -- UWS database image to use From 3fac470e7d58baa4a4a27b39744f6491a5166ab0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 6 Jul 2023 15:45:50 -0700 Subject: [PATCH 149/308] Update default TAP repository and tag Use tap-schema-mock as the repository for the default TAP schema image in values.yaml for the use of Renovate. This should have the same version as all the environment-specific ones. Update the version to 2.0.2, which is the current version. --- applications/livetap/README.md | 4 ++-- applications/livetap/values.yaml | 4 ++-- applications/ssotap/README.md | 4 ++-- applications/ssotap/values.yaml | 4 ++-- applications/tap/README.md | 4 ++-- applications/tap/values.yaml | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/applications/livetap/README.md b/applications/livetap/README.md index a2d086d89f..ab125fd219 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -47,8 +47,8 @@ IVOA TAP service | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | -| tapSchema.image.repository | string | `"lsstsqre/tap-schema-idfprod-tap"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | -| tapSchema.image.tag | string | `"2.0.1"` | Tag of TAP schema image | +| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | +| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | diff --git a/applications/livetap/values.yaml b/applications/livetap/values.yaml index af2aa2de93..e235b944fe 100644 --- a/applications/livetap/values.yaml +++ b/applications/livetap/values.yaml @@ -124,13 +124,13 @@ tapSchema: image: # -- TAP schema image to ue. This must be overridden by each environment # with the TAP schema for that environment. 
- repository: "lsstsqre/tap-schema-idfprod-tap" + repository: "lsstsqre/tap-schema-mock" # -- Pull policy for the TAP schema image pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.0.1" + tag: "2.0.2" # -- Resource limits and requests for the TAP schema database pod resources: {} diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index a2d086d89f..ab125fd219 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -47,8 +47,8 @@ IVOA TAP service | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | -| tapSchema.image.repository | string | `"lsstsqre/tap-schema-idfprod-tap"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | -| tapSchema.image.tag | string | `"2.0.1"` | Tag of TAP schema image | +| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | +| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml index af2aa2de93..e235b944fe 100644 --- a/applications/ssotap/values.yaml +++ b/applications/ssotap/values.yaml @@ -124,13 +124,13 @@ tapSchema: image: # -- TAP schema image to ue. This must be overridden by each environment # with the TAP schema for that environment. - repository: "lsstsqre/tap-schema-idfprod-tap" + repository: "lsstsqre/tap-schema-mock" # -- Pull policy for the TAP schema image pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.0.1" + tag: "2.0.2" # -- Resource limits and requests for the TAP schema database pod resources: {} diff --git a/applications/tap/README.md b/applications/tap/README.md index 0bb6632941..59c87e5f7b 100644 --- a/applications/tap/README.md +++ b/applications/tap/README.md @@ -45,8 +45,8 @@ IVOA TAP service | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | | tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | -| tapSchema.image.repository | string | `"lsstsqre/tap-schema-idfprod-tap"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. | -| tapSchema.image.tag | string | `"2.0.1"` | Tag of TAP schema image | +| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| +| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | diff --git a/applications/tap/values.yaml b/applications/tap/values.yaml index cde1b74d77..9132115129 100644 --- a/applications/tap/values.yaml +++ b/applications/tap/values.yaml @@ -118,13 +118,13 @@ tapSchema: image: # -- TAP schema image to ue. This must be overridden by each environment # with the TAP schema for that environment. - repository: "lsstsqre/tap-schema-idfprod-tap" + repository: "lsstsqre/tap-schema-mock" # -- Pull policy for the TAP schema image pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.0.1" + tag: "2.0.2" # -- Resource limits and requests for the TAP schema database pod resources: {} From 62a6489eaf024f1377990822b25f2ee7b2d666e9 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 7 Jul 2023 10:30:15 -0700 Subject: [PATCH 150/308] Fix mountpoint for panda configmap --- applications/nublado/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index 280302bffb..f01dc79621 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -226,7 +226,7 @@ controller: # No longer used, but preserves compatibility with runlab.sh dask_worker.yml: | enabled: false - /opt/lsst/software/jupyterlab/panda: + /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template: modify: false contents: | # Licensed under the Apache License, Version 2.0 (the "License"); From 465df305b5d21547d58b2b48b5fc3895581de3b8 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Fri, 7 Jul 2023 12:56:41 -0700 Subject: [PATCH 151/308] [DM-39825] Allow portal to connect to SSOTAP --- applications/portal/values-idfdev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/portal/values-idfdev.yaml b/applications/portal/values-idfdev.yaml index b8d18401c0..2b55535762 100644 --- a/applications/portal/values-idfdev.yaml +++ b/applications/portal/values-idfdev.yaml @@ -5,6 +5,7 @@ config: workareaNfs: path: "/share1/home/firefly/shared-workarea" server: "10.87.86.26" + ssotap: "ssotap" resources: limits: From 0b6562c61dd07225096d04bcb485d0fcdda7cad4 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Fri, 7 Jul 2023 16:38:14 -0400 Subject: [PATCH 152/308] Review influxdb requests configuration - Increase requests to guarantee that the influxdb pod has the resources it needs reserved to it. --- applications/sasquatch/README.md | 4 ++-- applications/sasquatch/values.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 9896e2613a..2c14de6beb 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -43,8 +43,8 @@ Rubin Observatory's telemetry service. | influxdb.persistence.size | string | `"1Ti"` | Persistent volume size. 
@default 1Ti for teststand deployments | | influxdb.resources.limits.cpu | int | `8` | | | influxdb.resources.limits.memory | string | `"96Gi"` | | -| influxdb.resources.requests.cpu | int | `1` | | -| influxdb.resources.requests.memory | string | `"1Gi"` | | +| influxdb.resources.requests.cpu | int | `8` | | +| influxdb.resources.requests.memory | string | `"96Gi"` | | | influxdb.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. | | influxdb2.adminUser.bucket | string | `"default"` | Admin default bucket. | | influxdb2.adminUser.existingSecret | string | `"sasquatch"` | Get admin-password/admin-token keys from secret. | diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 5f63bf3d81..83d5bfb33a 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -66,8 +66,8 @@ influxdb: # init.iql: |+ resources: requests: - memory: 1Gi - cpu: 1 + memory: 96Gi + cpu: 8 limits: memory: 96Gi cpu: 8 From 967c7a7dc499b431615268c7514517c41248fd04 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 9 Jul 2023 09:39:49 +0000 Subject: [PATCH 153/308] Migrate config renovate.json --- renovate.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/renovate.json b/renovate.json index 42cf0d169a..940c7400f3 100644 --- a/renovate.json +++ b/renovate.json @@ -1,6 +1,6 @@ { "extends": [ - "config:base" + "config:recommended" ], "configMigration": true, "rebaseWhen": "conflicted", From 087f449757a7f66cef7fb0756515519009968230 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sun, 9 Jul 2023 18:16:17 -0400 Subject: [PATCH 154/308] Add source-influxdb deployment --- applications/sasquatch/Chart.yaml | 5 +++ applications/sasquatch/README.md | 12 ++++++ applications/sasquatch/values.yaml | 64 ++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+) diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml index ac7f4b89ad..de02248006 100644 --- a/applications/sasquatch/Chart.yaml +++ b/applications/sasquatch/Chart.yaml @@ -20,6 +20,11 @@ dependencies: condition: influxdb-staging.enabled version: 4.12.4 repository: https://helm.influxdata.com/ + - name: influxdb + alias: source-influxdb + condition: source-influxdb.enabled + version: 4.12.4 + repository: https://helm.influxdata.com/ - name: influxdb2 condition: influxdb2.enabled version: 2.1.1 diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 2c14de6beb..78fc176491 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -85,6 +85,18 @@ Rubin Observatory's telemetry service. | kapacitor.resources.requests.cpu | int | `1` | | | kapacitor.resources.requests.memory | string | `"1Gi"` | | | rest-proxy | object | `{"enabled":false}` | Override rest-proxy configuration. | +| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. 
See https://docs.influxdata.com/influxdb/v1.8/administration/config | +| source-influxdb.enabled | bool | `false` | Enable InfluxDB staging deployment. | +| source-influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | +| source-influxdb.ingress | object | disabled | InfluxDB ingress configuration. | +| source-influxdb.initScripts.enabled | bool | `false` | Enable InfluxDB custom initialization script. | +| source-influxdb.persistence.enabled | bool | `true` | Enable persistent volume claim. By default storageClass is undefined choosing the default provisioner (standard on GKE). | +| source-influxdb.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments | +| source-influxdb.resources.limits.cpu | int | `8` | | +| source-influxdb.resources.limits.memory | string | `"96Gi"` | | +| source-influxdb.resources.requests.cpu | int | `8` | | +| source-influxdb.resources.requests.memory | string | `"96Gi"` | | +| source-influxdb.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. | | source-kafka-connect-manager | object | `{"enabled":false,"env":{"kafkaConnectUrl":"http://sasquatch-source-connect-api.sasquatch:8083"}}` | Override source-kafka-connect-manager configuration. | | squareEvents.enabled | bool | `false` | Enable the Square Events subchart with topic and user configurations. | | strimzi-kafka | object | `{}` | Override strimzi-kafka configuration. | diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 83d5bfb33a..70ed6e5d1a 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -135,6 +135,70 @@ influxdb-staging: memory: 96Gi cpu: 8 +source-influxdb: + # -- Enable InfluxDB staging deployment. + enabled: false + # -- InfluxDB image tag. + image: + tag: "1.8.10" + persistence: + # -- Enable persistent volume claim. + # By default storageClass is undefined choosing the default provisioner (standard on GKE). + enabled: true + # -- Persistent volume size. + # @default 1Ti for teststand deployments + size: 1Ti + # -- Default InfluxDB user, use influxb-user and influxdb-password keys from secret. + setDefaultUser: + enabled: true + user: + existingSecret: sasquatch + # -- InfluxDB ingress configuration. + # @default -- disabled + ingress: + enabled: false + tls: false + hostname: "" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 + className: "nginx" + path: /source-influxdb(/|$)(.*) + # -- Override InfluxDB configuration. + # See https://docs.influxdata.com/influxdb/v1.8/administration/config + config: + data: + cache-max-memory-size: 0 + wal-fsync-delay: "100ms" + trace-logging-enabled: true + http: + enabled: true + flux-enabled: true + auth-enabled: true + max-row-limit: 0 + coordinator: + write-timeout: "1h" + max-concurrent-queries: 0 + query-timeout: "0s" + log-queries-after: "15s" + continuous_queries: + enabled: false + logging: + level: "debug" + initScripts: + # -- Enable InfluxDB custom initialization script. + enabled: false + # scripts: + # # -- InfluxDB custom initialization script. 
+ # init.iql: |+ + resources: + requests: + memory: 96Gi + cpu: 8 + limits: + memory: 96Gi + cpu: 8 + + influxdb2: enabled: false image: From 5032b75b839432379ad1174fa567b32f7f7b0d4f Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sun, 9 Jul 2023 18:16:58 -0400 Subject: [PATCH 155/308] Enable source-influxdb at base --- applications/sasquatch/values-base.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 5929e8cf0e..7cb4baf1de 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -59,6 +59,15 @@ influxdb-staging: enabled: true hostname: base-lsp.lsst.codes +source-influxdb: + enabled: true + persistence: + storageClass: rook-ceph-block + size: 5Ti + ingress: + enabled: true + hostname: base-lsp.lsst.codes + kafka-connect-manager: influxdbSink: From 33534b7ed06b91e199d6f4d1ff7f97cf7858dce1 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sun, 9 Jul 2023 19:35:32 -0400 Subject: [PATCH 156/308] Enable source-influxdb at usdfprod --- applications/sasquatch/values-usdfprod.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index e4de2181cd..6bb90c185a 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -23,6 +23,15 @@ influxdb: enabled: true size: 15Ti +source-influxdb: + enabled: true + ingress: + enabled: true + hostname: usdf-rsp.slac.stanford.edu + persistence: + enabled: true + size: 15Ti + kafka-connect-manager: influxdbSink: # Based on the kafka producers configuration for the Summit From 77dace6c3f3946e6e26e55f98b6f981e7da223ac Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 08:35:03 +0000 Subject: [PATCH 157/308] Update Helm release argo-cd to v5.38.0 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 2afc84d617..8029338d89 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.37.0 + version: 5.38.0 repository: https://argoproj.github.io/argo-helm From 7a46353cf175669f64128c8558cd7e944fd00f08 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Fri, 30 Jun 2023 12:54:00 -0700 Subject: [PATCH 158/308] [DM-39825] tap-postgres 1.12 This has an attempt at getting tap upload to work --- applications/ssotap/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/ssotap/Chart.yaml b/applications/ssotap/Chart.yaml index 990e2d0670..eff6e39966 100644 --- a/applications/ssotap/Chart.yaml +++ b/applications/ssotap/Chart.yaml @@ -5,4 +5,4 @@ description: IVOA TAP service sources: - https://github.com/lsst-sqre/tap-postgres - https://github.com/opencadc/tap -appVersion: "1.10" +appVersion: "1.12" From 8dbbd85a119e0c559e9c789d338bcbae134f8d0b Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 10 Jul 2023 13:46:01 -0700 Subject: [PATCH 159/308] Fix usdf proxy hub ingress --- applications/nublado/values-usdfdev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 0cd845af00..e3885bfafe 100644 --- 
a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -166,7 +166,7 @@ proxy: jupyterhub: hub: - baseUrl: "/n3" + baseUrl: "/nb" db: url: "postgresql://nublado3@postgres.postgres/nublado3" cull: From 317d98a36cc5239b2ee9516fda523a80ae4ca60d Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 10 Jul 2023 14:49:04 -0700 Subject: [PATCH 160/308] Point nublado2 mobu in int at /n2 --- applications/mobu/values-idfint.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 7445f3fad6..40f639d44e 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -15,6 +15,7 @@ config: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" max_executions: 1 + url_prefix: "/n2" restart: true - name: "recommended" count: 1 From d1a2618c633767d95fa664e7a297da0bfe465e06 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 10 Jul 2023 15:05:16 -0700 Subject: [PATCH 161/308] Fix IDF dev too; remove values shadowing defaults --- applications/mobu/values-idfdev.yaml | 3 +-- applications/mobu/values-idfint.yaml | 3 --- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 667dc76f5f..1ef44e63ab 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -17,6 +17,7 @@ config: image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" + url_prefix: "/n2" restart: true - name: "weekly" count: 1 @@ -34,7 +35,6 @@ config: image_class: "latest-weekly" repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - url_prefix: "/nb" use_cachemachine: false restart: true - name: "tutorial" @@ -56,7 +56,6 @@ config: max_executions: 1 working_directory: "notebooks/tutorial-notebooks" use_cachemachine: false - url_prefix: "/nb" restart: true - name: "tap" count: 1 diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 40f639d44e..7dc005b4ca 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -32,7 +32,6 @@ config: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" use_cachemachine: false - url_prefix: "/nb" restart: true - name: "weekly" count: 1 @@ -51,7 +50,6 @@ config: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" use_cachemachine: false - url_prefix: "/nb" restart: true - name: "tutorial" count: 1 @@ -72,7 +70,6 @@ config: max_executions: 1 working_directory: "notebooks/tutorial-notebooks" use_cachemachine: false - url_prefix: "/nb" restart: true - name: "tap" count: 1 From 5c551c217cafad5f51ea2c47737fe6bf4fb7404b Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 10 Jul 2023 22:50:55 -0400 Subject: [PATCH 162/308] Reduce resource requests for idfdev and idfdint environments --- applications/sasquatch/values-idfdev.yaml | 7 +++++++ applications/sasquatch/values-idfint.yaml | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index c60e9f96ad..0a1d965f07 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -21,6 +21,13 @@ influxdb: ingress: enabled: true hostname: data-dev.lsst.cloud + resources: + requests: + memory: 16Gi + cpu: 2 + limits: + memory: 16Gi + cpu: 2 influxdb2: enabled: 
true diff --git a/applications/sasquatch/values-idfint.yaml b/applications/sasquatch/values-idfint.yaml index 61ce4c54d4..12d9e206f3 100644 --- a/applications/sasquatch/values-idfint.yaml +++ b/applications/sasquatch/values-idfint.yaml @@ -34,6 +34,13 @@ influxdb: ingress: enabled: true hostname: data-int.lsst.cloud + resources: + requests: + memory: 16Gi + cpu: 2 + limits: + memory: 16Gi + cpu: 2 kafka-connect-manager: influxdbSink: From be550bed35a77adc57747968982619f51a5788ce Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 11 Jul 2023 14:08:39 -0700 Subject: [PATCH 163/308] [DM-39825] Livetap to tap-postgres 1.12 This will allow for temporary uploading of tables for livetap. --- applications/livetap/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/livetap/Chart.yaml b/applications/livetap/Chart.yaml index 990e2d0670..eff6e39966 100644 --- a/applications/livetap/Chart.yaml +++ b/applications/livetap/Chart.yaml @@ -5,4 +5,4 @@ description: IVOA TAP service sources: - https://github.com/lsst-sqre/tap-postgres - https://github.com/opencadc/tap -appVersion: "1.10" +appVersion: "1.12" From 35890b87f901a668dfd9747f19353982fcdc51a7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 12 Jul 2023 11:50:30 -0700 Subject: [PATCH 164/308] Add periodic pre-commit dependency update Run neophile weekly to update the pre-commit dependencies. Fix the URLs of the existing pre-commit dependencies to use the canonical URLs, since neophile does not yet support fixing those. --- .github/workflows/dependencies.yaml | 34 +++++++++++++++++++++++++++++ .pre-commit-config.yaml | 6 ++--- tox.ini | 31 ++++++++++++++++---------- 3 files changed, 56 insertions(+), 15 deletions(-) create mode 100644 .github/workflows/dependencies.yaml diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml new file mode 100644 index 0000000000..bd49d9d99c --- /dev/null +++ b/.github/workflows/dependencies.yaml @@ -0,0 +1,34 @@ +name: Dependency Update + +"on": + schedule: + - cron: "0 12 * * 1" + workflow_dispatch: {} + +jobs: + update: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v3 + + - name: Run neophile + uses: lsst-sqre/run-tox@v1 + with: + python-version: "3.11" + tox-envs: "neophile-update" + tox-posargs: "--pr pre-commit" + env: + NEOPHILE_GITHUB_APP_ID: ${{ secrets.NEOPHILE_APP_ID }} + NEOPHILE_GITHUB_PRIVATE_KEY: ${{ secrets.NEOPHILE_PRIVATE_KEY }} + + - name: Report status + if: always() + uses: ravsamhq/notify-slack-action@v2 + with: + status: ${{ job.status }} + notify_when: "failure" + notification_title: "Periodic dependency update for {repo} failed" + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_ALERT_WEBHOOK }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index addc124764..5a5d704513 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: # The `./` makes it relative to the chart-search-root set above - "--template-files=./helm-docs.md.gotmpl" - - repo: https://github.com/PyCQA/isort + - repo: https://github.com/pycqa/isort rev: 5.12.0 hooks: - id: isort @@ -33,13 +33,13 @@ repos: hooks: - id: black - - repo: https://github.com/asottile/blacken-docs + - repo: https://github.com/adamchainz/blacken-docs rev: 1.13.0 hooks: - id: blacken-docs additional_dependencies: [black==23.1.0] - - repo: https://github.com/PyCQA/flake8 + - repo: https://github.com/pycqa/flake8 rev: 6.0.0 hooks: - id: flake8 diff --git a/tox.ini b/tox.ini index 
6650653286..91ac1306f1 100644 --- a/tox.ini +++ b/tox.ini @@ -7,6 +7,16 @@ description = Run pytest against {envname}. extras = dev +[testenv:docs] +description = Build documentation (HTML) with Sphinx. +commands = + sphinx-build --keep-going -n -W -T -b html -d {envtmpdir}/doctrees docs docs/_build/html + +[testenv:docs-linkcheck] +description = Check links in the documentation. +commands = + sphinx-build --keep-going -n -W -T -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck + [testenv:py] description = Run pytest commands = @@ -22,11 +32,6 @@ commands = coverage combine coverage report -[testenv:typing] -description = Run mypy. -commands = - mypy src/phalanx tests - [testenv:lint] description = Lint codebase by running pre-commit (Black, isort, Flake8). skip_install = true @@ -34,12 +39,14 @@ deps = pre-commit commands = pre-commit run --all-files -[testenv:docs] -description = Build documentation (HTML) with Sphinx. -commands = - sphinx-build --keep-going -n -W -T -b html -d {envtmpdir}/doctrees docs docs/_build/html +[testenv:neophile-update] +description = Run neophile to update dependencies +skip_install = true +deps = + neophile +commands = neophile update {posargs} -[testenv:docs-linkcheck] -description = Check links in the documentation. +[testenv:typing] +description = Run mypy. commands = - sphinx-build --keep-going -n -W -T -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck + mypy src/phalanx tests From 01189455de584206ec1bec14504e6f817490872d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 12 Jul 2023 13:36:43 -0700 Subject: [PATCH 165/308] Remove .git from the yamllint pre-commit hook neophile can't handle the trailing .git, so remove it so that neophile can properly update the dependency. --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5a5d704513..6757264148 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: - id: trailing-whitespace - id: check-toml - - repo: https://github.com/adrienverge/yamllint.git + - repo: https://github.com/adrienverge/yamllint rev: v1.30.0 hooks: - id: yamllint From 3b35f8a8a38b2ff80fc5ddcb9bff836ecc1b1e87 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 12 Jul 2023 21:02:23 -0400 Subject: [PATCH 166/308] Fix default values for MirrorMaker2 --- applications/sasquatch/charts/strimzi-kafka/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index c926eb09db..6c1fefe589 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -149,10 +149,10 @@ mirrormaker2: policy: # -- Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. # @default -- "" - separator: "." + separator: "" # -- Replication policy. # @default -- IdentityReplicationPolicy - class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy" + class: "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" sourceRegistry: # -- Whether to deploy another Schema Registry for the schemas replicated from the source cluster. 
enabled: false From 8b834599301ba73d5d678a91e81b62b9e206b747 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 12 Jul 2023 21:47:18 -0400 Subject: [PATCH 167/308] Change connector error policy to RETRY - Change max number of retries to 20 (default). --- applications/sasquatch/charts/kafka-connect-manager/README.md | 4 ++-- .../sasquatch/charts/kafka-connect-manager/values.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index c9ff922025..75178cb75b 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -16,8 +16,8 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. | | influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | | influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. | -| influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documetation for details. | -| influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. | +| influxdbSink.connectInfluxErrorPolicy | string | `"RETRY"` | Error policy, see connector documetation for details. | +| influxdbSink.connectInfluxMaxRetries | string | `"20"` | The maximum number of times a message is retried. | | influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml b/applications/sasquatch/charts/kafka-connect-manager/values.yaml index e508350f03..f7b49e3fd6 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml @@ -23,9 +23,9 @@ influxdbSink: # -- Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. timestamp: private_efdStamp # -- Error policy, see connector documetation for details. - connectInfluxErrorPolicy: NOOP + connectInfluxErrorPolicy: RETRY # -- The maximum number of times a message is retried. - connectInfluxMaxRetries: "10" + connectInfluxMaxRetries: "20" # -- The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. connectInfluxRetryInterval: "60000" # -- Enables the output for how many records have been processed. From 986da120eb4fb61b5665a864fdb687d02bc5d7cf Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 13 Jul 2023 08:24:21 -0700 Subject: [PATCH 168/308] Use the new lsst-sqre/run-neophile action Change the workflow for periodic pre-commit dependency updates to use the new lsst-sqre/run-neophile GitHub Action. 
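The same update can be run outside of GitHub Actions through the neophile-update tox environment added earlier in this series. A rough local sketch, assuming the CLI arguments still match what the old run-tox step passed (PR mode needs the GitHub App credentials that the workflow supplies as secrets):

    pip install tox
    tox -e neophile-update -- --pr pre-commit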
--- .github/workflows/dependencies.yaml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml index bd49d9d99c..96075c909c 100644 --- a/.github/workflows/dependencies.yaml +++ b/.github/workflows/dependencies.yaml @@ -14,14 +14,13 @@ jobs: - uses: actions/checkout@v3 - name: Run neophile - uses: lsst-sqre/run-tox@v1 + uses: lsst-sqre/run-neophile@v1 with: python-version: "3.11" - tox-envs: "neophile-update" - tox-posargs: "--pr pre-commit" - env: - NEOPHILE_GITHUB_APP_ID: ${{ secrets.NEOPHILE_APP_ID }} - NEOPHILE_GITHUB_PRIVATE_KEY: ${{ secrets.NEOPHILE_PRIVATE_KEY }} + mode: pr + types: pre-commit + app-id: ${{ secrets.NEOPHILE_APP_ID }} + app-secret: ${{ secrets.NEOPHILE_PRIVATE_KEY }} - name: Report status if: always() From 6374f316cd5dd7eac7606770dee90e7f0b982dc4 Mon Sep 17 00:00:00 2001 From: "neophile-square[bot]" <136651988+neophile-square[bot]@users.noreply.github.com> Date: Thu, 13 Jul 2023 15:32:24 +0000 Subject: [PATCH 169/308] [neophile] Update dependencies - Update adrienverge/yamllint pre-commit hook from v1.30.0 to v1.32.0 - Update norwoodj/helm-docs pre-commit hook from v1.11.0 to 19.0614 - Update psf/black pre-commit hook from 23.1.0 to 23.7.0 - Update adamchainz/blacken-docs pre-commit hook from 1.13.0 to 1.15.0 --- .pre-commit-config.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6757264148..a8f8362799 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,20 +6,20 @@ repos: - id: check-toml - repo: https://github.com/adrienverge/yamllint - rev: v1.30.0 + rev: v1.32.0 hooks: - id: yamllint args: - - "-c=.yamllint.yml" + - -c=.yamllint.yml - repo: https://github.com/norwoodj/helm-docs - rev: v1.11.0 + rev: '19.0614' hooks: - id: helm-docs args: - - "--chart-search-root=." + - --chart-search-root=. # The `./` makes it relative to the chart-search-root set above - - "--template-files=./helm-docs.md.gotmpl" + - --template-files=./helm-docs.md.gotmpl - repo: https://github.com/pycqa/isort rev: 5.12.0 @@ -29,12 +29,12 @@ repos: - toml - repo: https://github.com/psf/black - rev: 23.1.0 + rev: 23.7.0 hooks: - id: black - repo: https://github.com/adamchainz/blacken-docs - rev: 1.13.0 + rev: 1.15.0 hooks: - id: blacken-docs additional_dependencies: [black==23.1.0] From c1e66a534af134118a718fc04366bdfdac87ea33 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 13 Jul 2023 10:37:00 -0700 Subject: [PATCH 170/308] Fix version of helm-docs helm-docs has old tags with much larger versions but that don't work as pre-commit hooks and shouldn't be used. Fix the version of helm-docs to the latest proper release. 
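Note that neophile (see the previous commit) apparently compares tags purely by version number, so the stale 19.x tags can win out again on a future update. After pinning the rev back, the hook can be exercised on its own to confirm the generated chart READMEs are still reproducible, roughly:

    pre-commit run helm-docs --all-files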
--- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a8f8362799..e59d56dd0c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ repos: - -c=.yamllint.yml - repo: https://github.com/norwoodj/helm-docs - rev: '19.0614' + rev: v1.11.0 hooks: - id: helm-docs args: From 1f1c3042746e9758ad5ff41737695d8000ab345f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 13 Jul 2023 10:37:59 -0700 Subject: [PATCH 171/308] Update blacken-docs to use current black version --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e59d56dd0c..df63329eca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ repos: rev: 1.15.0 hooks: - id: blacken-docs - additional_dependencies: [black==23.1.0] + additional_dependencies: [black==23.7.0] - repo: https://github.com/pycqa/flake8 rev: 6.0.0 From 9a065aba3d89200c545173a8cc9dbc570f059f4b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 13 Jul 2023 10:38:28 -0700 Subject: [PATCH 172/308] Disable pre-commit dependency updates cron For now, disable the periodic updates of pre-commit dependencies, since neophile will need changes to do the correct thing for helm-docs. --- .github/workflows/dependencies.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml index 96075c909c..0fa3c498e0 100644 --- a/.github/workflows/dependencies.yaml +++ b/.github/workflows/dependencies.yaml @@ -1,8 +1,6 @@ name: Dependency Update "on": - schedule: - - cron: "0 12 * * 1" workflow_dispatch: {} jobs: From b092d03c205fbf0734ffb47ef752f2d6dc50f445 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 13 Jul 2023 15:25:29 -0700 Subject: [PATCH 173/308] Give the linkcheck workflow a unique name Don't name the linkcheck workflow docs, because then it becomes a required workflow for merging and the whole point of making it a separate workflow was so that it wouldn't be required. 
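Required status checks in branch protection are matched by the reported check name, which for Actions comes from the job (its id, or its name field if set), so a job named "docs" here was picked up by the existing required check of that name. If in doubt, the check names a commit actually reports can be listed with the GitHub CLI; a sketch with placeholder owner, repo, and ref:

    gh api repos/<owner>/<repo>/commits/<ref>/check-runs --jq '.check_runs[].name'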
--- .github/workflows/linkcheck.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linkcheck.yaml b/.github/workflows/linkcheck.yaml index b8e0ac1c9e..e3c4489051 100644 --- a/.github/workflows/linkcheck.yaml +++ b/.github/workflows/linkcheck.yaml @@ -35,7 +35,7 @@ name: Link Check workflow_dispatch: {} jobs: - docs: + linkcheck: runs-on: ubuntu-latest steps: From d27608ba97b9e74364ccd83fadc02cac52e9a42f Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 13 Jul 2023 15:02:20 -0700 Subject: [PATCH 174/308] Enable nublado3 at /n3 in IDF prod --- applications/nublado/values-idfprod.yaml | 96 +++++++++++++++++++++++ applications/postgres/values-idfprod.yaml | 3 + environments/values-idfprod.yaml | 2 +- 3 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 applications/nublado/values-idfprod.yaml diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml new file mode 100644 index 0000000000..57bef7281d --- /dev/null +++ b/applications/nublado/values-idfprod.yaml @@ -0,0 +1,96 @@ +controller: + googleServiceAccount: "nublado-controller@science-platform-stable-6994.iam.gserviceaccount.com" + slackAlerts: true + config: + fileserver: + enabled: true + images: + source: + type: "google" + location: "us-central1" + projectId: "rubin-shared-services-71ec" + repository: "sciplat" + image: "sciplat-lab" + recommendedTag: "recommended" + numReleases: 1 + numWeeklies: 2 + numDailies: 3 + lab: + env: + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" + GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" + DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-repos.yaml" + HUB_ROUTE: "/n3/hub" + S3_ENDPOINT_URL: "https://storage.googleapis.com" + NO_ACTIVITY_TIMEOUT: "432000" + CULL_KERNEL_IDLE_TIMEOUT: "432000" + CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" + + sizes: + small: + cpu: 1.0 + memory: 4Gi + medium: + cpu: 2.0 + memory: 8Gi + large: + cpu: 4.0 + memory: 16Gi + initContainers: + - name: "initdir" + image: "ghcr.io/lsst-sqre/initdir:0.0.4" + privileged: true + volumes: + - containerPath: "/home" + mode: "rw" + source: + serverPath: "/share1/home" + server: "10.13.105.122" + type: "nfs" + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "butler-gcs-idf-creds.json" + - secretName: "nublado-lab-secret" + secretKey: "butler-hmac-idf-creds.json" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + volumes: + - containerPath: "/home" + mode: "rw" + source: + serverPath: "/share1/home" + server: "10.13.105.122" + type: "nfs" + - containerPath: "/project" + mode: "rw" + source: + serverPath: "/share1/project" + server: "10.13.105.122" + type: "nfs" + - containerPath: "/scratch" + mode: "rw" + source: + serverPath: "/share1/scratch" + server: "10.13.105.122" + type: "nfs" + +jupyterhub: + hub: + baseUrl: "/n3" + config: + ServerApp: + shutdown_no_activity_timeout: 432000 + db: + url: "postgresql://nublado3@postgres.postgres/nublado3" + + cull: + enabled: true + users: false + removeNamedServers: false + timeout: 432000 + every: 300 + maxAge: 2160000 diff --git a/applications/postgres/values-idfprod.yaml 
b/applications/postgres/values-idfprod.yaml index 5a77f93b71..62d07440f7 100644 --- a/applications/postgres/values-idfprod.yaml +++ b/applications/postgres/values-idfprod.yaml @@ -1,3 +1,6 @@ jupyterhub_db: user: "jovyan" db: "jupyterhub" +nublado3_db: + user: "nublado3" + db: "nublado3" diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index ee89f52121..ef3df38d35 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -32,7 +32,7 @@ narrativelog: noteburst: enabled: false nublado: - enabled: false + enabled: true nublado2: enabled: true plot-navigator: From 0c163d0bb2c8267c0068ccdf66ad2dee5b9b10c3 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 13 Jul 2023 15:22:53 -0700 Subject: [PATCH 175/308] disable WebDAV in nublado/idf-prod --- applications/nublado/values-idfprod.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml index 57bef7281d..71999a9c51 100644 --- a/applications/nublado/values-idfprod.yaml +++ b/applications/nublado/values-idfprod.yaml @@ -2,8 +2,6 @@ controller: googleServiceAccount: "nublado-controller@science-platform-stable-6994.iam.gserviceaccount.com" slackAlerts: true config: - fileserver: - enabled: true images: source: type: "google" From 51222509b67eaa76a46e55e9e1865959de46774d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 14 Jul 2023 08:31:29 -0700 Subject: [PATCH 176/308] Update versions for minikube testing Try to use the latest minikube and Kubernetes, and also bump the verisons of Vault and Argo CD. --- .github/workflows/ci.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index bbae9b674b..c64b776347 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -87,8 +87,8 @@ jobs: if: steps.filter.outputs.minikube == 'true' uses: manusa/actions-setup-minikube@v2.7.2 with: - minikube version: 'v1.28.0' - kubernetes version: 'v1.25.2' + minikube version: 'v1.30.1' + kubernetes version: 'v1.26.3' - name: Test interaction with the cluster if: steps.filter.outputs.minikube == 'true' @@ -97,11 +97,11 @@ jobs: - name: Download installer dependencies if: steps.filter.outputs.minikube == 'true' run: | - curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.12.1/vault_1.12.1_linux_amd64.zip + curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.14.0/vault_1.14.0_linux_amd64.zip unzip /tmp/vault.zip sudo mv vault /usr/local/bin/vault sudo chmod +x /usr/local/bin/vault - sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.6/argocd-linux-amd64 + sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.7/argocd-linux-amd64 sudo chmod +x /usr/local/bin/argocd sudo apt-get install socat sudo pip install -r installer/requirements.txt From e9984081942a52c7e3ab2215e5c5e97ed626abc1 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 14 Jul 2023 08:52:49 -0700 Subject: [PATCH 177/308] Switch minikube GitHub Actions The GitHub Action we were using to set up minikube seems to not be supported and doesn't work with the current version of Kubernetes. Switch to the GitHub Action recommended by the minikube docs. 
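For debugging failures outside CI, roughly the same cluster the action builds can be started by hand; a sketch pinned to the same Kubernetes version:

    minikube start --kubernetes-version=v1.27.3
    kubectl get nodes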
--- .github/workflows/ci.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c64b776347..304be46aed 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -85,10 +85,9 @@ jobs: - name: Setup Minikube if: steps.filter.outputs.minikube == 'true' - uses: manusa/actions-setup-minikube@v2.7.2 + uses: medyagh/setup-minikube@v0.0.13 with: - minikube version: 'v1.30.1' - kubernetes version: 'v1.26.3' + kubernetes-version: "v1.27.3" - name: Test interaction with the cluster if: steps.filter.outputs.minikube == 'true' From 15baf33367897177df08bfba1b93e00d18efa75c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 14 Jul 2023 09:25:43 -0700 Subject: [PATCH 178/308] Disable more services for minikube testing Disable the old Nublado v2 services. Disable noteburst since Nublado isn't installed, and therefore noteburst cannot set up workers anyway. --- environments/values-minikube.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index 5a513cad05..fcb75dc490 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -7,7 +7,7 @@ alert-stream-broker: argo-workflows: enabled: false cachemachine: - enabled: true + enabled: false cert-manager: enabled: true datalinker: @@ -25,11 +25,11 @@ kubernetes-replicator: mobu: enabled: true moneypenny: - enabled: true + enabled: false narrativelog: enabled: false noteburst: - enabled: true + enabled: false nublado: enabled: false nublado2: From 1e91554f9b7596a8cdd463592555b59c4f79c697 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 14 Jul 2023 10:36:20 -0700 Subject: [PATCH 179/308] Increase cpu and memory to max See if this solves the problem where Argo CD dies at the end of testing and therefore tests fail. --- .github/workflows/ci.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 304be46aed..e6ec04ac79 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -88,6 +88,8 @@ jobs: uses: medyagh/setup-minikube@v0.0.13 with: kubernetes-version: "v1.27.3" + cpus: max + memory: max - name: Test interaction with the cluster if: steps.filter.outputs.minikube == 'true' From 91571a862d9278d0d5ef90de23d18088c5c25fae Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 14 Jul 2023 10:49:02 -0700 Subject: [PATCH 180/308] Be less aggressive about minikube memory Allocate 5.5GB instead of all available memory to leave some room for the system. --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e6ec04ac79..88c3464403 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -89,7 +89,7 @@ jobs: with: kubernetes-version: "v1.27.3" cpus: max - memory: max + memory: 5500m # Linux virtual machines have 7GB of RAM - name: Test interaction with the cluster if: steps.filter.outputs.minikube == 'true' From 54cc3afe13da2a1fc4b0cc2dee1979a4ad3fcc72 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 14 Jul 2023 10:50:53 -0700 Subject: [PATCH 181/308] Add some more timeouts for minikube testing Currently the full test takes about 10 minutes. Cap it at 30 minutes with shorter caps on the individual steps. 
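A rough sketch of how the caps are meant to nest, with values as in the diff below and all other steps elided: the job-level timeout bounds the whole run, while the two slowest steps get their own shorter limits.

    jobs:
      minikube:
        runs-on: ubuntu-latest
        timeout-minutes: 30        # cap on the whole job
        steps:
          - name: Run installer
            timeout-minutes: 15    # shorter cap on the slowest individual steps
          - name: Wait for all applications to be healthy
            timeout-minutes: 15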
--- .github/workflows/ci.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 88c3464403..4c79cf782b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -61,6 +61,7 @@ jobs: minikube: name: Test deploy runs-on: ubuntu-latest + timeout-minutes: 30 needs: [helm] steps: @@ -108,7 +109,7 @@ jobs: sudo pip install -r installer/requirements.txt - name: Run installer - timeout-minutes: 30 + timeout-minutes: 15 if: steps.filter.outputs.minikube == 'true' run: | cd installer @@ -121,6 +122,7 @@ jobs: kubectl get ingress -A - name: Wait for all applications to be healthy + timeout-minutes: 15 if: steps.filter.outputs.minikube == 'true' run: | argocd app wait -l "argocd.argoproj.io/instance=science-platform" \ From d3944380a76633156ba9ec402d8a75d298e3f593 Mon Sep 17 00:00:00 2001 From: Christine Banek Date: Tue, 11 Jul 2023 14:02:05 -0700 Subject: [PATCH 182/308] [DM-39960] Change URL from http to https Once there is a load balancer set up in front of the bucket, we should use the https URL. Then if we really want or need to, we can use an http endpoint, and the loadbalancer will redirect to an https URL. --- applications/livetap/values-minikube.yaml | 2 +- applications/livetap/values-usdfdev.yaml | 2 +- applications/livetap/values-usdfprod.yaml | 2 +- applications/ssotap/values-idfdev.yaml | 2 +- applications/ssotap/values-idfint.yaml | 2 +- applications/ssotap/values-idfprod.yaml | 2 +- applications/ssotap/values-minikube.yaml | 2 +- applications/ssotap/values-usdfdev.yaml | 2 +- applications/ssotap/values-usdfprod.yaml | 2 +- applications/tap/values-idfdev.yaml | 2 +- applications/tap/values-idfint.yaml | 2 +- applications/tap/values-idfprod.yaml | 2 +- applications/tap/values-minikube.yaml | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/applications/livetap/values-minikube.yaml b/applications/livetap/values-minikube.yaml index 34af311b08..ad1b6ab54f 100644 --- a/applications/livetap/values-minikube.yaml +++ b/applications/livetap/values-minikube.yaml @@ -4,4 +4,4 @@ tapSchema: config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" diff --git a/applications/livetap/values-usdfdev.yaml b/applications/livetap/values-usdfdev.yaml index 0ade540ca7..bcaa935a27 100644 --- a/applications/livetap/values-usdfdev.yaml +++ b/applications/livetap/values-usdfdev.yaml @@ -14,7 +14,7 @@ replicaCount: 2 config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" pg: diff --git a/applications/livetap/values-usdfprod.yaml b/applications/livetap/values-usdfprod.yaml index 1d89fa6bf1..f716d9db32 100644 --- a/applications/livetap/values-usdfprod.yaml +++ b/applications/livetap/values-usdfprod.yaml @@ -14,7 +14,7 @@ replicaCount: 2 config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" pg: diff --git a/applications/ssotap/values-idfdev.yaml b/applications/ssotap/values-idfdev.yaml index dcc448f905..317f430926 100644 --- a/applications/ssotap/values-idfdev.yaml +++ b/applications/ssotap/values-idfdev.yaml @@ -12,7 +12,7 @@ resources: config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" 
pg: diff --git a/applications/ssotap/values-idfint.yaml b/applications/ssotap/values-idfint.yaml index 5d0e6f035c..4f6ea30546 100644 --- a/applications/ssotap/values-idfint.yaml +++ b/applications/ssotap/values-idfint.yaml @@ -14,7 +14,7 @@ replicaCount: 2 config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" pg: diff --git a/applications/ssotap/values-idfprod.yaml b/applications/ssotap/values-idfprod.yaml index f84b84aa58..63f900e695 100644 --- a/applications/ssotap/values-idfprod.yaml +++ b/applications/ssotap/values-idfprod.yaml @@ -14,7 +14,7 @@ replicaCount: 2 config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" pg: diff --git a/applications/ssotap/values-minikube.yaml b/applications/ssotap/values-minikube.yaml index f2056773f3..732b70bbdb 100644 --- a/applications/ssotap/values-minikube.yaml +++ b/applications/ssotap/values-minikube.yaml @@ -4,4 +4,4 @@ tapSchema: config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" diff --git a/applications/ssotap/values-usdfdev.yaml b/applications/ssotap/values-usdfdev.yaml index 2cd2a832b4..9a73e3d658 100644 --- a/applications/ssotap/values-usdfdev.yaml +++ b/applications/ssotap/values-usdfdev.yaml @@ -12,7 +12,7 @@ resources: config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" pg: diff --git a/applications/ssotap/values-usdfprod.yaml b/applications/ssotap/values-usdfprod.yaml index 07ec085214..279ebf4f79 100644 --- a/applications/ssotap/values-usdfprod.yaml +++ b/applications/ssotap/values-usdfprod.yaml @@ -14,7 +14,7 @@ replicaCount: 2 config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" pg: diff --git a/applications/tap/values-idfdev.yaml b/applications/tap/values-idfdev.yaml index 6ce3a752dc..13247032ca 100644 --- a/applications/tap/values-idfdev.yaml +++ b/applications/tap/values-idfdev.yaml @@ -12,7 +12,7 @@ resources: config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" qserv: diff --git a/applications/tap/values-idfint.yaml b/applications/tap/values-idfint.yaml index 305da1f001..635c879fe1 100644 --- a/applications/tap/values-idfint.yaml +++ b/applications/tap/values-idfint.yaml @@ -14,7 +14,7 @@ replicaCount: 2 config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" qserv: diff --git a/applications/tap/values-idfprod.yaml b/applications/tap/values-idfprod.yaml index 68b54cb45d..d451c4d94a 100644 --- a/applications/tap/values-idfprod.yaml +++ b/applications/tap/values-idfprod.yaml @@ -14,7 +14,7 @@ replicaCount: 2 config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" jvmMaxHeapSize: "31G" qserv: diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml index e8242075b9..67040a8f0c 100644 --- a/applications/tap/values-minikube.yaml +++ b/applications/tap/values-minikube.yaml 
@@ -4,4 +4,4 @@ tapSchema: config: gcsBucket: "async-results.lsst.codes" - gcsBucketUrl: "http://async-results.lsst.codes" + gcsBucketUrl: "https://tap-files.lsst.codes" From 1c8cef52a3ef32f7069d51a1b7a4b435813067c4 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Sat, 15 Jul 2023 10:31:23 -0400 Subject: [PATCH 183/308] Revert "Change connector error policy to RETRY" This reverts commit f9a12aff45a10cec90f7ed10308f75fc641c7a82. --- applications/sasquatch/charts/kafka-connect-manager/README.md | 4 ++-- .../sasquatch/charts/kafka-connect-manager/values.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index 75178cb75b..c9ff922025 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -16,8 +16,8 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. | | influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | | influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. | -| influxdbSink.connectInfluxErrorPolicy | string | `"RETRY"` | Error policy, see connector documetation for details. | -| influxdbSink.connectInfluxMaxRetries | string | `"20"` | The maximum number of times a message is retried. | +| influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documetation for details. | +| influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. | | influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | | influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | | influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml b/applications/sasquatch/charts/kafka-connect-manager/values.yaml index f7b49e3fd6..e508350f03 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml @@ -23,9 +23,9 @@ influxdbSink: # -- Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. timestamp: private_efdStamp # -- Error policy, see connector documetation for details. - connectInfluxErrorPolicy: RETRY + connectInfluxErrorPolicy: NOOP # -- The maximum number of times a message is retried. - connectInfluxMaxRetries: "20" + connectInfluxMaxRetries: "10" # -- The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. connectInfluxRetryInterval: "60000" # -- Enables the output for how many records have been processed. 
From bba91a8c140419c083f8f8196d261007847a21fc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 07:50:00 +0000 Subject: [PATCH 184/308] Update Helm release telegraf to v1.8.29 --- applications/telegraf/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 80e4383643..9579821110 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.28 + version: 1.8.29 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 3e64097896fda0bb3cb61c92551a4a997aa6873b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 07:50:06 +0000 Subject: [PATCH 185/308] Update Helm release telegraf-ds to v1.1.11 --- applications/telegraf-ds/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 2a1d70213e..e344e67a9e 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.10 + version: 1.1.11 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | From 9b7f5bb722c7ec166bc5482376d9f38b56fc9603 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 13:00:48 +0000 Subject: [PATCH 186/308] Update Helm release argo-cd to v5.40.0 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 8029338d89..35c093f553 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.38.0 + version: 5.40.0 repository: https://argoproj.github.io/argo-helm From b8dec43fc1f325a9317a8261672ce1a7cfb7f539 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 14:45:07 +0000 Subject: [PATCH 187/308] Update Helm release argo-workflows to v0.31.0 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 84811f4992..d4cc5d8b92 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.30.0 + version: 0.31.0 repository: https://argoproj.github.io/argo-helm From 1c156f071832575e72040ad7035e9a52dc3e596f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 14:45:12 +0000 Subject: [PATCH 188/308] Update Helm release vault-secrets-operator to v2.5.0 --- applications/vault-secrets-operator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml index ab0467b701..49372ad751 100644 --- a/applications/vault-secrets-operator/Chart.yaml +++ 
b/applications/vault-secrets-operator/Chart.yaml @@ -5,7 +5,7 @@ sources: - https://github.com/ricoberger/vault-secrets-operator dependencies: - name: vault-secrets-operator - version: 2.4.0 + version: 2.5.0 repository: https://ricoberger.github.io/helm-charts/ annotations: phalanx.lsst.io/docs: | From 6e6969dc0748bc24a63a1ca52ecab5a64351a50b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 17 Jul 2023 08:18:47 -0700 Subject: [PATCH 189/308] Update neophile documentation Remove the dead link to the Roundtable neophile configuration and change the wording to stress that Science Platform applications must periodically update their dependencies. --- docs/about/contributing-docs.rst | 4 ++-- docs/developers/create-an-application.rst | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/about/contributing-docs.rst b/docs/about/contributing-docs.rst index 1ace4686d7..9a48d4af89 100644 --- a/docs/about/contributing-docs.rst +++ b/docs/about/contributing-docs.rst @@ -64,9 +64,9 @@ Submitting a pull request and sharing documentation drafts Members of the `lsst-sqre/phalanx`_ repository should submit pull requests following the `Data Management workflow guide`_. Note that GitHub Actions builds the documentation and uploads a draft edition of the documentation to the web. -You can find your branch's development edition at https://phalanx.lsst.io/v. +You can find your branch's development edition at `the list of available versions `__. -If you are submitting a GitHub pull request from a fork, the documentation will build as a check, however the draft won't upload for public staging. +If you are submitting a GitHub pull request from a fork, the documentation will build as a check, but the draft won't upload for public staging. More information on writing documentation ========================================= diff --git a/docs/developers/create-an-application.rst b/docs/developers/create-an-application.rst index c9159e804d..516cf11a85 100644 --- a/docs/developers/create-an-application.rst +++ b/docs/developers/create-an-application.rst @@ -22,9 +22,8 @@ Select ``FastAPI application (Safir)`` from the list of project types. This will create a new GitHub repository with the basic framework of a FastAPI_ application that will work well inside the Rubin Science Platform. The template uses Safir_ to simplify and regularize many parts of your FastAPI_ application, from logger to database handling. -Any Python application destined for the RSP should regularly update its dependencies to pick up any security fixes. -If your application follows the code layout of the FastAPI template, use `neophile `__ to automatically create PRs to update your dependencies. -To add your application to the list of repositories that neophile updates, submit a PR to add the repository owner and name to `neophile's configuration `__. +Any Python application destined for the RSP must regularly update its dependencies to pick up any security fixes and make new releases with those updated dependencies. +If you use the template as described above, GitHub Actions CI will warn you when application dependencies are out of date. Each release of your application must be tagged. The tag should use `semantic versioning`_ (for example, ``1.3.2``). From e7ecc40fab6a5016234d3c3954d12d40ac584a05 Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 14 Jul 2023 09:51:17 -0700 Subject: [PATCH 190/308] BTS: Activate argo-workflows. 
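In the simplest case, as here, enabling an application for an environment amounts to flipping its flag in environments/values-<env>.yaml and giving the application a values file for that environment (an empty one is enough for this commit); roughly, for the base environment:

    # environments/values-base.yaml
    argo-workflows:
      enabled: true

    # applications/argo-workflows/values-base.yaml is added as an empty file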
--- applications/argo-workflows/values-base.yaml | 0 environments/values-base.yaml | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 applications/argo-workflows/values-base.yaml diff --git a/applications/argo-workflows/values-base.yaml b/applications/argo-workflows/values-base.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/environments/values-base.yaml b/environments/values-base.yaml index aae83667d5..698978b072 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -5,7 +5,7 @@ vaultPathPrefix: secret/k8s_operator/base-lsp.lsst.codes alert-stream-broker: enabled: false argo-workflows: - enabled: false + enabled: true cachemachine: enabled: true cert-manager: From 4a674a62f1536a9b271d4ccfdd16e32ea686ef6c Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 17 Jul 2023 09:41:14 -0700 Subject: [PATCH 191/308] Increase idf-int quota --- applications/gafaelfawr/values-idfint.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml index 8569c0861c..056a679d76 100644 --- a/applications/gafaelfawr/values-idfint.yaml +++ b/applications/gafaelfawr/values-idfint.yaml @@ -36,7 +36,7 @@ config: default: notebook: cpu: 9.0 - memory: 27 + memory: 36 # Allow access by GitHub team. groupMapping: From 5bc21add3e49744fd705d6384f9840eb1d1a9654 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 17 Jul 2023 10:28:23 -0700 Subject: [PATCH 192/308] set quotas for idf* to largest single container --- applications/gafaelfawr/values-idfdev.yaml | 4 ++-- applications/gafaelfawr/values-idfint.yaml | 4 ++-- applications/gafaelfawr/values-idfprod.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml index 2bcb74b887..fc371da3c2 100644 --- a/applications/gafaelfawr/values-idfdev.yaml +++ b/applications/gafaelfawr/values-idfdev.yaml @@ -34,8 +34,8 @@ config: quota: default: notebook: - cpu: 9.0 - memory: 27 + cpu: 4.0 + memory: 16 groupMapping: "admin:jupyterlab": diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml index 056a679d76..e7798bca92 100644 --- a/applications/gafaelfawr/values-idfint.yaml +++ b/applications/gafaelfawr/values-idfint.yaml @@ -35,8 +35,8 @@ config: quota: default: notebook: - cpu: 9.0 - memory: 36 + cpu: 8.0 + memory: 32 # Allow access by GitHub team. 
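The template change below lets an individual connector entry carry its own tasksMax, falling back to the chart-wide influxdbSink.tasksMax when the key is absent. A connector entry in a Sasquatch values file then looks roughly like this (nesting of the surrounding file elided; the values shown are the ones used in the follow-up commit):

    mtmount:
      enabled: true
      topicsRegex: "lsst.sal.MTMount"
      tasksMax: "8"   # per-connector override of influxdbSink.tasksMax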
groupMapping: diff --git a/applications/gafaelfawr/values-idfprod.yaml b/applications/gafaelfawr/values-idfprod.yaml index 481fb53603..309e698d29 100644 --- a/applications/gafaelfawr/values-idfprod.yaml +++ b/applications/gafaelfawr/values-idfprod.yaml @@ -30,8 +30,8 @@ config: quota: default: notebook: - cpu: 9.0 - memory: 27 + cpu: 4.0 + memory: 16 groupMapping: "admin:provision": From a88675380e95cfdea0f05422313961aef67a13db Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 17 Jul 2023 18:58:41 -0400 Subject: [PATCH 193/308] Ability to configure number of tasks per connector instance --- .../charts/kafka-connect-manager/templates/influxdb_sink.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml index 382794d646..5aa6a34583 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml +++ b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml @@ -62,7 +62,11 @@ spec: name: sasquatch key: influxdb-password - name: KAFKA_CONNECT_TASKS_MAX + {{- if $value.tasksMax }} + value: {{ $value.tasksMax | quote }} + {{- else }} value: {{ $.Values.influxdbSink.tasksMax | quote }} + {{- end }} - name: KAFKA_CONNECT_TOPIC_REGEX value: {{ $value.topicsRegex | quote }} - name: KAFKA_CONNECT_CHECK_INTERVAL From 75cd37fded2a2e8b6d8fa481592c53c6ebf14b01 Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 17 Jul 2023 18:59:39 -0400 Subject: [PATCH 194/308] Increase throughput for MTM1M3 and MTMount - Configure the connectors at base and usdfprod with 8 tasks --- applications/sasquatch/values-base.yaml | 2 ++ applications/sasquatch/values-usdfprod.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 7cb4baf1de..3f04cc16bb 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -83,6 +83,7 @@ kafka-connect-manager: mtmount: enabled: true topicsRegex: "lsst.sal.MTMount" + tasksMax: "8" comcam: enabled: true topicsRegex: "lsst.sal.CCCamera|lsst.sal.CCHeaderService|lsst.sal.CCOODS" @@ -95,6 +96,7 @@ kafka-connect-manager: m1m3: enabled: true topicsRegex: "lsst.sal.MTM1M3" + tasksMax: "8" m2: enabled: true topicsRegex: "lsst.sal.MTHexapod|lsst.sal.MTM2|lsst.sal.MTRotator" diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 6bb90c185a..4d52d13fd8 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -49,6 +49,7 @@ kafka-connect-manager: enabled: true repairerConnector: false topicsRegex: ".*MTMount" + tasksMax: "8" comcam: enabled: true repairerConnector: false @@ -65,6 +66,7 @@ kafka-connect-manager: enabled: true repairerConnector: false topicsRegex: ".*MTM1M3" + tasksMax: "8" m2: enabled: true repairerConnector: false From fbbaa49623c326821657efe2c674c61390c8da58 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 26 Jun 2023 15:24:21 -0700 Subject: [PATCH 195/308] Start giftless implementation --- applications/giftless/Chart.yaml | 7 ++ applications/giftless/README.md | 25 +++++ applications/giftless/templates/_helpers.tpl | 53 +++++++++++ .../giftless/templates/configmap.yaml | 10 ++ .../giftless/templates/deployment.yaml | 93 +++++++++++++++++++ applications/giftless/templates/ingress.yaml | 26 ++++++ 
applications/giftless/templates/service.yaml | 14 +++ .../giftless/templates/vault-secrets.yaml | 9 ++ .../giftless/values-roundtable-dev.yaml | 7 ++ applications/giftless/values.yaml | 22 +++++ environments/README.md | 1 + .../templates/giftless-application.yaml | 37 ++++++++ environments/values-roundtable-dev.yaml | 2 + environments/values.yaml | 2 + 14 files changed, 308 insertions(+) create mode 100644 applications/giftless/Chart.yaml create mode 100644 applications/giftless/README.md create mode 100644 applications/giftless/templates/_helpers.tpl create mode 100644 applications/giftless/templates/configmap.yaml create mode 100644 applications/giftless/templates/deployment.yaml create mode 100644 applications/giftless/templates/ingress.yaml create mode 100644 applications/giftless/templates/service.yaml create mode 100644 applications/giftless/templates/vault-secrets.yaml create mode 100644 applications/giftless/values-roundtable-dev.yaml create mode 100644 applications/giftless/values.yaml create mode 100644 environments/templates/giftless-application.yaml diff --git a/applications/giftless/Chart.yaml b/applications/giftless/Chart.yaml new file mode 100644 index 0000000000..9e89c31030 --- /dev/null +++ b/applications/giftless/Chart.yaml @@ -0,0 +1,7 @@ +apiVersion: v2 +name: giftless +version: 0.0.1 +description: Git-LFS server with GCS S3 backend +sources: + - https://github.com/datopian/giftless +appVersion: 0.5.0 diff --git a/applications/giftless/README.md b/applications/giftless/README.md new file mode 100644 index 0000000000..0eb8fcf26f --- /dev/null +++ b/applications/giftless/README.md @@ -0,0 +1,25 @@ +# giftless + +Git-LFS server with GCS S3 backend + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| config.AUTH_PROVIDERS[0] | string | `"giftless.auth.allow_anon:read_only"` | | +| config.TRANSFER_ADAPTERS.basic.factory | string | `"giftless.transfer.basic_external:factory"` | | +| config.TRANSFER_ADAPTERS.basic.options.storage_class | string | `"giftless.storage.google_cloud:GoogleCloudStorage"` | | +| config.TRANSFER_ADAPTERS.basic.options.storage_options.account_key_file | string | `"/etc/secret/giftless-gcp-key.json"` | | +| config.TRANSFER_ADAPTERS.basic.options.storage_options.bucket_name | string | `"rubin-gitlfs-experimental"` | | +| config.TRANSFER_ADAPTERS.basic.options.storage_options.project_name | string | `"plasma-geode-127520"` | | +| image.repository | string | `"docker.io/datopian/giftless"` | | +| ingress.annotations | object | `{}` | | +| ingress.hostname | string | `""` | | +| server.debug | bool | `false` | | +| server.port | int | `5000` | | +| server.processes | int | `2` | | +| server.threads | int | `2` | | diff --git a/applications/giftless/templates/_helpers.tpl b/applications/giftless/templates/_helpers.tpl new file mode 100644 index 0000000000..1e6709c030 --- /dev/null +++ b/applications/giftless/templates/_helpers.tpl @@ -0,0 +1,53 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "giftless.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "giftless.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "giftless.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "giftless.labels" -}} +app.kubernetes.io/name: {{ include "giftless.name" . }} +helm.sh/chart: {{ include "giftless.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "giftless.selectorLabels" -}} +app.kubernetes.io/name: {{ include "giftless.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml new file mode 100644 index 0000000000..cf585e0cc5 --- /dev/null +++ b/applications/giftless/templates/configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "giftless.fullname" . }} + labels: + {{- include "giftless.labels" . | nindent 4 }} +data: + giftless.conf.yaml: |- + {{- toYaml .Values.config | nindent 4 }} + diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml new file mode 100644 index 0000000000..4066cf52d5 --- /dev/null +++ b/applications/giftless/templates/deployment.yaml @@ -0,0 +1,93 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "giftless.fullname" . }} + labels: + {{- include "giftless.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "giftless.selectorLabels" . | nindent 6 }} + strategy: + type: "Recreate" + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "giftless.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + command: + - "uwsgi" + - "--http" + - ":{{- .Values.server.port }}" + - "-M" + - "-T" + - "--threads" + - "{{- .Values.server.threads }}" + - "-p" + - "{{- .Values.server.processes }}" + - "--manage-script-name" + - "--callable" + - "app" + env: + - name: GIFTLESS_CONFIG_FILE + value: "/etc/giftless/giftless.conf.yaml" + {{- if .Values.server.debug }} + - name: GIFTLESS_DEBUG + value: "true" + {{- end }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - name: "http" + containerPort: {{ .Values.server.port }} + protocol: "TCP" + {{- with .Values.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: "tmp" + mountPath: "/tmp" + - name: "giftless-config" + mountPath: "/etc/giftless" + - name: "giftless-secret" + mountPath: "/etc/secret" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + volumes: + - name: "tmp" + emptyDir: {} + - name: "giftless-config" + configMap: + name: {{ include "giftless.fullname" . | quote }} + - name: "giftless-secret" + secret: + secretName: {{ include "giftless.fullname" . | quote }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml new file mode 100644 index 0000000000..efc49638fd --- /dev/null +++ b/applications/giftless/templates/ingress.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + cert-manager.io/cluster-issuer: letsencrypt-dns + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "giftless.fullname" . }} +spec: + ingressClassName: nginx + tls: + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: tls + rules: + - host: {{ .Values.ingress.hostname | quote }} + http: + paths: + - backend: + service: + name: giftless + port: + number: 5000 + path: / + pathType: Prefix diff --git a/applications/giftless/templates/service.yaml b/applications/giftless/templates/service.yaml new file mode 100644 index 0000000000..408315d8b3 --- /dev/null +++ b/applications/giftless/templates/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "giftless.fullname" . }} + labels: + {{- include "giftless.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 5000 + targetPort: "http" + protocol: "TCP" + selector: + {{- include "giftless.selectorLabels" . | nindent 4 }} diff --git a/applications/giftless/templates/vault-secrets.yaml b/applications/giftless/templates/vault-secrets.yaml new file mode 100644 index 0000000000..0466225d3c --- /dev/null +++ b/applications/giftless/templates/vault-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "giftless.fullname" . }} + labels: + {{- include "giftless.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/giftless" + type: "Opaque" diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml new file mode 100644 index 0000000000..3aba811367 --- /dev/null +++ b/applications/giftless/values-roundtable-dev.yaml @@ -0,0 +1,7 @@ +server: + debug: true +ingress: + hostname: "git-lfs-dev.lsst.cloud" +config: + AUTH_PROVIDERS: + - giftless.auth.allow_anon:read_write diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml new file mode 100644 index 0000000000..e4712d609e --- /dev/null +++ b/applications/giftless/values.yaml @@ -0,0 +1,22 @@ +image: + repository: docker.io/datopian/giftless +ingress: + hostname: "" # Must override in environment-specific values file + annotations: {} +server: + debug: false + port: 5000 + threads: 2 + processes: 2 +config: + AUTH_PROVIDERS: + - giftless.auth.allow_anon:read_only + TRANSFER_ADAPTERS: + basic: + factory: giftless.transfer.basic_external:factory + options: + storage_class: giftless.storage.google_cloud:GoogleCloudStorage + storage_options: + project_name: plasma-geode-127520 + bucket_name: rubin-gitlfs-experimental + account_key_file: /etc/secret/giftless-gcp-key.json diff --git a/environments/README.md b/environments/README.md index 69d1323750..548dad9826 100644 --- a/environments/README.md +++ b/environments/README.md @@ -14,6 +14,7 @@ | exposurelog.enabled | bool | `false` | | | fqdn | string | None, must be set | Fully-qualified domain name where the environment is running | | gafaelfawr.enabled | bool | `false` | | +| giftless.enabled | bool | `false` | | | hips.enabled | bool | `false` | | | ingress-nginx.enabled | bool | `false` | | | kubernetes-replicator.enabled | bool | `false` | | diff --git a/environments/templates/giftless-application.yaml b/environments/templates/giftless-application.yaml new file mode 100644 index 0000000000..940d35b335 --- /dev/null +++ b/environments/templates/giftless-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.giftless.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "giftless" +spec: + finalizers: + - "kubernetes" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "giftless" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "giftless" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/giftless" + repoURL: {{ .Values.repoURL | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 4d176ccd45..4325108877 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -16,6 +16,8 @@ exposurelog: enabled: false gafaelfawr: enabled: true +giftless: + enabled: true hips: enabled: false ingress-nginx: diff --git a/environments/values.yaml b/environments/values.yaml index f217d5d736..d727400ee2 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -30,6 +30,8 @@ exposurelog: enabled: false gafaelfawr: enabled: false +giftless: + enabled: false hips: 
enabled: false ingress-nginx: From e12aa99008de86383dac20a030a2834cec7f6980 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 28 Jun 2023 13:39:18 -0700 Subject: [PATCH 196/308] add cert definition and values documentation --- applications/giftless/README.md | 25 ++++++++++----------- applications/giftless/templates/cert.yaml | 17 ++++++++++++++ applications/giftless/values.yaml | 27 ++++++++++++++++++++++- 3 files changed, 55 insertions(+), 14 deletions(-) create mode 100644 applications/giftless/templates/cert.yaml diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 0eb8fcf26f..be84c6188b 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -10,16 +10,15 @@ Git-LFS server with GCS S3 backend | Key | Type | Default | Description | |-----|------|---------|-------------| -| config.AUTH_PROVIDERS[0] | string | `"giftless.auth.allow_anon:read_only"` | | -| config.TRANSFER_ADAPTERS.basic.factory | string | `"giftless.transfer.basic_external:factory"` | | -| config.TRANSFER_ADAPTERS.basic.options.storage_class | string | `"giftless.storage.google_cloud:GoogleCloudStorage"` | | -| config.TRANSFER_ADAPTERS.basic.options.storage_options.account_key_file | string | `"/etc/secret/giftless-gcp-key.json"` | | -| config.TRANSFER_ADAPTERS.basic.options.storage_options.bucket_name | string | `"rubin-gitlfs-experimental"` | | -| config.TRANSFER_ADAPTERS.basic.options.storage_options.project_name | string | `"plasma-geode-127520"` | | -| image.repository | string | `"docker.io/datopian/giftless"` | | -| ingress.annotations | object | `{}` | | -| ingress.hostname | string | `""` | | -| server.debug | bool | `false` | | -| server.port | int | `5000` | | -| server.processes | int | `2` | | -| server.threads | int | `2` | | +| config | object | YAML will be used as-is. cf https://giftless.datopian.com/en/latest/configuration.html | Configuration for giftless server | +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the giftless image | +| image.repository | string | `"docker.io/datopian/giftless"` | Giftless image to use | +| image.tag | string | The appVersion of the chart | Tag of giftless image to use | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| ingress.hostname | string | `""` | FQDN of giftless ingress | +| nameOverride | string | `""` | Override the base name for resources | +| server.debug | bool | `false` | Turn on debugging mode | +| server.port | int | `5000` | Port for giftless server to listen on | +| server.processes | int | `2` | Number of processes for server | +| server.threads | int | `2` | Number of threads per process | diff --git a/applications/giftless/templates/cert.yaml b/applications/giftless/templates/cert.yaml new file mode 100644 index 0000000000..4aa83a7829 --- /dev/null +++ b/applications/giftless/templates/cert.yaml @@ -0,0 +1,17 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: tls +spec: + secretName: tls + + issuerRef: + name: "letsencrypt-dns" + kind: ClusterIssuer + + subject: + organizations: + - "lsst.org" + + dnsNames: + - {{ .Values.ingress.hostname }} diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index e4712d609e..0005565608 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -1,13 +1,38 @@ +# Default values for giftless. 
+ +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "" + image: + # -- Giftless image to use repository: docker.io/datopian/giftless + # -- Pull policy for the giftless image + pullPolicy: "IfNotPresent" + # -- Tag of giftless image to use + # @default -- The appVersion of the chart + tag: "" + ingress: + # -- FQDN of giftless ingress hostname: "" # Must override in environment-specific values file + # -- Additional annotations to add to the ingress annotations: {} + server: + # -- Turn on debugging mode debug: false + # -- Port for giftless server to listen on port: 5000 - threads: 2 + # -- Number of processes for server processes: 2 + # -- Number of threads per process + threads: 2 + +# -- Configuration for giftless server +# @default -- YAML will be used as-is. cf https://giftless.datopian.com/en/latest/configuration.html config: AUTH_PROVIDERS: - giftless.auth.allow_anon:read_only From 3879334dc207d31741d7818563850b3b0dca85ff Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 28 Jun 2023 14:08:54 -0700 Subject: [PATCH 197/308] Add additional doc fields --- applications/giftless/README.md | 8 +++++++- applications/giftless/values.yaml | 25 ++++++++++++++++++++++++- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/applications/giftless/README.md b/applications/giftless/README.md index be84c6188b..5b4ba6aa12 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -10,15 +10,21 @@ Git-LFS server with GCS S3 backend | Key | Type | Default | Description | |-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the giftless frontend pod | | config | object | YAML will be used as-is. 
cf https://giftless.datopian.com/en/latest/configuration.html | Configuration for giftless server | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the giftless image | | image.repository | string | `"docker.io/datopian/giftless"` | Giftless image to use | | image.tag | string | The appVersion of the chart | Tag of giftless image to use | | ingress.annotations | object | `{}` | Additional annotations to add to the ingress | -| ingress.hostname | string | `""` | FQDN of giftless ingress | +| ingress.hostname | string | Must be overridden in environment-specific values file | FQDN of giftless ingress | | nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selector rules for the giftless frontend pod | +| podAnnotations | object | `{}` | Annotations for the giftless frontend pod | +| resources | object | `{}` | Resource limits and requests for the giftless frontend pod | | server.debug | bool | `false` | Turn on debugging mode | | server.port | int | `5000` | Port for giftless server to listen on | | server.processes | int | `2` | Number of processes for server | | server.threads | int | `2` | Number of threads per process | +| tolerations | list | `[]` | Tolerations for the giftless frontend pod | diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 0005565608..584f2a7d20 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -6,6 +6,23 @@ nameOverride: "" # -- Override the full name for resources (includes the release name) fullnameOverride: "" +# -- Resource limits and requests for the giftless frontend pod +resources: {} + +# -- Annotations for the giftless frontend pod +podAnnotations: {} + +# -- Node selector rules for the giftless frontend pod +nodeSelector: {} + +# -- Tolerations for the giftless frontend pod +tolerations: [] + +# -- Affinity rules for the giftless frontend pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
image: # -- Giftless image to use repository: docker.io/datopian/giftless @@ -17,7 +34,8 @@ image: ingress: # -- FQDN of giftless ingress - hostname: "" # Must override in environment-specific values file + # @default -- Must be overridden in environment-specific values file + hostname: "" # -- Additional annotations to add to the ingress annotations: {} @@ -45,3 +63,8 @@ config: project_name: plasma-geode-127520 bucket_name: rubin-gitlfs-experimental account_key_file: /etc/secret/giftless-gcp-key.json + +global: + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" From dd0034214268a3cb12e6bea022173661414c8e83 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 28 Jun 2023 14:29:04 -0700 Subject: [PATCH 198/308] Exit on SIGTERM --- applications/giftless/templates/deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index 4066cf52d5..a0e40038fb 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -29,6 +29,7 @@ spec: - ":{{- .Values.server.port }}" - "-M" - "-T" + - "--die-on-term" - "--threads" - "{{- .Values.server.threads }}" - "-p" From 2bc3b452008510c8720f0a2b4508a8bc68b3ae6f Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 28 Jun 2023 14:30:31 -0700 Subject: [PATCH 199/308] Leave subject.organizations out of TLS cert --- applications/giftless/templates/cert.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/applications/giftless/templates/cert.yaml b/applications/giftless/templates/cert.yaml index 4aa83a7829..ef8a808d43 100644 --- a/applications/giftless/templates/cert.yaml +++ b/applications/giftless/templates/cert.yaml @@ -9,9 +9,5 @@ spec: name: "letsencrypt-dns" kind: ClusterIssuer - subject: - organizations: - - "lsst.org" - dnsNames: - {{ .Values.ingress.hostname }} From 10e8cb12309f536201d7cf95c23e6d66dd52784d Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 30 Jun 2023 11:14:27 -0700 Subject: [PATCH 200/308] Use custom build --- applications/giftless/templates/deployment.yaml | 5 +++-- applications/giftless/values-roundtable-dev.yaml | 8 +++++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index a0e40038fb..c986f8bfb5 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -13,10 +13,11 @@ spec: type: "Recreate" template: metadata: - {{- with .Values.podAnnotations }} annotations: + {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} labels: {{- include "giftless.selectorLabels" . 
| nindent 8 }} spec: diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 3aba811367..6e1addd4e2 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,7 +1,13 @@ +image: + repository: docker.io/lsstsqre/giftless + tag: ajt-dev + pullPolicy: "Always" server: debug: true ingress: hostname: "git-lfs-dev.lsst.cloud" config: AUTH_PROVIDERS: - - giftless.auth.allow_anon:read_write + - factory: "giftless_github_proxy_auth.auth:factory" + options: {} + - giftless.auth.allow_anon:read_only From fa80bdf9efca6500d17b82201994f591ef67e617 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 5 Jul 2023 12:38:37 -0700 Subject: [PATCH 201/308] remove giftless pre-auth provider --- applications/giftless/values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 584f2a7d20..8d9579acc1 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -63,6 +63,7 @@ config: project_name: plasma-geode-127520 bucket_name: rubin-gitlfs-experimental account_key_file: /etc/secret/giftless-gcp-key.json + PRE_AUTHORIZED_ACTION_PROVIDER: {} global: # -- Base path for Vault secrets From 46f21595b4dabac089c8a3ad2ae8a5c2ca83bef2 Mon Sep 17 00:00:00 2001 From: Adam Thornton Date: Thu, 6 Jul 2023 12:47:43 -0700 Subject: [PATCH 202/308] Tidy up and use ghcr.io image --- applications/giftless/Chart.yaml | 6 +++--- .../giftless/values-roundtable-dev.yaml | 9 --------- applications/giftless/values.yaml | 20 +++++++++++-------- 3 files changed, 15 insertions(+), 20 deletions(-) diff --git a/applications/giftless/Chart.yaml b/applications/giftless/Chart.yaml index 9e89c31030..d47b894e8f 100644 --- a/applications/giftless/Chart.yaml +++ b/applications/giftless/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: giftless -version: 0.0.1 -description: Git-LFS server with GCS S3 backend +version: 0.5.0 +description: Git-LFS server with GCS S3 backend, with Rubin-specific auth sources: - https://github.com/datopian/giftless -appVersion: 0.5.0 +appVersion: 0.0.3 diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 6e1addd4e2..cd4492816a 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,13 +1,4 @@ -image: - repository: docker.io/lsstsqre/giftless - tag: ajt-dev - pullPolicy: "Always" server: debug: true ingress: hostname: "git-lfs-dev.lsst.cloud" -config: - AUTH_PROVIDERS: - - factory: "giftless_github_proxy_auth.auth:factory" - options: {} - - giftless.auth.allow_anon:read_only diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 8d9579acc1..22c7b7a1b8 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -25,7 +25,7 @@ affinity: {} # be set in the individual environment values files. image: # -- Giftless image to use - repository: docker.io/datopian/giftless + repository: ghcr.io/lsst-sqre/giftless-github-proxy-auth # -- Pull policy for the giftless image pullPolicy: "IfNotPresent" # -- Tag of giftless image to use @@ -53,17 +53,21 @@ server: # @default -- YAML will be used as-is. 
cf https://giftless.datopian.com/en/latest/configuration.html config: AUTH_PROVIDERS: - - giftless.auth.allow_anon:read_only + - factory: "giftless_github_proxy_auth.auth:factory" + options: {} + - "giftless.auth.allow_anon:read_only" + PRE_AUTHORIZED_ACTION_PROVIDER: + factory: "giftless_github_proxy_auth.auth:factory" + options: {} TRANSFER_ADAPTERS: basic: - factory: giftless.transfer.basic_external:factory + factory: "giftless.transfer.basic_external:factory" options: - storage_class: giftless.storage.google_cloud:GoogleCloudStorage + storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" storage_options: - project_name: plasma-geode-127520 - bucket_name: rubin-gitlfs-experimental - account_key_file: /etc/secret/giftless-gcp-key.json - PRE_AUTHORIZED_ACTION_PROVIDER: {} + project_name: "plasma-geode-127520" + bucket_name: "rubin-gitlfs-experimental" + account_key_file: "/etc/secret/giftless-gcp-key.json" global: # -- Base path for Vault secrets From a0e384f42999093ac6b98713ef2894a23f666b44 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 7 Jul 2023 12:26:30 -0700 Subject: [PATCH 203/308] Do auth via Gafaelfawr instead --- .../gafaelfawr/values-roundtable-dev.yaml | 4 + applications/giftless/Chart.yaml | 4 +- applications/giftless/README.md | 4 +- applications/giftless/templates/cert.yaml | 3 +- .../giftless/templates/deployment.yaml | 96 +++++++++++++++++++ applications/giftless/templates/ingress.yaml | 75 ++++++++++----- applications/giftless/templates/service.yaml | 15 +++ .../giftless/values-roundtable-dev.yaml | 23 ++++- applications/giftless/values.yaml | 38 ++++---- 9 files changed, 213 insertions(+), 49 deletions(-) diff --git a/applications/gafaelfawr/values-roundtable-dev.yaml b/applications/gafaelfawr/values-roundtable-dev.yaml index 3a6770ec5b..e32d530efb 100644 --- a/applications/gafaelfawr/values-roundtable-dev.yaml +++ b/applications/gafaelfawr/values-roundtable-dev.yaml @@ -23,6 +23,10 @@ config: - github: organization: "lsst-sqre" team: "square" + "write:git-lfs": + - github: + organization: "lsst-sqre" + team: "square" initialAdmins: - "afausti" diff --git a/applications/giftless/Chart.yaml b/applications/giftless/Chart.yaml index d47b894e8f..08dbe29ad5 100644 --- a/applications/giftless/Chart.yaml +++ b/applications/giftless/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: giftless -version: 0.5.0 +version: 0.0.1 description: Git-LFS server with GCS S3 backend, with Rubin-specific auth sources: - https://github.com/datopian/giftless -appVersion: 0.0.3 +appVersion: 0.5.0 diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 5b4ba6aa12..1ae75016dc 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -1,6 +1,6 @@ # giftless -Git-LFS server with GCS S3 backend +Git-LFS server with GCS S3 backend, with Rubin-specific auth ## Source Code @@ -18,7 +18,7 @@ Git-LFS server with GCS S3 backend | image.repository | string | `"docker.io/datopian/giftless"` | Giftless image to use | | image.tag | string | The appVersion of the chart | Tag of giftless image to use | | ingress.annotations | object | `{}` | Additional annotations to add to the ingress | -| ingress.hostname | string | Must be overridden in environment-specific values file | FQDN of giftless ingress | +| ingress.hostname | object | Must be overridden in environment-specific values file | FQDNs of giftless ingresses | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the 
giftless frontend pod | | podAnnotations | object | `{}` | Annotations for the giftless frontend pod | diff --git a/applications/giftless/templates/cert.yaml b/applications/giftless/templates/cert.yaml index ef8a808d43..dcbf18fa94 100644 --- a/applications/giftless/templates/cert.yaml +++ b/applications/giftless/templates/cert.yaml @@ -10,4 +10,5 @@ spec: kind: ClusterIssuer dnsNames: - - {{ .Values.ingress.hostname }} + - {{ .Values.ingress.hostname.readonly | quote }} + - {{ .Values.ingress.hostname.readwrite | quote }} diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index c986f8bfb5..33934d4e6f 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -93,3 +93,99 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "giftless.fullname" . }}-rw + labels: + {{- include "giftless.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "giftless.selectorLabels" . | nindent 6 }} + strategy: + type: "Recreate" + template: + metadata: + annotations: + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + labels: + {{- include "giftless.selectorLabels" . | nindent 8 }} + spec: + automountServiceAccountToken: false + containers: + - name: {{ .Chart.Name }} + command: + - "uwsgi" + - "--http" + - ":{{- .Values.server.port }}" + - "-M" + - "-T" + - "--die-on-term" + - "--threads" + - "{{- .Values.server.threads }}" + - "-p" + - "{{- .Values.server.processes }}" + - "--manage-script-name" + - "--callable" + - "app" + env: + - name: GIFTLESS_CONFIG_FILE + value: "/etc/giftless/giftless.conf.yaml" + {{- if .Values.server.debug }} + - name: GIFTLESS_DEBUG + value: "true" + {{- end }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + ports: + - name: "http" + containerPort: {{ .Values.server.port }} + protocol: "TCP" + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: "tmp" + mountPath: "/tmp" + - name: "giftless-config" + mountPath: "/etc/giftless" + - name: "giftless-secret" + mountPath: "/etc/secret" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + volumes: + - name: "tmp" + emptyDir: {} + - name: "giftless-config" + configMap: + name: {{ template "giftless.fullname" . }}-rw + - name: "giftless-secret" + secret: + secretName: {{ include "giftless.fullname" . | quote }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index efc49638fd..a31ea22203 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -1,26 +1,53 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress metadata: - annotations: - cert-manager.io/cluster-issuer: letsencrypt-dns - {{- with .Values.ingress.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} name: {{ include "giftless.fullname" . }} -spec: - ingressClassName: nginx - tls: - - hosts: - - {{ .Values.ingress.hostname | quote }} - secretName: tls - rules: - - host: {{ .Values.ingress.hostname | quote }} - http: - paths: - - backend: - service: - name: giftless - port: - number: 5000 - path: / - pathType: Prefix + labels: + {{- include "giftless.labels" . | nindent 4 }} +config: + baseUrl: "https://{{ .Values.ingress.hostname.readonly }}" + scopes: + anonymous: true +template: + metadata: + name: {{ include "giftless.fullname" . }} + spec: + rules: + - host: {{ .Values.ingress.hostname.readonly | quote }} + http: + paths: + - path: "/" + pathType: "Prefix" + backend: + service: + name: {{ include "giftless.fullname" . }} + port: + number: 5000 +--- +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "giftless-rw" + name: {{ template "giftless.fullname" . }}-rw + labels: + {{- include "giftless.labels" . | nindent 4 }} +config: + baseUrl: "https://{{ .Values.ingress.hostname.readwrite }}" + scopes: + all: + - "write:git-lfs" +template: + metadata: + name: {{ template "giftless.fullname" . }}-rw + spec: + rules: + - host: {{ .Values.ingress.hostname.readwrite | quote }} + http: + paths: + - path: "/files" + pathType: "Prefix" + backend: + service: + name: {{ template "giftless.fullname" . }}-rw + port: + number: 5000 diff --git a/applications/giftless/templates/service.yaml b/applications/giftless/templates/service.yaml index 408315d8b3..5c5a270abc 100644 --- a/applications/giftless/templates/service.yaml +++ b/applications/giftless/templates/service.yaml @@ -12,3 +12,18 @@ spec: protocol: "TCP" selector: {{- include "giftless.selectorLabels" . | nindent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "giftless.fullname" . }}-rw + labels: + {{- include "giftless.labels" . | nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 5000 + targetPort: "http" + protocol: "TCP" + selector: + {{- include "giftless.selectorLabels" . 
| nindent 4 }} diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index cd4492816a..1ef112bc17 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -1,4 +1,25 @@ server: debug: true ingress: - hostname: "git-lfs-dev.lsst.cloud" + hostname: + readonly: "git-lfs-dev.lsst.cloud" + readwrite: "git-lfs-dev-rw.lsst.cloud" +config: + readonly: + TRANSFER_ADAPTERS: + basic: + options: + storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" + storage_options: + project_name: "plasma-geode-127520" + bucket_name: "rubin-gitlfs-experimental" + account_key_file: "/etc/secret/giftless-gcp-key.json" + readwrite: + TRANSFER_ADAPTERS: + basic: + options: + storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" + storage_options: + project_name: "plasma-geode-127520" + bucket_name: "rubin-gitlfs-experimental" + account_key_file: "/etc/secret/giftless-gcp-key.json" diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 22c7b7a1b8..0e4ab42592 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -25,7 +25,7 @@ affinity: {} # be set in the individual environment values files. image: # -- Giftless image to use - repository: ghcr.io/lsst-sqre/giftless-github-proxy-auth + repository: docker.io/datopian/giftless # -- Pull policy for the giftless image pullPolicy: "IfNotPresent" # -- Tag of giftless image to use @@ -33,9 +33,11 @@ image: tag: "" ingress: - # -- FQDN of giftless ingress + # -- FQDNs of giftless ingresses # @default -- Must be overridden in environment-specific values file - hostname: "" + hostname: + readonly: "" + readwrite: "" # -- Additional annotations to add to the ingress annotations: {} @@ -52,22 +54,20 @@ server: # -- Configuration for giftless server # @default -- YAML will be used as-is. cf https://giftless.datopian.com/en/latest/configuration.html config: - AUTH_PROVIDERS: - - factory: "giftless_github_proxy_auth.auth:factory" - options: {} - - "giftless.auth.allow_anon:read_only" - PRE_AUTHORIZED_ACTION_PROVIDER: - factory: "giftless_github_proxy_auth.auth:factory" - options: {} - TRANSFER_ADAPTERS: - basic: - factory: "giftless.transfer.basic_external:factory" - options: - storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" - storage_options: - project_name: "plasma-geode-127520" - bucket_name: "rubin-gitlfs-experimental" - account_key_file: "/etc/secret/giftless-gcp-key.json" + readonly: + AUTH_PROVIDERS: + - "giftless.auth.allow_anon:read_only" + TRANSFER_ADAPTERS: + basic: + factory: "giftless.transfer.basic_external:factory" + # @default -- Must be overridden in environment-specific values file + readwrite: + AUTH_PROVIDERS: + - "giftless.auth.allow_anon:read_write" # Ingress does control. 
+ TRANSFER_ADAPTERS: + basic: + factory: "giftless.transfer.basic_external:factory" + # @default -- Must be overridden in environment-specific values file global: # -- Base path for Vault secrets From 1e110c270a5eaff981c622b3fe19e5db4501d107 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 7 Jul 2023 12:32:36 -0700 Subject: [PATCH 204/308] correct ingress tls --- applications/giftless/templates/ingress.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index a31ea22203..a76ad0e9e0 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -12,6 +12,10 @@ template: metadata: name: {{ include "giftless.fullname" . }} spec: + tls: + - hosts: + - {{ .Values.ingress.hostname.readonly | quote }} + secretName: tls rules: - host: {{ .Values.ingress.hostname.readonly | quote }} http: @@ -40,6 +44,10 @@ template: metadata: name: {{ template "giftless.fullname" . }}-rw spec: + tls: + - hosts: + - {{ .Values.ingress.hostname.readwrite | quote }} + secretName: tls rules: - host: {{ .Values.ingress.hostname.readwrite | quote }} http: From 2d894a43cd6d8238adf7cb8e29ae33bbc57079bf Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 7 Jul 2023 12:40:00 -0700 Subject: [PATCH 205/308] Add rw configmap --- applications/giftless/templates/configmap.yaml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index cf585e0cc5..69beec1049 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -6,5 +6,15 @@ metadata: {{- include "giftless.labels" . | nindent 4 }} data: giftless.conf.yaml: |- - {{- toYaml .Values.config | nindent 4 }} + {{- toYaml .Values.config.readonly | nindent 4 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "giftless.fullname" . }}-rw + labels: + {{- include "giftless.labels" . | nindent 4 }} +data: + giftless.conf.yaml: |- + {{- toYaml .Values.config.readwrite | nindent 4 }} From 9b8a92191f59772d8921289ea5bffed1f47c8942 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 7 Jul 2023 13:46:17 -0700 Subject: [PATCH 206/308] Add additional FQDN support to Gafaelfawr --- applications/gafaelfawr/README.md | 1 + .../gafaelfawr/templates/ingress.yaml | 45 +++++++++++++++++++ .../gafaelfawr/values-roundtable-dev.yaml | 8 ++++ applications/gafaelfawr/values.yaml | 6 +++ 4 files changed, 60 insertions(+) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index ed0125f474..f1561d8ee3 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -24,6 +24,7 @@ Authentication and identity system | cloudsql.resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | +| config.additionalFQDNs | list | `[]` | Defines additional FQDNs for Gafaelfawr. This doesn't work for cookie or browser authentication, but for token-based services like git-lfs or the webdav server it does. | | config.cilogon.clientId | string | `""` | CILogon client ID. 
One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. | | config.cilogon.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | | config.cilogon.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | diff --git a/applications/gafaelfawr/templates/ingress.yaml b/applications/gafaelfawr/templates/ingress.yaml index 18a03df271..2abe7f4c42 100644 --- a/applications/gafaelfawr/templates/ingress.yaml +++ b/applications/gafaelfawr/templates/ingress.yaml @@ -47,3 +47,48 @@ spec: port: number: 8080 {{- end }} + {{- $context := . }} + {{- with $context.Values.config.additionalFQDNs }} + {{- range . }} + - host: {{ . | quote }} + http: + paths: + - path: "/auth" + pathType: Prefix + backend: + service: + name: {{ template "gafaelfawr.fullname" $context }} + port: + number: 8080 + - path: "/login" + pathType: Exact + backend: + service: + name: {{ template "gafaelfawr.fullname" $context }} + port: + number: 8080 + - path: "/logout" + pathType: Exact + backend: + service: + name: {{ template "gafaelfawr.fullname" $context }} + port: + number: 8080 + {{- if $context.Values.config.oidcServer.enabled }} + - path: "/.well-known/jwks.json" + pathType: Exact + backend: + service: + name: {{ template "gafaelfawr.fullname" $context }} + port: + number: 8080 + - path: "/.well-known/openid-configuration" + pathType: Exact + backend: + service: + name: {{ template "gafaelfawr.fullname" $context }} + port: + number: 8080 + {{- end }} + {{- end }} + {{- end }} diff --git a/applications/gafaelfawr/values-roundtable-dev.yaml b/applications/gafaelfawr/values-roundtable-dev.yaml index e32d530efb..aabd0191cf 100644 --- a/applications/gafaelfawr/values-roundtable-dev.yaml +++ b/applications/gafaelfawr/values-roundtable-dev.yaml @@ -18,6 +18,10 @@ config: oidcServer: enabled: false + knownScopes: + "write:git-lfs": >- + Can write objects to Git LFS storage bucket + groupMapping: "exec:admin": - github: @@ -38,3 +42,7 @@ config: errorFooter: | To report problems or ask for help, contact #dm-square on the LSSTC Slack. + + additionalFQDNs: + - "git-lfs-dev.data.lsst.cloud" + - "git-lfs-dev-rw.data.lsst.cloud" diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 72f564c63f..99f326a25d 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -275,6 +275,12 @@ config: # [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes. groupMapping: {} + # -- Defines additional FQDNs for Gafaelfawr. This doesn't work for + # cookie or browser authentication, but for token-based services like + # git-lfs or the webdav server it does. + + additionalFQDNs: [] + cloudsql: # -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google # Cloud. 
This will be run as a sidecar for the main Gafaelfawr pods, and as From 0aefa7833e81e5548feba6a6d10c25161900d5c2 Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 7 Jul 2023 14:08:12 -0700 Subject: [PATCH 207/308] Add additional FQDN certs to Gafaelfawr --- applications/gafaelfawr/values-roundtable-dev.yaml | 4 ++-- applications/giftless/templates/ingress.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/gafaelfawr/values-roundtable-dev.yaml b/applications/gafaelfawr/values-roundtable-dev.yaml index aabd0191cf..21006a98a5 100644 --- a/applications/gafaelfawr/values-roundtable-dev.yaml +++ b/applications/gafaelfawr/values-roundtable-dev.yaml @@ -44,5 +44,5 @@ config: To report problems or ask for help, contact #dm-square on the LSSTC Slack. additionalFQDNs: - - "git-lfs-dev.data.lsst.cloud" - - "git-lfs-dev-rw.data.lsst.cloud" + - "git-lfs-dev.lsst.cloud" + - "git-lfs-dev-rw.lsst.cloud" diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index a76ad0e9e0..a3a0727f5f 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -52,7 +52,7 @@ template: - host: {{ .Values.ingress.hostname.readwrite | quote }} http: paths: - - path: "/files" + - path: "/" pathType: "Prefix" backend: service: From 18a72663ecd8361d2934a97cafa765bfbff4ba19 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 10 Jul 2023 12:11:30 -0700 Subject: [PATCH 208/308] Update labels/selectorLabels for rw --- applications/giftless/templates/_helpers.tpl | 20 +++++++++++++++++++ .../giftless/templates/configmap.yaml | 2 +- .../giftless/templates/deployment.yaml | 6 +++--- applications/giftless/templates/ingress.yaml | 2 +- applications/giftless/templates/service.yaml | 4 ++-- 5 files changed, 27 insertions(+), 7 deletions(-) diff --git a/applications/giftless/templates/_helpers.tpl b/applications/giftless/templates/_helpers.tpl index 1e6709c030..5851081721 100644 --- a/applications/giftless/templates/_helpers.tpl +++ b/applications/giftless/templates/_helpers.tpl @@ -51,3 +51,23 @@ Selector labels app.kubernetes.io/name: {{ include "giftless.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} +{{/* +Common labels-RW +*/}} +{{- define "giftless-rw.labels" -}} +app.kubernetes.io/name: {{ include "giftless.name" . }}-rw +helm.sh/chart: {{ include "giftless.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "giftless-rw.selectorLabels" -}} +app.kubernetes.io/name: {{ include "giftless.name" . }}-rw +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index 69beec1049..cb3afefe86 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -13,7 +13,7 @@ kind: ConfigMap metadata: name: {{ template "giftless.fullname" . }}-rw labels: - {{- include "giftless.labels" . | nindent 4 }} + {{- include "giftless-rw.labels" . 
| nindent 4 }} data: giftless.conf.yaml: |- {{- toYaml .Values.config.readwrite | nindent 4 }} diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index 33934d4e6f..db5b02db7f 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -99,12 +99,12 @@ kind: Deployment metadata: name: {{ template "giftless.fullname" . }}-rw labels: - {{- include "giftless.labels" . | nindent 4 }} + {{- include "giftless-rw.labels" . | nindent 4 }} spec: replicas: 1 selector: matchLabels: - {{- include "giftless.selectorLabels" . | nindent 6 }} + {{- include "giftless-rw.selectorLabels" . | nindent 6 }} strategy: type: "Recreate" template: @@ -115,7 +115,7 @@ spec: {{- end }} checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} labels: - {{- include "giftless.selectorLabels" . | nindent 8 }} + {{- include "giftless-rw.selectorLabels" . | nindent 8 }} spec: automountServiceAccountToken: false containers: diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index a3a0727f5f..c53b07074d 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -34,7 +34,7 @@ metadata: name: "giftless-rw" name: {{ template "giftless.fullname" . }}-rw labels: - {{- include "giftless.labels" . | nindent 4 }} + {{- include "giftless-rw.labels" . | nindent 4 }} config: baseUrl: "https://{{ .Values.ingress.hostname.readwrite }}" scopes: diff --git a/applications/giftless/templates/service.yaml b/applications/giftless/templates/service.yaml index 5c5a270abc..1ce6a9be64 100644 --- a/applications/giftless/templates/service.yaml +++ b/applications/giftless/templates/service.yaml @@ -18,7 +18,7 @@ kind: Service metadata: name: {{ template "giftless.fullname" . }}-rw labels: - {{- include "giftless.labels" . | nindent 4 }} + {{- include "giftless-rw.labels" . | nindent 4 }} spec: type: "ClusterIP" ports: @@ -26,4 +26,4 @@ spec: targetPort: "http" protocol: "TCP" selector: - {{- include "giftless.selectorLabels" . | nindent 4 }} + {{- include "giftless-rw.selectorLabels" . 
| nindent 4 }} From a75ba32b9c7a59b040a88fdcc7f2b263e7aa7b68 Mon Sep 17 00:00:00 2001 From: stvoutsin Date: Tue, 18 Jul 2023 18:10:16 +0300 Subject: [PATCH 209/308] Change tapSchema image repository for roe environment --- applications/tap/values-roe.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/tap/values-roe.yaml b/applications/tap/values-roe.yaml index d8d5b07528..34a231353d 100644 --- a/applications/tap/values-roe.yaml +++ b/applications/tap/values-roe.yaml @@ -1,6 +1,6 @@ tapSchema: image: - repository: "lsstsqre/tap-schema-idfprod-tap" + repository: "stvoutsin/tap-schema-roe" config: gcsBucket: "async" From 289aee4d9bfc3d10eed30334c7c98b69def94b1e Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 18 Jul 2023 10:52:49 -0700 Subject: [PATCH 210/308] Don't create separate cert resources; do that with ingress annotations --- applications/giftless/templates/cert.yaml | 14 -------------- applications/giftless/templates/ingress.yaml | 10 ++++++++++ 2 files changed, 10 insertions(+), 14 deletions(-) delete mode 100644 applications/giftless/templates/cert.yaml diff --git a/applications/giftless/templates/cert.yaml b/applications/giftless/templates/cert.yaml deleted file mode 100644 index dcbf18fa94..0000000000 --- a/applications/giftless/templates/cert.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: tls -spec: - secretName: tls - - issuerRef: - name: "letsencrypt-dns" - kind: ClusterIssuer - - dnsNames: - - {{ .Values.ingress.hostname.readonly | quote }} - - {{ .Values.ingress.hostname.readwrite | quote }} diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index c53b07074d..40e18393a5 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -4,6 +4,11 @@ metadata: name: {{ include "giftless.fullname" . }} labels: {{- include "giftless.labels" . | nindent 4 }} + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} config: baseUrl: "https://{{ .Values.ingress.hostname.readonly }}" scopes: @@ -35,6 +40,11 @@ metadata: name: {{ template "giftless.fullname" . }}-rw labels: {{- include "giftless-rw.labels" . | nindent 4 }} + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + {{- with .Values.ingress.annotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} config: baseUrl: "https://{{ .Values.ingress.hostname.readwrite }}" scopes: From 3402b55e28881c1603a2c2963856583c1ea320c7 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 18 Jul 2023 11:04:44 -0700 Subject: [PATCH 211/308] Address review commentary --- applications/gafaelfawr/README.md | 2 +- .../gafaelfawr/templates/ingress.yaml | 32 +------------------ applications/gafaelfawr/values.yaml | 4 +-- 3 files changed, 4 insertions(+), 34 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index f1561d8ee3..03c806bbc0 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -24,7 +24,6 @@ Authentication and identity system | cloudsql.resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | -| config.additionalFQDNs | list | `[]` | Defines additional FQDNs for Gafaelfawr. This doesn't work for cookie or browser authentication, but for token-based services like git-lfs or the webdav server it does. | | config.cilogon.clientId | string | `""` | CILogon client ID. One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. | | config.cilogon.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | | config.cilogon.gidClaim | string | Do not set a primary GID | Claim from which to get the primary GID (only used if not retrieved from LDAP or Firestore) | @@ -80,6 +79,7 @@ Authentication and identity system | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Gafaelfawr image | | image.repository | string | `"ghcr.io/lsst-sqre/gafaelfawr"` | Gafaelfawr image to use | | image.tag | string | The appVersion of the chart | Tag of Gafaelfawr image to use | +| ingress.additionalHosts | list | `[]` | Defines additional FQDNs for Gafaelfawr. This doesn't work for cookie or browser authentication, but for token-based services like git-lfs or the webdav server it does. | | maintenance.affinity | object | `{}` | Affinity rules for Gafaelfawr maintenance and audit pods | | maintenance.auditSchedule | string | `"30 3 * * *"` | Cron schedule string for Gafaelfawr data consistency audit (in UTC) | | maintenance.maintenanceSchedule | string | `"5 * * * *"` | Cron schedule string for Gafaelfawr periodic maintenance (in UTC) | diff --git a/applications/gafaelfawr/templates/ingress.yaml b/applications/gafaelfawr/templates/ingress.yaml index 2abe7f4c42..1284d838f2 100644 --- a/applications/gafaelfawr/templates/ingress.yaml +++ b/applications/gafaelfawr/templates/ingress.yaml @@ -48,7 +48,7 @@ spec: number: 8080 {{- end }} {{- $context := . }} - {{- with $context.Values.config.additionalFQDNs }} + {{- with $context.Values.ingress.additionalHosts }} {{- range . }} - host: {{ . 
| quote }} http: @@ -60,35 +60,5 @@ spec: name: {{ template "gafaelfawr.fullname" $context }} port: number: 8080 - - path: "/login" - pathType: Exact - backend: - service: - name: {{ template "gafaelfawr.fullname" $context }} - port: - number: 8080 - - path: "/logout" - pathType: Exact - backend: - service: - name: {{ template "gafaelfawr.fullname" $context }} - port: - number: 8080 - {{- if $context.Values.config.oidcServer.enabled }} - - path: "/.well-known/jwks.json" - pathType: Exact - backend: - service: - name: {{ template "gafaelfawr.fullname" $context }} - port: - number: 8080 - - path: "/.well-known/openid-configuration" - pathType: Exact - backend: - service: - name: {{ template "gafaelfawr.fullname" $context }} - port: - number: 8080 - {{- end }} {{- end }} {{- end }} diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 99f326a25d..6ba21a9a0c 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -275,11 +275,11 @@ config: # [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes. groupMapping: {} +ingress: # -- Defines additional FQDNs for Gafaelfawr. This doesn't work for # cookie or browser authentication, but for token-based services like # git-lfs or the webdav server it does. - - additionalFQDNs: [] + additionalHosts: [] cloudsql: # -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google From 9d9cabceecd7cb147cc4bcf0e1fe740efd3c84da Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 18 Jul 2023 11:42:38 -0700 Subject: [PATCH 212/308] Address review commentary. --- applications/giftless/README.md | 3 ++- applications/giftless/templates/deployment.yaml | 13 +++++++++++-- applications/giftless/values.yaml | 6 ++++-- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 1ae75016dc..785dbbb85e 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -19,12 +19,13 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | image.tag | string | The appVersion of the chart | Tag of giftless image to use | | ingress.annotations | object | `{}` | Additional annotations to add to the ingress | | ingress.hostname | object | Must be overridden in environment-specific values file | FQDNs of giftless ingresses | +| ingress.hostname.readonly | string | Must be overridden in environment-specific values file | FQDN for the read-only giftless ingress | +| ingress.hostname.readwrite | string | Must be overridden in environment-specific values file | FQDN for the read-write giftless ingress | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the giftless frontend pod | | podAnnotations | object | `{}` | Annotations for the giftless frontend pod | | resources | object | `{}` | Resource limits and requests for the giftless frontend pod | | server.debug | bool | `false` | Turn on debugging mode | -| server.port | int | `5000` | Port for giftless server to listen on | | server.processes | int | `2` | Number of processes for server | | server.threads | int | `2` | Number of threads per process | | tolerations | list | `[]` | Tolerations for the giftless frontend pod | diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index db5b02db7f..6c105aa64f 100644 --- a/applications/giftless/templates/deployment.yaml +++ 
b/applications/giftless/templates/deployment.yaml @@ -1,3 +1,12 @@ +# Note that this creates two nearly-identical deployments, one named +# "giftless" and one named "giftless-rw". The only real difference +# between them is that their configuration configmaps are different: one +# has the configuration for read-only access to the Git LFS server, and +# other has configuration for read-write access. It is possible that we +# might in future want to further split the configuration in order to +# allow, for instance, different numbers of processes and threads for +# the read-write and the read-only servers, on the grounds that our Git +# LFS usage is read-mostly. apiVersion: apps/v1 kind: Deployment metadata: @@ -27,7 +36,7 @@ spec: command: - "uwsgi" - "--http" - - ":{{- .Values.server.port }}" + - ":5000" - "-M" - "-T" - "--die-on-term" @@ -123,7 +132,7 @@ spec: command: - "uwsgi" - "--http" - - ":{{- .Values.server.port }}" + - ":5000" - "-M" - "-T" - "--die-on-term" diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 0e4ab42592..07ca78dd70 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -36,7 +36,11 @@ ingress: # -- FQDNs of giftless ingresses # @default -- Must be overridden in environment-specific values file hostname: + # -- FQDN for the read-only giftless ingress + # @default -- Must be overridden in environment-specific values file readonly: "" + # -- FQDN for the read-write giftless ingress + # @default -- Must be overridden in environment-specific values file readwrite: "" # -- Additional annotations to add to the ingress annotations: {} @@ -44,8 +48,6 @@ ingress: server: # -- Turn on debugging mode debug: false - # -- Port for giftless server to listen on - port: 5000 # -- Number of processes for server processes: 2 # -- Number of threads per process From 2a4683178885f8fd05dc510f2a51ec27664fff0a Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 18 Jul 2023 11:44:12 -0700 Subject: [PATCH 213/308] add giftless docs --- docs/applications/giftless/index.rst | 22 ++++++++++++++++++++++ docs/applications/giftless/values.md | 12 ++++++++++++ docs/applications/index.rst | 1 + 3 files changed, 35 insertions(+) create mode 100644 docs/applications/giftless/index.rst create mode 100644 docs/applications/giftless/values.md diff --git a/docs/applications/giftless/index.rst b/docs/applications/giftless/index.rst new file mode 100644 index 0000000000..383fba05a2 --- /dev/null +++ b/docs/applications/giftless/index.rst @@ -0,0 +1,22 @@ +.. px-app:: giftless + +####################### +Giftless Git LFS server +####################### + +Giftless, a Git LFS server provided by Datopian, is the Rubin +Observatory provider of Git LFS services. This implementation provides +both read-only and read-write endpoints for Git LFS. + +See https://giftless.datopian.com/en/latest/index.html for details. + +.. jinja:: giftless + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/giftless/values.md b/docs/applications/giftless/values.md new file mode 100644 index 0000000000..f2566e36dc --- /dev/null +++ b/docs/applications/giftless/values.md @@ -0,0 +1,12 @@ +```{px-app-values} giftless +``` + +# Giftless Helm values reference + +Helm values reference table for the {px-app}`giftless` application. 
+ +```{include} ../../../applications/giftless/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/applications/index.rst b/docs/applications/index.rst index c3b9c13cdb..79fb4d856c 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -66,5 +66,6 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde :maxdepth: 1 :caption: Roundtable + giftless/index kubernetes-replicator/index squarebot/index From 2ff6a5f2e20e11ebac19779d3c4c21b63bbd462a Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 18 Jul 2023 11:46:24 -0700 Subject: [PATCH 214/308] Rework Gafaelfawr additionalHosts config in roundtable-dev --- applications/gafaelfawr/values-roundtable-dev.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/applications/gafaelfawr/values-roundtable-dev.yaml b/applications/gafaelfawr/values-roundtable-dev.yaml index 21006a98a5..8af02989fb 100644 --- a/applications/gafaelfawr/values-roundtable-dev.yaml +++ b/applications/gafaelfawr/values-roundtable-dev.yaml @@ -43,6 +43,7 @@ config: errorFooter: | To report problems or ask for help, contact #dm-square on the LSSTC Slack. - additionalFQDNs: +ingress: + additionalHosts: - "git-lfs-dev.lsst.cloud" - "git-lfs-dev-rw.lsst.cloud" From b6eaf680bcda2ca0303024e6fbf79ac2d16b8c56 Mon Sep 17 00:00:00 2001 From: adam Date: Tue, 18 Jul 2023 14:33:07 -0700 Subject: [PATCH 215/308] hardcode most of the GCS info --- applications/giftless/README.md | 4 ++- .../giftless/templates/configmap.yaml | 25 ++++++++++++++++--- .../giftless/templates/deployment.yaml | 18 ++++++------- .../giftless/values-roundtable-dev.yaml | 20 ++------------- applications/giftless/values.yaml | 21 +++++----------- 5 files changed, 42 insertions(+), 46 deletions(-) diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 785dbbb85e..4979510629 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -11,7 +11,9 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the giftless frontend pod | -| config | object | YAML will be used as-is. cf https://giftless.datopian.com/en/latest/configuration.html | Configuration for giftless server | +| config | object | `{"bucketName":"","projectName":""}` | Configuration for giftless server | +| config.bucketName | string | Must be overridden in environment-specific values file | Bucket name for GCS LFS Object bucket | +| config.projectName | string | Must be overridden in environment-specific values file | Project name for GCS LFS Object bucket | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the giftless image | diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml index cb3afefe86..7cb4901730 100644 --- a/applications/giftless/templates/configmap.yaml +++ b/applications/giftless/templates/configmap.yaml @@ -6,7 +6,17 @@ metadata: {{- include "giftless.labels" . 
| nindent 4 }} data: giftless.conf.yaml: |- - {{- toYaml .Values.config.readonly | nindent 4 }} + AUTH_PROVIDERS: + - "giftless.auth.allow_anon:read_only" + TRANSFER_ADAPTERS: + basic: + factory: "giftless.transfer.basic_external:factory" + options: + storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" + storage_options: + account_key_file: "/etc/secret/giftless-gcp-key.json" + project_name: {{ .Values.config.projectName | quote }} + bucket_name: {{ .Values.config.bucketName | quote }} --- apiVersion: v1 kind: ConfigMap @@ -16,5 +26,14 @@ metadata: {{- include "giftless-rw.labels" . | nindent 4 }} data: giftless.conf.yaml: |- - {{- toYaml .Values.config.readwrite | nindent 4 }} - + AUTH_PROVIDERS: + - "giftless.auth.allow_anon:read_write" + TRANSFER_ADAPTERS: + basic: + factory: "giftless.transfer.basic_external:factory" + options: + storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" + storage_options: + account_key_file: "/etc/secret/giftless-gcp-key.json" + project_name: {{ .Values.config.projectName | quote }} + bucket_name: {{ .Values.config.bucketName | quote }} diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml index 6c105aa64f..ab17ea9b2c 100644 --- a/applications/giftless/templates/deployment.yaml +++ b/applications/giftless/templates/deployment.yaml @@ -1,12 +1,12 @@ # Note that this creates two nearly-identical deployments, one named # "giftless" and one named "giftless-rw". The only real difference -# between them is that their configuration configmaps are different: one -# has the configuration for read-only access to the Git LFS server, and -# other has configuration for read-write access. It is possible that we -# might in future want to further split the configuration in order to -# allow, for instance, different numbers of processes and threads for -# the read-write and the read-only servers, on the grounds that our Git -# LFS usage is read-mostly. +# between them is that their configuration configmaps and secrets are +# different: one has the configuration for read-only access to the Git +# LFS server, and other has configuration for read-write access. It is +# possible that we might in future want to further split the +# configuration in order to allow, for instance, different numbers of +# processes and threads for the read-write and the read-only servers, on +# the grounds that our Git LFS usage is read-mostly. 
apiVersion: apps/v1 kind: Deployment metadata: @@ -58,7 +58,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy | quote }} ports: - name: "http" - containerPort: {{ .Values.server.port }} + containerPort: 5000 protocol: "TCP" {{- with .Values.resources }} resources: @@ -154,7 +154,7 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy | quote }} ports: - name: "http" - containerPort: {{ .Values.server.port }} + containerPort: 5000 protocol: "TCP" {{- with .Values.resources }} resources: diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml index 1ef112bc17..5bbf6b5651 100644 --- a/applications/giftless/values-roundtable-dev.yaml +++ b/applications/giftless/values-roundtable-dev.yaml @@ -5,21 +5,5 @@ ingress: readonly: "git-lfs-dev.lsst.cloud" readwrite: "git-lfs-dev-rw.lsst.cloud" config: - readonly: - TRANSFER_ADAPTERS: - basic: - options: - storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" - storage_options: - project_name: "plasma-geode-127520" - bucket_name: "rubin-gitlfs-experimental" - account_key_file: "/etc/secret/giftless-gcp-key.json" - readwrite: - TRANSFER_ADAPTERS: - basic: - options: - storage_class: "giftless.storage.google_cloud:GoogleCloudStorage" - storage_options: - project_name: "plasma-geode-127520" - bucket_name: "rubin-gitlfs-experimental" - account_key_file: "/etc/secret/giftless-gcp-key.json" + projectName: "plasma-geode-127520" + bucketName: "rubin-gitlfs-experimental" diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml index 07ca78dd70..0de26a224b 100644 --- a/applications/giftless/values.yaml +++ b/applications/giftless/values.yaml @@ -54,22 +54,13 @@ server: threads: 2 # -- Configuration for giftless server -# @default -- YAML will be used as-is. cf https://giftless.datopian.com/en/latest/configuration.html config: - readonly: - AUTH_PROVIDERS: - - "giftless.auth.allow_anon:read_only" - TRANSFER_ADAPTERS: - basic: - factory: "giftless.transfer.basic_external:factory" - # @default -- Must be overridden in environment-specific values file - readwrite: - AUTH_PROVIDERS: - - "giftless.auth.allow_anon:read_write" # Ingress does control. - TRANSFER_ADAPTERS: - basic: - factory: "giftless.transfer.basic_external:factory" - # @default -- Must be overridden in environment-specific values file + # -- Project name for GCS LFS Object bucket + # @default -- Must be overridden in environment-specific values file + projectName: "" + # -- Bucket name for GCS LFS Object bucket + # @default -- Must be overridden in environment-specific values file + bucketName: "" global: # -- Base path for Vault secrets From 3853ff684de5451bb6af02d0db7d8461203af32f Mon Sep 17 00:00:00 2001 From: Kai Koehler Date: Thu, 13 Jul 2023 13:57:15 -0700 Subject: [PATCH 216/308] Add umbrella helm-doc function --- .pre-commit-config.yaml | 1 + applications/alert-stream-broker/README.md | 93 ++++++++ applications/sasquatch/README.md | 252 ++++++++++++++++++++- 3 files changed, 343 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index df63329eca..655a5888b8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,6 +20,7 @@ repos: - --chart-search-root=. 
# The `./` makes it relative to the chart-search-root set above - --template-files=./helm-docs.md.gotmpl + - --document-dependency-values=true - repo: https://github.com/pycqa/isort rev: 5.12.0 diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index cffc94fec9..e91597a416 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -15,3 +15,96 @@ Alert transmission to community brokers | strimzi-registry-operator.clusterNamespace | string | `"alert-stream-broker"` | | | strimzi-registry-operator.operatorNamespace | string | `"alert-stream-broker"` | | | strimzi-registry-operator.watchNamespace | string | `"alert-stream-broker"` | | +| alert-database.fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| alert-database.ingester.gcp.projectID | string | `""` | Project ID which has the above GCP IAM service account | +| alert-database.ingester.gcp.serviceAccountName | string | `""` | Name of a service account which has credentials granting access to the alert database's backing storage buckets. | +| alert-database.ingester.image.imagePullPolicy | string | `"IfNotPresent"` | | +| alert-database.ingester.image.repository | string | `"lsstdm/alert_database_ingester"` | | +| alert-database.ingester.image.tag | string | `"v2.0.2"` | | +| alert-database.ingester.kafka.cluster | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. | +| alert-database.ingester.kafka.port | int | `9092` | Port to connect to on the Strimzi Kafka cluster. It should be an internal listener that expects SCRAM SHA-512 auth. | +| alert-database.ingester.kafka.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | +| alert-database.ingester.kafka.topic | string | `"alerts-simulated"` | Name of the topic which will holds alert data. | +| alert-database.ingester.kafka.user | string | `"alert-database-ingester"` | The username of the Kafka user identity used to connect to the broker. | +| alert-database.ingester.logLevel | string | `"verbose"` | set the log level of the application. can be 'info', or 'debug', or anything else to suppress logging. | +| alert-database.ingester.schemaRegistryURL | string | `""` | URL of a schema registry instance | +| alert-database.ingester.serviceAccountName | string | `"alert-database-ingester"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database ingester. | +| alert-database.ingress.annotations | object | `{}` | | +| alert-database.ingress.enabled | bool | `true` | Whether to create an ingress | +| alert-database.ingress.gafaelfawrAuthQuery | string | `"scope=read:alertdb"` | Query string for Gafaelfawr to authorize access | +| alert-database.ingress.host | string | None, must be set if the ingress is enabled | Hostname for the ingress | +| alert-database.ingress.path | string | `"/alertdb"` | Subpath to host the alert database application under the ingress | +| alert-database.ingress.tls | list | `[]` | Configures TLS for the ingress if needed. If multiple ingresses share the same hostname, only one of them needs a TLS configuration. 
| +| alert-database.nameOverride | string | `""` | Override the base name for resources | +| alert-database.server.gcp.projectID | string | `""` | Project ID which has the above GCP IAM service account | +| alert-database.server.gcp.serviceAccountName | string | `""` | Name of a service account which has credentials granting access to the alert database's backing storage buckets. | +| alert-database.server.image.imagePullPolicy | string | `"IfNotPresent"` | | +| alert-database.server.image.repository | string | `"lsstdm/alert_database_server"` | | +| alert-database.server.image.tag | string | `"v2.1.0"` | | +| alert-database.server.logLevel | string | `"verbose"` | set the log level of the application. can be 'info', or 'debug', or anything else to suppress logging. | +| alert-database.server.service.port | int | `3000` | | +| alert-database.server.service.type | string | `"ClusterIP"` | | +| alert-database.server.serviceAccountName | string | `"alertdb-reader"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database server. | +| alert-database.storage.gcp.alertBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with alert data | +| alert-database.storage.gcp.project | string | `""` | Name of a GCP project that has a bucket for database storage | +| alert-database.storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data | +| alert-stream-broker.cluster.name | string | `"alert-broker"` | Name used for the Kafka broker, and used by Strimzi for many annotations. | +| alert-stream-broker.fullnameOverride | string | `""` | Override for the full name used for Kubernetes resources; by default one will be created based on the chart name and helm release name. | +| alert-stream-broker.kafka.config | object | `{"log.retention.bytes":"42949672960","log.retention.hours":168,"offsets.retention.minutes":1440}` | Configuration overrides for the Kafka server. | +| alert-stream-broker.kafka.config."log.retention.bytes" | string | `"42949672960"` | Maximum retained number of bytes for a broker's data. This is a string to avoid YAML type conversion issues for large numbers. | +| alert-stream-broker.kafka.config."log.retention.hours" | int | `168` | Number of hours for a brokers data to be retained. | +| alert-stream-broker.kafka.config."offsets.retention.minutes" | int | `1440` | Number of minutes for a consumer group's offsets to be retained. | +| alert-stream-broker.kafka.externalListener.bootstrap.annotations | object | `{}` | | +| alert-stream-broker.kafka.externalListener.bootstrap.host | string | `""` | Hostname that should be used by clients who want to connect to the broker through the bootstrap address. | +| alert-stream-broker.kafka.externalListener.bootstrap.ip | string | `""` | IP address that should be used by the broker's external bootstrap load balancer for access from the internet. The format of this is a string like "192.168.1.1". | +| alert-stream-broker.kafka.externalListener.brokers | list | `[]` | List of hostname and IP for each broker. The format of this is a list of maps with 'ip' and 'host' keys. For example: - ip: "192.168.1.1" host: broker-0.example - ip: "192.168.1.2" host: broker-1.example Each replica should get a host and IP. If these are unset, then IP addresses will be chosen automatically by the Kubernetes cluster's LoadBalancer controller, and hostnames will be unset, which will break TLS connections. 
| +| alert-stream-broker.kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of the certificate issuer. | +| alert-stream-broker.kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. | +| alert-stream-broker.kafka.interBrokerProtocolVersion | float | `3.2` | Version of the protocol for inter-broker communication, see https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. | +| alert-stream-broker.kafka.logMessageFormatVersion | float | `3.2` | Encoding version for messages, see https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. | +| alert-stream-broker.kafka.nodePool.affinities | list | `[{"key":"kafka","value":"ok"}]` | List of node affinities to set for the broker's nodes. The key should be a label key, and the value should be a label value, and then the broker will prefer running Kafka and Zookeeper on nodes with those key-value pairs. | +| alert-stream-broker.kafka.nodePool.tolerations | list | `[{"effect":"NoSchedule","key":"kafka","value":"ok"}]` | List of taint tolerations when scheduling the broker's pods onto nodes. The key should be a taint key, the value should be a taint value, and effect should be a taint effect that can be tolerated (ignored) when scheduling the broker's Kafka and Zookeeper pods. | +| alert-stream-broker.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | +| alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | +| alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | +| alert-stream-broker.kafka.version | string | `"3.2.3"` | Version of Kafka to deploy. | +| alert-stream-broker.nameOverride | string | `""` | | +| alert-stream-broker.strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | +| alert-stream-broker.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | +| alert-stream-broker.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. | +| alert-stream-broker.tls.subject.organization | string | `"Vera C. Rubin Observatory"` | Organization to use in the 'Subject' field of the broker's TLS certificate. | +| alert-stream-broker.users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. | +| alert-stream-broker.users[0].groups | list | `["rubin-testing"]` | A list of string prefixes for groups that the user should get admin access to, allowing them to create, delete, describe, etc consumer groups. Note that these are prefix-matched, not just literal exact matches. | +| alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated"]` | A list of topics that the user should get read-only access to. 
| +| alert-stream-broker.users[0].username | string | `"rubin-testing"` | The username for the user that should be created. | +| alert-stream-broker.vaultSecretsPath | string | `""` | Path to the secret resource in Vault | +| alert-stream-broker.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | +| alert-stream-broker.zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | +| alert-stream-broker.zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | +| alert-stream-schema-registry.clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. | +| alert-stream-schema-registry.hostname | string | `"alert-schemas-int.lsst.cloud"` | Hostname for an ingress which sends traffic to the Schema Registry. | +| alert-stream-schema-registry.name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. | +| alert-stream-schema-registry.port | int | `8081` | Port where the registry is listening. NOTE: Not actually configurable in strimzi-registry-operator, so this basically cannot be changed. | +| alert-stream-schema-registry.schemaSync | object | `{"image":{"repository":"lsstdm/lsst_alert_packet","tag":"tickets-DM-32743"},"subject":"alert-packet"}` | Configuration for the Job which injects the most recent alert_packet schema into the Schema Registry | +| alert-stream-schema-registry.schemaSync.image.repository | string | `"lsstdm/lsst_alert_packet"` | Repository of a container which has the alert_packet syncLatestSchemaToRegistry.py program | +| alert-stream-schema-registry.schemaSync.image.tag | string | `"tickets-DM-32743"` | Version of the container to use | +| alert-stream-schema-registry.schemaSync.subject | string | `"alert-packet"` | Subject name to use when inserting data into the Schema Registry | +| alert-stream-schema-registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry to store data. | +| alert-stream-schema-registry.strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | +| alert-stream-simulator.clusterName | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. | +| alert-stream-simulator.clusterPort | int | `9092` | Port to connect to on the Strimzi Kafka cluster. It should be an internal TLS listener. | +| alert-stream-simulator.fullnameOverride | string | `""` | Explicitly sets the full name used for the deployment and job (includes the release name). | +| alert-stream-simulator.image.imagePullPolicy | string | `"IfNotPresent"` | Pull policy for the Deployment | +| alert-stream-simulator.image.repository | string | `"lsstdm/alert-stream-simulator"` | Source repository for the image which holds the rubin-alert-stream program. | +| alert-stream-simulator.image.tag | string | `"v1.2.1"` | Tag to use for the rubin-alert-stream container. | +| alert-stream-simulator.kafkaUserName | string | `"alert-stream-simulator"` | The username of the Kafka user identity used to connect to the broker. | +| alert-stream-simulator.maxBytesRetained | string | `"24000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. Default is 100GB, but should be lower to not fill storage. 
| +| alert-stream-simulator.maxMillisecondsRetained | string | `"604800000"` | Maximum amount of time to save simulated alerts in the replay topic, in milliseconds. Default is 7 days. | +| alert-stream-simulator.nameOverride | string | `""` | Explicitly sets the name of the deployment and job. | +| alert-stream-simulator.repeatInterval | int | `37` | How often (in seconds) to repeat the sample data into the replay topic. | +| alert-stream-simulator.replayTopicName | string | `"alerts-simulated"` | Name of the topic which will receive the repeated alerts on an interval. | +| alert-stream-simulator.replayTopicPartitions | int | `8` | | +| alert-stream-simulator.replayTopicReplicas | int | `2` | | +| alert-stream-simulator.schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | +| alert-stream-simulator.staticTopicName | string | `"alerts-static"` | Name of the topic which will hold a static single visit of sample data. | +| alert-stream-simulator.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 78fc176491..a02536bc9a 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -6,6 +6,9 @@ Rubin Observatory's telemetry service. | Key | Type | Default | Description | |-----|------|---------|-------------| +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | bucketmapper.image | object | `{"repository":"ghcr.io/lsst-sqre/rubin-influx-tools","tag":"0.1.23"}` | image for monitoring-related cronjobs | | bucketmapper.image.repository | string | `"ghcr.io/lsst-sqre/rubin-influx-tools"` | repository for rubin-influx-tools | | bucketmapper.image.tag | string | `"0.1.23"` | tag for rubin-influx-tools | @@ -19,9 +22,6 @@ Rubin Observatory's telemetry service. | chronograf.resources.limits.memory | string | `"64Gi"` | | | chronograf.resources.requests.cpu | int | `1` | | | chronograf.resources.requests.memory | string | `"4Gi"` | | -| global.baseUrl | string | Set by Argo CD | Base URL for the environment | -| global.host | string | Set by Argo CD | Host name for ingress | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb-staging.enabled | bool | `false` | Enable InfluxDB staging deployment. | | influxdb-staging.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. | @@ -102,3 +102,249 @@ Rubin Observatory's telemetry service. | strimzi-kafka | object | `{}` | Override strimzi-kafka configuration. | | strimzi-registry-operator | object | `{"clusterName":"sasquatch","clusterNamespace":"sasquatch","operatorNamespace":"sasquatch"}` | strimzi-registry-operator configuration. 
| | telegraf-kafka-consumer | object | `{"enabled":false}` | Override telegraf-kafka-consumer configuration. | +| kafdrop.affinity | object | `{}` | Affinity configuration. | +| kafdrop.cmdArgs | string | `"--message.format=AVRO --topic.deleteEnabled=false --topic.createEnabled=false"` | Command line arguments to Kafdrop. | +| kafdrop.existingSecret | string | `""` | Existing k8s secret used to set kafdrop environment variables. Set SCHEMAREGISTRY_AUTH for basic auth credentials in the form username:password | +| kafdrop.host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). | +| kafdrop.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | +| kafdrop.image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. | +| kafdrop.image.tag | string | `"3.31.0"` | Kafdrop image version. | +| kafdrop.ingress.annotations | object | `{}` | Ingress annotations. | +| kafdrop.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. | +| kafdrop.ingress.hostname | string | `""` | Ingress hostname. | +| kafdrop.ingress.path | string | `"/kafdrop"` | Ingress path. | +| kafdrop.jmx.port | int | Defaults to 8686 | Port to use for JMX. If unspecified, JMX will not be exposed. | +| kafdrop.jvm.opts | string | `""` | JVM options. | +| kafdrop.kafka.broker | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Bootstrap list of Kafka host/port pairs | +| kafdrop.nodeSelector | object | `{}` | Node selector configuration. | +| kafdrop.podAnnotations | object | `{}` | Pod annotations. | +| kafdrop.replicaCount | int | `1` | Number of kafdrop pods to run in the deployment. | +| kafdrop.resources.limits.cpu | int | `2` | | +| kafdrop.resources.limits.memory | string | `"4Gi"` | | +| kafdrop.resources.requests.cpu | int | `1` | | +| kafdrop.resources.requests.memory | string | `"200Mi"` | | +| kafdrop.schemaregistry | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | The endpoint of Schema Registry | +| kafdrop.server.port | int | Defaults to 9000. | The web server port to listen on. | +| kafdrop.server.servlet | object | Defaults to /. | The context path to serve requests on (must end with a /). | +| kafdrop.service.annotations | object | `{}` | Service annotations | +| kafdrop.service.port | int | `9000` | Service port | +| kafdrop.tolerations | list | `[]` | Tolerations configuration. | +| kafka-connect-manager.enabled | bool | `true` | Enable Kafka Connect Manager. | +| kafka-connect-manager.env.kafkaBrokerUrl | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka broker URL. | +| kafka-connect-manager.env.kafkaConnectUrl | string | `"http://sasquatch-connect-api.sasquatch:8083"` | Kafka connect URL. | +| kafka-connect-manager.env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. | +| kafka-connect-manager.image.pullPolicy | string | `"IfNotPresent"` | | +| kafka-connect-manager.image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | | +| kafka-connect-manager.image.tag | string | `"1.3.1"` | | +| kafka-connect-manager.influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. | +| kafka-connect-manager.influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | +| kafka-connect-manager.influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to.
| +| kafka-connect-manager.influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documentation for details. | +| kafka-connect-manager.influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. | +| kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | +| kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | +| kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | +| kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | +| kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | +| kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. | +| kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. | +| kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | +| kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maximum number of tasks to run the connector. | +| kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | +| kafka-connect-manager.jdbcSink.autoCreate | string | `"true"` | Whether to automatically create the destination table. | +| kafka-connect-manager.jdbcSink.autoEvolve | string | `"false"` | Whether to automatically add columns in the table schema. | +| kafka-connect-manager.jdbcSink.batchSize | string | `"3000"` | Specifies how many records to attempt to batch together for insertion into the destination table. | +| kafka-connect-manager.jdbcSink.connectionUrl | string | `"jdbc:postgresql://localhost:5432/mydb"` | Database connection URL. | +| kafka-connect-manager.jdbcSink.dbTimezone | string | `"UTC"` | Name of the JDBC timezone that should be used in the connector when inserting time-based values. | +| kafka-connect-manager.jdbcSink.enabled | bool | `false` | Whether the JDBC Sink connector is deployed. | +| kafka-connect-manager.jdbcSink.insertMode | string | `"insert"` | The insertion mode to use. Supported modes are: `insert`, `upsert` and `update`. | +| kafka-connect-manager.jdbcSink.maxRetries | string | `"10"` | The maximum number of times to retry on errors before failing the task. | +| kafka-connect-manager.jdbcSink.name | string | `"postgres-sink"` | Name of the connector to create. | +| kafka-connect-manager.jdbcSink.retryBackoffMs | string | `"3000"` | The time in milliseconds to wait following an error before a retry attempt is made.
| +| kafka-connect-manager.jdbcSink.tableNameFormat | string | `"${topic}"` | A format string for the destination table name. | +| kafka-connect-manager.jdbcSink.tasksMax | string | `"10"` | Number of Kafka Connect tasks. | +| kafka-connect-manager.jdbcSink.topicRegex | string | `".*"` | Regex for selecting topics. | +| kafka-connect-manager.s3Sink.behaviorOnNullValues | string | `"fail"` | How to handle records with a null value (for example, Kafka tombstone records). Valid options are ignore and fail. | +| kafka-connect-manager.s3Sink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | +| kafka-connect-manager.s3Sink.enabled | bool | `false` | Whether the Amazon S3 Sink connector is deployed. | +| kafka-connect-manager.s3Sink.excludedTopicRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | +| kafka-connect-manager.s3Sink.flushSize | string | `"1000"` | Number of records written to store before invoking file commits. | +| kafka-connect-manager.s3Sink.locale | string | `"en-US"` | The locale to use when partitioning with TimeBasedPartitioner. | +| kafka-connect-manager.s3Sink.name | string | `"s3-sink"` | Name of the connector to create. | +| kafka-connect-manager.s3Sink.partitionDurationMs | string | `"3600000"` | The duration of a partition in milliseconds, used by TimeBasedPartitioner. Default is 1h for an hourly based partitioner. | +| kafka-connect-manager.s3Sink.pathFormat | string | `"'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH"` | Pattern used to format the path in the S3 object name. | +| kafka-connect-manager.s3Sink.rotateIntervalMs | string | `"600000"` | The time interval in milliseconds to invoke file commits. Set to 10 minutes by default. | +| kafka-connect-manager.s3Sink.s3BucketName | string | `""` | s3 bucket name. The bucket must already exist at the s3 provider. | +| kafka-connect-manager.s3Sink.s3PartRetries | int | `3` | Maximum number of retry attempts for failed requests. Zero means no retries. | +| kafka-connect-manager.s3Sink.s3PartSize | int | `5242880` | The Part Size in S3 Multi-part Uploads. Valid Values: [5242880,…,2147483647] | +| kafka-connect-manager.s3Sink.s3Region | string | `"us-east-1"` | s3 region | +| kafka-connect-manager.s3Sink.s3RetryBackoffMs | int | `200` | How long to wait in milliseconds before attempting the first retry of a failed S3 request. | +| kafka-connect-manager.s3Sink.s3SchemaCompatibility | string | `"NONE"` | s3 schema compatibility | +| kafka-connect-manager.s3Sink.schemaCacheConfig | int | `5000` | The size of the schema cache used in the Avro converter. | +| kafka-connect-manager.s3Sink.storeUrl | string | `""` | The object storage connection URL, for non-AWS s3 providers. | +| kafka-connect-manager.s3Sink.tasksMax | int | `1` | Number of Kafka Connect tasks. | +| kafka-connect-manager.s3Sink.timestampExtractor | string | `"Record"` | The extractor determines how to obtain a timestamp from each record. | +| kafka-connect-manager.s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. | +| kafka-connect-manager.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. | +| kafka-connect-manager.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. 
|
+| kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. |
+| rest-proxy.affinity | object | `{}` | Affinity configuration. |
+| rest-proxy.configurationOverrides | object | `{"client.sasl.mechanism":"SCRAM-SHA-512","client.security.protocol":"SASL_PLAINTEXT"}` | Kafka REST configuration options |
+| rest-proxy.customEnv | string | `nil` | Kafka REST additional env variables |
+| rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option |
+| rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
+| rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. |
+| rest-proxy.image.tag | string | `"7.4.0"` | Kafka REST proxy image tag. |
+| rest-proxy.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. |
+| rest-proxy.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. |
+| rest-proxy.ingress.hostname | string | `""` | Ingress hostname. |
+| rest-proxy.ingress.path | string | `"/sasquatch-rest-proxy(/|$)(.*)"` | Ingress path. |
+| rest-proxy.kafka.bootstrapServers | string | `"SASL_PLAINTEXT://sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka bootstrap servers, use the internal listener on port 9092 with SASL connection. |
+| rest-proxy.kafka.cluster.name | string | `"sasquatch"` | Name of the Strimzi Kafka cluster. |
+| rest-proxy.kafka.topicPrefixes | string | `nil` | List of topic prefixes to use when exposing Kafka topics to the REST Proxy v2 API. |
+| rest-proxy.kafka.topics | string | `nil` | List of Kafka topics to create via Strimzi. Alternatively topics can be created using the REST Proxy v3 API. |
+| rest-proxy.nodeSelector | object | `{}` | Node selector configuration. |
+| rest-proxy.podAnnotations | object | `{}` | Pod annotations. |
+| rest-proxy.replicaCount | int | `1` | Number of Kafka REST proxy pods to run in the deployment. |
+| rest-proxy.resources.limits.cpu | int | `2` | Kafka REST proxy cpu limits |
+| rest-proxy.resources.limits.memory | string | `"4Gi"` | Kafka REST proxy memory limits |
+| rest-proxy.resources.requests.cpu | int | `1` | Kafka REST proxy cpu requests |
+| rest-proxy.resources.requests.memory | string | `"200Mi"` | Kafka REST proxy memory requests |
+| rest-proxy.schemaregistry.url | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL |
+| rest-proxy.service.port | int | `8082` | Kafka REST proxy service port |
+| rest-proxy.tolerations | list | `[]` | Tolerations configuration. |
+| source-kafka-connect-manager.enabled | bool | `true` | Enable Kafka Connect Manager. |
+| source-kafka-connect-manager.env.kafkaBrokerUrl | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka broker URL. |
+| source-kafka-connect-manager.env.kafkaConnectUrl | string | `"http://sasquatch-connect-api.sasquatch:8083"` | Kafka Connect URL. |
+| source-kafka-connect-manager.env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. |
+| source-kafka-connect-manager.image.pullPolicy | string | `"IfNotPresent"` | |
+| source-kafka-connect-manager.image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | |
+| source-kafka-connect-manager.image.tag | string | `"1.3.1"` | |
+| source-kafka-connect-manager.influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. 
| +| source-kafka-connect-manager.influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | +| source-kafka-connect-manager.influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. | +| source-kafka-connect-manager.influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documetation for details. | +| source-kafka-connect-manager.influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. | +| source-kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. | +| source-kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. | +| source-kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. | +| source-kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. | +| source-kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. | +| source-kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. | +| source-kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. | +| source-kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. | +| source-kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. | +| source-kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | +| source-kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maxium number of tasks to run the connector. | +| source-kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. | +| source-kafka-connect-manager.jdbcSink.autoCreate | string | `"true"` | Whether to automatically create the destination table. | +| source-kafka-connect-manager.jdbcSink.autoEvolve | string | `"false"` | Whether to automatically add columns in the table schema. | +| source-kafka-connect-manager.jdbcSink.batchSize | string | `"3000"` | Specifies how many records to attempt to batch together for insertion into the destination table. | +| source-kafka-connect-manager.jdbcSink.connectionUrl | string | `"jdbc:postgresql://localhost:5432/mydb"` | Database connection URL. | +| source-kafka-connect-manager.jdbcSink.dbTimezone | string | `"UTC"` | Name of the JDBC timezone that should be used in the connector when inserting time-based values. | +| source-kafka-connect-manager.jdbcSink.enabled | bool | `false` | Whether the JDBC Sink connector is deployed. | +| source-kafka-connect-manager.jdbcSink.insertMode | string | `"insert"` | The insertion mode to use. Supported modes are: `insert`, `upsert` and `update`. 
| +| source-kafka-connect-manager.jdbcSink.maxRetries | string | `"10"` | The maximum number of times to retry on errors before failing the task. | +| source-kafka-connect-manager.jdbcSink.name | string | `"postgres-sink"` | Name of the connector to create. | +| source-kafka-connect-manager.jdbcSink.retryBackoffMs | string | `"3000"` | The time in milliseconds to wait following an error before a retry attempt is made. | +| source-kafka-connect-manager.jdbcSink.tableNameFormat | string | `"${topic}"` | A format string for the destination table name. | +| source-kafka-connect-manager.jdbcSink.tasksMax | string | `"10"` | Number of Kafka Connect tasks. | +| source-kafka-connect-manager.jdbcSink.topicRegex | string | `".*"` | Regex for selecting topics. | +| source-kafka-connect-manager.s3Sink.behaviorOnNullValues | string | `"fail"` | How to handle records with a null value (for example, Kafka tombstone records). Valid options are ignore and fail. | +| source-kafka-connect-manager.s3Sink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. | +| source-kafka-connect-manager.s3Sink.enabled | bool | `false` | Whether the Amazon S3 Sink connector is deployed. | +| source-kafka-connect-manager.s3Sink.excludedTopicRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. | +| source-kafka-connect-manager.s3Sink.flushSize | string | `"1000"` | Number of records written to store before invoking file commits. | +| source-kafka-connect-manager.s3Sink.locale | string | `"en-US"` | The locale to use when partitioning with TimeBasedPartitioner. | +| source-kafka-connect-manager.s3Sink.name | string | `"s3-sink"` | Name of the connector to create. | +| source-kafka-connect-manager.s3Sink.partitionDurationMs | string | `"3600000"` | The duration of a partition in milliseconds, used by TimeBasedPartitioner. Default is 1h for an hourly based partitioner. | +| source-kafka-connect-manager.s3Sink.pathFormat | string | `"'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH"` | Pattern used to format the path in the S3 object name. | +| source-kafka-connect-manager.s3Sink.rotateIntervalMs | string | `"600000"` | The time interval in milliseconds to invoke file commits. Set to 10 minutes by default. | +| source-kafka-connect-manager.s3Sink.s3BucketName | string | `""` | s3 bucket name. The bucket must already exist at the s3 provider. | +| source-kafka-connect-manager.s3Sink.s3PartRetries | int | `3` | Maximum number of retry attempts for failed requests. Zero means no retries. | +| source-kafka-connect-manager.s3Sink.s3PartSize | int | `5242880` | The Part Size in S3 Multi-part Uploads. Valid Values: [5242880,…,2147483647] | +| source-kafka-connect-manager.s3Sink.s3Region | string | `"us-east-1"` | s3 region | +| source-kafka-connect-manager.s3Sink.s3RetryBackoffMs | int | `200` | How long to wait in milliseconds before attempting the first retry of a failed S3 request. | +| source-kafka-connect-manager.s3Sink.s3SchemaCompatibility | string | `"NONE"` | s3 schema compatibility | +| source-kafka-connect-manager.s3Sink.schemaCacheConfig | int | `5000` | The size of the schema cache used in the Avro converter. | +| source-kafka-connect-manager.s3Sink.storeUrl | string | `""` | The object storage connection URL, for non-AWS s3 providers. | +| source-kafka-connect-manager.s3Sink.tasksMax | int | `1` | Number of Kafka Connect tasks. 
|
+| source-kafka-connect-manager.s3Sink.timestampExtractor | string | `"Record"` | The extractor determines how to obtain a timestamp from each record. |
+| source-kafka-connect-manager.s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. |
+| source-kafka-connect-manager.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. |
+| source-kafka-connect-manager.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. |
+| source-kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. |
+| square-events.cluster.name | string | `"sasquatch"` | |
+| strimzi-kafka.cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. |
+| strimzi-kafka.connect.enabled | bool | `true` | Enable Kafka Connect. |
+| strimzi-kafka.connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.35.1-kafka-3.4.0:1.2.0"` | Custom strimzi-kafka image with connector plugins used by sasquatch. |
+| strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run. |
+| strimzi-kafka.kafka.affinity | object | `{}` | Node affinity for Kafka broker pod assignment. |
+| strimzi-kafka.kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. |
+| strimzi-kafka.kafka.config."log.retention.hours" | int | `72` | Number of hours for a topic's data to be retained. |
+| strimzi-kafka.kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka. |
+| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `4320` | Number of minutes for a consumer group's offsets to be retained. |
+| strimzi-kafka.kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. |
+| strimzi-kafka.kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. |
+| strimzi-kafka.kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. |
+| strimzi-kafka.kafka.externalListener.bootstrap.host | string | `""` | Name used for TLS hostname verification. |
+| strimzi-kafka.kafka.externalListener.bootstrap.loadBalancerIP | string | `""` | The loadbalancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the loadBalancerIP when a load balancer is created. This field is ignored if the cloud provider does not support the feature. Once the IP address is provisioned this option makes it possible to pin the IP address. We can request the same IP next time it is provisioned. This is important because it lets us configure a DNS record, associating a hostname with that pinned IP address. |
+| strimzi-kafka.kafka.externalListener.brokers | list | `[]` | Brokers configuration. host is used in the brokers' advertised.brokers configuration and for TLS hostname verification. The format is a list of maps. |
+| strimzi-kafka.kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. 
| +| strimzi-kafka.kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. | +| strimzi-kafka.kafka.listeners.external.enabled | bool | `true` | Whether external listener is enabled. | +| strimzi-kafka.kafka.listeners.plain.enabled | bool | `true` | Whether internal plaintext listener is enabled. | +| strimzi-kafka.kafka.listeners.tls.enabled | bool | `true` | Whether internal TLS listener is enabled. | +| strimzi-kafka.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | +| strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. | +| strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | +| strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. | +| strimzi-kafka.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | +| strimzi-kafka.mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. | +| strimzi-kafka.mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. | +| strimzi-kafka.mirrormaker2.replication.policy.separator | string | "" | Convention used to rename topics when the DefaultReplicationPolicy replication policy is used. Default is "" when the IdentityReplicationPolicy replication policy is used. | +| strimzi-kafka.mirrormaker2.source.bootstrapServer | string | `""` | Source (active) cluster to replicate from. | +| strimzi-kafka.mirrormaker2.source.topicsPattern | string | `"registry-schemas, lsst.sal.*"` | Topic replication from the source cluster defined as a comma-separated list or regular expression pattern. | +| strimzi-kafka.mirrormaker2.sourceConnect.enabled | bool | `false` | Whether to deploy another Connect cluster for topics replicated from the source cluster. Requires the sourceRegistry enabled. | +| strimzi-kafka.mirrormaker2.sourceRegistry.enabled | bool | `false` | Whether to deploy another Schema Registry for the schemas replicated from the source cluster. | +| strimzi-kafka.mirrormaker2.sourceRegistry.schemaTopic | string | `"source.registry-schemas"` | Name of the topic Schema Registry topic replicated from the source cluster | +| strimzi-kafka.registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry | +| strimzi-kafka.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | +| strimzi-kafka.users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). | +| strimzi-kafka.users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager | +| strimzi-kafka.users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing | +| strimzi-kafka.users.replicator.enabled | bool | `false` | Enabled user replicator (used by Mirror Maker 2 and required at both source and target clusters) | +| strimzi-kafka.users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) | +| strimzi-kafka.users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. | +| strimzi-kafka.zookeeper.affinity | object | `{}` | Node affinity for Zookeeper pod assignment. | +| strimzi-kafka.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. 
| +| strimzi-kafka.zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | +| strimzi-kafka.zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | +| strimzi-kafka.zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. | +| telegraf-kafka-consumer.affinity | object | `{}` | Affinity for pod assignment. | +| telegraf-kafka-consumer.args | list | `[]` | Arguments passed to the Telegraf agent containers. | +| telegraf-kafka-consumer.enabled | bool | `false` | Enable Telegraf Kafka Consumer. Note that the default configuration is meant to work with InfluxDB2. | +| telegraf-kafka-consumer.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | +| telegraf-kafka-consumer.env[0].name | string | `"TELEGRAF_PASSWORD"` | | +| telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.key | string | `"telegraf-password"` | Telegraf KafkaUser password. | +| telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | +| telegraf-kafka-consumer.env[1].name | string | `"INFLUXDB_TOKEN"` | | +| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.key | string | `"admin-token"` | InfluxDB admin token. | +| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | | +| telegraf-kafka-consumer.image.pullPolicy | string | IfNotPresent | Image pull policy. | +| telegraf-kafka-consumer.image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. | +| telegraf-kafka-consumer.image.tag | string | `"refreshregex"` | Telegraf image tag. | +| telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. | +| telegraf-kafka-consumer.influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. | +| telegraf-kafka-consumer.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. | +| telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. | +| telegraf-kafka-consumer.kafkaConsumers.test.interval | string | `"1s"` | Data collection interval for the Kafka consumer. | +| telegraf-kafka-consumer.kafkaConsumers.test.topicRefreshInterval | string | `"60s"` | Default interval for refreshing topics to check for new or removed regexp matches | +| telegraf-kafka-consumer.kafkaConsumers.test.topicRegexps | string | `"[ \".*Test\" ]\n"` | List of regular expressions to specify the Kafka topics consumed by this agent. | +| telegraf-kafka-consumer.nodeSelector | object | `{}` | Node labels for pod assignment. | +| telegraf-kafka-consumer.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. | +| telegraf-kafka-consumer.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods. | +| telegraf-kafka-consumer.resources | object | `{}` | Kubernetes resources requests and limits. | +| telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment. 
| From 9fd306952e0ff3ccf02d7ecb17e6d8b80792f8aa Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Mon, 17 Jul 2023 17:40:20 -0400 Subject: [PATCH 217/308] Add the ook application --- applications/ook/Chart.yaml | 12 ++ applications/ook/README.md | 40 +++++ applications/ook/templates/_helpers.tpl | 62 ++++++++ applications/ook/templates/configmap.yaml | 16 ++ applications/ook/templates/deployment.yaml | 139 ++++++++++++++++++ applications/ook/templates/ingress.yaml | 31 ++++ applications/ook/templates/kafkaaccess.yaml | 14 ++ .../ook/templates/kafkauser-secret.yaml | 8 + applications/ook/templates/networkpolicy.yaml | 21 +++ applications/ook/templates/service.yaml | 15 ++ .../ook/templates/serviceaccount.yaml | 12 ++ .../ook/templates/tests/test-connection.yaml | 15 ++ applications/ook/templates/vaultsecret.yaml | 9 ++ applications/ook/values-roundtable-dev.yaml | 5 + applications/ook/values.yaml | 103 +++++++++++++ docs/applications/index.rst | 1 + docs/applications/ook/index.rst | 19 +++ docs/applications/ook/values.md | 12 ++ docs/documenteer.toml | 1 + environments/README.md | 1 + environments/templates/ook-application.yaml | 37 +++++ environments/values-base.yaml | 2 + environments/values-ccin2p3.yaml | 2 + environments/values-idfdev.yaml | 2 + environments/values-idfint.yaml | 2 + environments/values-idfprod.yaml | 2 + environments/values-minikube.yaml | 2 + environments/values-roe.yaml | 2 + environments/values-roundtable-dev.yaml | 2 + environments/values-roundtable-prod.yaml | 2 + environments/values-summit.yaml | 2 + environments/values-tucson-teststand.yaml | 2 + environments/values-usdfdev.yaml | 2 + environments/values-usdfprod.yaml | 2 + environments/values.yaml | 2 + 35 files changed, 601 insertions(+) create mode 100644 applications/ook/Chart.yaml create mode 100644 applications/ook/README.md create mode 100644 applications/ook/templates/_helpers.tpl create mode 100644 applications/ook/templates/configmap.yaml create mode 100644 applications/ook/templates/deployment.yaml create mode 100644 applications/ook/templates/ingress.yaml create mode 100644 applications/ook/templates/kafkaaccess.yaml create mode 100644 applications/ook/templates/kafkauser-secret.yaml create mode 100644 applications/ook/templates/networkpolicy.yaml create mode 100644 applications/ook/templates/service.yaml create mode 100644 applications/ook/templates/serviceaccount.yaml create mode 100644 applications/ook/templates/tests/test-connection.yaml create mode 100644 applications/ook/templates/vaultsecret.yaml create mode 100644 applications/ook/values-roundtable-dev.yaml create mode 100644 applications/ook/values.yaml create mode 100644 docs/applications/ook/index.rst create mode 100644 docs/applications/ook/values.md create mode 100644 environments/templates/ook-application.yaml diff --git a/applications/ook/Chart.yaml b/applications/ook/Chart.yaml new file mode 100644 index 0000000000..35860c90cc --- /dev/null +++ b/applications/ook/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v2 +name: ook +version: 1.0.0 +appVersion: "tickets-DM-39636" +description: Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io. 
+type: application +home: https://ook.lsst.io/ +sources: + - https://github.com/lsst-sqre/ook +maintainers: + - name: jonathansick + url: https://github.com/jonathansick diff --git a/applications/ook/README.md b/applications/ook/README.md new file mode 100644 index 0000000000..6408638015 --- /dev/null +++ b/applications/ook/README.md @@ -0,0 +1,40 @@ +# ook + +Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io. + +**Homepage:** + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | +| config.registryUrl | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Cluster URL for the Confluent Schema Registry | +| config.subjectCompatibility | string | `"FORWARD"` | Schema subject compatibility. | +| config.subjectSuffix | string | `""` | Schema subject suffix. Should be empty for production but can be set to a value to create unique subjects in the Confluent Schema Registry for testing. | +| config.topics.ingest | string | `"lsst.square-events.ook.ingest"` | Kafka topic name for ingest events | +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repository | string | `"ghcr.io/lsst-sqre/ook"` | Squarebot image repository | +| image.tag | string | The appVersion of the chart | Tag of the image | +| imagePullSecrets | list | `[]` | Secret names to use for all Docker pulls | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| ingress.path | string | `"/ook"` | Path prefix where Squarebot is hosted | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | | +| podAnnotations | object | `{}` | Annotations for API and worker pods | +| replicaCount | int | `1` | Number of API pods to run | +| resources | object | `{}` | | +| service.port | int | `80` | Port of the service to create and map to the ingress | +| service.type | string | `"ClusterIP"` | Type of service to create | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| serviceAccount.name | string | `""` | | +| tolerations | list | `[]` | | diff --git a/applications/ook/templates/_helpers.tpl b/applications/ook/templates/_helpers.tpl new file mode 100644 index 0000000000..d881f724c0 --- /dev/null +++ b/applications/ook/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "ook.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "ook.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ook.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ook.labels" -}} +helm.sh/chart: {{ include "ook.chart" . }} +{{ include "ook.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ook.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ook.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ook.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "ook.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/applications/ook/templates/configmap.yaml b/applications/ook/templates/configmap.yaml new file mode 100644 index 0000000000..582a9ae737 --- /dev/null +++ b/applications/ook/templates/configmap.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "ook.fullname" . }} + labels: + {{- include "ook.labels" . | nindent 4 }} +data: + SAFIR_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + SAFIR_PATH_PREFIX: {{ .Values.ingress.path | quote }} + SAFIR_ENVIRONMENT_URL: {{ .Values.global.baseUrl | quote }} + SAFIR_PROFILE: "production" + OOK_REGISTRY_URL: {{ .Values.config.registryUrl | quote }} + OOK_SUBJECT_SUFFIX: {{ .Values.config.subjectSuffix | quote }} + OOK_SUBJECT_COMPATIBILITY: {{ .Values.config.subjectCompatibility | quote }} + OOK_INGEST_KAFKA_TOPIC: {{ .Values.config.topics.ingest | quote }} + ALGOLIA_DOCUMENT_INDEX: "ook_documents_test" diff --git a/applications/ook/templates/deployment.yaml b/applications/ook/templates/deployment.yaml new file mode 100644 index 0000000000..48b61523e1 --- /dev/null +++ b/applications/ook/templates/deployment.yaml @@ -0,0 +1,139 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ook.fullname" . }} + labels: + {{- include "ook.labels" . | nindent 4 }} + app.kubernetes.io/component: "server" + app.kubernetes.io/part-of: "ook" +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "ook.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "ook.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: "server" + app.kubernetes.io/part-of: "ook" + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "ook.serviceAccountName" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + envFrom: + - configMapRef: + name: {{ include "ook.fullname" . }} + env: + # Writeable directory for concatenating certs. See "tmp" volume. + - name: "KAFKA_CERT_TEMP_DIR" + value: "/tmp/kafka_certs" + # From KafkaAccess + - name: "KAFKA_BOOTSTRAP_SERVERS" + valueFrom: + secretKeyRef: + name: {{ template "ook.fullname" . }}-kafka + key: "bootstrapServers" + - name: "KAFKA_SECURITY_PROTOCOL" + value: "SSL" + # From replicated KafkaUser secret + - name: "KAFKA_SSL_CLUSTER_CAFILE" + value: "/etc/kafkacluster/ca.crt" + - name: "KAFKA_SSL_CLIENT_CAFILE" + value: "/etc/kafkauser/ca.crt" + - name: "KAFKA_SSL_CLIENT_CERTFILE" + value: "/etc/kafkauser/user.crt" + - name: "KAFKA_SSL_CLIENT_KEYFILE" + value: "/etc/kafkauser/user.key" + # From Vault secrets + - name: "ALGOLIA_APP_ID" + valueFrom: + secretKeyRef: + name: {{ template "ook.fullname" . }} + key: "ALGOLIA_APP_ID" + - name: "ALGOLIA_API_KEY" + valueFrom: + secretKeyRef: + name: {{ template "ook.fullname" . }} + key: "ALGOLIA_API_KEY" + - name: "OOK_GITHUB_APP_ID" + valueFrom: + secretKeyRef: + name: {{ template "ook.fullname" . }} + key: "OOK_GITHUB_APP_ID" + - name: "OOK_GITHUB_APP_PRIVATE_KEY" + valueFrom: + secretKeyRef: + name: {{ template "ook.fullname" . }} + key: "OOK_GITHUB_APP_PRIVATE_KEY" + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: "{{ template "ook.fullname" . }}" + mountPath: "/etc/kafkacluster/ca.crt" + subPath: "ca.crt" + - name: "kafka-user" + mountPath: "/etc/kafkauser/ca.crt" + subPath: "ca.crt" + - name: "kafka-user" + mountPath: "/etc/kafkauser/user.crt" + subPath: "user.crt" + - name: "kafka-user" + mountPath: "/etc/kafkauser/user.key" + subPath: "user.key" + - name: "tmp" + mountPath: "/tmp/kafka_certs" + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: "kafka-user" + secret: + secretName: {{ template "ook.fullname" . }}-kafka-user + - name: "{{ template "ook.fullname" . }}" + secret: + secretName: {{ template "ook.fullname" . }} + - name: "tmp" + emptyDir: {} diff --git a/applications/ook/templates/ingress.yaml b/applications/ook/templates/ingress.yaml new file mode 100644 index 0000000000..41fe9578a7 --- /dev/null +++ b/applications/ook/templates/ingress.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: {{ template "ook.fullname" . }} + labels: + {{- include "ook.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "exec:admin" + loginRedirect: true +template: + metadata: + name: {{ template "ook.fullname" . }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: {{ .Values.ingress.path | quote }} + pathType: "Prefix" + backend: + service: + name: {{ template "ook.fullname" . }} + port: + number: {{ .Values.service.port }} diff --git a/applications/ook/templates/kafkaaccess.yaml b/applications/ook/templates/kafkaaccess.yaml new file mode 100644 index 0000000000..2171625fa9 --- /dev/null +++ b/applications/ook/templates/kafkaaccess.yaml @@ -0,0 +1,14 @@ +apiVersion: access.strimzi.io/v1alpha1 +kind: KafkaAccess +metadata: + name: {{ include "ook.fullname" . }}-kafka +spec: + kafka: + name: sasquatch + namespace: sasquatch + listener: tls + user: + kind: KafkaUser + apiGroup: kafka.strimzi.io + name: ook + namespace: sasquatch diff --git a/applications/ook/templates/kafkauser-secret.yaml b/applications/ook/templates/kafkauser-secret.yaml new file mode 100644 index 0000000000..a332e645a7 --- /dev/null +++ b/applications/ook/templates/kafkauser-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "ook.fullname" . }}-kafka-user + annotations: + replicator.v1.mittwald.de/replicate-from: sasquatch/ook + replicator.v1.mittwald.de/strip-labels: "true" +data: {} diff --git a/applications/ook/templates/networkpolicy.yaml b/applications/ook/templates/networkpolicy.yaml new file mode 100644 index 0000000000..914b196dc6 --- /dev/null +++ b/applications/ook/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "ook.fullname" . }} +spec: + podSelector: + matchLabels: + {{- include "ook.selectorLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/ook/templates/service.yaml b/applications/ook/templates/service.yaml new file mode 100644 index 0000000000..94e4fd5aed --- /dev/null +++ b/applications/ook/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "ook.fullname" . }} + labels: + {{- include "ook.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "ook.selectorLabels" . | nindent 4 }} diff --git a/applications/ook/templates/serviceaccount.yaml b/applications/ook/templates/serviceaccount.yaml new file mode 100644 index 0000000000..47ed6cc775 --- /dev/null +++ b/applications/ook/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ook.serviceAccountName" . }} + labels: + {{- include "ook.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/applications/ook/templates/tests/test-connection.yaml b/applications/ook/templates/tests/test-connection.yaml new file mode 100644 index 0000000000..b701b729d5 --- /dev/null +++ b/applications/ook/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "ook.fullname" . }}-test-connection" + labels: + {{- include "ook.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "ook.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/applications/ook/templates/vaultsecret.yaml b/applications/ook/templates/vaultsecret.yaml new file mode 100644 index 0000000000..9d6d470429 --- /dev/null +++ b/applications/ook/templates/vaultsecret.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "ook.fullname" . }} + labels: + {{- include "ook.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPathPrefix }}/ook" + type: Opaque diff --git a/applications/ook/values-roundtable-dev.yaml b/applications/ook/values-roundtable-dev.yaml new file mode 100644 index 0000000000..91a3f6a1c6 --- /dev/null +++ b/applications/ook/values-roundtable-dev.yaml @@ -0,0 +1,5 @@ +image: + pullPolicy: Always + +config: + logLevel: "DEBUG" diff --git a/applications/ook/values.yaml b/applications/ook/values.yaml new file mode 100644 index 0000000000..f172712030 --- /dev/null +++ b/applications/ook/values.yaml @@ -0,0 +1,103 @@ +# Default values for squarebot. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +# +# Global parameters will be set by parameters injected by Argo CD and should +# not be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + +# -- Number of API pods to run +replicaCount: 1 + +image: + # -- Squarebot image repository + repository: ghcr.io/lsst-sqre/ook + + # -- Image pull policy + pullPolicy: IfNotPresent + + # -- Tag of the image + # @default -- The appVersion of the chart + tag: "" + +# -- Secret names to use for all Docker pulls +imagePullSecrets: [] + +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "" + +serviceAccount: + # -- Specifies whether a service account should be created + create: true + + # -- Annotations to add to the service account + annotations: {} + + # The name of the service account to use. + # @default -- Generated using the fullname template + name: "" + +# -- Annotations for API and worker pods +podAnnotations: {} + +service: + # -- Type of service to create + type: ClusterIP + + # -- Port of the service to create and map to the ingress + port: 80 + +ingress: + # -- Additional annotations to add to the ingress + annotations: {} + + # -- Path prefix where Squarebot is hosted + path: "/ook" + +resources: + {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +config: + # -- Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" + logLevel: "INFO" + + # -- Cluster URL for the Confluent Schema Registry + registryUrl: "http://sasquatch-schema-registry.sasquatch:8081" + + # -- Schema subject suffix. 
Should be empty for production but can be set + # to a value to create unique subjects in the Confluent Schema Registry + # for testing. + subjectSuffix: "" + + # -- Schema subject compatibility. + subjectCompatibility: "FORWARD" + + topics: + # -- Kafka topic name for ingest events + ingest: "lsst.square-events.ook.ingest" diff --git a/docs/applications/index.rst b/docs/applications/index.rst index 79fb4d856c..de95b55aa6 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -68,4 +68,5 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde giftless/index kubernetes-replicator/index + ook/index squarebot/index diff --git a/docs/applications/ook/index.rst b/docs/applications/ook/index.rst new file mode 100644 index 0000000000..9228055682 --- /dev/null +++ b/docs/applications/ook/index.rst @@ -0,0 +1,19 @@ +.. px-app:: ook + +############################ +ook — Documentation indexing +############################ + +Ook is the librarian service for Rubin Observatory. +Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, https://www.lsst.io. + +.. jinja:: ook + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/ook/values.md b/docs/applications/ook/values.md new file mode 100644 index 0000000000..25c5b97f83 --- /dev/null +++ b/docs/applications/ook/values.md @@ -0,0 +1,12 @@ +```{px-app-values} ook +``` + +# Ook Helm values reference + +Helm values reference table for the {px-app}`ook` application. + +```{include} ../../../applications/ook/README.md +--- +start-after: "## Values" +--- +``` diff --git a/docs/documenteer.toml b/docs/documenteer.toml index 69ba7e0d65..3a72dd9d42 100644 --- a/docs/documenteer.toml +++ b/docs/documenteer.toml @@ -33,4 +33,5 @@ ignore = [ '^https://usdf-rsp-dev.slac.stanford.edu', '^https://github.com/lsst-sqre/phalanx/blob/main/applications/strimzi/values.yaml', '^https://github.com/orgs/', + '^https://ook.lsst.io/', # FIXME readd when Ook docs are published ] diff --git a/environments/README.md b/environments/README.md index 548dad9826..2a814ede11 100644 --- a/environments/README.md +++ b/environments/README.md @@ -28,6 +28,7 @@ | nublado2.enabled | bool | `false` | | | obsloctap.enabled | bool | `false` | | | onepasswordUuid | string | `"dg5afgiadsffeklfr6jykqymeu"` | UUID of the 1Password item in which to find Vault tokens | +| ook.enabled | bool | `false` | | | plot-navigator.enabled | bool | `false` | | | portal.enabled | bool | `false` | | | postgres.enabled | bool | `false` | | diff --git a/environments/templates/ook-application.yaml b/environments/templates/ook-application.yaml new file mode 100644 index 0000000000..65d244bdd3 --- /dev/null +++ b/environments/templates/ook-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.ook.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "ook" +spec: + finalizers: + - "kubernetes" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "ook" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "ook" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/ook" + repoURL: {{ .Values.repoURL | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ 
.Values.fqdn }}" + - name: "global.vaultSecretsPathPrefix" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-base.yaml b/environments/values-base.yaml index 698978b072..adbc29dbc1 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -32,6 +32,8 @@ noteburst: enabled: false nublado: enabled: false +ook: + enabled: false nublado2: enabled: true plot-navigator: diff --git a/environments/values-ccin2p3.yaml b/environments/values-ccin2p3.yaml index 0325ad4e15..e247c46898 100644 --- a/environments/values-ccin2p3.yaml +++ b/environments/values-ccin2p3.yaml @@ -32,6 +32,8 @@ nublado: enabled: false nublado2: enabled: true +ook: + enabled: false plot-navigator: enabled: false portal: diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index e6cb2e88f5..10479deee5 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -35,6 +35,8 @@ nublado: enabled: true nublado2: enabled: true +ook: + enabled: false plot-navigator: enabled: false portal: diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 78ee5ccde3..66436d6da3 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -37,6 +37,8 @@ nublado: enabled: true nublado2: enabled: true +ook: + enabled: false plot-navigator: enabled: true portal: diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index ef3df38d35..af4ec443e2 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -35,6 +35,8 @@ nublado: enabled: true nublado2: enabled: true +ook: + enabled: false plot-navigator: enabled: false portal: diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index fcb75dc490..b410f0003e 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -34,6 +34,8 @@ nublado: enabled: false nublado2: enabled: false +ook: + enabled: false plot-navigator: enabled: false portal: diff --git a/environments/values-roe.yaml b/environments/values-roe.yaml index ce1ed5c4fc..eae03f0fc3 100644 --- a/environments/values-roe.yaml +++ b/environments/values-roe.yaml @@ -34,6 +34,8 @@ nublado: enabled: false nublado2: enabled: true +ook: + enabled: false plot-navigator: enabled: false portal: diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 4325108877..50a3a008ef 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -36,6 +36,8 @@ nublado: enabled: false nublado2: enabled: false +ook: + enabled: true plot-navigator: enabled: false portal: diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index ca564b0db7..dc93d2d54f 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -34,6 +34,8 @@ nublado: enabled: false nublado2: enabled: false +ook: + enabled: false plot-navigator: enabled: false portal: diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index 91a8766a88..06820ef15a 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -34,6 +34,8 @@ nublado: enabled: false nublado2: enabled: true +ook: + enabled: false plot-navigator: enabled: false portal: diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index 
910147557b..f10f8bfb14 100644
--- a/environments/values-tucson-teststand.yaml
+++ b/environments/values-tucson-teststand.yaml
@@ -34,6 +34,8 @@ nublado:
   enabled: false
 nublado2:
   enabled: true
+ook:
+  enabled: false
 plot-navigator:
   enabled: false
 portal:
diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml
index 26349b35f8..7510d5e056 100644
--- a/environments/values-usdfdev.yaml
+++ b/environments/values-usdfdev.yaml
@@ -42,6 +42,8 @@ obsloctap:
   enabled: true
 obstap:
   enabled: true
+ook:
+  enabled: false
 plot-navigator:
   enabled: true
 portal:
diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml
index 1cfcabdefa..08563410d7 100644
--- a/environments/values-usdfprod.yaml
+++ b/environments/values-usdfprod.yaml
@@ -38,6 +38,8 @@ nublado:
   enabled: false
 nublado2:
   enabled: true
+ook:
+  enabled: false
 plot-navigator:
   enabled: true
 portal:
diff --git a/environments/values.yaml b/environments/values.yaml
index d727400ee2..de79c383d4 100644
--- a/environments/values.yaml
+++ b/environments/values.yaml
@@ -54,6 +54,8 @@ nublado:
   enabled: false
 nublado2:
   enabled: false
+ook:
+  enabled: false
 obsloctap:
   enabled: false
 plot-navigator:

From 8cfce0c51428087eca4dea18b7443b72980e62f4 Mon Sep 17 00:00:00 2001
From: Jonathan Sick
Date: Mon, 17 Jul 2023 18:13:01 -0400
Subject: [PATCH 218/308] Add Ook Kafka user and topics

- Allow Ook to read from squarebot's app mentions to permit chatbot
  interface.
---
 .../square-events/templates/ook-topics.yaml | 13 ++++++
 .../square-events/templates/ook-user.yaml   | 44 ++++++++++++++++++
 2 files changed, 57 insertions(+)
 create mode 100644 applications/sasquatch/charts/square-events/templates/ook-topics.yaml
 create mode 100644 applications/sasquatch/charts/square-events/templates/ook-user.yaml

diff --git a/applications/sasquatch/charts/square-events/templates/ook-topics.yaml b/applications/sasquatch/charts/square-events/templates/ook-topics.yaml
new file mode 100644
index 0000000000..891533e26c
--- /dev/null
+++ b/applications/sasquatch/charts/square-events/templates/ook-topics.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaTopic
+metadata:
+  name: "lsst.square-events.ook.ingest"
+  labels:
+    strimzi.io/cluster: {{ .Values.cluster.name }}
+spec:
+  partitions: 4
+  replicas: 3
+  config:
+    # http://kafka.apache.org/documentation/#topicconfigs
+    retention.ms: 604800000 # 1 week
diff --git a/applications/sasquatch/charts/square-events/templates/ook-user.yaml b/applications/sasquatch/charts/square-events/templates/ook-user.yaml
new file mode 100644
index 0000000000..0c3bb352cc
--- /dev/null
+++ b/applications/sasquatch/charts/square-events/templates/ook-user.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaUser
+metadata:
+  name: ook
+  labels:
+    strimzi.io/cluster: {{ .Values.cluster.name }}
+spec:
+  template:
+    secret:
+      metadata:
+        annotations:
+          replicator.v1.mittwald.de/replication-allowed: "true"
+          replicator.v1.mittwald.de/replication-allowed-namespaces: "ook"
+  authentication:
+    type: tls
+  authorization:
+    type: simple
+    acls:
+      - resource:
+          type: group
+          name: "ook"
+          patternType: literal
+        operations:
+          - "Read"
+        host: "*"
+      - resource:
+          type: topic
+          name: "lsst.square-events.ook.ingest"
+          patternType: literal
+        operations:
+          - "Describe"
+          - "Read"
+          - "Write"
+        host: "*"
+      - resource:
+          type: topic
+          name: "lsst.square-events.squarebot.slack.app.mention"
+          patternType: literal
+        type: allow
+        host: "*"
+        operations:
+          - "Read"
+          - "Describe"

From a2251ef297df422491e8e7f5ea1e6513ff8fb712 Mon Sep 17 00:00:00 2001
From: Jonathan Sick
Date: Thu, 20 Jul 2023 14:16:36 -0400
Subject: [PATCH 219/308] Deploy Ook 0.6.0

---
 applications/ook/Chart.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/applications/ook/Chart.yaml b/applications/ook/Chart.yaml
index 35860c90cc..fde58b0c4d 100644
--- a/applications/ook/Chart.yaml
+++ b/applications/ook/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: ook
 version: 1.0.0
-appVersion: "tickets-DM-39636"
+appVersion: "0.6.0"
 description: Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io.
 type: application
 home: https://ook.lsst.io/

From ca5b36260dca2a0582627351d4eec855a4b6c1a9 Mon Sep 17 00:00:00 2001
From: Jonathan Sick
Date: Thu, 20 Jul 2023 14:30:51 -0400
Subject: [PATCH 220/308] Link SQR-076 to Square Events apps

---
 applications/ook/Chart.yaml       | 5 +++++
 applications/squarebot/Chart.yaml | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/applications/ook/Chart.yaml b/applications/ook/Chart.yaml
index fde58b0c4d..e2e2b5d80c 100644
--- a/applications/ook/Chart.yaml
+++ b/applications/ook/Chart.yaml
@@ -10,3 +10,8 @@ sources:
 maintainers:
   - name: jonathansick
     url: https://github.com/jonathansick
+annotations:
+  phalanx.lsst.io/docs: |
+    - id: "SQR-075"
+      title: "Shared Pydantic schemas as the basis for Kafka/Avro messages in SQuaRE Roundtable"
+      url: "https://sqr-076.lsst.io/"
diff --git a/applications/squarebot/Chart.yaml b/applications/squarebot/Chart.yaml
index 70d23309d5..6eca1c0571 100644
--- a/applications/squarebot/Chart.yaml
+++ b/applications/squarebot/Chart.yaml
@@ -10,3 +10,8 @@ sources:
 maintainers:
   - name: jonathansick
     url: https://github.com/jonathansick
+annotations:
+  phalanx.lsst.io/docs: |
+    - id: "SQR-075"
+      title: "Shared Pydantic schemas as the basis for Kafka/Avro messages in SQuaRE Roundtable"
+      url: "https://sqr-076.lsst.io/"

From 88deb9d08bac63960fe53a006305ba5055fc6af2 Mon Sep 17 00:00:00 2001
From: adam
Date: Thu, 20 Jul 2023 13:43:21 -0700
Subject: [PATCH 221/308] Make nublado3 the default in idf prod

---
 applications/mobu/values-idfprod.yaml     | 3 +++
 applications/nublado/values-idfprod.yaml  | 2 --
 applications/nublado2/values-idfprod.yaml | 1 +
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/applications/mobu/values-idfprod.yaml b/applications/mobu/values-idfprod.yaml
index df45b2880b..964da36c56 100644
--- a/applications/mobu/values-idfprod.yaml
+++ b/applications/mobu/values-idfprod.yaml
@@ -14,6 +14,7 @@ config:
       options:
         repo_url: "https://github.com/lsst-sqre/system-test.git"
         repo_branch: "prod"
+        use_cachemachine: false
         max_executions: 1
       restart: true
     - name: "quickbeam"
@@ -32,6 +33,7 @@ config:
        repo_branch: "prod"
        idle_time: 900
        delete_lab: false
+        use_cachemachine: false
       restart: true
     - name: "tutorial"
       count: 1
@@ -49,6 +51,7 @@ config:
        repo_branch: "prod"
        max_executions: 1
        working_directory: "notebooks/tutorial-notebooks"
+        use_cachemachine: false
       restart: true
     - name: "tap"
       count: 1
diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml
index 71999a9c51..d7b2704849 100644
--- a/applications/nublado/values-idfprod.yaml
+++ b/applications/nublado/values-idfprod.yaml
@@ -20,7 +20,6 @@ controller:
         PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt"
         GOOGLE_APPLICATION_CREDENTIALS: 
"/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-repos.yaml" - HUB_ROUTE: "/n3/hub" S3_ENDPOINT_URL: "https://storage.googleapis.com" NO_ACTIVITY_TIMEOUT: "432000" CULL_KERNEL_IDLE_TIMEOUT: "432000" @@ -78,7 +77,6 @@ controller: jupyterhub: hub: - baseUrl: "/n3" config: ServerApp: shutdown_no_activity_timeout: 432000 diff --git a/applications/nublado2/values-idfprod.yaml b/applications/nublado2/values-idfprod.yaml index ff3cb92991..315d8f26dd 100644 --- a/applications/nublado2/values-idfprod.yaml +++ b/applications/nublado2/values-idfprod.yaml @@ -1,5 +1,6 @@ jupyterhub: hub: + baseUrl: "/n2" config: ServerApp: shutdown_no_activity_timeout: 432000 From 02ac7e3e860e4cf54d72519c103aa896b92c4abd Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 20 Jul 2023 14:11:24 -0700 Subject: [PATCH 222/308] enable nublado3 usdfprod --- applications/nublado/values-usdfprod.yaml | 175 ++++++++++++++++++++++ environments/values-usdfprod.yaml | 2 +- 2 files changed, 176 insertions(+), 1 deletion(-) create mode 100644 applications/nublado/values-usdfprod.yaml diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml new file mode 100644 index 0000000000..e3885bfafe --- /dev/null +++ b/applications/nublado/values-usdfprod.yaml @@ -0,0 +1,175 @@ +controller: + config: + safir: + logLevel: "DEBUG" + fileserver: + enabled: true + timeout: 21600 + + images: + source: + type: "docker" + registry: "docker-registry.slac.stanford.edu" + repository: "lsstsqre/sciplat-lab" + recommendedTag: "recommended" + numReleases: 1 + numWeeklies: 2 + numDailies: 3 + + lab: + pullSecret: "pull-secret" + + homedirSchema: "initialThenUsername" + + env: + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" + DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml" + HUB_ROUTE: "/nb/hub" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" + PGUSER: "rubin" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" + https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128" + no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1" + + files: + # Add rubin_users group (there is not yet a simpler way to do this). 
+ /etc/group: + contents: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + rubin_users:x:4085: + + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + + volumes: + - containerPath: "/home" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-home" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/project" + subPath: "g" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/sdf/group/rubin" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/sdf/data/rubin" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-data-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/scratch" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "sdf-scratch" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/fs/ddn/sdf/group/rubin" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "fs-ddn-sdf-group-rubin" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + - containerPath: "/fs/ddn/sdf/group/lsst" + mode: "rw" + source: + type: "persistentVolumeClaim" + storageClassName: "fs-ddn-sdf-group-lsst" + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: "1Gi" + +proxy: + ingress: + annotations: + # proxy-body-size is temporary until USDF uses our normal ingress-nginx, + # which already configures a larger value. + nginx.ingress.kubernetes.io/proxy-body-size: "50m" + + # These are substantially shorter than the default timeouts (it's not + # clear why). 
+ nginx.ingress.kubernetes.io/proxy-connect-timeout: "30" + nginx.ingress.kubernetes.io/proxy-read-timeout: "20" + +jupyterhub: + hub: + baseUrl: "/nb" + db: + url: "postgresql://nublado3@postgres.postgres/nublado3" + cull: + timeout: 432000 + every: 300 + maxAge: 2160000 diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index 08563410d7..419025cf4c 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -35,7 +35,7 @@ narrativelog: noteburst: enabled: false nublado: - enabled: false + enabled: true nublado2: enabled: true ook: From be6f9192d7a3767fd63ba3e45f8a4d423eebe391 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 20 Jul 2023 15:26:16 -0700 Subject: [PATCH 223/308] remove baseUrl since it's the default --- applications/nublado/values-usdfprod.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index e3885bfafe..b706852fc2 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -166,7 +166,6 @@ proxy: jupyterhub: hub: - baseUrl: "/nb" db: url: "postgresql://nublado3@postgres.postgres/nublado3" cull: From 96edcecaf0611495af25f74fd0646534b30a442a Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 20 Jul 2023 15:28:31 -0700 Subject: [PATCH 224/308] point nublado2 in usdf-prod to /n2 --- applications/nublado2/values-usdfprod.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/nublado2/values-usdfprod.yaml b/applications/nublado2/values-usdfprod.yaml index 26723b22d4..690fd7c7bd 100644 --- a/applications/nublado2/values-usdfprod.yaml +++ b/applications/nublado2/values-usdfprod.yaml @@ -1,6 +1,7 @@ jupyterhub: hub: + baseUrl: "/n2" config: ServerApp: shutdown_no_activity_timeout: 432000 From 77f9df4aaee3622c7719a0e06fd43e1e8f803a58 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 20 Jul 2023 16:11:40 -0700 Subject: [PATCH 225/308] add postgres nublado3 stanza --- applications/postgres/values-usdfprod.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/applications/postgres/values-usdfprod.yaml b/applications/postgres/values-usdfprod.yaml index c7ae91cda0..dbc5324ac3 100644 --- a/applications/postgres/values-usdfprod.yaml +++ b/applications/postgres/values-usdfprod.yaml @@ -1,6 +1,9 @@ jupyterhub_db: user: 'jovyan' db: 'jupyterhub' +nublado3_db: + user: 'nublado3' + db: 'nublado3' gafaelfawr_db: user: 'gafaelfawr' db: 'gafaelfawr' From 26a0a9e5f72392c74396729fd67df3076e8b94da Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Thu, 20 Jul 2023 16:23:59 -0700 Subject: [PATCH 226/308] disable fileserver on usdf --- applications/nublado/values-usdfdev.yaml | 2 +- applications/nublado/values-usdfprod.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index e3885bfafe..bbac615954 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -3,7 +3,7 @@ controller: safir: logLevel: "DEBUG" fileserver: - enabled: true + enabled: false timeout: 21600 images: diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml index b706852fc2..9f38fe9157 100644 --- a/applications/nublado/values-usdfprod.yaml +++ b/applications/nublado/values-usdfprod.yaml @@ -3,7 +3,7 @@ controller: safir: logLevel: "DEBUG" fileserver: - 
enabled: true + enabled: false timeout: 21600 images: From f090803d62a7db432c851357c2c8c3961dc5912f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 20 Jul 2023 18:47:06 -0700 Subject: [PATCH 227/308] Fix typo in IDF prod mobu configuration --- applications/mobu/values-idfprod.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/mobu/values-idfprod.yaml b/applications/mobu/values-idfprod.yaml index 964da36c56..0f6813639a 100644 --- a/applications/mobu/values-idfprod.yaml +++ b/applications/mobu/values-idfprod.yaml @@ -14,8 +14,8 @@ config: options: repo_url: "https://github.com/lsst-sqre/system-test.git" repo_branch: "prod" - use_cachemachine: false max_executions: 1 + use_cachemachine: false restart: true - name: "quickbeam" count: 1 @@ -33,7 +33,7 @@ config: repo_branch: "prod" idle_time: 900 delete_lab: false - use_machemachine: false + use_cachemachine: false restart: true - name: "tutorial" count: 1 From ba3bc36113c71a706fe7adffc23c3e8026ac2d70 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Fri, 21 Jul 2023 16:38:43 -0700 Subject: [PATCH 228/308] add mreuter to argocd --- applications/argocd/values-usdfdev.yaml | 1 + applications/argocd/values-usdfprod.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index 342c3294f9..94ae11a5f6 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -52,6 +52,7 @@ argo-cd: g, reinking@slac.stanford.edu, role:admin g, smart@slac.stanford.edu, role:admin g, omullan@slac.stanford.edu, role:admin + g, mreuter@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 0fd2ed162c..dd80d9e706 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -51,6 +51,7 @@ argo-cd: g, reinking@slac.stanford.edu, role:admin g, smart@slac.stanford.edu, role:admin g, omullan@slac.stanford.edu, role:admin + g, mreuter@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From 9dd40879e9099887f5f1db6aa011d9e7e37a5cc1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 07:54:21 +0000 Subject: [PATCH 229/308] Update Helm release redis to v1.0.6 --- applications/gafaelfawr/Chart.yaml | 2 +- applications/noteburst/Chart.yaml | 2 +- applications/portal/Chart.yaml | 2 +- applications/times-square/Chart.yaml | 2 +- applications/vo-cutouts/Chart.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index 6bb95c897d..8e0e2c3cb6 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -9,7 +9,7 @@ appVersion: 9.2.2 dependencies: - name: redis - version: 1.0.5 + version: 1.0.6 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index 6e6c626fc4..bbc6be0d9c 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -13,7 +13,7 @@ maintainers: dependencies: - name: redis - version: 1.0.5 + version: 1.0.6 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index eefa25cc21..e64be482b1 100644 --- 
a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -9,7 +9,7 @@ appVersion: "suit-2023.1.5" dependencies: - name: redis - version: 1.0.5 + version: 1.0.6 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 3caa5359cb..98540e5d9d 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -12,7 +12,7 @@ appVersion: "0.7.0" dependencies: - name: redis - version: 1.0.5 + version: 1.0.6 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index 6ff5db7f38..76f821973d 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -8,7 +8,7 @@ appVersion: 1.0.0 dependencies: - name: redis - version: 1.0.5 + version: 1.0.6 repository: https://lsst-sqre.github.io/charts/ annotations: From 1313af92f4ffd4bb8a5c1538b291eaeff8df2022 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 09:31:24 +0000 Subject: [PATCH 230/308] Update Helm release argo-cd to v5.41.1 --- applications/argocd/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 35c093f553..f9249a9808 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 5.40.0 + version: 5.41.1 repository: https://argoproj.github.io/argo-helm From b159df9f7c21169628177e95f3b2d2c662c0e3b9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 09:31:28 +0000 Subject: [PATCH 231/308] Update Helm release argo-workflows to v0.32.0 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index d4cc5d8b92..9dd85ab07b 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.31.0 + version: 0.32.0 repository: https://argoproj.github.io/argo-helm From 5ee135213431eb7581b8c7fe6592f6b0c76dcade Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Mon, 24 Jul 2023 10:37:17 -0400 Subject: [PATCH 232/308] Expand kafka data partition at base - Kafka at base has two functions now, it stores topics from the teststand and topics replicated from the summit efd. - Expand Kafka data partition from 500G to 1T --- applications/sasquatch/values-base.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 3f04cc16bb..3fa00a46de 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -23,6 +23,7 @@ strimzi-kafka: kafka: storage: storageClassName: rook-ceph-block + size: 1Ti externalListener: tls: enabled: true From d379c3750ec3ef27edf6400e6542dc6fea3fcc0d Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Mon, 24 Jul 2023 11:19:25 -0400 Subject: [PATCH 233/308] Update title for Giftless docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the name — description format for app titles. 
--- docs/applications/giftless/index.rst | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/applications/giftless/index.rst b/docs/applications/giftless/index.rst index 383fba05a2..13746c3abb 100644 --- a/docs/applications/giftless/index.rst +++ b/docs/applications/giftless/index.rst @@ -1,14 +1,13 @@ .. px-app:: giftless -####################### -Giftless Git LFS server -####################### +######################### +Giftless — Git LFS server +######################### -Giftless, a Git LFS server provided by Datopian, is the Rubin -Observatory provider of Git LFS services. This implementation provides -both read-only and read-write endpoints for Git LFS. +Giftless, a Git LFS server provided by Datopian, is the Rubin Observatory provider of Git LFS services. +This implementation provides both read-only and read-write endpoints for Git LFS. -See https://giftless.datopian.com/en/latest/index.html for details. +See `Datatopian's documentation `__ for details. .. jinja:: giftless :file: applications/_summary.rst.jinja From 034412b1187c665785264c026788f0148206851c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 24 Jul 2023 07:54:30 +0000 Subject: [PATCH 234/308] Update gcr.io/cloudsql-docker/gce-proxy Docker tag to v1.33.9 --- applications/gafaelfawr/values.yaml | 2 +- applications/sqlproxy-cross-project/values.yaml | 2 +- applications/times-square/values.yaml | 2 +- applications/vo-cutouts/values.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 6ba21a9a0c..b1f9d2142b 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -293,7 +293,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.8" + tag: "1.33.9" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index 0e022d21b9..aee2f73071 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.33.8" + tag: "1.33.9" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 45168cb2bb..640f3afbde 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -123,7 +123,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.8" + tag: "1.33.9" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index f160031c90..6b93573ab6 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -75,7 +75,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.33.8" + tag: "1.33.9" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" From 426fcb3079a646eba4eb36b28bba0c39dea90b25 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 24 Jul 2023 09:31:18 -0700 Subject: [PATCH 235/308] Update Helm docs --- applications/gafaelfawr/README.md | 2 
+- applications/sqlproxy-cross-project/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 03c806bbc0..086454aec9 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -17,7 +17,7 @@ Authentication and identity system | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.9"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 7a84cea38a..82787a918d 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.33.8"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.33.9"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/times-square/README.md b/applications/times-square/README.md index a2ab3059d3..942f32e22a 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -18,7 +18,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. 
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.9"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index eea5d28e20..f7c9f901cd 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.33.8"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.33.9"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and ability to sign URLs as itself | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | From b55b41e779a58f1da5332ec862ebd816877fa3dd Mon Sep 17 00:00:00 2001 From: Michael Reuter Date: Fri, 21 Jul 2023 17:30:06 -0700 Subject: [PATCH 236/308] TTS: Update cachemachine to Cycle 32. --- applications/cachemachine/values-tucson-teststand.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/applications/cachemachine/values-tucson-teststand.yaml b/applications/cachemachine/values-tucson-teststand.yaml index 73dde3cb51..f88f37ba79 100644 --- a/applications/cachemachine/values-tucson-teststand.yaml +++ b/applications/cachemachine/values-tucson-teststand.yaml @@ -8,11 +8,11 @@ autostart: "type": "RubinRepoMan", "registry_url": "ts-dockerhub.lsst.org", "repo": "sal-sciplat-lab", - "recommended_tag": "recommended_c0031", + "recommended_tag": "recommended_c0032", "num_releases": 1, "num_weeklies": 3, "num_dailies": 2, - "cycle": 31, + "cycle": 32, "alias_tags": [ "latest", "latest_daily", From 95b8d23d1f1fb1c40eda494da8071ad9a925ce2d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 24 Jul 2023 10:27:53 -0700 Subject: [PATCH 237/308] Ignore bogus pipelines releases for tap-schema Renovate is picking up the annoying global pipelines release tags for sdm_schemas. Try to exclude them. 
--- renovate.json | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/renovate.json b/renovate.json index 940c7400f3..226aabab4b 100644 --- a/renovate.json +++ b/renovate.json @@ -3,6 +3,12 @@ "config:recommended" ], "configMigration": true, + "packageRules": [ + { + "matchPackageNames": ["lsstsqre/tap-schema-mock"], + "allowedVersions": "<10" + } + ], "rebaseWhen": "conflicted", "schedule": [ "before 6am on Monday" From 578f0a0b66be18d65a09c7a081e44e6d428466da Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 18 Jul 2023 16:33:58 -0700 Subject: [PATCH 238/308] Convert README to Markdown We're standardizing on Markdown README files for our other projects, so make Phalanx follow suite. --- README.md | 11 +++++++++++ README.rst | 23 ----------------------- pyproject.toml | 2 +- 3 files changed, 12 insertions(+), 24 deletions(-) create mode 100644 README.md delete mode 100644 README.rst diff --git a/README.md b/README.md new file mode 100644 index 0000000000..8d82cd2a83 --- /dev/null +++ b/README.md @@ -0,0 +1,11 @@ +# Phalanx + +This is the Argo CD repository for the Rubin Science Platform. +It stores the root Argo CD application, deployment configuration for the other applications, the installer, and other helper scripts. + +See [phalanx.lsst.io](https://phalanx.lsst.io/) for full documentation. + +Phalanx is developed by the [Vera C. Rubin Observatory](https://www.lsst.org/). + +A phalanx is a SQuaRE deployment (Science Quality and Reliability Engineering, the team responsible for the Rubin Science Platform). +Phalanx is how we ensure that all of our services work together as a unit. diff --git a/README.rst b/README.rst deleted file mode 100644 index 9cd4366892..0000000000 --- a/README.rst +++ /dev/null @@ -1,23 +0,0 @@ -####### -Phalanx -####### - -This is the Argo CD repository for the Rubin Science Platform. -It stores the root Argo CD application, deployment configuration for the other applications, the installer, and other helper scripts. - -See `phalanx.lsst.io `__ for the documentation. - -Phalanx is developed by the `Vera C. Rubin Observatory `__. - -Environments -============ -The environments managed by Argo CD using configuration in this repository are detailed -on `phalanx.lsst.io `__. - -There are some other environments that are used for development and testing and may not be up or reachable at any given moment. - -Naming -====== - -A phalanx is a SQuaRE deployment (Science Quality and Reliability Engineering, the team responsible for the Rubin Science Platform). -Phalanx is how we ensure that all of our services work together as a unit. diff --git a/pyproject.toml b/pyproject.toml index edde5980f7..7714af3e59 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ name = "phalanx" version = "1.0.0" description = "Python support code for the Rubin Phalanx platform." license = {file = "LICENSE"} -readme= "README.rst" +readme= "README.md" keywords = [ "rubin", "lsst", From a899089a16844adfa221f7fd08522fe462ac8989 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 18 Jul 2023 16:35:03 -0700 Subject: [PATCH 239/308] Increase minimum Python version, update pins Require at least Python 3.11, matching our other applications. Update the documenteer version pin for the documentation to not reference a beta version (which gives pip general permission to install beta versions) and pin to less than 1.0 for now. Use the same form of the Pydantic pin that we're using in other repositories. 
--- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7714af3e59..e5b420b10d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ classifiers = [ "Intended Audience :: Developers", "Operating System :: POSIX", ] -requires-python = ">=3.8" +requires-python = ">=3.11" dependencies = [ "PyYAML", "GitPython", @@ -33,10 +33,10 @@ dev = [ "mypy", "types-PyYAML", # Documentation - "documenteer[guide]>=0.7.0b4", + "documenteer[guide]>=0.7.0,<1", "sphinx-diagrams", "sphinx-jinja", - "pydantic<2.0.0", + "pydantic<2", ] [project.scripts] From 73ed5adbc15c1aac3ace4317bd65b74a040f933f Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 18 Jul 2023 16:39:47 -0700 Subject: [PATCH 240/308] Add mypy configuration, fix problems Add the same mypy configuration that we use for other projects and fix a couple of problems found by running current mypy. --- pyproject.toml | 53 ++++++++++++++++++++--------- src/phalanx/docs/models.py | 2 +- src/phalanx/testing/expandcharts.py | 3 +- 3 files changed, 39 insertions(+), 19 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e5b420b10d..dab83a5e41 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,24 @@ build-backend = "setuptools.build_meta" [tool.setuptools_scm] +[tool.black] +line-length = 79 +target-version = ["py311"] +exclude = ''' +/( + \.eggs + | \.git + | \.mypy_cache + | \.tox + | \.venv + | _build + | build + | dist +)/ +''' +# Use single-quoted strings so TOML treats the string like a Python r-string +# Multi-line strings are implicitly treated by black as regular expressions + [tool.coverage.run] parallel = true branch = true @@ -78,23 +96,24 @@ exclude_lines = [ "if TYPE_CHECKING:" ] -[tool.black] -line-length = 79 -target-version = ['py38'] -exclude = ''' -/( - \.eggs - | \.git - | \.mypy_cache - | \.tox - | \.venv - | _build - | build - | dist -)/ -''' -# Use single-quoted strings so TOML treats the string like a Python r-string -# Multi-line strings are implicitly treated by black as regular expressions +[tool.mypy] +disallow_untyped_defs = true +disallow_incomplete_defs = true +ignore_missing_imports = true +local_partial_types = true +no_implicit_reexport = true +plugins = ["pydantic.mypy"] +show_error_codes = true +strict_equality = true +warn_redundant_casts = true +warn_unreachable = true +warn_unused_ignores = true + +[tool.pydantic-mypy] +init_forbid_extra = true +init_typed = true +warn_required_dynamic_aliases = true +warn_untyped_fields = true [tool.isort] include_trailing_comma = true diff --git a/src/phalanx/docs/models.py b/src/phalanx/docs/models.py index 592ce7e5a0..e60d1f043b 100644 --- a/src/phalanx/docs/models.py +++ b/src/phalanx/docs/models.py @@ -297,7 +297,7 @@ def gafaelfawr_roles(self) -> List[Tuple[str, List[str]]]: return roles - def get_app(self, name) -> Optional[Application]: + def get_app(self, name: str) -> Optional[Application]: """Get the named application.""" for app in self.apps: if app.name == name: diff --git a/src/phalanx/testing/expandcharts.py b/src/phalanx/testing/expandcharts.py index fcfd03a817..6b29f533b9 100644 --- a/src/phalanx/testing/expandcharts.py +++ b/src/phalanx/testing/expandcharts.py @@ -14,7 +14,8 @@ from pathlib import Path from typing import TYPE_CHECKING -from git import DiffIndex, Repo +from git import DiffIndex +from git.repo import Repo if TYPE_CHECKING: from typing import List, Sequence From 5b67ea85afeb011bb50eed3d63d7624a2a26b01a Mon Sep 17 00:00:00 2001 From: Russ Allbery 
Date: Tue, 18 Jul 2023 16:43:49 -0700 Subject: [PATCH 241/308] Add GitHub CI job to run Python tests Although pytest and mypy environments were defined, we previously weren't running them from CI. Add a GitHub Actions job to run mypy and pytest. --- .github/workflows/ci.yaml | 18 ++++++++++++++++++ tox.ini | 10 +++++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 4c79cf782b..91cf2d0c4f 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -27,6 +27,24 @@ jobs: - name: Run pre-commit uses: pre-commit/action@v3.0.0 + test: + runs-on: ubuntu-latest + timeout-minutes: 10 + + strategy: + matrix: + python: + - "3.11" + + steps: + - uses: actions/checkout@v3 + + - uses: lsst-sqre/run-tox@v1 + with: + python-version: ${{ matrix.python }} + tox-envs: "typing,py,coverage-report" + cache-key-prefix: test + helm: runs-on: ubuntu-latest diff --git a/tox.ini b/tox.ini index 91ac1306f1..d74a6d130d 100644 --- a/tox.ini +++ b/tox.ini @@ -17,11 +17,6 @@ description = Check links in the documentation. commands = sphinx-build --keep-going -n -W -T -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck -[testenv:py] -description = Run pytest -commands = - coverage run -m pytest {posargs} - [testenv:coverage-report] description = Compile coverage from each test run. skip_install = true @@ -46,6 +41,11 @@ deps = neophile commands = neophile update {posargs} +[testenv:py] +description = Run pytest +commands = + coverage run -m pytest {posargs} + [testenv:typing] description = Run mypy. commands = From 7dd6fb5745ac4ee5128cb3d3bad224b64d6b4c05 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 18 Jul 2023 17:02:32 -0700 Subject: [PATCH 242/308] Convert to Ruff for linting Remove the flake8 and isort configuration and their pre-commit hooks, and add the Ruff pre-commit linter. Add Ruff configuration based on Gafaelfawr and fix all of the problems that it found in the src and tests directory. Exclude the installer, since that code is about to be rewritten. Enable checks for unresolved merge conflicts in the pre-commit linter. 
--- .flake8 | 5 -- .pre-commit-config.yaml | 17 ++--- installer/generate_secrets.py | 14 ++-- installer/vault_key.py | 3 +- pyproject.toml | 104 ++++++++++++++++++++++++++-- src/phalanx/__init__.py | 4 +- src/phalanx/docs/models.py | 76 ++++++++++---------- src/phalanx/testing/expandcharts.py | 12 ++-- tests/docs/__init__.py | 0 9 files changed, 160 insertions(+), 75 deletions(-) delete mode 100644 .flake8 create mode 100644 tests/docs/__init__.py diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 63e7cad58a..0000000000 --- a/.flake8 +++ /dev/null @@ -1,5 +0,0 @@ -[flake8] -max-line-length = 79 -# E203: whitespace before :, flake8 disagrees with PEP-8 -# W503: line break after binary operator, flake8 disagrees with PEP-8 -ignore = E203, W503 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 655a5888b8..fa0753bf14 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,8 +2,9 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - - id: trailing-whitespace + - id: check-merge-conflict - id: check-toml + - id: trailing-whitespace - repo: https://github.com/adrienverge/yamllint rev: v1.32.0 @@ -22,12 +23,11 @@ repos: - --template-files=./helm-docs.md.gotmpl - --document-dependency-values=true - - repo: https://github.com/pycqa/isort - rev: 5.12.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.0.278 hooks: - - id: isort - additional_dependencies: - - toml + - id: ruff + args: [--fix, --exit-non-zero-on-fix] - repo: https://github.com/psf/black rev: 23.7.0 @@ -39,8 +39,3 @@ repos: hooks: - id: blacken-docs additional_dependencies: [black==23.7.0] - - - repo: https://github.com/pycqa/flake8 - rev: 6.0.0 - hooks: - - id: flake8 diff --git a/installer/generate_secrets.py b/installer/generate_secrets.py index 67d06ad0dc..df5b407e17 100755 --- a/installer/generate_secrets.py +++ b/installer/generate_secrets.py @@ -6,7 +6,7 @@ import os import secrets from collections import defaultdict -from datetime import datetime, timezone +from datetime import UTC, datetime from pathlib import Path import bcrypt @@ -31,7 +31,7 @@ class SecretGenerator: will be regenerated. """ - def __init__(self, environment, regenerate): + def __init__(self, environment, regenerate) -> None: self.secrets = defaultdict(dict) self.environment = environment self.regenerate = regenerate @@ -109,7 +109,7 @@ def input_file(self, component, name, description): fname = input(prompt_string) if fname: - with open(fname, "r") as f: + with open(fname) as f: self.secrets[component][name] = f.read() @staticmethod @@ -334,9 +334,7 @@ def _argocd(self): h = bcrypt.hashpw( new_pw.encode("ascii"), bcrypt.gensalt(rounds=15) ).decode("ascii") - now_time = datetime.now(timezone.utc).strftime( - "%Y-%m-%dT%H:%M:%SZ" - ) + now_time = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") self._set("argocd", "admin.password", h) self._set("argocd", "admin.passwordMtime", now_time) @@ -426,7 +424,7 @@ class OnePasswordSecretGenerator(SecretGenerator): will be regenerated. 
""" - def __init__(self, environment, regenerate): + def __init__(self, environment, regenerate) -> None: super().__init__(environment, regenerate) self.op_secrets = {} self.op = new_client_from_environment() @@ -517,7 +515,7 @@ def generate(self): """ super().generate() - for composite_key, secret_value in self.op_secrets.items(): + for composite_key, _secret_value in self.op_secrets.items(): item_component, item_name = composite_key.split() # Special case for components that may not be present in every # environment, but nonetheless might be 1Password secrets (see diff --git a/installer/vault_key.py b/installer/vault_key.py index f7f47b4bad..6e60759ae8 100755 --- a/installer/vault_key.py +++ b/installer/vault_key.py @@ -7,7 +7,7 @@ class VaultKeyRetriever: - def __init__(self): + def __init__(self) -> None: self.op = new_client_from_environment() vault_keys = self.op.get_item( os.environ["VAULT_DOC_UUID"], "RSP-Vault" @@ -23,6 +23,7 @@ def retrieve_key(self, environment, key_type): for e in self.vault_keys: if env_key in e: return e[env_key][key_type]["id"] + return None if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index dab83a5e41..77fc8618f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -31,6 +31,7 @@ dev = [ "pytest", "pre-commit", "mypy", + "ruff", "types-PyYAML", # Documentation "documenteer[guide]>=0.7.0,<1", @@ -115,8 +116,101 @@ init_typed = true warn_required_dynamic_aliases = true warn_untyped_fields = true -[tool.isort] -include_trailing_comma = true -multi_line_output = 3 -known_first_party = [] -skip = ["docs/conf.py"] +# The rule used with Ruff configuration is to disable every lint that has +# legitimate exceptions that are not dodgy code, rather than cluttering code +# with noqa markers. This is therefore a reiatively relaxed configuration that +# errs on the side of disabling legitimate lints. 
+# +# Reference for settings: https://beta.ruff.rs/docs/settings/ +# Reference for rules: https://beta.ruff.rs/docs/rules/ +[tool.ruff] +exclude = [ + "docs/**", + "installer/**", +] +line-length = 79 +ignore = [ + "ANN101", # self should not have a type annotation + "ANN102", # cls should not have a type annotation + "ANN401", # sometimes Any is the right type + "ARG001", # unused function arguments are often legitimate + "ARG002", # unused method arguments are often legitimate + "ARG005", # unused lambda arguments are often legitimate + "BLE001", # we want to catch and report Exception in background tasks + "C414", # nested sorted is how you sort by multiple keys with reverse + "COM812", # omitting trailing commas allows black autoreformatting + "D102", # sometimes we use docstring inheritence + "D104", # don't see the point of documenting every package + "D105", # our style doesn't require docstrings for magic methods + "D106", # Pydantic uses a nested Config class that doesn't warrant docs + "EM101", # justification (duplicate string in traceback) is silly + "EM102", # justification (duplicate string in traceback) is silly + "FBT003", # positional booleans are normal for Pydantic field defaults + "G004", # forbidding logging f-strings is appealing, but not our style + "PD011", # false positive with non-NumPY code that uses .values + "PLR0913", # factory pattern uses constructors with many arguments + "PLR2004", # too aggressive about magic values + "RET505", # disagree that omitting else always makes code more readable + "S105", # good idea but too many false positives on non-passwords + "S106", # good idea but too many false positives on non-passwords + "SIM102", # sometimes the formatting of nested if statements is clearer + "SIM114", # sometimes or conditions result in long lines and awkward code + "SIM117", # sometimes nested with contexts are clearer + "TCH001", # we decided to not maintain separate TYPE_CHECKING blocks + "TCH002", # we decided to not maintain separate TYPE_CHECKING blocks + "TCH003", # we decided to not maintain separate TYPE_CHECKING blocks + "TID252", # if we're going to use relative imports, use them always + "TRY003", # good general advice but lint is way too aggressive + + # Phalanx-specific exclusions. + "T201", # print makes sense to use because Phalanx is interactive +] +select = ["ALL"] +target-version = "py311" + +[tool.ruff.per-file-ignores] +"tests/**" = [ + "D103", # tests don't need docstrings + "PLR0915", # tests are allowed to be long, sometimes that's convenient + "PT012", # way too aggressive about limiting pytest.raises blocks + "S101", # tests should use assert + "SLF001", # tests are allowed to access private members +] + +[tool.ruff.isort] +known-first-party = ["gafaelfawr", "tests"] +split-on-trailing-comma = false + +[tool.ruff.flake8-bugbear] +extend-immutable-calls = [ + "fastapi.Form", + "fastapi.Header", + "fastapi.Depends", + "fastapi.Path", + "fastapi.Query", +] + +# These are too useful as attributes or methods to allow the conflict with the +# built-in to rule out their use. 
+[tool.ruff.flake8-builtins] +builtins-ignorelist = [ + "all", + "any", + "help", + "id", + "list", + "type", +] + +[tool.ruff.flake8-pytest-style] +fixture-parentheses = false +mark-parentheses = false + +[tool.ruff.pep8-naming] +classmethod-decorators = [ + "pydantic.root_validator", + "pydantic.validator", +] + +[tool.ruff.pydocstyle] +convention = "numpy" diff --git a/src/phalanx/__init__.py b/src/phalanx/__init__.py index a48ec8244e..8b6d52e3cf 100644 --- a/src/phalanx/__init__.py +++ b/src/phalanx/__init__.py @@ -1,6 +1,4 @@ -"""The phalanx package provides support tooling for Phalanx, SQuaRE's -application deployment platform. -""" +"""Support tooling for Phalanx, SQuaRE's application development platform.""" __all__ = ["__version__"] diff --git a/src/phalanx/docs/models.py b/src/phalanx/docs/models.py index e60d1f043b..0bfde18ba1 100644 --- a/src/phalanx/docs/models.py +++ b/src/phalanx/docs/models.py @@ -6,7 +6,7 @@ from dataclasses import dataclass, field from functools import cached_property from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple +from typing import Any import yaml @@ -19,8 +19,10 @@ @dataclass(kw_only=True) class DocLink: - """A model describing a document link, based on an individual array item - in the ``phalanx.lsst.io/docs`` chart annotation. + """A model describing a document link. + + This is based on an individual array item in the ``phalanx.lsst.io/docs`` + chart annotation. """ url: str @@ -29,11 +31,11 @@ class DocLink: title: str """Document title.""" - id: Optional[str] + id: str | None """Document identifier.""" def __str__(self) -> str: - """A reStructuredText-formatted link.""" + """Format as a reStructuredText-formatted link.""" if self.id is not None: label = f"{self.id}: {self.title}" else: @@ -52,13 +54,13 @@ class Application: This name is used to label directories, etc. """ - values: Dict[str, Dict] + values: dict[str, dict] """The parsed Helm values for each environment.""" - chart: Dict[str, Any] + chart: dict[str, Any] """The parsed Helm Chart.yaml file.""" - active_environments: List[str] = field(default_factory=list) + active_environments: list[str] = field(default_factory=list) """Environments where this application is active.""" namespace: str @@ -68,7 +70,7 @@ class Application: """Contents of the README.md from the applications Phalanx directory.""" @property - def homepage_url(self) -> Optional[str]: + def homepage_url(self) -> str | None: """The Helm home field, typically used for the app's docs.""" if "home" in self.chart: return self.chart["home"] @@ -76,7 +78,7 @@ def homepage_url(self) -> Optional[str]: return None @property - def source_urls(self) -> Optional[List[str]]: + def source_urls(self) -> list[str] | None: """Application source URLs, typically from the Helm sources field.""" if "sources" in self.chart: return self.chart["sources"] @@ -85,8 +87,9 @@ def source_urls(self) -> Optional[List[str]]: @property def values_table_md(self) -> str: - """The markdown-formatted Helm values documenation generated by - helm-docs in the README. + """Markdown-formatted Helm values documenation. + + Generated by :command:`helm-docs` in the :file:`README.md`. 
""" lines = self.readme.splitlines() for i, line in enumerate(lines): @@ -95,19 +98,18 @@ def values_table_md(self) -> str: return "" @cached_property - def doc_links(self) -> List[DocLink]: + def doc_links(self) -> list[DocLink]: """reStructuredText-formatted list of links.""" key = "phalanx.lsst.io/docs" if "annotations" in self.chart and key in self.chart["annotations"]: docs_data = yaml.safe_load(self.chart["annotations"][key]) - docs = [DocLink(**d) for d in docs_data] - return docs + return [DocLink(**d) for d in docs_data] else: return [] @classmethod def load( - cls, *, app_dir: Path, root_dir: Path, env_values: Dict[str, Dict] + cls, *, app_dir: Path, root_dir: Path, env_values: dict[str, dict] ) -> Application: """Load an application from the Phalanx repository. @@ -124,10 +126,7 @@ def load( # Open the chart's README readme_path = app_dir.joinpath("README.md") - if readme_path.is_file(): - readme = readme_path.read_text() - else: - readme = "" + readme = readme_path.read_text() if readme_path.is_file() else "" # Open the chart's Chart.yaml chart_path = app_dir.joinpath("Chart.yaml") @@ -137,14 +136,14 @@ def load( chart = {} # Load the app's values files for each environment - values: Dict[str, Dict] = {} + values: dict[str, dict] = {} for values_path in app_dir.glob("values-*.yaml"): env_name = values_path.stem.removeprefix("values-") values[env_name] = yaml.safe_load(values_path.read_text()) # Determine what environments use this app based on the environment's # values file. - active_environments: List[str] = [] + active_environments: list[str] = [] for env_name, env_configs in env_values.items(): if app_name == "argocd": active_environments.append(env_name) @@ -200,11 +199,11 @@ class Environment: vault_path_prefix: str """The Vault key prefix for this environment.""" - apps: List[Application] + apps: list[Application] """The applications that are enabled for this service.""" @property - def argocd_url(self) -> Optional[str]: + def argocd_url(self) -> str | None: """Path to the Argo CD UI.""" argocd = self.get_app("argocd") if argocd is None: @@ -219,7 +218,7 @@ def argocd_url(self) -> Optional[str]: return "N/A" @property - def argocd_rbac_csv(self) -> Optional[List[str]]: + def argocd_rbac_csv(self) -> list[str] | None: """The Argo CD RBAC table, as a list of CSV lines.""" argocd = self.get_app("argocd") if argocd is None: @@ -234,10 +233,11 @@ def argocd_rbac_csv(self) -> Optional[List[str]]: for line in rbac_csv.splitlines() ] print(lines) - return lines except KeyError: # Some environments may not configure an RBAC return None + else: + return lines @property def identity_provider(self) -> str: @@ -259,13 +259,13 @@ def identity_provider(self) -> str: return "Unknown" @property - def gafaelfawr_roles(self) -> List[Tuple[str, List[str]]]: + def gafaelfawr_roles(self) -> list[tuple[str, list[str]]]: """Gafaelfawr role mapping (reStructuredText). Group strings may be formatted as reStructuredText links to GitHub teams. """ - roles: List[Tuple[str, List[str]]] = [] + roles: list[tuple[str, list[str]]] = [] gafaelfawr = self.get_app("gafaelfawr") if gafaelfawr is None: @@ -280,7 +280,7 @@ def gafaelfawr_roles(self) -> List[Tuple[str, List[str]]]: role_names = sorted(group_mapping.keys()) for role_name in role_names: - groups: List[str] = [] + groups: list[str] = [] for group in group_mapping[role_name]: if isinstance(group, str): # e.g. 
a comanage group @@ -297,7 +297,7 @@ def gafaelfawr_roles(self) -> List[Tuple[str, List[str]]]: return roles - def get_app(self, name: str) -> Optional[Application]: + def get_app(self, name: str) -> Application | None: """Get the named application.""" for app in self.apps: if app.name == name: @@ -306,14 +306,14 @@ def get_app(self, name: str) -> Optional[Application]: @classmethod def load( - cls, *, values: Dict[str, Any], applications: List[Application] + cls, *, values: dict[str, Any], applications: list[Application] ) -> Environment: """Load an environment by inspecting the Phalanx repository.""" # Extract name from dir/values-envname.yaml name = values["environment"] # Get Application instances active in this environment - apps: List[Application] = [] + apps: list[Application] = [] for app in applications: if app.name == "argocd": # argocd is a special case because it's not toggled per env @@ -334,10 +334,10 @@ def load( class Phalanx: """Root container for Phalanx data.""" - environments: List[Environment] = field(default_factory=list) + environments: list[Environment] = field(default_factory=list) """Phalanx environments.""" - apps: List[Application] = field(default_factory=list) + apps: list[Application] = field(default_factory=list) """Phalanx applications.""" @classmethod @@ -355,11 +355,11 @@ def load_phalanx(cls, root_dir: Path) -> Phalanx: A model of the Phalanx platform, including environment and application configuration. """ - apps: List[Application] = [] - envs: List[Environment] = [] + apps: list[Application] = [] + envs: list[Environment] = [] # Pre-load the values files for each environment - env_values: Dict[str, Dict[str, Any]] = {} + env_values: dict[str, dict[str, Any]] = {} for env_values_path in root_dir.joinpath(ENVIRONMENTS_DIR).glob( "values-*.yaml" ): @@ -380,7 +380,7 @@ def load_phalanx(cls, root_dir: Path) -> Phalanx: apps.sort(key=lambda a: a.name) # Gather environments - for env_name, values in env_values.items(): + for _env_name, values in env_values.items(): env = Environment.load(values=values, applications=apps) envs.append(env) diff --git a/src/phalanx/testing/expandcharts.py b/src/phalanx/testing/expandcharts.py index 6b29f533b9..6e6714ae96 100644 --- a/src/phalanx/testing/expandcharts.py +++ b/src/phalanx/testing/expandcharts.py @@ -18,10 +18,10 @@ from git.repo import Repo if TYPE_CHECKING: - from typing import List, Sequence + from collections.abc import Sequence -def get_changed_charts() -> List[str]: +def get_changed_charts() -> list[str]: """Get a list of charts that have changed relative to main.""" repo = Repo(str(Path.cwd())) @@ -30,7 +30,10 @@ def get_changed_charts() -> List[str]: if (path / "Chart.yaml").exists(): diff = repo.head.commit.diff("origin/main", paths=[str(path)]) for change_type in DiffIndex.change_type: - if any(diff.iter_change_type(change_type)): # type: ignore + changes = diff.iter_change_type( + change_type # type: ignore[arg-type] + ) + if any(changes): print("Found changed chart", path.name) charts.append(path.name) break @@ -38,7 +41,7 @@ def get_changed_charts() -> List[str]: return charts -def get_environments() -> List[str]: +def get_environments() -> list[str]: """Get the list of supported environments.""" science_platform_path = Path.cwd() / "environments" @@ -75,6 +78,7 @@ def expand_chart(chart: str, environments: Sequence[str]) -> None: def main() -> None: + """Entry point for expand-charts command.""" expanded_path = Path.cwd() / "applications-expanded" if expanded_path.exists(): 
shutil.rmtree(expanded_path) diff --git a/tests/docs/__init__.py b/tests/docs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From 1126e095896f720d8dbb36ea21543f3403c0eb6e Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 09:33:51 -0700 Subject: [PATCH 243/308] Pin dependencies Since the phalanx command-line tool will now be part of the public API, we want to pin dependencies and only update them when we know the new dependencies work. Remove the dependencies from pyproject.toml and move them to files in requirements/* similar to our other Python packages. Add new Makefile targets to update the pinned dependencies, and a make setup target to install only the dependencies required for the command-line tool. --- Makefile | 37 +- pyproject.toml | 5 +- requirements/dev.in | 22 + requirements/dev.txt | 971 ++++++++++++++++++++++++++++++++++++++++++ requirements/main.in | 10 + requirements/main.txt | 60 +++ 6 files changed, 1094 insertions(+), 11 deletions(-) create mode 100644 requirements/dev.in create mode 100644 requirements/dev.txt create mode 100644 requirements/main.in create mode 100644 requirements/main.txt diff --git a/Makefile b/Makefile index 8fd55e5a25..eb94a1ec76 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,39 @@ -.PHONY: +.PHONY: help help: @echo "Make targets for Phalanx:" + @echo "make clean - Remove generated files" @echo "make init - Set up dev environment (install pre-commit hooks)" + @echo "make setup - Install requirements for phalanx command line" + @echo "make update - Update pinned dependencies and run make init" + @echo "make update-deps - Update pinned dependencies" -.PHONY: +.PHONY: clean +clean: + rm -rf .mypy_cache .ruff_cache .tox + make -C docs clean + +.PHONY: init init: pip install --upgrade pre-commit tox pre-commit install - pip install -e ".[dev]" + pip install --editable . + pip install --upgrade -r requirements/main.txt requirements/dev.txt rm -rf .tox -.PHONY: -clean: - rm -rf .tox - make -C docs clean +.PHONY: setup +setup: + pip install --editable . + pip install --upgrade -r requirements/main.txt + +.PHONY: update +update: update-deps init + +.PHONY: update-deps +update-deps: + pip install --upgrade pip-tools pip setuptools + pip-compile --upgrade --resolver=backtracking --build-isolation \ + --generate-hashes --allow-unsafe \ + --output-file requirements/main.txt requirements/main.in + pip-compile --upgrade --resolver=backtracking --build-isolation \ + --generate-hashes --allow-unsafe \ + --output-file requirements/dev.txt requirements/dev.in diff --git a/pyproject.toml b/pyproject.toml index 77fc8618f5..504dd335e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,10 +19,7 @@ classifiers = [ "Operating System :: POSIX", ] requires-python = ">=3.11" -dependencies = [ - "PyYAML", - "GitPython", -] +dependencies = [] [project.optional-dependencies] dev = [ diff --git a/requirements/dev.in b/requirements/dev.in new file mode 100644 index 0000000000..4ecd8c51e5 --- /dev/null +++ b/requirements/dev.in @@ -0,0 +1,22 @@ +# Editable development dependencies. Add direct development, test, and +# documentation dependencies here, as well as implicit dev dependencies +# with constrained versions. 
+# +# After editing, update requirements/dev.txt by running: +# make update-deps + +-c main.txt + +# Testing +coverage[toml] +mypy +pre-commit +pytest +ruff +types-PyYAML + +# Documentation +documenteer[guide]>=0.7.0,<1 +pydantic<2 +sphinx-diagrams +sphinx-jinja diff --git a/requirements/dev.txt b/requirements/dev.txt new file mode 100644 index 0000000000..4d84730756 --- /dev/null +++ b/requirements/dev.txt @@ -0,0 +1,971 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --config=pyproject.toml --generate-hashes --output-file=requirements/dev.txt requirements/dev.in +# +alabaster==0.7.13 \ + --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \ + --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2 + # via sphinx +babel==2.12.1 \ + --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \ + --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455 + # via sphinx +beautifulsoup4==4.12.2 \ + --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ + --hash=sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a + # via pydata-sphinx-theme +certifi==2023.5.7 \ + --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \ + --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716 + # via requests +cfgv==3.3.1 \ + --hash=sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426 \ + --hash=sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736 + # via pre-commit +charset-normalizer==3.2.0 \ + --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \ + --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \ + --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \ + --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \ + --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \ + --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \ + --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \ + --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \ + --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \ + --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \ + --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \ + --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \ + --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \ + --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \ + --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \ + --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \ + --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \ + --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \ + --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \ + --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \ + --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \ + --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \ + 
--hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \ + --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \ + --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \ + --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \ + --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \ + --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \ + --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \ + --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \ + --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \ + --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \ + --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \ + --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \ + --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \ + --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \ + --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \ + --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \ + --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \ + --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \ + --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \ + --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \ + --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \ + --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \ + --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \ + --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \ + --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \ + --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \ + --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \ + --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \ + --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \ + --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \ + --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \ + --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \ + --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \ + --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \ + --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \ + --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \ + --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \ + --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \ + --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \ + --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \ + --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \ + --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \ + 
--hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \ + --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \ + --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \ + --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \ + --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \ + --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \ + --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \ + --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \ + --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \ + --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \ + --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa + # via requests +click==8.1.6 \ + --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \ + --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5 + # via documenteer +contourpy==1.1.0 \ + --hash=sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e \ + --hash=sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104 \ + --hash=sha256:0b6616375d7de55797d7a66ee7d087efe27f03d336c27cf1f32c02b8c1a5ac70 \ + --hash=sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882 \ + --hash=sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f \ + --hash=sha256:17cfaf5ec9862bc93af1ec1f302457371c34e688fbd381f4035a06cd47324f48 \ + --hash=sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e \ + --hash=sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a \ + --hash=sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37 \ + --hash=sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a \ + --hash=sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2 \ + --hash=sha256:25ae46595e22f93592d39a7eac3d638cda552c3e1160255258b695f7b58e5655 \ + --hash=sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545 \ + --hash=sha256:2b836d22bd2c7bb2700348e4521b25e077255ebb6ab68e351ab5aa91ca27e027 \ + --hash=sha256:30f511c05fab7f12e0b1b7730ebdc2ec8deedcfb505bc27eb570ff47c51a8f15 \ + --hash=sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94 \ + --hash=sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439 \ + --hash=sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d \ + --hash=sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa \ + --hash=sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae \ + --hash=sha256:62013a2cf68abc80dadfd2307299bfa8f5aa0dcaec5b2954caeb5fa094171103 \ + --hash=sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc \ + --hash=sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa \ + --hash=sha256:911ff4fd53e26b019f898f32db0d4956c9d227d51338fb3b03ec72ff0084ee5f \ + --hash=sha256:9382a1c0bc46230fb881c36229bfa23d8c303b889b788b939365578d762b5c18 \ + --hash=sha256:9f2931ed4741f98f74b410b16e5213f71dcccee67518970c42f64153ea9313b9 \ + --hash=sha256:a67259c2b493b00e5a4d0f7bfae51fb4b3371395e47d079a4446e9b0f4d70e76 \ + --hash=sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493 \ + --hash=sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9 \ + 
--hash=sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed \ + --hash=sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4 \ + --hash=sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f \ + --hash=sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3 \ + --hash=sha256:e53046c3863828d21d531cc3b53786e6580eb1ba02477e8681009b6aa0870b21 \ + --hash=sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e \ + --hash=sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1 \ + --hash=sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a \ + --hash=sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002 \ + --hash=sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256 + # via matplotlib +coverage[toml]==7.2.7 \ + --hash=sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f \ + --hash=sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2 \ + --hash=sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a \ + --hash=sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a \ + --hash=sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01 \ + --hash=sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6 \ + --hash=sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7 \ + --hash=sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f \ + --hash=sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02 \ + --hash=sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c \ + --hash=sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063 \ + --hash=sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a \ + --hash=sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5 \ + --hash=sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959 \ + --hash=sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97 \ + --hash=sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6 \ + --hash=sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f \ + --hash=sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9 \ + --hash=sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5 \ + --hash=sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f \ + --hash=sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562 \ + --hash=sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe \ + --hash=sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9 \ + --hash=sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f \ + --hash=sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb \ + --hash=sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb \ + --hash=sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1 \ + --hash=sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb \ + --hash=sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250 \ + --hash=sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e \ + --hash=sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511 \ + --hash=sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5 \ + 
--hash=sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59 \ + --hash=sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2 \ + --hash=sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d \ + --hash=sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3 \ + --hash=sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4 \ + --hash=sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de \ + --hash=sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9 \ + --hash=sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833 \ + --hash=sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0 \ + --hash=sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9 \ + --hash=sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d \ + --hash=sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050 \ + --hash=sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d \ + --hash=sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6 \ + --hash=sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353 \ + --hash=sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb \ + --hash=sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e \ + --hash=sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8 \ + --hash=sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495 \ + --hash=sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2 \ + --hash=sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd \ + --hash=sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27 \ + --hash=sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1 \ + --hash=sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818 \ + --hash=sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4 \ + --hash=sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e \ + --hash=sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850 \ + --hash=sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3 + # via -r requirements/dev.in +cycler==0.11.0 \ + --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \ + --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f + # via matplotlib +diagrams==0.23.3 \ + --hash=sha256:543c707c36a2c896dfdf8f23e993a9c7ae48bb1a667f6baf19151eb98e57a134 \ + --hash=sha256:c497094f9d3600a94bdcfb62b6daf331d2eb7f9b355246e548dae7a4b5c97be0 + # via sphinx-diagrams +distlib==0.3.7 \ + --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ + --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 + # via virtualenv +documenteer[guide]==0.8.3 \ + --hash=sha256:e1514ca8dd96810a6d24d4b21f7b28458a3cf434217e46939ffab2c201791afc \ + --hash=sha256:fbe3ad1740751da8fcc95d809b0a489dc7f14fcdb78b28df85860ae92011c9a2 + # via -r requirements/dev.in +docutils==0.19 \ + --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ + --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc + # via + # myst-parser + # pybtex-docutils + # pydata-sphinx-theme + # sphinx + # sphinx-jinja + # sphinxcontrib-bibtex +filelock==3.12.2 \ + 
--hash=sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81 \ + --hash=sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec + # via virtualenv +fonttools==4.41.0 \ + --hash=sha256:0614b6348866092d00df3dfb37e037fc06412ca67087de361a2777ea5ed62c16 \ + --hash=sha256:06eac087ea55b3ebb2207d93b5ac56c847163899f05f5a77e1910f688fe10030 \ + --hash=sha256:19d461c801b8904d201c6c38a99bfcfef673bfdfe0c7f026f582ef78896434e0 \ + --hash=sha256:381558eafffc1432d08ca58063e71c7376ecaae48e9318354a90a1049a644845 \ + --hash=sha256:3ee75b8ca48f6c48af25e967dce995ef94e46872b35c7d454b983c62c9c7006d \ + --hash=sha256:415cf7c806a3f56fb280dadcf3c92c85c0415e75665ca957b4a2a2e39c17a5c9 \ + --hash=sha256:465d0f24bf4f75160f441793b55076b7a080a57d3a1f738390af2c20bee24fbb \ + --hash=sha256:4c654b1facf1f3b742e4d9b2dcdf0fa867b1f007b1b4981cc58a75ef5dca2a3c \ + --hash=sha256:50f8bdb421270f71b54695c62785e300fab4bb6127be40bf9f3084962a0c3adb \ + --hash=sha256:5448a87f6ed57ed844b64a05d3792827af584a8584613f6289867f4e77eb603b \ + --hash=sha256:560ea1a604c927399f36742abf342a4c5f3fee8e8e8a484b774dfe9630bd9a91 \ + --hash=sha256:5b1c2b21b40229166a864f2b0aec06d37f0a204066deb1734c93370e0c76339d \ + --hash=sha256:69178674505ec81adf4af2a3bbacd0cb9a37ba7831bc3fca307f80e48ab2767b \ + --hash=sha256:69dbe0154e15b68dd671441ea8f23dad87488b24a6e650d45958f4722819a443 \ + --hash=sha256:6faff25991dec48f8cac882055a09ae1a29fd15bc160bc3d663e789e994664c2 \ + --hash=sha256:72d40a32d6443871ea0d147813caad58394b48729dfa4fbc45dcaac54f9506f2 \ + --hash=sha256:7e22d0144d735f6c7df770509b8c0c33414bf460df0d5dddc98a159e5dbb10eb \ + --hash=sha256:841c491fa3e9c54e8f9cd5dae059e88f45e086aea090c28be9d42f59c8b99e01 \ + --hash=sha256:86edb95c4d1fe4fae2111d7e0c10c6e42b7790b377bcf1952303469eee5b52bb \ + --hash=sha256:8f602dd5bcde7e4241419924f23c6f0d66723dd5408a58c3a2f781745c693f45 \ + --hash=sha256:9387b09694fbf8ac7dcf887069068f81fb4124d05e09557ac7daabfbec1744bd \ + --hash=sha256:b329ae7ce971b5c4148d6cdb8119c0ce4587265b2330d4f2f3776ef851bee020 \ + --hash=sha256:ba2a367ff478cd108d5319c0dc4fd4eb4ea3476dbfc45b00c45718e889cd9463 \ + --hash=sha256:bc9e7b1e268be7a23fc66471b615c324e99c5db39ce8c49dd6dd8e962c7bc1b8 \ + --hash=sha256:c890061915e95b619c1d3cc3c107c6fb021406b701c0c24b03e74830d522f210 \ + --hash=sha256:cc3324e4159e6d1f55c3615b4c1c211f87cc96cc0cc7c946c8447dc1319f2e9d \ + --hash=sha256:d2dae84a3d0f76884a6102c62f2795b2d6602c2c95cfcce74c8a590b6200e533 \ + --hash=sha256:d45f28c20bb67dee0f4a4caae709f40b0693d764b7b2bf2d58890f36b1bfcef0 \ + --hash=sha256:e38bd91eae257f36c2b7245c0278e9cd9d754f3a66b8d2b548c623ba66e387b6 \ + --hash=sha256:e43f6c7f9ba4f9d29edee530e45f9aa162872ec9549398b85971477a99f2a806 \ + --hash=sha256:ea879afd1d6189fca02a85a7868560c9bb8415dccff6b7ae6d81e4f06b3ab30d \ + --hash=sha256:eb9dfa87152bd97019adc387b2f29ef6af601de4386f36570ca537ace96d96ed \ + --hash=sha256:efd59e83223cb77952997fb850c7a7c2a958c9af0642060f536722c2a9e9d53b \ + --hash=sha256:f3fe90dfb297bd8265238c06787911cd81c2cb89ac5b13e1c911928bdabfce0f + # via matplotlib +gitdb==4.0.10 \ + --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ + --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 + # via + # -c requirements/main.txt + # gitpython +gitpython==3.1.32 \ + --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \ + --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f + # via + # -c requirements/main.txt + # documenteer +graphviz==0.20.1 \ + 
--hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ + --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8 + # via diagrams +identify==2.5.25 \ + --hash=sha256:9df2489842707d431b38ce3410ef8df40da5b10a3e28a3fcac1a42523e956409 \ + --hash=sha256:db4de0e758c0db8f81996816cd2f3f2f8c5c8d49a7fd02f3b4109aac6fd80e29 + # via pre-commit +idna==3.4 \ + --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ + --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 + # via requests +imagesize==1.4.1 \ + --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ + --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a + # via sphinx +iniconfig==2.0.0 \ + --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ + --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 + # via pytest +jinja2==3.1.2 \ + --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ + --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 + # via + # diagrams + # myst-parser + # sphinx + # sphinx-jinja +kiwisolver==1.4.4 \ + --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \ + --hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \ + --hash=sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c \ + --hash=sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c \ + --hash=sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0 \ + --hash=sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4 \ + --hash=sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9 \ + --hash=sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286 \ + --hash=sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767 \ + --hash=sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c \ + --hash=sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6 \ + --hash=sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b \ + --hash=sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004 \ + --hash=sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf \ + --hash=sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494 \ + --hash=sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac \ + --hash=sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626 \ + --hash=sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766 \ + --hash=sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514 \ + --hash=sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6 \ + --hash=sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f \ + --hash=sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d \ + --hash=sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191 \ + --hash=sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d \ + --hash=sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51 \ + --hash=sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f \ + --hash=sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8 \ + 
--hash=sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454 \ + --hash=sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb \ + --hash=sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da \ + --hash=sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8 \ + --hash=sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de \ + --hash=sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a \ + --hash=sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9 \ + --hash=sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008 \ + --hash=sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3 \ + --hash=sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32 \ + --hash=sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938 \ + --hash=sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1 \ + --hash=sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9 \ + --hash=sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d \ + --hash=sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824 \ + --hash=sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b \ + --hash=sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd \ + --hash=sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2 \ + --hash=sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5 \ + --hash=sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69 \ + --hash=sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3 \ + --hash=sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae \ + --hash=sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597 \ + --hash=sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e \ + --hash=sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955 \ + --hash=sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca \ + --hash=sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a \ + --hash=sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea \ + --hash=sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede \ + --hash=sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4 \ + --hash=sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6 \ + --hash=sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686 \ + --hash=sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408 \ + --hash=sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871 \ + --hash=sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29 \ + --hash=sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750 \ + --hash=sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897 \ + --hash=sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0 \ + --hash=sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2 \ + --hash=sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09 \ + --hash=sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c + # via matplotlib +latexcodec==2.0.1 \ + --hash=sha256:2aa2551c373261cefe2ad3a8953a6d6533e68238d180eb4bb91d7964adb3fe9a \ + 
--hash=sha256:c277a193638dc7683c4c30f6684e3db728a06efb0dc9cf346db8bd0aa6c5d271 + # via pybtex +linkify-it-py==2.0.2 \ + --hash=sha256:19f3060727842c254c808e99d465c80c49d2c7306788140987a1a7a29b0d6ad2 \ + --hash=sha256:a3a24428f6c96f27370d7fe61d2ac0be09017be5190d68d8658233171f1b6541 + # via markdown-it-py +markdown-it-py[linkify]==3.0.0 \ + --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ + --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb + # via + # documenteer + # mdit-py-plugins + # myst-parser +markupsafe==2.1.3 \ + --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ + --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ + --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ + --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ + --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ + --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ + --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ + --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ + --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ + --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ + --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ + --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ + --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ + --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ + --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ + --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ + --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ + --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ + --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ + --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ + --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ + --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ + --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ + --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ + --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ + --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ + --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ + --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ + --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ + --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ + --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ + --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ + --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ + --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ + --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ + 
--hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ + --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ + --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ + --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ + --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ + --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ + --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ + --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ + --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ + --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ + --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ + --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ + --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ + --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ + --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 + # via jinja2 +matplotlib==3.7.2 \ + --hash=sha256:070f8dddd1f5939e60aacb8fa08f19551f4b0140fab16a3669d5cd6e9cb28fc8 \ + --hash=sha256:0c3cca3e842b11b55b52c6fb8bd6a4088693829acbfcdb3e815fa9b7d5c92c1b \ + --hash=sha256:0f506a1776ee94f9e131af1ac6efa6e5bc7cb606a3e389b0ccb6e657f60bb676 \ + --hash=sha256:12f01b92ecd518e0697da4d97d163b2b3aa55eb3eb4e2c98235b3396d7dad55f \ + --hash=sha256:152ee0b569a37630d8628534c628456b28686e085d51394da6b71ef84c4da201 \ + --hash=sha256:1c308b255efb9b06b23874236ec0f10f026673ad6515f602027cc8ac7805352d \ + --hash=sha256:1cd120fca3407a225168238b790bd5c528f0fafde6172b140a2f3ab7a4ea63e9 \ + --hash=sha256:20f844d6be031948148ba49605c8b96dfe7d3711d1b63592830d650622458c11 \ + --hash=sha256:23fb1750934e5f0128f9423db27c474aa32534cec21f7b2153262b066a581fd1 \ + --hash=sha256:2699f7e73a76d4c110f4f25be9d2496d6ab4f17345307738557d345f099e07de \ + --hash=sha256:26bede320d77e469fdf1bde212de0ec889169b04f7f1179b8930d66f82b30cbc \ + --hash=sha256:2ecb5be2b2815431c81dc115667e33da0f5a1bcf6143980d180d09a717c4a12e \ + --hash=sha256:2f8e4a49493add46ad4a8c92f63e19d548b2b6ebbed75c6b4c7f46f57d36cdd1 \ + --hash=sha256:305e3da477dc8607336ba10bac96986d6308d614706cae2efe7d3ffa60465b24 \ + --hash=sha256:30e1409b857aa8a747c5d4f85f63a79e479835f8dffc52992ac1f3f25837b544 \ + --hash=sha256:318c89edde72ff95d8df67d82aca03861240512994a597a435a1011ba18dbc7f \ + --hash=sha256:35d74ebdb3f71f112b36c2629cf32323adfbf42679e2751252acd468f5001c07 \ + --hash=sha256:50e0a55ec74bf2d7a0ebf50ac580a209582c2dd0f7ab51bc270f1b4a0027454e \ + --hash=sha256:5dea00b62d28654b71ca92463656d80646675628d0828e08a5f3b57e12869e13 \ + --hash=sha256:60c521e21031632aa0d87ca5ba0c1c05f3daacadb34c093585a0be6780f698e4 \ + --hash=sha256:6515e878f91894c2e4340d81f0911857998ccaf04dbc1bba781e3d89cbf70608 \ + --hash=sha256:6d2ff3c984b8a569bc1383cd468fc06b70d7b59d5c2854ca39f1436ae8394117 \ + --hash=sha256:71667eb2ccca4c3537d9414b1bc00554cb7f91527c17ee4ec38027201f8f1603 \ + --hash=sha256:717157e61b3a71d3d26ad4e1770dc85156c9af435659a25ee6407dc866cb258d \ + --hash=sha256:71f7a8c6b124e904db550f5b9fe483d28b896d4135e45c4ea381ad3b8a0e3256 \ + --hash=sha256:936bba394682049919dda062d33435b3be211dc3dcaa011e09634f060ec878b2 \ + --hash=sha256:a1733b8e84e7e40a9853e505fe68cc54339f97273bdfe6f3ed980095f769ddc7 \ + 
--hash=sha256:a2c1590b90aa7bd741b54c62b78de05d4186271e34e2377e0289d943b3522273 \ + --hash=sha256:a7e28d6396563955f7af437894a36bf2b279462239a41028323e04b85179058b \ + --hash=sha256:a8035ba590658bae7562786c9cc6ea1a84aa49d3afab157e414c9e2ea74f496d \ + --hash=sha256:a8cdb91dddb04436bd2f098b8fdf4b81352e68cf4d2c6756fcc414791076569b \ + --hash=sha256:ac60daa1dc83e8821eed155796b0f7888b6b916cf61d620a4ddd8200ac70cd64 \ + --hash=sha256:af4860132c8c05261a5f5f8467f1b269bf1c7c23902d75f2be57c4a7f2394b3e \ + --hash=sha256:bc221ffbc2150458b1cd71cdd9ddd5bb37962b036e41b8be258280b5b01da1dd \ + --hash=sha256:ce55289d5659b5b12b3db4dc9b7075b70cef5631e56530f14b2945e8836f2d20 \ + --hash=sha256:d9881356dc48e58910c53af82b57183879129fa30492be69058c5b0d9fddf391 \ + --hash=sha256:dbcf59334ff645e6a67cd5f78b4b2cdb76384cdf587fa0d2dc85f634a72e1a3e \ + --hash=sha256:ebf577c7a6744e9e1bd3fee45fc74a02710b214f94e2bde344912d85e0c9af7c \ + --hash=sha256:f081c03f413f59390a80b3e351cc2b2ea0205839714dbc364519bcf51f4b56ca \ + --hash=sha256:fdbb46fad4fb47443b5b8ac76904b2e7a66556844f33370861b4788db0f8816a \ + --hash=sha256:fdcd28360dbb6203fb5219b1a5658df226ac9bebc2542a9e8f457de959d713d0 + # via sphinxext-opengraph +mdit-py-plugins==0.4.0 \ + --hash=sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9 \ + --hash=sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b + # via myst-parser +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via markdown-it-py +mypy==1.4.1 \ + --hash=sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042 \ + --hash=sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd \ + --hash=sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2 \ + --hash=sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01 \ + --hash=sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7 \ + --hash=sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3 \ + --hash=sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816 \ + --hash=sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3 \ + --hash=sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc \ + --hash=sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4 \ + --hash=sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b \ + --hash=sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8 \ + --hash=sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c \ + --hash=sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462 \ + --hash=sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7 \ + --hash=sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc \ + --hash=sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258 \ + --hash=sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b \ + --hash=sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9 \ + --hash=sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6 \ + --hash=sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f \ + --hash=sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1 \ + --hash=sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828 \ + 
--hash=sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878 \ + --hash=sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f \ + --hash=sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b + # via -r requirements/dev.in +mypy-extensions==1.0.0 \ + --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ + --hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782 + # via mypy +myst-parser==2.0.0 \ + --hash=sha256:7c36344ae39c8e740dad7fdabf5aa6fc4897a813083c6cc9990044eb93656b14 \ + --hash=sha256:ea929a67a6a0b1683cdbe19b8d2e724cd7643f8aa3e7bb18dd65beac3483bead + # via documenteer +nodeenv==1.8.0 \ + --hash=sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2 \ + --hash=sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec + # via pre-commit +numpy==1.25.1 \ + --hash=sha256:012097b5b0d00a11070e8f2e261128c44157a8689f7dedcf35576e525893f4fe \ + --hash=sha256:0d3fe3dd0506a28493d82dc3cf254be8cd0d26f4008a417385cbf1ae95b54004 \ + --hash=sha256:0def91f8af6ec4bb94c370e38c575855bf1d0be8a8fbfba42ef9c073faf2cf19 \ + --hash=sha256:1a180429394f81c7933634ae49b37b472d343cccb5bb0c4a575ac8bbc433722f \ + --hash=sha256:1d5d3c68e443c90b38fdf8ef40e60e2538a27548b39b12b73132456847f4b631 \ + --hash=sha256:20e1266411120a4f16fad8efa8e0454d21d00b8c7cee5b5ccad7565d95eb42dd \ + --hash=sha256:247d3ffdd7775bdf191f848be8d49100495114c82c2bd134e8d5d075fb386a1c \ + --hash=sha256:35a9527c977b924042170a0887de727cd84ff179e478481404c5dc66b4170009 \ + --hash=sha256:38eb6548bb91c421261b4805dc44def9ca1a6eef6444ce35ad1669c0f1a3fc5d \ + --hash=sha256:3d7abcdd85aea3e6cdddb59af2350c7ab1ed764397f8eec97a038ad244d2d105 \ + --hash=sha256:41a56b70e8139884eccb2f733c2f7378af06c82304959e174f8e7370af112e09 \ + --hash=sha256:4a90725800caeaa160732d6b31f3f843ebd45d6b5f3eec9e8cc287e30f2805bf \ + --hash=sha256:6b82655dd8efeea69dbf85d00fca40013d7f503212bc5259056244961268b66e \ + --hash=sha256:6c6c9261d21e617c6dc5eacba35cb68ec36bb72adcff0dee63f8fbc899362588 \ + --hash=sha256:77d339465dff3eb33c701430bcb9c325b60354698340229e1dff97745e6b3efa \ + --hash=sha256:791f409064d0a69dd20579345d852c59822c6aa087f23b07b1b4e28ff5880fcb \ + --hash=sha256:9a3a9f3a61480cc086117b426a8bd86869c213fc4072e606f01c4e4b66eb92bf \ + --hash=sha256:c1516db588987450b85595586605742879e50dcce923e8973f79529651545b57 \ + --hash=sha256:c40571fe966393b212689aa17e32ed905924120737194b5d5c1b20b9ed0fb171 \ + --hash=sha256:d412c1697c3853c6fc3cb9751b4915859c7afe6a277c2bf00acf287d56c4e625 \ + --hash=sha256:d5154b1a25ec796b1aee12ac1b22f414f94752c5f94832f14d8d6c9ac40bcca6 \ + --hash=sha256:d736b75c3f2cb96843a5c7f8d8ccc414768d34b0a75f466c05f3a739b406f10b \ + --hash=sha256:e8f6049c4878cb16960fbbfb22105e49d13d752d4d8371b55110941fb3b17800 \ + --hash=sha256:f76aebc3358ade9eacf9bc2bb8ae589863a4f911611694103af05346637df1b7 \ + --hash=sha256:fd67b306320dcadea700a8f79b9e671e607f8696e98ec255915c0c6d6b818503 + # via + # contourpy + # matplotlib +packaging==23.1 \ + --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \ + --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f + # via + # matplotlib + # pydata-sphinx-theme + # pytest + # sphinx +pillow==10.0.0 \ + --hash=sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5 \ + --hash=sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530 \ + --hash=sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d \ + 
--hash=sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca \ + --hash=sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891 \ + --hash=sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992 \ + --hash=sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7 \ + --hash=sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3 \ + --hash=sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba \ + --hash=sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3 \ + --hash=sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3 \ + --hash=sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f \ + --hash=sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538 \ + --hash=sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3 \ + --hash=sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d \ + --hash=sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c \ + --hash=sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017 \ + --hash=sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3 \ + --hash=sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223 \ + --hash=sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e \ + --hash=sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3 \ + --hash=sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6 \ + --hash=sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640 \ + --hash=sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334 \ + --hash=sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1 \ + --hash=sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba \ + --hash=sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa \ + --hash=sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0 \ + --hash=sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396 \ + --hash=sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d \ + --hash=sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485 \ + --hash=sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf \ + --hash=sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43 \ + --hash=sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37 \ + --hash=sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2 \ + --hash=sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd \ + --hash=sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86 \ + --hash=sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967 \ + --hash=sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629 \ + --hash=sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568 \ + --hash=sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed \ + --hash=sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f \ + --hash=sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551 \ + --hash=sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3 \ + --hash=sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614 \ + 
--hash=sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff \ + --hash=sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d \ + --hash=sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883 \ + --hash=sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684 \ + --hash=sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0 \ + --hash=sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de \ + --hash=sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b \ + --hash=sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3 \ + --hash=sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199 \ + --hash=sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51 \ + --hash=sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90 + # via matplotlib +platformdirs==3.9.1 \ + --hash=sha256:1b42b450ad933e981d56e59f1b97495428c9bd60698baab9f3eb3d00d5822421 \ + --hash=sha256:ad8291ae0ae5072f66c16945166cb11c63394c7a3ad1b1bc9828ca3162da8c2f + # via virtualenv +pluggy==1.2.0 \ + --hash=sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849 \ + --hash=sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3 + # via pytest +pre-commit==3.3.3 \ + --hash=sha256:10badb65d6a38caff29703362271d7dca483d01da88f9d7e05d0b97171c136cb \ + --hash=sha256:a2256f489cd913d575c145132ae196fe335da32d91a8294b7afe6622335dd023 + # via -r requirements/dev.in +pybtex==0.24.0 \ + --hash=sha256:818eae35b61733e5c007c3fcd2cfb75ed1bc8b4173c1f70b56cc4c0802d34755 \ + --hash=sha256:e1e0c8c69998452fea90e9179aa2a98ab103f3eed894405b7264e517cc2fcc0f + # via + # pybtex-docutils + # sphinxcontrib-bibtex +pybtex-docutils==1.0.2 \ + --hash=sha256:43aa353b6d498fd5ac30f0073a98e332d061d34fe619d3d50d1761f8fd4aa016 \ + --hash=sha256:6f9e3c25a37bcaac8c4f69513272706ec6253bb708a93d8b4b173f43915ba239 + # via sphinxcontrib-bibtex +pydantic==1.10.11 \ + --hash=sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e \ + --hash=sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7 \ + --hash=sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb \ + --hash=sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151 \ + --hash=sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13 \ + --hash=sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d \ + --hash=sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e \ + --hash=sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6 \ + --hash=sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19 \ + --hash=sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713 \ + --hash=sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f \ + --hash=sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66 \ + --hash=sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e \ + --hash=sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b \ + --hash=sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248 \ + --hash=sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622 \ + --hash=sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae \ + --hash=sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629 \ + 
--hash=sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604 \ + --hash=sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c \ + --hash=sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f \ + --hash=sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b \ + --hash=sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e \ + --hash=sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999 \ + --hash=sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3 \ + --hash=sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847 \ + --hash=sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c \ + --hash=sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36 \ + --hash=sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216 \ + --hash=sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1 \ + --hash=sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303 \ + --hash=sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588 \ + --hash=sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f \ + --hash=sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528 \ + --hash=sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb \ + --hash=sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f + # via + # -r requirements/dev.in + # documenteer +pydata-sphinx-theme==0.12.0 \ + --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ + --hash=sha256:c17dbab67a3774f06f34f6378e896fcd0668cc8b5da1c1ba017e65cf1df0af58 + # via documenteer +pygments==2.15.1 \ + --hash=sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c \ + --hash=sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1 + # via + # pydata-sphinx-theme + # sphinx + # sphinx-prompt +pyparsing==3.0.9 \ + --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ + --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc + # via matplotlib +pytest==7.4.0 \ + --hash=sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32 \ + --hash=sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a + # via -r requirements/dev.in +python-dateutil==2.8.2 \ + --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ + --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 + # via matplotlib +pyyaml==6.0.1 \ + --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ + --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ + --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \ + --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \ + --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \ + --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \ + --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \ + --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \ + --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \ + --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \ + --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \ 
+ --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \ + --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \ + --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \ + --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \ + --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \ + --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \ + --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \ + --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \ + --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ + --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ + --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ + --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ + --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \ + --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \ + --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \ + --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \ + --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \ + --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \ + --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \ + --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \ + --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \ + --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \ + --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \ + --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \ + --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \ + --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \ + --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ + --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f + # via + # -c requirements/main.txt + # documenteer + # myst-parser + # pre-commit + # pybtex +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 + # via + # documenteer + # sphinx +ruff==0.0.278 \ + --hash=sha256:1078125123a3c68e92463afacedb7e41b15ccafc09e510c6c755a23087afc8de \ + --hash=sha256:1a90ebd8f2a554db1ee8d12b2f3aa575acbd310a02cd1a9295b3511a4874cf98 \ + --hash=sha256:1a9f1d925204cfba81b18368b7ac943befcfccc3a41e170c91353b674c6b7a66 \ + --hash=sha256:1cae4c07d334eb588f171f1363fa89a8911047eb93184276be11a24dbbc996c7 \ + --hash=sha256:2c62a0bde4d20d087cabce2fa8b012d74c2e985da86d00fb3359880469b90e31 \ + --hash=sha256:38ca1c0c8c1221fe64c0a66784c91501d09a8ed02a4dbfdc117c0ce32a81eefc \ + --hash=sha256:3ce0d620e257b4cad16e2f0c103b2f43a07981668a3763380542e8a131d11537 \ + --hash=sha256:666e739fb2685277b879d493848afe6933e3be30d40f41fe0e571ad479d57d77 \ + --hash=sha256:70d39f5599d8449082ab8ce542fa98e16413145eb411dd1dc16575b44565d52d \ + 
--hash=sha256:737a0cfb6c36aaa92d97a46957dfd5e55329299074ad06ed12663b98e0c6fc82 \ + --hash=sha256:7545bb037823cd63dca19280f75a523a68bd3e78e003de74609320d6822b5a52 \ + --hash=sha256:8cb380d2d6fdb60656a0b5fa78305535db513fc72ce11f4532cc1641204ef380 \ + --hash=sha256:a48621f5f372d5019662db5b3dbfc5f1450f927683d75f1153fe0ebf20eb9698 \ + --hash=sha256:c25b96602695a147d62a572865b753ef56aff1524abab13b9436724df30f9bd7 \ + --hash=sha256:d11149c7b186f224f2055e437a030cd83b164a43cc0211314c33ad1553ed9c4c \ + --hash=sha256:e131595ab7f4ce61a1650463bd2fe304b49e7d0deb0dfa664b92817c97cdba5f \ + --hash=sha256:ec8b0469b54315803aaf1fbf9a37162a3849424cab6182496f972ad56e0ea702 + # via -r requirements/dev.in +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # latexcodec + # pybtex + # python-dateutil +smmap==5.0.0 \ + --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \ + --hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936 + # via + # -c requirements/main.txt + # gitdb +snowballstemmer==2.2.0 \ + --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ + --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a + # via sphinx +soupsieve==2.4.1 \ + --hash=sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8 \ + --hash=sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea + # via beautifulsoup4 +sphinx==6.2.1 \ + --hash=sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b \ + --hash=sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912 + # via + # documenteer + # myst-parser + # pydata-sphinx-theme + # sphinx-autodoc-typehints + # sphinx-automodapi + # sphinx-copybutton + # sphinx-design + # sphinx-jinja + # sphinx-prompt + # sphinxcontrib-bibtex + # sphinxcontrib-jquery + # sphinxext-opengraph +sphinx-autodoc-typehints==1.22 \ + --hash=sha256:71fca2d5eee9b034204e4c686ab20b4d8f5eb9409396216bcae6c87c38e18ea6 \ + --hash=sha256:ef4a8b9d52de66065aa7d3adfabf5a436feb8a2eff07c2ddc31625d8807f2b69 + # via documenteer +sphinx-automodapi==0.15.0 \ + --hash=sha256:06848f261fb127b25d35f27c2c4fddb041e76498733da064504f8077cbd27bec \ + --hash=sha256:fd5871e054df7f3e299dde959afffa849f4d01c6eac274c366b06472afcb06aa + # via documenteer +sphinx-copybutton==0.5.2 \ + --hash=sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd \ + --hash=sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e + # via documenteer +sphinx-design==0.4.1 \ + --hash=sha256:23bf5705eb31296d4451f68b0222a698a8a84396ffe8378dfd9319ba7ab8efd9 \ + --hash=sha256:5b6418ba4a2dc3d83592ea0ff61a52a891fe72195a4c3a18b2fa1c7668ce4708 + # via documenteer +sphinx-diagrams==0.4.0 \ + --hash=sha256:3cf2e0179bdd9ccdb28164fcfcae9b167999a1abe40e159e0c26a225490074d1 \ + --hash=sha256:4860291cb04d6361f898d20ba28dca7345f757cdc240caf144c8bf20c73067a0 + # via -r requirements/dev.in +sphinx-jinja==2.0.2 \ + --hash=sha256:705ebeb9b7a6018ca3f93724315a7c1effa6ba3db44d630e7eaaa15e4ac081a8 \ + --hash=sha256:c6232b59a894139770be1dc6d0b00a379e4288ce78157904e1f8473dea3e0718 + # via -r requirements/dev.in +sphinx-prompt==1.5.0 \ + --hash=sha256:fa4e90d8088b5a996c76087d701fc7e31175f8b9dc4aab03a507e45051067162 + # via documenteer +sphinxcontrib-applehelp==1.0.4 \ + --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \ + 
--hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e + # via sphinx +sphinxcontrib-bibtex==2.5.0 \ + --hash=sha256:71b42e5db0e2e284f243875326bf9936aa9a763282277d75048826fef5b00eaa \ + --hash=sha256:748f726eaca6efff7731012103417ef130ecdcc09501b4d0c54283bf5f059f76 + # via documenteer +sphinxcontrib-devhelp==1.0.2 \ + --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ + --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 + # via sphinx +sphinxcontrib-htmlhelp==2.0.1 \ + --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \ + --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903 + # via sphinx +sphinxcontrib-jquery==4.1 \ + --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \ + --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae + # via documenteer +sphinxcontrib-jsmath==1.0.1 \ + --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ + --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 + # via sphinx +sphinxcontrib-mermaid==0.9.2 \ + --hash=sha256:252ef13dd23164b28f16d8b0205cf184b9d8e2b714a302274d9f59eb708e77af \ + --hash=sha256:6795a72037ca55e65663d2a2c1a043d636dc3d30d418e56dd6087d1459d98a5d + # via documenteer +sphinxcontrib-qthelp==1.0.3 \ + --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ + --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 + # via sphinx +sphinxcontrib-serializinghtml==1.1.5 \ + --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \ + --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952 + # via sphinx +sphinxext-opengraph==0.8.2 \ + --hash=sha256:45a693b6704052c426576f0a1f630649c55b4188bc49eb63e9587e24a923db39 \ + --hash=sha256:6a05bdfe5176d9dd0a1d58a504f17118362ab976631213cd36fb44c4c40544c9 + # via documenteer +typed-ast==1.5.5 \ + --hash=sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10 \ + --hash=sha256:045f9930a1550d9352464e5149710d56a2aed23a2ffe78946478f7b5416f1ede \ + --hash=sha256:0635900d16ae133cab3b26c607586131269f88266954eb04ec31535c9a12ef1e \ + --hash=sha256:118c1ce46ce58fda78503eae14b7664163aa735b620b64b5b725453696f2a35c \ + --hash=sha256:16f7313e0a08c7de57f2998c85e2a69a642e97cb32f87eb65fbfe88381a5e44d \ + --hash=sha256:1efebbbf4604ad1283e963e8915daa240cb4bf5067053cf2f0baadc4d4fb51b8 \ + --hash=sha256:2188bc33d85951ea4ddad55d2b35598b2709d122c11c75cffd529fbc9965508e \ + --hash=sha256:2b946ef8c04f77230489f75b4b5a4a6f24c078be4aed241cfabe9cbf4156e7e5 \ + --hash=sha256:335f22ccb244da2b5c296e6f96b06ee9bed46526db0de38d2f0e5a6597b81155 \ + --hash=sha256:381eed9c95484ceef5ced626355fdc0765ab51d8553fec08661dce654a935db4 \ + --hash=sha256:429ae404f69dc94b9361bb62291885894b7c6fb4640d561179548c849f8492ba \ + --hash=sha256:44f214394fc1af23ca6d4e9e744804d890045d1643dd7e8229951e0ef39429b5 \ + --hash=sha256:48074261a842acf825af1968cd912f6f21357316080ebaca5f19abbb11690c8a \ + --hash=sha256:4bc1efe0ce3ffb74784e06460f01a223ac1f6ab31c6bc0376a21184bf5aabe3b \ + --hash=sha256:57bfc3cf35a0f2fdf0a88a3044aafaec1d2f24d8ae8cd87c4f58d615fb5b6311 \ + --hash=sha256:597fc66b4162f959ee6a96b978c0435bd63791e31e4f410622d19f1686d5e769 \ + --hash=sha256:5f7a8c46a8b333f71abd61d7ab9255440d4a588f34a21f126bbfc95f6049e686 \ + --hash=sha256:5fe83a9a44c4ce67c796a1b466c270c1272e176603d5e06f6afbc101a572859d \ + 
--hash=sha256:61443214d9b4c660dcf4b5307f15c12cb30bdfe9588ce6158f4a005baeb167b2 \ + --hash=sha256:622e4a006472b05cf6ef7f9f2636edc51bda670b7bbffa18d26b255269d3d814 \ + --hash=sha256:6eb936d107e4d474940469e8ec5b380c9b329b5f08b78282d46baeebd3692dc9 \ + --hash=sha256:7f58fabdde8dcbe764cef5e1a7fcb440f2463c1bbbec1cf2a86ca7bc1f95184b \ + --hash=sha256:83509f9324011c9a39faaef0922c6f720f9623afe3fe220b6d0b15638247206b \ + --hash=sha256:8c524eb3024edcc04e288db9541fe1f438f82d281e591c548903d5b77ad1ddd4 \ + --hash=sha256:94282f7a354f36ef5dbce0ef3467ebf6a258e370ab33d5b40c249fa996e590dd \ + --hash=sha256:b445c2abfecab89a932b20bd8261488d574591173d07827c1eda32c457358b18 \ + --hash=sha256:be4919b808efa61101456e87f2d4c75b228f4e52618621c77f1ddcaae15904fa \ + --hash=sha256:bfd39a41c0ef6f31684daff53befddae608f9daf6957140228a08e51f312d7e6 \ + --hash=sha256:c631da9710271cb67b08bd3f3813b7af7f4c69c319b75475436fcab8c3d21bee \ + --hash=sha256:cc95ffaaab2be3b25eb938779e43f513e0e538a84dd14a5d844b8f2932593d88 \ + --hash=sha256:d09d930c2d1d621f717bb217bf1fe2584616febb5138d9b3e8cdd26506c3f6d4 \ + --hash=sha256:d40c10326893ecab8a80a53039164a224984339b2c32a6baf55ecbd5b1df6431 \ + --hash=sha256:d41b7a686ce653e06c2609075d397ebd5b969d821b9797d029fccd71fdec8e04 \ + --hash=sha256:d5c0c112a74c0e5db2c75882a0adf3133adedcdbfd8cf7c9d6ed77365ab90a1d \ + --hash=sha256:e1a976ed4cc2d71bb073e1b2a250892a6e968ff02aa14c1f40eba4f365ffec02 \ + --hash=sha256:e48bf27022897577d8479eaed64701ecaf0467182448bd95759883300ca818c8 \ + --hash=sha256:ed4a1a42df8a3dfb6b40c3d2de109e935949f2f66b19703eafade03173f8f437 \ + --hash=sha256:f0aefdd66f1784c58f65b502b6cf8b121544680456d1cebbd300c2c813899274 \ + --hash=sha256:fc2b8c4e1bc5cd96c1a823a885e6b158f8451cf6f5530e1829390b4d27d0807f \ + --hash=sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a \ + --hash=sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2 + # via diagrams +types-pyyaml==6.0.12.10 \ + --hash=sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f \ + --hash=sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97 + # via -r requirements/dev.in +typing-extensions==4.7.1 \ + --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \ + --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2 + # via + # mypy + # pydantic +uc-micro-py==1.0.2 \ + --hash=sha256:30ae2ac9c49f39ac6dce743bd187fcd2b574b16ca095fa74cd9396795c954c54 \ + --hash=sha256:8c9110c309db9d9e87302e2f4ad2c3152770930d88ab385cd544e7a7e75f3de0 + # via linkify-it-py +urllib3==2.0.4 \ + --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \ + --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4 + # via requests +virtualenv==20.24.0 \ + --hash=sha256:18d1b37fc75cc2670625702d76849a91ebd383768b4e91382a8d51be3246049e \ + --hash=sha256:e2a7cef9da880d693b933db7654367754f14e20650dc60e8ee7385571f8593a3 + # via pre-commit + +# The following packages are considered to be unsafe in a requirements file: +setuptools==68.0.0 \ + --hash=sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f \ + --hash=sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235 + # via nodeenv diff --git a/requirements/main.in b/requirements/main.in new file mode 100644 index 0000000000..0f749b94a7 --- /dev/null +++ b/requirements/main.in @@ -0,0 +1,10 @@ +# Editable runtime dependencies (equivalent to project.dependencies). 
+# Add direct runtime dependencies here, as well as implicit dependencies +# with constrained versions. These should be sufficient to run the phalanx +# command-line tool. +# +# After editing, update requirements/main.txt by running: +# make update-deps + +GitPython +PyYAML diff --git a/requirements/main.txt b/requirements/main.txt new file mode 100644 index 0000000000..6d0c067fd7 --- /dev/null +++ b/requirements/main.txt @@ -0,0 +1,60 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile --allow-unsafe --config=pyproject.toml --generate-hashes --output-file=requirements/main.txt requirements/main.in +# +gitdb==4.0.10 \ + --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ + --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 + # via gitpython +gitpython==3.1.32 \ + --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \ + --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f + # via -r requirements/main.in +pyyaml==6.0.1 \ + --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ + --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ + --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \ + --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \ + --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \ + --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \ + --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \ + --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \ + --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \ + --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \ + --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \ + --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \ + --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \ + --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \ + --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \ + --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \ + --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \ + --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \ + --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \ + --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \ + --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \ + --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \ + --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \ + --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \ + --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \ + --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \ + --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \ + --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \ + --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \ + 
--hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \ + --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \ + --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \ + --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \ + --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \ + --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \ + --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \ + --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \ + --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \ + --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ + --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f + # via -r requirements/main.in +smmap==5.0.0 \ + --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \ + --hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936 + # via gitdb From 9c985b028077164d41defe701fee4858673361ec Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 09:36:35 -0700 Subject: [PATCH 244/308] Remove docs/Makefile Move the contents of the clean rule to the top-level make clean target and remove docs/Makefile now that we use tox to build the documentation. --- Makefile | 2 +- docs/Makefile | 11 ----------- 2 files changed, 1 insertion(+), 12 deletions(-) delete mode 100644 docs/Makefile diff --git a/Makefile b/Makefile index eb94a1ec76..e6e0e7177b 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ help: .PHONY: clean clean: - rm -rf .mypy_cache .ruff_cache .tox + rm -rf .mypy_cache .ruff_cache .tox docs/_build make -C docs clean .PHONY: init diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 02d05fdc68..0000000000 --- a/docs/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -# Makefile for Sphinx documentation. -# Use tox -e docs,docs-linkcheck to build the docs. - -.PHONY: help -help: - @echo "Please use \`make ' where is one of" - @echo " clean delete builds" - -.PHONY: clean -clean: - rm -rf _build/* From b4bd856adc09083add868a48310127053065094d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 09:40:11 -0700 Subject: [PATCH 245/308] Update copyright date in LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 6b5e25a46c..e0d1c48a56 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2019-2022 Association of Universities for Research in Astronomy, Inc. (AURA) +Copyright (c) 2019-2023 Association of Universities for Research in Astronomy, Inc. (AURA) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 2749af0413aab9435a5c35e59d348cdc7a805f10 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 09:49:42 -0700 Subject: [PATCH 246/308] Update helm lint workflow Install dependencies from requirements/main.txt instead of relying on pyproject.toml. 
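For reference, the step this patch changes ends up installing the pinned runtime requirements before the package itself; a plain sketch of the resulting workflow YAML (taken from the hunk below) is:

    - name: Install test dependencies
      run: |
        pip install -r requirements/main.txt
        pip install --upgrade .

Installing from requirements/main.txt keeps the lint job on the same pinned versions used elsewhere, rather than whatever pip resolves from pyproject.toml at run time.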
--- .github/workflows/ci.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 91cf2d0c4f..efdb6ab539 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -60,7 +60,9 @@ jobs: python-version: "3.11" - name: Install test dependencies - run: pip install . + run: | + pip install -r requirements/main.txt + pip install --upgrade . - name: Expand modified charts run: expand-charts From 6df6fdcb5264138689e28e955809e42d77853a9c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 20 Jul 2023 10:40:38 -0700 Subject: [PATCH 247/308] Further dependency management changes This may not be the direction we go, but make everything internally consistent for now. Remove the setup target in favor of only an init target that works the same as other services. Remove the stray dev dependencies in pyproject.toml and fix tox to install from the pinned hashes. --- Makefile | 11 +++-------- pyproject.toml | 17 ----------------- tox.ini | 5 +++-- 3 files changed, 6 insertions(+), 27 deletions(-) diff --git a/Makefile b/Makefile index e6e0e7177b..5e37bccf2e 100644 --- a/Makefile +++ b/Makefile @@ -14,16 +14,11 @@ clean: .PHONY: init init: - pip install --upgrade pre-commit tox - pre-commit install pip install --editable . - pip install --upgrade -r requirements/main.txt requirements/dev.txt + pip install --upgrade -r requirements/main.txt -r requirements/dev.txt rm -rf .tox - -.PHONY: setup -setup: - pip install --editable . - pip install --upgrade -r requirements/main.txt + pip install --upgrade pre-commit tox + pre-commit install .PHONY: update update: update-deps init diff --git a/pyproject.toml b/pyproject.toml index 504dd335e9..201cd7dca0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,23 +19,6 @@ classifiers = [ "Operating System :: POSIX", ] requires-python = ">=3.11" -dependencies = [] - -[project.optional-dependencies] -dev = [ - # Testing - "coverage[toml]", - "pytest", - "pre-commit", - "mypy", - "ruff", - "types-PyYAML", - # Documentation - "documenteer[guide]>=0.7.0,<1", - "sphinx-diagrams", - "sphinx-jinja", - "pydantic<2", -] [project.scripts] expand-charts = "phalanx.testing.expandcharts:main" diff --git a/tox.ini b/tox.ini index d74a6d130d..733e4a0dc2 100644 --- a/tox.ini +++ b/tox.ini @@ -4,8 +4,9 @@ isolated_build = True [testenv] description = Run pytest against {envname}. -extras = - dev +deps = + -r{toxinidir}/requirements/main.txt + -r{toxinidir}/requirements/dev.txt [testenv:docs] description = Build documentation (HTML) with Sphinx. From 2af576d8d786195bb878d4739ddec7a5735ebf64 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 20 Jul 2023 10:42:09 -0700 Subject: [PATCH 248/308] Use make init to install code for testing Technically, only runtime dependencies are required for Helm chart expansion, but use make init to install all dependencies to avoid confusion. --- .github/workflows/ci.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index efdb6ab539..0b837b0196 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -60,9 +60,7 @@ jobs: python-version: "3.11" - name: Install test dependencies - run: | - pip install -r requirements/main.txt - pip install --upgrade . 
+ run: make init - name: Expand modified charts run: expand-charts From ba91ecd8d249e5564ff3392b9a780839be6cb8e0 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 20 Jul 2023 10:46:12 -0700 Subject: [PATCH 249/308] Add .ruff_cache to ignore list for black --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 201cd7dca0..f1fc6b3dcb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,6 +45,7 @@ exclude = ''' \.eggs | \.git | \.mypy_cache + | \.ruff_cache | \.tox | \.venv | _build From b3d04a84f1ec5efa78797569a5ca8718e90d2737 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 24 Jul 2023 18:01:47 -0700 Subject: [PATCH 250/308] Remove make setup from Makefile help --- Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/Makefile b/Makefile index 5e37bccf2e..9d8f64ccc4 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,6 @@ help: @echo "Make targets for Phalanx:" @echo "make clean - Remove generated files" @echo "make init - Set up dev environment (install pre-commit hooks)" - @echo "make setup - Install requirements for phalanx command line" @echo "make update - Update pinned dependencies and run make init" @echo "make update-deps - Update pinned dependencies" From eb018f1cd1bae900045154e68c3fdc783eb3d4b5 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 25 Jul 2023 09:40:05 -0700 Subject: [PATCH 251/308] Update Argo CD version used for CI --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 0b837b0196..97e493fd3c 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -121,7 +121,7 @@ jobs: unzip /tmp/vault.zip sudo mv vault /usr/local/bin/vault sudo chmod +x /usr/local/bin/vault - sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.7/argocd-linux-amd64 + sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.8/argocd-linux-amd64 sudo chmod +x /usr/local/bin/argocd sudo apt-get install socat sudo pip install -r installer/requirements.txt From 0617b37b9ade02ff517ac7e28f6f1f406e4e18ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Jul 2023 17:34:33 +0000 Subject: [PATCH 252/308] Bump certifi from 2023.5.7 to 2023.7.22 in /requirements Bumps [certifi](https://github.com/certifi/python-certifi) from 2023.5.7 to 2023.7.22. - [Commits](https://github.com/certifi/python-certifi/compare/2023.05.07...2023.07.22) --- updated-dependencies: - dependency-name: certifi dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- requirements/dev.txt | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/requirements/dev.txt b/requirements/dev.txt index 4d84730756..215c25e2d6 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -16,9 +16,9 @@ beautifulsoup4==4.12.2 \ --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \ --hash=sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a # via pydata-sphinx-theme -certifi==2023.5.7 \ - --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \ - --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716 +certifi==2023.7.22 \ + --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ + --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests cfgv==3.3.1 \ --hash=sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426 \ @@ -207,7 +207,7 @@ coverage[toml]==7.2.7 \ --hash=sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e \ --hash=sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850 \ --hash=sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3 - # via -r requirements/dev.in + # via -r dev.in cycler==0.11.0 \ --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \ --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f @@ -223,7 +223,7 @@ distlib==0.3.7 \ documenteer[guide]==0.8.3 \ --hash=sha256:e1514ca8dd96810a6d24d4b21f7b28458a3cf434217e46939ffab2c201791afc \ --hash=sha256:fbe3ad1740751da8fcc95d809b0a489dc7f14fcdb78b28df85860ae92011c9a2 - # via -r requirements/dev.in + # via -r dev.in docutils==0.19 \ --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc @@ -278,13 +278,13 @@ gitdb==4.0.10 \ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 # via - # -c requirements/main.txt + # -c main.txt # gitpython gitpython==3.1.32 \ --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \ --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f # via - # -c requirements/main.txt + # -c main.txt # documenteer graphviz==0.20.1 \ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ @@ -529,7 +529,7 @@ mypy==1.4.1 \ --hash=sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878 \ --hash=sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f \ --hash=sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b - # via -r requirements/dev.in + # via -r dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ --hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782 @@ -648,7 +648,7 @@ pluggy==1.2.0 \ pre-commit==3.3.3 \ --hash=sha256:10badb65d6a38caff29703362271d7dca483d01da88f9d7e05d0b97171c136cb \ --hash=sha256:a2256f489cd913d575c145132ae196fe335da32d91a8294b7afe6622335dd023 - # via -r requirements/dev.in + # via -r dev.in pybtex==0.24.0 \ --hash=sha256:818eae35b61733e5c007c3fcd2cfb75ed1bc8b4173c1f70b56cc4c0802d34755 \ --hash=sha256:e1e0c8c69998452fea90e9179aa2a98ab103f3eed894405b7264e517cc2fcc0f @@ -697,7 +697,7 @@ 
pydantic==1.10.11 \ --hash=sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb \ --hash=sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f # via - # -r requirements/dev.in + # -r dev.in # documenteer pydata-sphinx-theme==0.12.0 \ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ @@ -717,7 +717,7 @@ pyparsing==3.0.9 \ pytest==7.4.0 \ --hash=sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32 \ --hash=sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a - # via -r requirements/dev.in + # via -r dev.in python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 @@ -764,7 +764,7 @@ pyyaml==6.0.1 \ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f # via - # -c requirements/main.txt + # -c main.txt # documenteer # myst-parser # pre-commit @@ -793,7 +793,7 @@ ruff==0.0.278 \ --hash=sha256:d11149c7b186f224f2055e437a030cd83b164a43cc0211314c33ad1553ed9c4c \ --hash=sha256:e131595ab7f4ce61a1650463bd2fe304b49e7d0deb0dfa664b92817c97cdba5f \ --hash=sha256:ec8b0469b54315803aaf1fbf9a37162a3849424cab6182496f972ad56e0ea702 - # via -r requirements/dev.in + # via -r dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 @@ -805,7 +805,7 @@ smmap==5.0.0 \ --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \ --hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936 # via - # -c requirements/main.txt + # -c main.txt # gitdb snowballstemmer==2.2.0 \ --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ @@ -850,11 +850,11 @@ sphinx-design==0.4.1 \ sphinx-diagrams==0.4.0 \ --hash=sha256:3cf2e0179bdd9ccdb28164fcfcae9b167999a1abe40e159e0c26a225490074d1 \ --hash=sha256:4860291cb04d6361f898d20ba28dca7345f757cdc240caf144c8bf20c73067a0 - # via -r requirements/dev.in + # via -r dev.in sphinx-jinja==2.0.2 \ --hash=sha256:705ebeb9b7a6018ca3f93724315a7c1effa6ba3db44d630e7eaaa15e4ac081a8 \ --hash=sha256:c6232b59a894139770be1dc6d0b00a379e4288ce78157904e1f8473dea3e0718 - # via -r requirements/dev.in + # via -r dev.in sphinx-prompt==1.5.0 \ --hash=sha256:fa4e90d8088b5a996c76087d701fc7e31175f8b9dc4aab03a507e45051067162 # via documenteer @@ -944,7 +944,7 @@ typed-ast==1.5.5 \ types-pyyaml==6.0.12.10 \ --hash=sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f \ --hash=sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97 - # via -r requirements/dev.in + # via -r dev.in typing-extensions==4.7.1 \ --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \ --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2 From 276f37a8299f6e4507f8b21d878376e67aadbdd9 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Fri, 23 Jun 2023 10:10:25 -0700 Subject: [PATCH 253/308] deploy mini phalanx test pre-commit docs build for new environment --- applications/argocd/values-usdf-tel-rsp.yaml | 85 +++++++ .../gafaelfawr/values-usdf-tel-rsp.yaml | 227 ++++++++++++++++++ .../postgres/values-usdf-tel-rsp.yaml | 8 + .../values-usdf-tel-rsp.yaml | 22 ++ docs/environments/index.rst | 1 + 
docs/environments/usdf-tel-rsp/index.rst | 10 + environments/values-usdf-tel-rsp.yaml | 81 +++++++ 7 files changed, 434 insertions(+) create mode 100644 applications/argocd/values-usdf-tel-rsp.yaml create mode 100644 applications/gafaelfawr/values-usdf-tel-rsp.yaml create mode 100644 applications/postgres/values-usdf-tel-rsp.yaml create mode 100644 applications/vault-secrets-operator/values-usdf-tel-rsp.yaml create mode 100644 docs/environments/usdf-tel-rsp/index.rst create mode 100644 environments/values-usdf-tel-rsp.yaml diff --git a/applications/argocd/values-usdf-tel-rsp.yaml b/applications/argocd/values-usdf-tel-rsp.yaml new file mode 100644 index 0000000000..280ffe7033 --- /dev/null +++ b/applications/argocd/values-usdf-tel-rsp.yaml @@ -0,0 +1,85 @@ +argo-cd: + redis: + enabled: true + + server: + ingress: + enabled: true + hosts: + - "usdf-tel-rsp.slac.stanford.edu" + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + paths: + - /argo-cd(/|$)(.*) + + extraArgs: + - "--basehref=/argo-cd" + - "--insecure=true" + + env: + - name: HTTP_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + config: + url: https://usdf-tel-rsp.slac.stanford.edu/argo-cd + oidc.config: | + name: SLAC + issuer: https://dex.slac.stanford.edu + clientID: $oidc.clientId + clientSecret: $oidc.clientSecret + # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"] + requestedScopes: ["openid", "profile", "email", "groups"] + # Optional set of OIDC claims to request on the ID token. + requestedIDTokenClaims: {"groups": {"essential": true}} + rbacConfig: + policy.csv: | + g, ytl@slac.stanford.edu, role:admin + g, ppascual@slac.stanford.edu, role:admin + g, pav@slac.stanford.edu, role:admin + g, dspeck@slac.stanford.edu, role:admin + g, afausti@slac.stanford.edu, role:admin + g, mfl@slac.stanford.edu, role:admin + g, cbanek@slac.stanford.edu, role:admin + g, frossie@slac.stanford.edu, role:admin + g, hchiang2@slac.stanford.edu, role:admin + g, athor@slac.stanford.edu, role:admin + g, reinking@slac.stanford.edu, role:admin + g, smart@slac.stanford.edu, role:admin + g, omullan@slac.stanford.edu, role:admin + g, mreuter@slac.stanford.edu, role:admin + scopes: "[email]" + + helm.repositories: | + - url: https://lsst-sqre.github.io/charts/ + name: lsst-sqre + - url: https://charts.helm.sh/stable + name: stable + + repoServer: + + env: + - name: HTTP_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + controller: + + env: + - name: HTTP_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: HTTPS_PROXY + value: http://squid.slac.stanford.edu:3128 + - name: NO_PROXY + value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server + + configs: + secret: + createSecret: false diff --git a/applications/gafaelfawr/values-usdf-tel-rsp.yaml b/applications/gafaelfawr/values-usdf-tel-rsp.yaml new file mode 100644 index 0000000000..04cb16d38d --- /dev/null +++ b/applications/gafaelfawr/values-usdf-tel-rsp.yaml @@ -0,0 +1,227 @@ +replicaCount: 2 + +# Use the CSI storage class so that we can use snapshots. 
+redis: + persistence: + storageClass: "wekafs--sdf-k8s01" + +config: + databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" + + oidcServer: + enabled: true + + oidc: + clientId: rubin-usdf-tel-rsp + audience: "rubin-usdf-tel-rsp" + loginUrl: "https://dex.slac.stanford.edu/auth" + tokenUrl: "https://dex.slac.stanford.edu/token" + issuer: "https://dex.slac.stanford.edu" + scopes: + - "openid" + - "email" + - "groups" + - "profile" + usernameClaim: "name" + + ldap: + url: ldaps://ldap-unix.slac.stanford.edu:636 + groupBaseDn: ou=Group,dc=slac,dc=stanford,dc=edu + groupObjectClass: posixGroup + groupMemberAttr: memberUid + userBaseDn: ou=Accounts,dc=slac,dc=stanford,dc=edu + userSearchAttr: uid + addUserGroup: false + uidAttr: uidNumber + gidAttr: gidNumber + nameAttr: gecos + + groupMapping: + "admin:token": + - "rubinmgr" + - "unix-admin" + "admin:users": + - "rubinmgr" + - "unix-admin" + "exec:admin": + - "rubinmgr" + - "unix-admin" + "exec:notebook": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "exec:portal": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "exec:user": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" + "read:tap": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - "rubin_admin_datasets" + - "rubin_admin_repos" + - "unix-admin" 
+ "read:image": + - "lsst" + - "lsst-ccs" + - "rubin_users" + - "rubin_users-a" + - "rubin_users-b" + - "rubin_users-c" + - "rubin_users-d" + - "rubin_users-e" + - "rubin_users-f" + - "rubin_users-g" + - "rubin_users-h" + - "rubin_users-i" + - "rubin_users-j" + - "rubin_users-k" + - "rubin_users-l" + - "rubin_users-m" + - "rubin_users-n" + - "rubin_users-o" + - "rubin_users-p" + - "rubin_users-q" + - "rubin_users-r" + - "rubin_users-s" + - "rubin_users-t" + - "rubin_users-u" + - "rubin_users-v" + - "rubin_users-w" + - "rubin_users-x" + - "rubin_users-y" + - "rubin_users-z" + - rubin_admin_datasets + - rubin_admin_repos + - "unix-admin" + "write:sasquatch": + - "rubinmgr" + - "unix-admin" + + initialAdmins: + - "afausti" + - "athor" + - "cbanek" + - "frossie" + - "jonathansick" + - "rra" + - "simonkrughoff" + - "ytl" + - "ppascual" diff --git a/applications/postgres/values-usdf-tel-rsp.yaml b/applications/postgres/values-usdf-tel-rsp.yaml new file mode 100644 index 0000000000..c7ae91cda0 --- /dev/null +++ b/applications/postgres/values-usdf-tel-rsp.yaml @@ -0,0 +1,8 @@ +jupyterhub_db: + user: 'jovyan' + db: 'jupyterhub' +gafaelfawr_db: + user: 'gafaelfawr' + db: 'gafaelfawr' + +postgresStorageClass: 'wekafs--sdf-k8s01' diff --git a/applications/vault-secrets-operator/values-usdf-tel-rsp.yaml b/applications/vault-secrets-operator/values-usdf-tel-rsp.yaml new file mode 100644 index 0000000000..bfb0f3700f --- /dev/null +++ b/applications/vault-secrets-operator/values-usdf-tel-rsp.yaml @@ -0,0 +1,22 @@ +vault-secrets-operator: + environmentVars: + - name: VAULT_AUTH_METHOD + value: approle + - name: VAULT_ROLE_ID + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_ROLE_ID + - name: VAULT_SECRET_ID + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_SECRET_ID + - name: VAULT_TOKEN_MAX_TTL + valueFrom: + secretKeyRef: + name: vault-secrets-operator + key: VAULT_TOKEN_MAX_TTL + vault: + address: "https://vault.slac.stanford.edu" + authMethod: approle diff --git a/docs/environments/index.rst b/docs/environments/index.rst index 6f28095e71..a6d39fdcfc 100644 --- a/docs/environments/index.rst +++ b/docs/environments/index.rst @@ -25,3 +25,4 @@ To learn more about operating a Phalanx environment, see the :doc:`/admin/index` tucson-teststand/index usdfdev/index usdfprod/index + usdf-tel-rsp/index diff --git a/docs/environments/usdf-tel-rsp/index.rst b/docs/environments/usdf-tel-rsp/index.rst new file mode 100644 index 0000000000..67daf7a522 --- /dev/null +++ b/docs/environments/usdf-tel-rsp/index.rst @@ -0,0 +1,10 @@ +.. px-env:: usdf-tel-rsp + +############################################################ +usdf-tel-rsp — usdf-tel-rsp.slac.stanford.edu (Dev for USDF) +############################################################ + +``usdf-tel-rsp`` is a development environment for the Rubin Science Platform at the United States Data Facility (USDF) hosted at SLAC. + +.. 
jinja:: usdf-tel-rsp + :file: environments/_summary.rst.jinja diff --git a/environments/values-usdf-tel-rsp.yaml b/environments/values-usdf-tel-rsp.yaml new file mode 100644 index 0000000000..3ea6a51b91 --- /dev/null +++ b/environments/values-usdf-tel-rsp.yaml @@ -0,0 +1,81 @@ +environment: usdf-tel-rsp +fqdn: usdf-tel-rsp.slac.stanford.edu +vaultPathPrefix: secret/rubin/usdf-tel-rsp +# butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-repos.yaml" +repoURL: https://github.com/lsst-sqre/phalanx.git + +alert-stream-broker: + enabled: false +argo-workflows: + enabled: false +cachemachine: + enabled: false +cert-manager: + enabled: false +datalinker: + enabled: false +exposurelog: + enabled: false +gafaelfawr: + enabled: true +hips: + enabled: false +ingress-nginx: + enabled: false +kubernetes-replicator: + enabled: false +mobu: + enabled: false +moneypenny: + enabled: false +narrativelog: + enabled: false +noteburst: + enabled: false +nublado: + enabled: false +nublado2: + enabled: false +plot-navigator: + enabled: false +portal: + enabled: false +postgres: + enabled: true +sasquatch: + enabled: false +production-tools: + enabled: false +semaphore: + enabled: false +sherlock: + enabled: false +ssotap: + enabled: false +squarebot: + enabled: false +squareone: + enabled: false +squash-api: + enabled: false +strimzi: + enabled: false +strimzi-access-operator: + enabled: false +strimzi-registry-operator: + enabled: false +tap: + enabled: false +tap-schema: + enabled: false +telegraf: + enabled: false +telegraf-ds: + enabled: false +times-square: + enabled: false +vault-secrets-operator: + enabled: true +vo-cutouts: + enabled: false +# comment to test pre-commit From baff6fd1e0a4bda0890dcd09754dfeab22f257b8 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 15:01:08 -0700 Subject: [PATCH 254/308] Change config for Gafaelfawr internal database Add a new Helm configuration option for whether Gafaelfawr should use the cluster-internal PostgreSQL database instead of just using the database URI so that we can trigger optional secrets based on whether that configuration option is set. --- applications/gafaelfawr/README.md | 3 ++- applications/gafaelfawr/templates/configmap.yaml | 4 ++++ applications/gafaelfawr/values-ccin2p3.yaml | 2 +- applications/gafaelfawr/values-minikube.yaml | 2 +- applications/gafaelfawr/values-roe.yaml | 2 +- applications/gafaelfawr/values-usdfdev.yaml | 2 +- applications/gafaelfawr/values-usdfprod.yaml | 2 +- applications/gafaelfawr/values.yaml | 6 +++++- 8 files changed, 16 insertions(+), 7 deletions(-) diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 086454aec9..63c2303239 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -32,7 +32,7 @@ Authentication and identity system | config.cilogon.test | bool | `false` | Whether to use the test instance of CILogon | | config.cilogon.uidClaim | string | `"uidNumber"` | Claim from which to get the numeric UID (only used if not retrieved from LDAP or Firestore) | | config.cilogon.usernameClaim | string | `"uid"` | Claim from which to get the username | -| config.databaseUrl | string | None, must be set if `cloudsql.enabled` is not true | URL for the PostgreSQL database | +| config.databaseUrl | string | None, must be set if neither `cloudsql.enabled` | URL for the PostgreSQL database nor `config.internalDatabase` are true | | config.errorFooter | string | `""` | HTML footer to add to any login error page (will be enclosed in a

tag). | | config.firestore.project | string | Firestore support is disabled | If set, assign UIDs and GIDs using Google Firestore in the given project. Cloud SQL must be enabled and the Cloud SQL service account must have read/write access to that Firestore instance. | | config.forgerock.url | string | ForgeRock Identity Management support is disabled | If set, obtain the GIDs for groups from this ForgeRock Identity Management server. | @@ -40,6 +40,7 @@ Authentication and identity system | config.github.clientId | string | `""` | GitHub client ID. One and only one of this, `config.cilogon.clientId`, or `config.oidc.clientId` must be set. | | config.groupMapping | object | `{}` | Defines a mapping of scopes to groups that provide that scope. See [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes. | | config.initialAdmins | list | `[]` | Usernames to add as administrators when initializing a new database. Used only if there are no administrators. | +| config.internalDatabase | bool | `false` | Whether to use the PostgreSQL server internal to the Kubernetes cluster | | config.knownScopes | object | See the `values.yaml` file | Names and descriptions of all scopes in use. This is used to populate the new token creation page. Only scopes listed here will be options when creating a new token. See [DMTN-235](https://dmtn-235.lsst.io/). | | config.ldap.addUserGroup | bool | `false` | Whether to synthesize a user private group for each user with a GID equal to their UID | | config.ldap.emailAttr | string | `"mail"` | Attribute containing the user's email address | diff --git a/applications/gafaelfawr/templates/configmap.yaml b/applications/gafaelfawr/templates/configmap.yaml index 282516e7d5..e5c02c34d9 100644 --- a/applications/gafaelfawr/templates/configmap.yaml +++ b/applications/gafaelfawr/templates/configmap.yaml @@ -194,6 +194,8 @@ data: gafaelfawr.yaml: | {{- if .Values.cloudsql.enabled }} databaseUrl: "postgresql://gafaelfawr@cloud-sql-proxy/gafaelfawr" + {{- else if .Values.config.internalDatabase }} + databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" {{- else }} databaseUrl: {{ required "config.databaseUrl must be set" .Values.config.databaseUrl | quote }} {{- end }} @@ -209,6 +211,8 @@ data: gafaelfawr.yaml: | {{- if .Values.cloudsql.enabled }} databaseUrl: "postgresql://gafaelfawr@localhost/gafaelfawr" + {{- else if .Values.config.internalDatabase }} + databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" {{- else }} databaseUrl: {{ required "config.databaseUrl must be set" .Values.config.databaseUrl | quote }} {{- end }} diff --git a/applications/gafaelfawr/values-ccin2p3.yaml b/applications/gafaelfawr/values-ccin2p3.yaml index b5dee269a4..19498c3a97 100644 --- a/applications/gafaelfawr/values-ccin2p3.yaml +++ b/applications/gafaelfawr/values-ccin2p3.yaml @@ -6,7 +6,7 @@ redis: config: logLevel: "DEBUG" - databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" + internalDatabase: true # Session length and token expiration (in minutes). issuer: diff --git a/applications/gafaelfawr/values-minikube.yaml b/applications/gafaelfawr/values-minikube.yaml index 46266cb4d5..44c3e841e7 100644 --- a/applications/gafaelfawr/values-minikube.yaml +++ b/applications/gafaelfawr/values-minikube.yaml @@ -4,7 +4,7 @@ redis: enabled: false config: - databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" + internalDatabase: true # Support OpenID Connect clients like Chronograf. 
oidcServer: diff --git a/applications/gafaelfawr/values-roe.yaml b/applications/gafaelfawr/values-roe.yaml index 5f7c2128f4..59b349094f 100644 --- a/applications/gafaelfawr/values-roe.yaml +++ b/applications/gafaelfawr/values-roe.yaml @@ -3,7 +3,7 @@ redis: enabled: false config: - databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" + internalDatabase: true github: clientId: "10172b4db1b67ee31620" diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index 664ef85758..6ffb339ba2 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -6,7 +6,7 @@ redis: storageClass: "wekafs--sdf-k8s01" config: - databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" + internalDatabase: true oidcServer: enabled: true diff --git a/applications/gafaelfawr/values-usdfprod.yaml b/applications/gafaelfawr/values-usdfprod.yaml index d3a93749e9..f5513a3a14 100644 --- a/applications/gafaelfawr/values-usdfprod.yaml +++ b/applications/gafaelfawr/values-usdfprod.yaml @@ -6,7 +6,7 @@ redis: storageClass: "wekafs--sdf-k8s01" config: - databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr" + internalDatabase: true oidcServer: enabled: true diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index b1f9d2142b..85d1353fdd 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -36,8 +36,12 @@ tolerations: [] affinity: {} config: + # -- Whether to use the PostgreSQL server internal to the Kubernetes cluster + internalDatabase: false + # -- URL for the PostgreSQL database - # @default -- None, must be set if `cloudsql.enabled` is not true + # @default -- None, must be set if neither `cloudsql.enabled` + # nor `config.internalDatabase` are true databaseUrl: "" # -- Choose from the text form of Python logging levels From 51d681717e4cf37c9eb3ce4705731c6d9abd9294 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 15:03:40 -0700 Subject: [PATCH 255/308] Add explicit Nublado setting for internal database For both the nublado and nublado2 applications, add an explicit Helm chart configuration option saying to use an internal PostgreSQL database. This is not used directly by the chart, but is needed to trigger optional secrets. --- applications/nublado/README.md | 1 + applications/nublado/values.yaml | 5 +++++ applications/nublado2/README.md | 1 + applications/nublado2/values.yaml | 4 ++++ 4 files changed, 11 insertions(+) diff --git a/applications/nublado/README.md b/applications/nublado/README.md index eb7768219e..17addd2744 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -50,6 +50,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| hub.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | | hub.timeout.spawn | int | `600` | Timeout for the Kubernetes spawn process in seconds. (Allow long enough to pull uncached images if needed.) | | hub.timeout.startup | int | `90` | Timeout for JupyterLab to start. 
Currently this sometimes takes over 60 seconds for reasons we don't understand. | | jupyterhub.cull.enabled | bool | `true` | Enable the lab culler. | diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index f01dc79621..0f89b35a56 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -257,6 +257,11 @@ controller: # JupyterHub configuration handled directly by this chart rather than by Zero # to JupyterHub. hub: + # -- Whether to use the cluster-internal PostgreSQL server instead of an + # external server. This is not used directly by the Nublado chart, but + # controls how the database password is managed. + internalDatabase: true + timeout: # -- Timeout for the Kubernetes spawn process in seconds. (Allow long # enough to pull uncached images if needed.) diff --git a/applications/nublado2/README.md b/applications/nublado2/README.md index 8f4ebdf915..8f4eb07907 100644 --- a/applications/nublado2/README.md +++ b/applications/nublado2/README.md @@ -15,6 +15,7 @@ JupyterHub for the Rubin Science Platform | config.base_url | string | `""` | base_url must be set in each instantiation of this chart to the URL of the primary ingress. It's used to construct API requests to the authentication service (which should go through the ingress). | | config.butler_secret_path | string | `""` | butler_secret_path must be set here, because it's passed through to the lab rather than being part of the Hub configuration. | | config.cachemachine_image_policy | string | `"available"` | Cachemachine image policy: "available" or "desired". Use "desired" at instances with streaming image support. | +| config.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. | | config.lab_environment | object | See `values.yaml` | Environment variables to set in spawned lab containers. Each value will be expanded using Jinja 2 templating. | | config.pinned_images | list | `[]` | images to pin to spawner menu | | config.pull_secret_path | string | `""` | pull_secret_path must also be set here; it specifies resources in the lab namespace | diff --git a/applications/nublado2/values.yaml b/applications/nublado2/values.yaml index 35b642c2b8..8585f00d47 100644 --- a/applications/nublado2/values.yaml +++ b/applications/nublado2/values.yaml @@ -178,6 +178,10 @@ jupyterhub: enabled: false config: + # -- Whether to use the cluster-internal PostgreSQL server instead of an + # external server. This is not used directly by the Nublado chart, but + # controls how the database password is managed. + internalDatabase: true # -- base_url must be set in each instantiation of this chart to the URL of # the primary ingress. It's used to construct API requests to the # authentication service (which should go through the ingress). From 89d640f8b0a5b013921a1732e60996425f7a78b6 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 25 Jul 2023 15:41:57 -0700 Subject: [PATCH 256/308] Update pre-commit hooks Update Ruff and helm-docs and fix an issue found by the new Ruff. 
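Each pre-commit hook is pinned to a tagged release through its rev field, so this kind of update is a one-line bump per hook; a minimal sketch of the updated Ruff entry (matching the hunk below) looks like:

    - repo: https://github.com/astral-sh/ruff-pre-commit
      rev: v0.0.280
      hooks:
        - id: ruff
          args: [--fix, --exit-non-zero-on-fix]

The accompanying source change replaces iteration over env_values.items() with env_values.values(), since the newer Ruff flags the unused dictionary key.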
--- .pre-commit-config.yaml | 4 ++-- src/phalanx/docs/models.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fa0753bf14..1f47f14acc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - -c=.yamllint.yml - repo: https://github.com/norwoodj/helm-docs - rev: v1.11.0 + rev: v1.11.1 hooks: - id: helm-docs args: @@ -24,7 +24,7 @@ repos: - --document-dependency-values=true - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.278 + rev: v0.0.280 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/src/phalanx/docs/models.py b/src/phalanx/docs/models.py index 0bfde18ba1..aa28adf245 100644 --- a/src/phalanx/docs/models.py +++ b/src/phalanx/docs/models.py @@ -380,7 +380,7 @@ def load_phalanx(cls, root_dir: Path) -> Phalanx: apps.sort(key=lambda a: a.name) # Gather environments - for _env_name, values in env_values.items(): + for values in env_values.values(): env = Environment.load(values=values, applications=apps) envs.append(env) From 654419b710c615c79edf083879034efb90ac2afb Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 25 Jul 2023 15:47:36 -0700 Subject: [PATCH 257/308] Update Helm docs for new helm-docs release helm-docs has a new release which no longer adds a newline at the end of README.md files. --- applications/alert-stream-broker/README.md | 2 +- .../alert-stream-broker/charts/alert-database/README.md | 2 +- .../alert-stream-broker/charts/alert-stream-broker/README.md | 2 +- .../charts/alert-stream-schema-registry/README.md | 2 +- .../alert-stream-broker/charts/alert-stream-simulator/README.md | 2 +- applications/argo-workflows/README.md | 2 +- applications/argocd/README.md | 2 +- applications/cachemachine/README.md | 2 +- applications/cert-manager/README.md | 2 +- applications/datalinker/README.md | 2 +- applications/exposurelog/README.md | 2 +- applications/gafaelfawr/README.md | 2 +- applications/giftless/README.md | 2 +- applications/hips/README.md | 2 +- applications/ingress-nginx/README.md | 2 +- applications/kubernetes-replicator/README.md | 2 +- applications/linters/README.md | 2 +- applications/livetap/README.md | 2 +- applications/mobu/README.md | 2 +- applications/moneypenny/README.md | 2 +- applications/narrativelog/README.md | 2 +- applications/noteburst/README.md | 2 +- applications/nublado/README.md | 2 +- applications/nublado2/README.md | 2 +- applications/obsloctap/README.md | 2 +- applications/ook/README.md | 2 +- applications/plot-navigator/README.md | 2 +- applications/portal/README.md | 2 +- applications/postgres/README.md | 2 +- applications/production-tools/README.md | 2 +- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/kafdrop/README.md | 2 +- applications/sasquatch/charts/kafka-connect-manager/README.md | 2 +- applications/sasquatch/charts/rest-proxy/README.md | 2 +- applications/sasquatch/charts/square-events/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/README.md | 2 +- applications/sasquatch/charts/telegraf-kafka-consumer/README.md | 2 +- applications/semaphore/README.md | 2 +- applications/sherlock/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/squarebot/README.md | 2 +- applications/squareone/README.md | 2 +- applications/ssotap/README.md | 2 +- applications/strimzi-access-operator/README.md | 2 +- applications/tap-schema/README.md | 2 +- applications/tap/README.md | 2 +- applications/telegraf-ds/README.md | 2 +- 
applications/telegraf/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vault-secrets-operator/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- environments/README.md | 2 +- starters/web-service/README.md | 2 +- 53 files changed, 53 insertions(+), 53 deletions(-) diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index e91597a416..1a82e13b0c 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -107,4 +107,4 @@ Alert transmission to community brokers | alert-stream-simulator.replayTopicReplicas | int | `2` | | | alert-stream-simulator.schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | alert-stream-simulator.staticTopicName | string | `"alerts-static"` | Name of the topic which will hold a static single visit of sample data. | -| alert-stream-simulator.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | +| alert-stream-simulator.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-database/README.md b/applications/alert-stream-broker/charts/alert-database/README.md index 30b66902e0..7343a81b47 100644 --- a/applications/alert-stream-broker/charts/alert-database/README.md +++ b/applications/alert-stream-broker/charts/alert-database/README.md @@ -38,4 +38,4 @@ Archival database of alerts sent through the alert stream. | server.serviceAccountName | string | `"alertdb-reader"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database server. | | storage.gcp.alertBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with alert data | | storage.gcp.project | string | `""` | Name of a GCP project that has a bucket for database storage | -| storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data | +| storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data | \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md index 394f840d4c..5ed52e1292 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md @@ -38,4 +38,4 @@ Kafka broker cluster for distributing alerts | vaultSecretsPath | string | `""` | Path to the secret resource in Vault | | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | -| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | +| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. 
| \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md index cc6ac85074..1cc74892f7 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md @@ -15,4 +15,4 @@ Confluent Schema Registry for managing schema versions for the Alert Stream | schemaSync.image.tag | string | `"tickets-DM-32743"` | Version of the container to use | | schemaSync.subject | string | `"alert-packet"` | Subject name to use when inserting data into the Schema Registry | | schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry to store data. | -| strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | +| strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-simulator/README.md b/applications/alert-stream-broker/charts/alert-stream-simulator/README.md index e0833c4138..724924bad9 100644 --- a/applications/alert-stream-broker/charts/alert-stream-simulator/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-simulator/README.md @@ -22,4 +22,4 @@ Producer which repeatedly publishes a static set of alerts into a Kafka topic | replayTopicReplicas | int | `2` | | | schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | staticTopicName | string | `"alerts-static"` | Name of the topic which will hold a static single visit of sample data. 
| -| strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | +| strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | \ No newline at end of file diff --git a/applications/argo-workflows/README.md b/applications/argo-workflows/README.md index a8d76e16da..7daf31f859 100644 --- a/applications/argo-workflows/README.md +++ b/applications/argo-workflows/README.md @@ -22,4 +22,4 @@ Kubernetes workflow engine | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | | ingress.annotations."nginx.ingress.kubernetes.io/use-regex" | string | `"true"` | | -| ingress.scopes[0] | string | `"exec:admin"` | | +| ingress.scopes[0] | string | `"exec:admin"` | | \ No newline at end of file diff --git a/applications/argocd/README.md b/applications/argocd/README.md index 4c2f167dc5..42a1636626 100644 --- a/applications/argocd/README.md +++ b/applications/argocd/README.md @@ -30,4 +30,4 @@ Kubernetes application manager | argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | Type of path expression for Argo CD ingress | | argo-cd.server.ingress.paths | list | `["/argo-cd(/|$)(.*)"]` | Paths to route to Argo CD | | argo-cd.server.metrics.enabled | bool | `true` | Enable server metrics service | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | \ No newline at end of file diff --git a/applications/cachemachine/README.md b/applications/cachemachine/README.md index 1ed392e993..1f2dda7790 100644 --- a/applications/cachemachine/README.md +++ b/applications/cachemachine/README.md @@ -28,4 +28,4 @@ JupyterLab image prepuller | serviceAccount | object | `{"annotations":{},"name":""}` | Secret names to use for all Docker pulls | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod | +| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod | \ No newline at end of file diff --git a/applications/cert-manager/README.md b/applications/cert-manager/README.md index e8155ae527..84d991ca04 100644 --- a/applications/cert-manager/README.md +++ b/applications/cert-manager/README.md @@ -20,4 +20,4 @@ TLS certificate manager | config.email | string | sqre-admin | Contact email address registered with Let's Encrypt | | config.route53.awsAccessKeyId | string | None, must be set if `createIssuer` is true | AWS access key ID for Route 53 (must match `aws-secret-access-key` in Vault secret referenced by `config.vaultSecretPath`) | | config.route53.hostedZone | string | None, must be set if `createIssuer` is true | Route 53 hosted zone in which to create challenge records | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | \ No newline at end of file diff --git a/applications/datalinker/README.md b/applications/datalinker/README.md index a3cfbd378f..c3b14afad5 100644 --- a/applications/datalinker/README.md +++ b/applications/datalinker/README.md @@ -30,4 +30,4 @@ IVOA DataLink-based service and data discovery | 
podAnnotations | object | `{}` | Annotations for the datalinker deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the datalinker deployment pod | -| tolerations | list | `[]` | Tolerations for the datalinker deployment pod | +| tolerations | list | `[]` | Tolerations for the datalinker deployment pod | \ No newline at end of file diff --git a/applications/exposurelog/README.md b/applications/exposurelog/README.md index b3e08c1aba..634f405a8d 100644 --- a/applications/exposurelog/README.md +++ b/applications/exposurelog/README.md @@ -47,4 +47,4 @@ Log messages related to an exposure | replicaCount | int | `1` | How many exposurelog pods to run | | resources | object | `{}` | Resource limits and requests for the exposurelog pod | | securityContext | object | `{}` | Security context for the exposurelog deployment | -| tolerations | list | `[]` | Tolerations for the exposurelog pod | +| tolerations | list | `[]` | Tolerations for the exposurelog pod | \ No newline at end of file diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 63c2303239..2b9640fcc7 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -110,4 +110,4 @@ Authentication and identity system | redis.tolerations | list | `[]` | Tolerations for the Redis pod | | replicaCount | int | `1` | Number of web frontend pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | -| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | +| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | \ No newline at end of file diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 4979510629..fcec2931a7 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -30,4 +30,4 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | server.debug | bool | `false` | Turn on debugging mode | | server.processes | int | `2` | Number of processes for server | | server.threads | int | `2` | Number of threads per process | -| tolerations | list | `[]` | Tolerations for the giftless frontend pod | +| tolerations | list | `[]` | Tolerations for the giftless frontend pod | \ No newline at end of file diff --git a/applications/hips/README.md b/applications/hips/README.md index 01f98617a6..54712cc322 100644 --- a/applications/hips/README.md +++ b/applications/hips/README.md @@ -29,4 +29,4 @@ HiPS tile server backed by Google Cloud Storage | podAnnotations | object | `{}` | Annotations for the hips deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the hips deployment pod | -| tolerations | list | `[]` | Tolerations for the hips deployment pod | +| tolerations | list | `[]` | Tolerations for the hips deployment pod | \ No newline at end of file diff --git a/applications/ingress-nginx/README.md b/applications/ingress-nginx/README.md index 94e146d6cd..78980b1244 100644 --- a/applications/ingress-nginx/README.md +++ b/applications/ingress-nginx/README.md @@ -21,4 +21,4 @@ Ingress controller | ingress-nginx.controller.metrics.enabled | bool | `true` | Enable metrics reporting via Prometheus | | ingress-nginx.controller.podLabels | object | See `values.yaml` | Add labels used by `NetworkPolicy` objects to restrict access to the ingress and thus ensure 
that auth subrequest handlers run | | ingress-nginx.controller.service.externalTrafficPolicy | string | `"Local"` | Force traffic routing policy to Local so that the external IP in `X-Forwarded-For` will be correct | -| vaultCertificate.enabled | bool | `false` | Whether to store ingress TLS certificate via vault-secrets-operator. Typically "squareone" owns it instead in an RSP. | +| vaultCertificate.enabled | bool | `false` | Whether to store ingress TLS certificate via vault-secrets-operator. Typically "squareone" owns it instead in an RSP. | \ No newline at end of file diff --git a/applications/kubernetes-replicator/README.md b/applications/kubernetes-replicator/README.md index 1ca6da402d..5ce44bec30 100644 --- a/applications/kubernetes-replicator/README.md +++ b/applications/kubernetes-replicator/README.md @@ -25,4 +25,4 @@ Kafka secret replicator | kubernetes-replicator.serviceAccount.privileges[0].apiGroups[1] | string | `"apps"` | | | kubernetes-replicator.serviceAccount.privileges[0].apiGroups[2] | string | `"extensions"` | | | kubernetes-replicator.serviceAccount.privileges[0].resources[0] | string | `"secrets"` | | -| kubernetes-replicator.serviceAccount.privileges[0].resources[1] | string | `"configmaps"` | | +| kubernetes-replicator.serviceAccount.privileges[0].resources[1] | string | `"configmaps"` | | \ No newline at end of file diff --git a/applications/linters/README.md b/applications/linters/README.md index facd3c1375..ff3b7459c5 100644 --- a/applications/linters/README.md +++ b/applications/linters/README.md @@ -24,4 +24,4 @@ Linters running for operational reasons | podAnnotations | object | `{}` | Annotations for the linter pod | | replicaCount | int | `1` | Number of web frontend pods to start | | resources | object | `{}` | Resource limits and requests for the linter pod | -| tolerations | list | `[]` | Tolerations for the linter pod | +| tolerations | list | `[]` | Tolerations for the linter pod | \ No newline at end of file diff --git a/applications/livetap/README.md b/applications/livetap/README.md index ab125fd219..7eca13edcd 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -62,4 +62,4 @@ IVOA TAP service | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | | uws.tolerations | list | `[]` | Tolerations for the UWS database pod | -| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | +| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | \ No newline at end of file diff --git a/applications/mobu/README.md b/applications/mobu/README.md index 582048e66a..169dbee2e2 100644 --- a/applications/mobu/README.md +++ b/applications/mobu/README.md @@ -27,4 +27,4 @@ Continuous integration testing | nodeSelector | object | `{}` | Node selector rules for the mobu frontend pod | | podAnnotations | object | `{}` | Annotations for the mobu frontend pod | | resources | object | `{}` | Resource limits and requests for the mobu frontend pod | -| tolerations | list | `[]` | Tolerations for the mobu frontend pod | +| tolerations | list | `[]` | Tolerations for the mobu frontend pod | \ No newline at end of file diff --git a/applications/moneypenny/README.md b/applications/moneypenny/README.md index 1cfedae207..9bd5518680 100644 --- a/applications/moneypenny/README.md +++ b/applications/moneypenny/README.md @@ 
-31,4 +31,4 @@ User provisioning actions | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod | | serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | +| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | \ No newline at end of file diff --git a/applications/narrativelog/README.md b/applications/narrativelog/README.md index 281d4e69db..9023cf08d5 100644 --- a/applications/narrativelog/README.md +++ b/applications/narrativelog/README.md @@ -38,4 +38,4 @@ Narrative log service | replicaCount | int | `1` | Number of narrativelog replicas to run | | resources | object | `{}` | Resource limits and requests for the narrativelog pod | | securityContext | object | `{}` | Security context for the narrativelog deployment | -| tolerations | list | `[]` | Tolerations for the narrativelog pod | +| tolerations | list | `[]` | Tolerations for the narrativelog pod | \ No newline at end of file diff --git a/applications/noteburst/README.md b/applications/noteburst/README.md index 2d56c582c5..58af0aee95 100644 --- a/applications/noteburst/README.md +++ b/applications/noteburst/README.md @@ -56,4 +56,4 @@ Noteburst is a notebook execution service for the Rubin Science Platform. | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 17addd2744..386432b776 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -84,4 +84,4 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | Whether to configure iptables to block cloud metadata endpoints. This is unnecessary in our environments (they are blocked by cluster configuration) and thus is disabled to reduce complexity. 
| | jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | Start command for labs | | jupyterhub.singleuser.defaultUrl | string | `"/lab"` | Default URL prefix for lab endpoints | -| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | +| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | \ No newline at end of file diff --git a/applications/nublado2/README.md b/applications/nublado2/README.md index 8f4eb07907..80c3a68da9 100644 --- a/applications/nublado2/README.md +++ b/applications/nublado2/README.md @@ -116,4 +116,4 @@ JupyterHub for the Rubin Science Platform | jupyterhub.singleuser.storage.extraVolumes[6].configMap.name | string | `"group"` | | | jupyterhub.singleuser.storage.extraVolumes[6].name | string | `"group"` | | | jupyterhub.singleuser.storage.type | string | `"none"` | | -| network_policy.enabled | bool | `true` | | +| network_policy.enabled | bool | `true` | | \ No newline at end of file diff --git a/applications/obsloctap/README.md b/applications/obsloctap/README.md index c6cc3d3a5a..b50f7ee849 100644 --- a/applications/obsloctap/README.md +++ b/applications/obsloctap/README.md @@ -20,4 +20,4 @@ Publish observing schedule | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the obsloctap image | | image.repository | string | `"ghcr.io/lsst-dm/obsloctap"` | obsloctap image to use | | image.tag | string | The appVersion of the chart | Tag of obsloctap image to use | -| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | \ No newline at end of file diff --git a/applications/ook/README.md b/applications/ook/README.md index 6408638015..b47e1b5223 100644 --- a/applications/ook/README.md +++ b/applications/ook/README.md @@ -37,4 +37,4 @@ Ook is the librarian service for Rubin Observatory. 
Ook indexes documentation co | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/plot-navigator/README.md b/applications/plot-navigator/README.md index 6ca85a93d7..b04d84a00a 100644 --- a/applications/plot-navigator/README.md +++ b/applications/plot-navigator/README.md @@ -19,4 +19,4 @@ Panel-based plot viewer | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.repository | string | `"ghcr.io/lsst-dm/pipetask-plot-navigator"` | plot-navigator image to use | | image.tag | string | `""` | | -| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | \ No newline at end of file diff --git a/applications/portal/README.md b/applications/portal/README.md index 08d4a6cecd..cfaf6957c0 100644 --- a/applications/portal/README.md +++ b/applications/portal/README.md @@ -43,4 +43,4 @@ Rubin Science Platform Portal Aspect | replicaCount | int | `1` | Number of pods to start | | resources | object | `{"limits":{"cpu":2,"memory":"6Gi"}}` | Resource limits and requests. The Portal will use (by default) 93% of container RAM. This is a smallish Portal; tweak it as you need to in instance definitions in Phalanx. | | securityContext | object | `{}` | Security context for the Portal pod | -| tolerations | list | `[]` | Tolerations for the Portal pod | +| tolerations | list | `[]` | Tolerations for the Portal pod | \ No newline at end of file diff --git a/applications/postgres/README.md b/applications/postgres/README.md index 25d2e60e20..cfd13e68d8 100644 --- a/applications/postgres/README.md +++ b/applications/postgres/README.md @@ -17,4 +17,4 @@ Postgres RDBMS for LSP | image.tag | string | The appVersion of the chart | Tag of postgres image to use | | postgresStorageClass | string | `"standard"` | Storage class for postgres volume. Set to appropriate value for your deployment: at GKE, "standard" (if you want SSD, "premium-rwo", but if you want a good database maybe it's better to use a cloud database?), on Rubin Observatory Rancher, "rook-ceph-block", elsewhere probably "standard" | | postgresVolumeSize | string | `"1Gi"` | Volume size for postgres. It can generally be very small | -| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. | +| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. | \ No newline at end of file diff --git a/applications/production-tools/README.md b/applications/production-tools/README.md index cb7fa475cb..9db9624185 100644 --- a/applications/production-tools/README.md +++ b/applications/production-tools/README.md @@ -25,4 +25,4 @@ A collection of utility pages for monitoring data processing. 
| podAnnotations | object | `{}` | Annotations for the production-tools deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the production-tools deployment pod | -| tolerations | list | `[]` | Tolerations for the production-tools deployment pod | +| tolerations | list | `[]` | Tolerations for the production-tools deployment pod | \ No newline at end of file diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index a02536bc9a..31cb6b186d 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -347,4 +347,4 @@ Rubin Observatory's telemetry service. | telegraf-kafka-consumer.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. | | telegraf-kafka-consumer.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods. | | telegraf-kafka-consumer.resources | object | `{}` | Kubernetes resources requests and limits. | -| telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment. | +| telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment. | \ No newline at end of file diff --git a/applications/sasquatch/charts/kafdrop/README.md b/applications/sasquatch/charts/kafdrop/README.md index dea43e6e00..72ffb013aa 100644 --- a/applications/sasquatch/charts/kafdrop/README.md +++ b/applications/sasquatch/charts/kafdrop/README.md @@ -36,4 +36,4 @@ A subchart to deploy the Kafdrop UI for Sasquatch. | server.servlet | object | Defaults to /. | The context path to serve requests on (must end with a /). | | service.annotations | object | `{}` | Service annotations | | service.port | int | `9000` | Service port | -| tolerations | list | `[]` | Tolerations configuration. | +| tolerations | list | `[]` | Tolerations configuration. | \ No newline at end of file diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index c9ff922025..04edecb431 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -66,4 +66,4 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. | | s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. | | s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. | -| s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | +| s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | \ No newline at end of file diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index b895087f09..b72e90981b 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -34,4 +34,4 @@ A subchart to deploy Confluent REST proxy for Sasquatch. | resources.requests.memory | string | `"200Mi"` | Kafka REST proxy memory requests | | schemaregistry.url | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL | | service.port | int | `8082` | Kafka REST proxy service port | -| tolerations | list | `[]` | Tolerations configuration. 
| +| tolerations | list | `[]` | Tolerations configuration. | \ No newline at end of file diff --git a/applications/sasquatch/charts/square-events/README.md b/applications/sasquatch/charts/square-events/README.md index ce28f5c979..96c9f53e2d 100644 --- a/applications/sasquatch/charts/square-events/README.md +++ b/applications/sasquatch/charts/square-events/README.md @@ -6,4 +6,4 @@ Kafka topics and users for SQuaRE Events. | Key | Type | Default | Description | |-----|------|---------|-------------| -| cluster.name | string | `"sasquatch"` | | +| cluster.name | string | `"sasquatch"` | | \ No newline at end of file diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index f6cffe6b8d..b3f18c46ce 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -51,4 +51,4 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | | zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | -| zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. | +| zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. | \ No newline at end of file diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index a8005a8350..9c8021077e 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -30,4 +30,4 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. | | podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods. | | resources | object | `{}` | Kubernetes resources requests and limits. | -| tolerations | list | `[]` | Tolerations for pod assignment. | +| tolerations | list | `[]` | Tolerations for pod assignment. | \ No newline at end of file diff --git a/applications/semaphore/README.md b/applications/semaphore/README.md index ea3233aef9..8df8f5ff2f 100644 --- a/applications/semaphore/README.md +++ b/applications/semaphore/README.md @@ -40,4 +40,4 @@ Semaphore is the user notification and messaging service for the Rubin Science P | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `false` | Specifies whether a service account should be created. 
| | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/sherlock/README.md b/applications/sherlock/README.md index 459c2c462d..c54fc8c7bb 100644 --- a/applications/sherlock/README.md +++ b/applications/sherlock/README.md @@ -31,4 +31,4 @@ Application ingress status and metrics | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the sherlock deployment pod | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | Tolerations for the sherlock deployment pod | +| tolerations | list | `[]` | Tolerations for the sherlock deployment pod | \ No newline at end of file diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 82787a918d..fcb9080470 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -25,4 +25,4 @@ GCP SQL Proxy as a service | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | -| tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | +| tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | \ No newline at end of file diff --git a/applications/squarebot/README.md b/applications/squarebot/README.md index 51b5a0593c..c890062ccb 100644 --- a/applications/squarebot/README.md +++ b/applications/squarebot/README.md @@ -46,4 +46,4 @@ Squarebot feeds events from services like Slack and GitHub into the SQuaRE Event | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/squareone/README.md b/applications/squareone/README.md index 544c9c7c56..98391ce5b4 100644 --- a/applications/squareone/README.md +++ b/applications/squareone/README.md @@ -44,4 +44,4 @@ Squareone is the homepage UI for the Rubin Science Platform. | podAnnotations | object | `{}` | Annotations for squareone pods | | replicaCount | int | `1` | Number of squareone pods to run in the deployment. 
| | resources | object | `{}` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index ab125fd219..7eca13edcd 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -62,4 +62,4 @@ IVOA TAP service | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | | uws.tolerations | list | `[]` | Tolerations for the UWS database pod | -| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | +| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | \ No newline at end of file diff --git a/applications/strimzi-access-operator/README.md b/applications/strimzi-access-operator/README.md index 7b3410f2be..aa6ae9b69a 100644 --- a/applications/strimzi-access-operator/README.md +++ b/applications/strimzi-access-operator/README.md @@ -13,4 +13,4 @@ Strimzi Access Operator | image.tag | string | The appVersion of the chart | Tag of the image | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created. | -| serviceAccount.name | string | `""` | | +| serviceAccount.name | string | `""` | | \ No newline at end of file diff --git a/applications/tap-schema/README.md b/applications/tap-schema/README.md index 4c53f0c3ef..0d651cce04 100644 --- a/applications/tap-schema/README.md +++ b/applications/tap-schema/README.md @@ -20,4 +20,4 @@ The TAP_SCHEMA database | nodeSelector | object | `{}` | Node selector rules for the MySQL pod | | podAnnotations | object | `{}` | Annotations for the MySQL pod | | resources | object | `{}` | Resource limits and requests for the MySQL pod | -| tolerations | list | `[]` | Tolerations for the MySQL pod | +| tolerations | list | `[]` | Tolerations for the MySQL pod | \ No newline at end of file diff --git a/applications/tap/README.md b/applications/tap/README.md index 59c87e5f7b..de764134af 100644 --- a/applications/tap/README.md +++ b/applications/tap/README.md @@ -60,4 +60,4 @@ IVOA TAP service | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | | uws.tolerations | list | `[]` | Tolerations for the UWS database pod | -| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | +| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | \ No newline at end of file diff --git a/applications/telegraf-ds/README.md b/applications/telegraf-ds/README.md index ccd81dc59f..e46dc546ea 100644 --- a/applications/telegraf-ds/README.md +++ b/applications/telegraf-ds/README.md @@ -27,4 +27,4 @@ Kubernetes node telemetry collection service | telegraf-ds.resources.limits.memory | string | `"512Mi"` | | | telegraf-ds.serviceAccount.name | string | `"telegraf-ds"` | | | telegraf-ds.volumes[0].configMap.name | string | `"telegraf-generated-config"` | | -| telegraf-ds.volumes[0].name | string | `"telegraf-generated-config"` | | +| telegraf-ds.volumes[0].name | string | `"telegraf-generated-config"` | | \ No newline at end of file diff --git a/applications/telegraf/README.md 
b/applications/telegraf/README.md index d54785fc49..5c3dae0ff8 100644 --- a/applications/telegraf/README.md +++ b/applications/telegraf/README.md @@ -34,4 +34,4 @@ Application telemetry collection service | telegraf.service.enabled | bool | `false` | | | telegraf.tplVersion | int | `2` | | | telegraf.volumes[0].configMap.name | string | `"telegraf-generated-config"` | | -| telegraf.volumes[0].name | string | `"telegraf-generated-config"` | | +| telegraf.volumes[0].name | string | `"telegraf-generated-config"` | | \ No newline at end of file diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 942f32e22a..5e16c0f22d 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -59,4 +59,4 @@ An API service for managing and rendering parameterized Jupyter notebooks. | serviceAccount.annotations | object | `{}` | Annotations to add to the service account. If CloudSQL is in use, the annotation specifying the Google service account will also be added. | | serviceAccount.create | bool | `false` | Force creation of a service account. Normally, no service account is used or mounted. If CloudSQL is enabled, a service account is always created regardless of this value. | | serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the times-square deployment pod | +| tolerations | list | `[]` | Tolerations for the times-square deployment pod | \ No newline at end of file diff --git a/applications/vault-secrets-operator/README.md b/applications/vault-secrets-operator/README.md index b3e48c2406..9ffebd8f42 100644 --- a/applications/vault-secrets-operator/README.md +++ b/applications/vault-secrets-operator/README.md @@ -11,4 +11,4 @@ | vault-secrets-operator.environmentVars | list | Set `VAULT_TOKEN` and `VAULT_TOKEN_LEASE_DURATION` from secret | Additional environment variables used to configure the operator | | vault-secrets-operator.serviceAccount.createSecret | bool | `false` | Disable creation of a secret for the service account. It shouldn't be needed and it conflicts with the secret we create that contains the credentials for talking to Vault. 
| | vault-secrets-operator.vault.address | string | `"https://vault.lsst.codes"` | URL of the underlying Vault implementation | -| vault-secrets-operator.vault.reconciliationTime | int | `60` | Sync secrets from vault on this cadence | +| vault-secrets-operator.vault.reconciliationTime | int | `60` | Sync secrets from vault on this cadence | \ No newline at end of file diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index f7c9f901cd..985623fe3f 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -64,4 +64,4 @@ Image cutout service complying with IVOA SODA | redis.tolerations | list | `[]` | Tolerations for the Redis pod | | replicaCount | int | `1` | Number of web frontend pods to start | | resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod | -| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | +| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | \ No newline at end of file diff --git a/environments/README.md b/environments/README.md index 2a814ede11..483d6ca230 100644 --- a/environments/README.md +++ b/environments/README.md @@ -53,4 +53,4 @@ | times-square.enabled | bool | `false` | | | vault-secrets-operator.enabled | bool | `false` | | | vaultPathPrefix | string | None, must be set | Prefix for Vault secrets for this environment | -| vo-cutouts.enabled | bool | `false` | | +| vo-cutouts.enabled | bool | `false` | | \ No newline at end of file diff --git a/starters/web-service/README.md b/starters/web-service/README.md index 6e0e4b91e0..4eb9053bb3 100644 --- a/starters/web-service/README.md +++ b/starters/web-service/README.md @@ -24,4 +24,4 @@ Helm starter chart for a new RSP service. | podAnnotations | object | `{}` | Annotations for the deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the deployment pod | -| tolerations | list | `[]` | Tolerations for the deployment pod | +| tolerations | list | `[]` | Tolerations for the deployment pod | \ No newline at end of file From a42b8c6862a51c3478c5a763364ccd671876fca9 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 16:32:02 -0700 Subject: [PATCH 258/308] Change mobu disableSlackAlerts to slackAlerts Reverse the sense of the old mobu configuration option disableSlackAlerts to be slackAlerts and enabled by default, since this works better with conditional secrets. 
--- applications/alert-stream-broker/README.md | 2 +- .../alert-stream-broker/charts/alert-database/README.md | 2 +- .../alert-stream-broker/charts/alert-stream-broker/README.md | 2 +- .../charts/alert-stream-schema-registry/README.md | 2 +- .../charts/alert-stream-simulator/README.md | 2 +- applications/argo-workflows/README.md | 2 +- applications/argocd/README.md | 2 +- applications/cachemachine/README.md | 2 +- applications/cert-manager/README.md | 2 +- applications/datalinker/README.md | 2 +- applications/exposurelog/README.md | 2 +- applications/gafaelfawr/README.md | 2 +- applications/giftless/README.md | 2 +- applications/hips/README.md | 2 +- applications/ingress-nginx/README.md | 2 +- applications/kubernetes-replicator/README.md | 2 +- applications/linters/README.md | 2 +- applications/livetap/README.md | 2 +- applications/mobu/README.md | 4 ++-- applications/mobu/templates/deployment.yaml | 2 +- applications/mobu/templates/vault-secrets.yaml | 2 ++ applications/mobu/values-minikube.yaml | 2 +- applications/mobu/values-usdfdev.yaml | 2 +- applications/mobu/values-usdfprod.yaml | 2 +- applications/mobu/values.yaml | 4 ++-- applications/moneypenny/README.md | 2 +- applications/narrativelog/README.md | 2 +- applications/noteburst/README.md | 2 +- applications/nublado/README.md | 2 +- applications/nublado2/README.md | 2 +- applications/obsloctap/README.md | 2 +- applications/ook/README.md | 2 +- applications/plot-navigator/README.md | 2 +- applications/portal/README.md | 2 +- applications/postgres/README.md | 2 +- applications/production-tools/README.md | 2 +- applications/sasquatch/README.md | 2 +- applications/sasquatch/charts/kafdrop/README.md | 2 +- applications/sasquatch/charts/kafka-connect-manager/README.md | 2 +- applications/sasquatch/charts/rest-proxy/README.md | 2 +- applications/sasquatch/charts/square-events/README.md | 2 +- applications/sasquatch/charts/strimzi-kafka/README.md | 2 +- .../sasquatch/charts/telegraf-kafka-consumer/README.md | 2 +- applications/semaphore/README.md | 2 +- applications/sherlock/README.md | 2 +- applications/sqlproxy-cross-project/README.md | 2 +- applications/squarebot/README.md | 2 +- applications/squareone/README.md | 2 +- applications/ssotap/README.md | 2 +- applications/strimzi-access-operator/README.md | 2 +- applications/tap-schema/README.md | 2 +- applications/tap/README.md | 2 +- applications/telegraf-ds/README.md | 2 +- applications/telegraf/README.md | 2 +- applications/times-square/README.md | 2 +- applications/vault-secrets-operator/README.md | 2 +- applications/vo-cutouts/README.md | 2 +- environments/README.md | 2 +- starters/web-service/README.md | 2 +- 59 files changed, 62 insertions(+), 60 deletions(-) diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index e91597a416..1a82e13b0c 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -107,4 +107,4 @@ Alert transmission to community brokers | alert-stream-simulator.replayTopicReplicas | int | `2` | | | alert-stream-simulator.schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | alert-stream-simulator.staticTopicName | string | `"alerts-static"` | Name of the topic which will hold a static single visit of sample data. 
| -| alert-stream-simulator.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | +| alert-stream-simulator.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-database/README.md b/applications/alert-stream-broker/charts/alert-database/README.md index 30b66902e0..7343a81b47 100644 --- a/applications/alert-stream-broker/charts/alert-database/README.md +++ b/applications/alert-stream-broker/charts/alert-database/README.md @@ -38,4 +38,4 @@ Archival database of alerts sent through the alert stream. | server.serviceAccountName | string | `"alertdb-reader"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database server. | | storage.gcp.alertBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with alert data | | storage.gcp.project | string | `""` | Name of a GCP project that has a bucket for database storage | -| storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data | +| storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data | \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md index 394f840d4c..5ed52e1292 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md @@ -38,4 +38,4 @@ Kafka broker cluster for distributing alerts | vaultSecretsPath | string | `""` | Path to the secret resource in Vault | | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | -| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | +| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md index cc6ac85074..1cc74892f7 100644 --- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md @@ -15,4 +15,4 @@ Confluent Schema Registry for managing schema versions for the Alert Stream | schemaSync.image.tag | string | `"tickets-DM-32743"` | Version of the container to use | | schemaSync.subject | string | `"alert-packet"` | Subject name to use when inserting data into the Schema Registry | | schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry to store data. | -| strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | +| strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. 
See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | \ No newline at end of file diff --git a/applications/alert-stream-broker/charts/alert-stream-simulator/README.md b/applications/alert-stream-broker/charts/alert-stream-simulator/README.md index e0833c4138..724924bad9 100644 --- a/applications/alert-stream-broker/charts/alert-stream-simulator/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-simulator/README.md @@ -22,4 +22,4 @@ Producer which repeatedly publishes a static set of alerts into a Kafka topic | replayTopicReplicas | int | `2` | | | schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. | | staticTopicName | string | `"alerts-static"` | Name of the topic which will hold a static single visit of sample data. | -| strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | +| strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions | \ No newline at end of file diff --git a/applications/argo-workflows/README.md b/applications/argo-workflows/README.md index a8d76e16da..7daf31f859 100644 --- a/applications/argo-workflows/README.md +++ b/applications/argo-workflows/README.md @@ -22,4 +22,4 @@ Kubernetes workflow engine | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | | | ingress.annotations."nginx.ingress.kubernetes.io/use-regex" | string | `"true"` | | -| ingress.scopes[0] | string | `"exec:admin"` | | +| ingress.scopes[0] | string | `"exec:admin"` | | \ No newline at end of file diff --git a/applications/argocd/README.md b/applications/argocd/README.md index 4c2f167dc5..42a1636626 100644 --- a/applications/argocd/README.md +++ b/applications/argocd/README.md @@ -30,4 +30,4 @@ Kubernetes application manager | argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | Type of path expression for Argo CD ingress | | argo-cd.server.ingress.paths | list | `["/argo-cd(/|$)(.*)"]` | Paths to route to Argo CD | | argo-cd.server.metrics.enabled | bool | `true` | Enable server metrics service | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | \ No newline at end of file diff --git a/applications/cachemachine/README.md b/applications/cachemachine/README.md index 1ed392e993..1f2dda7790 100644 --- a/applications/cachemachine/README.md +++ b/applications/cachemachine/README.md @@ -28,4 +28,4 @@ JupyterLab image prepuller | serviceAccount | object | `{"annotations":{},"name":""}` | Secret names to use for all Docker pulls | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod | +| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod | \ No newline at end of file diff --git a/applications/cert-manager/README.md b/applications/cert-manager/README.md index e8155ae527..84d991ca04 100644 --- a/applications/cert-manager/README.md +++ b/applications/cert-manager/README.md @@ -20,4 +20,4 @@ TLS certificate manager | config.email | string | 
sqre-admin | Contact email address registered with Let's Encrypt | | config.route53.awsAccessKeyId | string | None, must be set if `createIssuer` is true | AWS access key ID for Route 53 (must match `aws-secret-access-key` in Vault secret referenced by `config.vaultSecretPath`) | | config.route53.hostedZone | string | None, must be set if `createIssuer` is true | Route 53 hosted zone in which to create challenge records | -| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | \ No newline at end of file diff --git a/applications/datalinker/README.md b/applications/datalinker/README.md index a3cfbd378f..c3b14afad5 100644 --- a/applications/datalinker/README.md +++ b/applications/datalinker/README.md @@ -30,4 +30,4 @@ IVOA DataLink-based service and data discovery | podAnnotations | object | `{}` | Annotations for the datalinker deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the datalinker deployment pod | -| tolerations | list | `[]` | Tolerations for the datalinker deployment pod | +| tolerations | list | `[]` | Tolerations for the datalinker deployment pod | \ No newline at end of file diff --git a/applications/exposurelog/README.md b/applications/exposurelog/README.md index b3e08c1aba..634f405a8d 100644 --- a/applications/exposurelog/README.md +++ b/applications/exposurelog/README.md @@ -47,4 +47,4 @@ Log messages related to an exposure | replicaCount | int | `1` | How many exposurelog pods to run | | resources | object | `{}` | Resource limits and requests for the exposurelog pod | | securityContext | object | `{}` | Security context for the exposurelog deployment | -| tolerations | list | `[]` | Tolerations for the exposurelog pod | +| tolerations | list | `[]` | Tolerations for the exposurelog pod | \ No newline at end of file diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 63c2303239..2b9640fcc7 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -110,4 +110,4 @@ Authentication and identity system | redis.tolerations | list | `[]` | Tolerations for the Redis pod | | replicaCount | int | `1` | Number of web frontend pods to start | | resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod | -| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | +| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod | \ No newline at end of file diff --git a/applications/giftless/README.md b/applications/giftless/README.md index 4979510629..fcec2931a7 100644 --- a/applications/giftless/README.md +++ b/applications/giftless/README.md @@ -30,4 +30,4 @@ Git-LFS server with GCS S3 backend, with Rubin-specific auth | server.debug | bool | `false` | Turn on debugging mode | | server.processes | int | `2` | Number of processes for server | | server.threads | int | `2` | Number of threads per process | -| tolerations | list | `[]` | Tolerations for the giftless frontend pod | +| tolerations | list | `[]` | Tolerations for the giftless frontend pod | \ No newline at end of file diff --git a/applications/hips/README.md b/applications/hips/README.md index 01f98617a6..54712cc322 100644 --- a/applications/hips/README.md +++ b/applications/hips/README.md @@ -29,4 +29,4 @@ HiPS tile server backed by Google Cloud Storage | podAnnotations | object | `{}` 
| Annotations for the hips deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the hips deployment pod | -| tolerations | list | `[]` | Tolerations for the hips deployment pod | +| tolerations | list | `[]` | Tolerations for the hips deployment pod | \ No newline at end of file diff --git a/applications/ingress-nginx/README.md b/applications/ingress-nginx/README.md index 94e146d6cd..78980b1244 100644 --- a/applications/ingress-nginx/README.md +++ b/applications/ingress-nginx/README.md @@ -21,4 +21,4 @@ Ingress controller | ingress-nginx.controller.metrics.enabled | bool | `true` | Enable metrics reporting via Prometheus | | ingress-nginx.controller.podLabels | object | See `values.yaml` | Add labels used by `NetworkPolicy` objects to restrict access to the ingress and thus ensure that auth subrequest handlers run | | ingress-nginx.controller.service.externalTrafficPolicy | string | `"Local"` | Force traffic routing policy to Local so that the external IP in `X-Forwarded-For` will be correct | -| vaultCertificate.enabled | bool | `false` | Whether to store ingress TLS certificate via vault-secrets-operator. Typically "squareone" owns it instead in an RSP. | +| vaultCertificate.enabled | bool | `false` | Whether to store ingress TLS certificate via vault-secrets-operator. Typically "squareone" owns it instead in an RSP. | \ No newline at end of file diff --git a/applications/kubernetes-replicator/README.md b/applications/kubernetes-replicator/README.md index 1ca6da402d..5ce44bec30 100644 --- a/applications/kubernetes-replicator/README.md +++ b/applications/kubernetes-replicator/README.md @@ -25,4 +25,4 @@ Kafka secret replicator | kubernetes-replicator.serviceAccount.privileges[0].apiGroups[1] | string | `"apps"` | | | kubernetes-replicator.serviceAccount.privileges[0].apiGroups[2] | string | `"extensions"` | | | kubernetes-replicator.serviceAccount.privileges[0].resources[0] | string | `"secrets"` | | -| kubernetes-replicator.serviceAccount.privileges[0].resources[1] | string | `"configmaps"` | | +| kubernetes-replicator.serviceAccount.privileges[0].resources[1] | string | `"configmaps"` | | \ No newline at end of file diff --git a/applications/linters/README.md b/applications/linters/README.md index facd3c1375..ff3b7459c5 100644 --- a/applications/linters/README.md +++ b/applications/linters/README.md @@ -24,4 +24,4 @@ Linters running for operational reasons | podAnnotations | object | `{}` | Annotations for the linter pod | | replicaCount | int | `1` | Number of web frontend pods to start | | resources | object | `{}` | Resource limits and requests for the linter pod | -| tolerations | list | `[]` | Tolerations for the linter pod | +| tolerations | list | `[]` | Tolerations for the linter pod | \ No newline at end of file diff --git a/applications/livetap/README.md b/applications/livetap/README.md index ab125fd219..7eca13edcd 100644 --- a/applications/livetap/README.md +++ b/applications/livetap/README.md @@ -62,4 +62,4 @@ IVOA TAP service | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | | uws.tolerations | list | `[]` | Tolerations for the UWS database pod | -| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | +| vaultSecretsPath | string | None, must be set | Path to the Vault secret 
(`secret/k8s_operator//tap`, for example) | \ No newline at end of file diff --git a/applications/mobu/README.md b/applications/mobu/README.md index 582048e66a..51d0506ee1 100644 --- a/applications/mobu/README.md +++ b/applications/mobu/README.md @@ -13,8 +13,8 @@ Continuous integration testing | affinity | object | `{}` | Affinity rules for the mobu frontend pod | | config.autostart | list | `[]` | Autostart specification. Must be a list of mobu flock specifications. Each flock listed will be automatically started when mobu is started. | | config.debug | bool | `false` | If set to true, include the output from all flocks in the main mobu log and disable structured JSON logging. | -| config.disableSlackAlerts | bool | `false` | If set to true, do not configure mobu to send alerts to Slack. | | config.pathPrefix | string | `"/mobu"` | Prefix for mobu's API routes. | +| config.slackAlerts | bool | `true` | Whether to send alerts and status to Slack. | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | @@ -27,4 +27,4 @@ Continuous integration testing | nodeSelector | object | `{}` | Node selector rules for the mobu frontend pod | | podAnnotations | object | `{}` | Annotations for the mobu frontend pod | | resources | object | `{}` | Resource limits and requests for the mobu frontend pod | -| tolerations | list | `[]` | Tolerations for the mobu frontend pod | +| tolerations | list | `[]` | Tolerations for the mobu frontend pod | \ No newline at end of file diff --git a/applications/mobu/templates/deployment.yaml b/applications/mobu/templates/deployment.yaml index 907c4ff097..d80bb97975 100644 --- a/applications/mobu/templates/deployment.yaml +++ b/applications/mobu/templates/deployment.yaml @@ -24,7 +24,7 @@ spec: containers: - name: {{ .Chart.Name }} env: - {{- if (not .Values.config.disableSlackAlerts) }} + {{- if .Values.config.slackAlerts }} - name: "ALERT_HOOK" valueFrom: secretKeyRef: diff --git a/applications/mobu/templates/vault-secrets.yaml b/applications/mobu/templates/vault-secrets.yaml index 050d8fbadc..b5dfaabaee 100644 --- a/applications/mobu/templates/vault-secrets.yaml +++ b/applications/mobu/templates/vault-secrets.yaml @@ -1,3 +1,4 @@ +{{- if .Values.config.slackAlerts }} apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: @@ -7,3 +8,4 @@ metadata: spec: path: "{{ .Values.global.vaultSecretsPath }}/mobu" type: "Opaque" +{{- end }} diff --git a/applications/mobu/values-minikube.yaml b/applications/mobu/values-minikube.yaml index 6f6e9f20c9..357c0bd2a3 100644 --- a/applications/mobu/values-minikube.yaml +++ b/applications/mobu/values-minikube.yaml @@ -1,2 +1,2 @@ config: - disableSlackAlerts: true + slackAlerts: false diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml index 6f6e9f20c9..357c0bd2a3 100644 --- a/applications/mobu/values-usdfdev.yaml +++ b/applications/mobu/values-usdfdev.yaml @@ -1,2 +1,2 @@ config: - disableSlackAlerts: true + slackAlerts: false diff --git a/applications/mobu/values-usdfprod.yaml b/applications/mobu/values-usdfprod.yaml index 6f6e9f20c9..357c0bd2a3 100644 --- a/applications/mobu/values-usdfprod.yaml +++ b/applications/mobu/values-usdfprod.yaml @@ -1,2 +1,2 @@ config: - disableSlackAlerts: true + slackAlerts: false diff --git a/applications/mobu/values.yaml b/applications/mobu/values.yaml index 
241d73d661..729c3d979b 100644 --- a/applications/mobu/values.yaml +++ b/applications/mobu/values.yaml @@ -30,8 +30,8 @@ config: # and disable structured JSON logging. debug: false - # -- If set to true, do not configure mobu to send alerts to Slack. - disableSlackAlerts: false + # -- Whether to send alerts and status to Slack. + slackAlerts: true # -- Prefix for mobu's API routes. pathPrefix: "/mobu" diff --git a/applications/moneypenny/README.md b/applications/moneypenny/README.md index 1cfedae207..9bd5518680 100644 --- a/applications/moneypenny/README.md +++ b/applications/moneypenny/README.md @@ -31,4 +31,4 @@ User provisioning actions | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod | | serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | +| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | \ No newline at end of file diff --git a/applications/narrativelog/README.md b/applications/narrativelog/README.md index 281d4e69db..9023cf08d5 100644 --- a/applications/narrativelog/README.md +++ b/applications/narrativelog/README.md @@ -38,4 +38,4 @@ Narrative log service | replicaCount | int | `1` | Number of narrativelog replicas to run | | resources | object | `{}` | Resource limits and requests for the narrativelog pod | | securityContext | object | `{}` | Security context for the narrativelog deployment | -| tolerations | list | `[]` | Tolerations for the narrativelog pod | +| tolerations | list | `[]` | Tolerations for the narrativelog pod | \ No newline at end of file diff --git a/applications/noteburst/README.md b/applications/noteburst/README.md index 2d56c582c5..58af0aee95 100644 --- a/applications/noteburst/README.md +++ b/applications/noteburst/README.md @@ -56,4 +56,4 @@ Noteburst is a notebook execution service for the Rubin Science Platform. | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/nublado/README.md b/applications/nublado/README.md index eb7768219e..a005378364 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -83,4 +83,4 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | Whether to configure iptables to block cloud metadata endpoints. This is unnecessary in our environments (they are blocked by cluster configuration) and thus is disabled to reduce complexity. 
| | jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | Start command for labs | | jupyterhub.singleuser.defaultUrl | string | `"/lab"` | Default URL prefix for lab endpoints | -| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | +| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) | \ No newline at end of file diff --git a/applications/nublado2/README.md b/applications/nublado2/README.md index 8f4ebdf915..d27ffb4242 100644 --- a/applications/nublado2/README.md +++ b/applications/nublado2/README.md @@ -115,4 +115,4 @@ JupyterHub for the Rubin Science Platform | jupyterhub.singleuser.storage.extraVolumes[6].configMap.name | string | `"group"` | | | jupyterhub.singleuser.storage.extraVolumes[6].name | string | `"group"` | | | jupyterhub.singleuser.storage.type | string | `"none"` | | -| network_policy.enabled | bool | `true` | | +| network_policy.enabled | bool | `true` | | \ No newline at end of file diff --git a/applications/obsloctap/README.md b/applications/obsloctap/README.md index c6cc3d3a5a..b50f7ee849 100644 --- a/applications/obsloctap/README.md +++ b/applications/obsloctap/README.md @@ -20,4 +20,4 @@ Publish observing schedule | image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the obsloctap image | | image.repository | string | `"ghcr.io/lsst-dm/obsloctap"` | obsloctap image to use | | image.tag | string | The appVersion of the chart | Tag of obsloctap image to use | -| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | \ No newline at end of file diff --git a/applications/ook/README.md b/applications/ook/README.md index 6408638015..b47e1b5223 100644 --- a/applications/ook/README.md +++ b/applications/ook/README.md @@ -37,4 +37,4 @@ Ook is the librarian service for Rubin Observatory. 
Ook indexes documentation co | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/plot-navigator/README.md b/applications/plot-navigator/README.md index 6ca85a93d7..b04d84a00a 100644 --- a/applications/plot-navigator/README.md +++ b/applications/plot-navigator/README.md @@ -19,4 +19,4 @@ Panel-based plot viewer | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | | image.repository | string | `"ghcr.io/lsst-dm/pipetask-plot-navigator"` | plot-navigator image to use | | image.tag | string | `""` | | -| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| ingress.annotations | object | `{}` | Additional annotations to add to the ingress | \ No newline at end of file diff --git a/applications/portal/README.md b/applications/portal/README.md index 08d4a6cecd..cfaf6957c0 100644 --- a/applications/portal/README.md +++ b/applications/portal/README.md @@ -43,4 +43,4 @@ Rubin Science Platform Portal Aspect | replicaCount | int | `1` | Number of pods to start | | resources | object | `{"limits":{"cpu":2,"memory":"6Gi"}}` | Resource limits and requests. The Portal will use (by default) 93% of container RAM. This is a smallish Portal; tweak it as you need to in instance definitions in Phalanx. | | securityContext | object | `{}` | Security context for the Portal pod | -| tolerations | list | `[]` | Tolerations for the Portal pod | +| tolerations | list | `[]` | Tolerations for the Portal pod | \ No newline at end of file diff --git a/applications/postgres/README.md b/applications/postgres/README.md index 25d2e60e20..cfd13e68d8 100644 --- a/applications/postgres/README.md +++ b/applications/postgres/README.md @@ -17,4 +17,4 @@ Postgres RDBMS for LSP | image.tag | string | The appVersion of the chart | Tag of postgres image to use | | postgresStorageClass | string | `"standard"` | Storage class for postgres volume. Set to appropriate value for your deployment: at GKE, "standard" (if you want SSD, "premium-rwo", but if you want a good database maybe it's better to use a cloud database?), on Rubin Observatory Rancher, "rook-ceph-block", elsewhere probably "standard" | | postgresVolumeSize | string | `"1Gi"` | Volume size for postgres. It can generally be very small | -| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. | +| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. | \ No newline at end of file diff --git a/applications/production-tools/README.md b/applications/production-tools/README.md index cb7fa475cb..9db9624185 100644 --- a/applications/production-tools/README.md +++ b/applications/production-tools/README.md @@ -25,4 +25,4 @@ A collection of utility pages for monitoring data processing. 
| podAnnotations | object | `{}` | Annotations for the production-tools deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the production-tools deployment pod | -| tolerations | list | `[]` | Tolerations for the production-tools deployment pod | +| tolerations | list | `[]` | Tolerations for the production-tools deployment pod | \ No newline at end of file diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index a02536bc9a..31cb6b186d 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -347,4 +347,4 @@ Rubin Observatory's telemetry service. | telegraf-kafka-consumer.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. | | telegraf-kafka-consumer.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods. | | telegraf-kafka-consumer.resources | object | `{}` | Kubernetes resources requests and limits. | -| telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment. | +| telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment. | \ No newline at end of file diff --git a/applications/sasquatch/charts/kafdrop/README.md b/applications/sasquatch/charts/kafdrop/README.md index dea43e6e00..72ffb013aa 100644 --- a/applications/sasquatch/charts/kafdrop/README.md +++ b/applications/sasquatch/charts/kafdrop/README.md @@ -36,4 +36,4 @@ A subchart to deploy the Kafdrop UI for Sasquatch. | server.servlet | object | Defaults to /. | The context path to serve requests on (must end with a /). | | service.annotations | object | `{}` | Service annotations | | service.port | int | `9000` | Service port | -| tolerations | list | `[]` | Tolerations configuration. | +| tolerations | list | `[]` | Tolerations configuration. | \ No newline at end of file diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md index c9ff922025..04edecb431 100644 --- a/applications/sasquatch/charts/kafka-connect-manager/README.md +++ b/applications/sasquatch/charts/kafka-connect-manager/README.md @@ -66,4 +66,4 @@ A subchart to deploy the Kafka connectors used by Sasquatch. | s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. | | s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. | | s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. | -| s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | +| s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. | \ No newline at end of file diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md index b895087f09..b72e90981b 100644 --- a/applications/sasquatch/charts/rest-proxy/README.md +++ b/applications/sasquatch/charts/rest-proxy/README.md @@ -34,4 +34,4 @@ A subchart to deploy Confluent REST proxy for Sasquatch. | resources.requests.memory | string | `"200Mi"` | Kafka REST proxy memory requests | | schemaregistry.url | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL | | service.port | int | `8082` | Kafka REST proxy service port | -| tolerations | list | `[]` | Tolerations configuration. 
| +| tolerations | list | `[]` | Tolerations configuration. | \ No newline at end of file diff --git a/applications/sasquatch/charts/square-events/README.md b/applications/sasquatch/charts/square-events/README.md index ce28f5c979..96c9f53e2d 100644 --- a/applications/sasquatch/charts/square-events/README.md +++ b/applications/sasquatch/charts/square-events/README.md @@ -6,4 +6,4 @@ Kafka topics and users for SQuaRE Events. | Key | Type | Default | Description | |-----|------|---------|-------------| -| cluster.name | string | `"sasquatch"` | | +| cluster.name | string | `"sasquatch"` | | \ No newline at end of file diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index f6cffe6b8d..b3f18c46ce 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -51,4 +51,4 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. | | zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. | | zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. | -| zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. | +| zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. | \ No newline at end of file diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index a8005a8350..9c8021077e 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -30,4 +30,4 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. | | podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods. | | resources | object | `{}` | Kubernetes resources requests and limits. | -| tolerations | list | `[]` | Tolerations for pod assignment. | +| tolerations | list | `[]` | Tolerations for pod assignment. | \ No newline at end of file diff --git a/applications/semaphore/README.md b/applications/semaphore/README.md index ea3233aef9..8df8f5ff2f 100644 --- a/applications/semaphore/README.md +++ b/applications/semaphore/README.md @@ -40,4 +40,4 @@ Semaphore is the user notification and messaging service for the Rubin Science P | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `false` | Specifies whether a service account should be created. 
| | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/sherlock/README.md b/applications/sherlock/README.md index 459c2c462d..c54fc8c7bb 100644 --- a/applications/sherlock/README.md +++ b/applications/sherlock/README.md @@ -31,4 +31,4 @@ Application ingress status and metrics | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the sherlock deployment pod | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | Tolerations for the sherlock deployment pod | +| tolerations | list | `[]` | Tolerations for the sherlock deployment pod | \ No newline at end of file diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 82787a918d..fcb9080470 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -25,4 +25,4 @@ GCP SQL Proxy as a service | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | | replicaCount | int | `1` | Number of pods to start | | resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod | -| tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | +| tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod | \ No newline at end of file diff --git a/applications/squarebot/README.md b/applications/squarebot/README.md index 51b5a0593c..c890062ccb 100644 --- a/applications/squarebot/README.md +++ b/applications/squarebot/README.md @@ -46,4 +46,4 @@ Squarebot feeds events from services like Slack and GitHub into the SQuaRE Event | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created | | serviceAccount.name | string | `""` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/squareone/README.md b/applications/squareone/README.md index 544c9c7c56..98391ce5b4 100644 --- a/applications/squareone/README.md +++ b/applications/squareone/README.md @@ -44,4 +44,4 @@ Squareone is the homepage UI for the Rubin Science Platform. | podAnnotations | object | `{}` | Annotations for squareone pods | | replicaCount | int | `1` | Number of squareone pods to run in the deployment. 
| | resources | object | `{}` | | -| tolerations | list | `[]` | | +| tolerations | list | `[]` | | \ No newline at end of file diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index ab125fd219..7eca13edcd 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -62,4 +62,4 @@ IVOA TAP service | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | | uws.tolerations | list | `[]` | Tolerations for the UWS database pod | -| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | +| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | \ No newline at end of file diff --git a/applications/strimzi-access-operator/README.md b/applications/strimzi-access-operator/README.md index 7b3410f2be..aa6ae9b69a 100644 --- a/applications/strimzi-access-operator/README.md +++ b/applications/strimzi-access-operator/README.md @@ -13,4 +13,4 @@ Strimzi Access Operator | image.tag | string | The appVersion of the chart | Tag of the image | | serviceAccount.annotations | object | `{}` | Annotations to add to the service account | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created. | -| serviceAccount.name | string | `""` | | +| serviceAccount.name | string | `""` | | \ No newline at end of file diff --git a/applications/tap-schema/README.md b/applications/tap-schema/README.md index 4c53f0c3ef..0d651cce04 100644 --- a/applications/tap-schema/README.md +++ b/applications/tap-schema/README.md @@ -20,4 +20,4 @@ The TAP_SCHEMA database | nodeSelector | object | `{}` | Node selector rules for the MySQL pod | | podAnnotations | object | `{}` | Annotations for the MySQL pod | | resources | object | `{}` | Resource limits and requests for the MySQL pod | -| tolerations | list | `[]` | Tolerations for the MySQL pod | +| tolerations | list | `[]` | Tolerations for the MySQL pod | \ No newline at end of file diff --git a/applications/tap/README.md b/applications/tap/README.md index 59c87e5f7b..de764134af 100644 --- a/applications/tap/README.md +++ b/applications/tap/README.md @@ -60,4 +60,4 @@ IVOA TAP service | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{}` | Resource limits and requests for the UWS database pod | | uws.tolerations | list | `[]` | Tolerations for the UWS database pod | -| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | +| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) | \ No newline at end of file diff --git a/applications/telegraf-ds/README.md b/applications/telegraf-ds/README.md index ccd81dc59f..e46dc546ea 100644 --- a/applications/telegraf-ds/README.md +++ b/applications/telegraf-ds/README.md @@ -27,4 +27,4 @@ Kubernetes node telemetry collection service | telegraf-ds.resources.limits.memory | string | `"512Mi"` | | | telegraf-ds.serviceAccount.name | string | `"telegraf-ds"` | | | telegraf-ds.volumes[0].configMap.name | string | `"telegraf-generated-config"` | | -| telegraf-ds.volumes[0].name | string | `"telegraf-generated-config"` | | +| telegraf-ds.volumes[0].name | string | `"telegraf-generated-config"` | | \ No newline at end of file diff --git a/applications/telegraf/README.md 
b/applications/telegraf/README.md index d54785fc49..5c3dae0ff8 100644 --- a/applications/telegraf/README.md +++ b/applications/telegraf/README.md @@ -34,4 +34,4 @@ Application telemetry collection service | telegraf.service.enabled | bool | `false` | | | telegraf.tplVersion | int | `2` | | | telegraf.volumes[0].configMap.name | string | `"telegraf-generated-config"` | | -| telegraf.volumes[0].name | string | `"telegraf-generated-config"` | | +| telegraf.volumes[0].name | string | `"telegraf-generated-config"` | | \ No newline at end of file diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 942f32e22a..5e16c0f22d 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -59,4 +59,4 @@ An API service for managing and rendering parameterized Jupyter notebooks. | serviceAccount.annotations | object | `{}` | Annotations to add to the service account. If CloudSQL is in use, the annotation specifying the Google service account will also be added. | | serviceAccount.create | bool | `false` | Force creation of a service account. Normally, no service account is used or mounted. If CloudSQL is enabled, a service account is always created regardless of this value. | | serviceAccount.name | string | Name based on the fullname template | Name of the service account to use | -| tolerations | list | `[]` | Tolerations for the times-square deployment pod | +| tolerations | list | `[]` | Tolerations for the times-square deployment pod | \ No newline at end of file diff --git a/applications/vault-secrets-operator/README.md b/applications/vault-secrets-operator/README.md index b3e48c2406..9ffebd8f42 100644 --- a/applications/vault-secrets-operator/README.md +++ b/applications/vault-secrets-operator/README.md @@ -11,4 +11,4 @@ | vault-secrets-operator.environmentVars | list | Set `VAULT_TOKEN` and `VAULT_TOKEN_LEASE_DURATION` from secret | Additional environment variables used to configure the operator | | vault-secrets-operator.serviceAccount.createSecret | bool | `false` | Disable creation of a secret for the service account. It shouldn't be needed and it conflicts with the secret we create that contains the credentials for talking to Vault. 
| | vault-secrets-operator.vault.address | string | `"https://vault.lsst.codes"` | URL of the underlying Vault implementation | -| vault-secrets-operator.vault.reconciliationTime | int | `60` | Sync secrets from vault on this cadence | +| vault-secrets-operator.vault.reconciliationTime | int | `60` | Sync secrets from vault on this cadence | \ No newline at end of file diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index f7c9f901cd..985623fe3f 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -64,4 +64,4 @@ Image cutout service complying with IVOA SODA | redis.tolerations | list | `[]` | Tolerations for the Redis pod | | replicaCount | int | `1` | Number of web frontend pods to start | | resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod | -| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | +| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod | \ No newline at end of file diff --git a/environments/README.md b/environments/README.md index 2a814ede11..483d6ca230 100644 --- a/environments/README.md +++ b/environments/README.md @@ -53,4 +53,4 @@ | times-square.enabled | bool | `false` | | | vault-secrets-operator.enabled | bool | `false` | | | vaultPathPrefix | string | None, must be set | Prefix for Vault secrets for this environment | -| vo-cutouts.enabled | bool | `false` | | +| vo-cutouts.enabled | bool | `false` | | \ No newline at end of file diff --git a/starters/web-service/README.md b/starters/web-service/README.md index 6e0e4b91e0..4eb9053bb3 100644 --- a/starters/web-service/README.md +++ b/starters/web-service/README.md @@ -24,4 +24,4 @@ Helm starter chart for a new RSP service. | podAnnotations | object | `{}` | Annotations for the deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | `{}` | Resource limits and requests for the deployment pod | -| tolerations | list | `[]` | Tolerations for the deployment pod | +| tolerations | list | `[]` | Tolerations for the deployment pod | \ No newline at end of file From b362e6dc1d535b65ca41a6bd500c88f987247c4d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 26 Jul 2023 12:12:50 -0700 Subject: [PATCH 259/308] Bump Gafaelfawr to 9.3.0 No significant changes. 
--- applications/gafaelfawr/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index 8e0e2c3cb6..49637f1e3c 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,7 +5,7 @@ description: Authentication and identity system home: https://gafaelfawr.lsst.io/ sources: - https://github.com/lsst-sqre/gafaelfawr -appVersion: 9.2.2 +appVersion: 9.3.0 dependencies: - name: redis From cfdbbcfd610c03b023943c35adb01288a7e67af8 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 26 Jul 2023 13:23:23 -0700 Subject: [PATCH 260/308] rra rbac --- applications/argocd/values-usdfdev.yaml | 1 + applications/argocd/values-usdfprod.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index 94ae11a5f6..b4a5b3543d 100644 --- a/applications/argocd/values-usdfdev.yaml +++ b/applications/argocd/values-usdfdev.yaml @@ -53,6 +53,7 @@ argo-cd: g, smart@slac.stanford.edu, role:admin g, omullan@slac.stanford.edu, role:admin g, mreuter@slac.stanford.edu, role:admin + g, rra@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index dd80d9e706..01ad92dca1 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -52,6 +52,7 @@ argo-cd: g, smart@slac.stanford.edu, role:admin g, omullan@slac.stanford.edu, role:admin g, mreuter@slac.stanford.edu, role:admin + g, rra@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From 3225bef649c32d89d445f16003567d7e5ad73a35 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 25 Jul 2023 13:46:37 -0400 Subject: [PATCH 261/308] Switch to exec:notebook scope for noteburst This will let me actually run notebooks on the USDF; will revisit whether this should go back to exec:admin before shipping. --- applications/noteburst/templates/ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/noteburst/templates/ingress.yaml b/applications/noteburst/templates/ingress.yaml index 2fef313df5..2072a48326 100644 --- a/applications/noteburst/templates/ingress.yaml +++ b/applications/noteburst/templates/ingress.yaml @@ -8,7 +8,7 @@ config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: all: - - "exec:admin" + - "exec:notebook" loginRedirect: true template: metadata: From a7ded7c9766d4002098d40e365e4f26795551e89 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 25 Jul 2023 14:06:32 -0400 Subject: [PATCH 262/308] Add exec:notebook to Times Square service token Noteburst is running with exec:notebook scope on its ingress in usdf-dev. --- applications/times-square/templates/gafaelfawrtoken.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/times-square/templates/gafaelfawrtoken.yaml b/applications/times-square/templates/gafaelfawrtoken.yaml index f173ea4fa2..0971a2dcf1 100644 --- a/applications/times-square/templates/gafaelfawrtoken.yaml +++ b/applications/times-square/templates/gafaelfawrtoken.yaml @@ -9,3 +9,4 @@ spec: scopes: - "admin:token" - "exec:admin" + - "exec:notebook" From ab3000bd5069d6c72a8d4d0d41ad1df34aba32a3 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Tue, 25 Jul 2023 14:18:38 -0400 Subject: [PATCH 263/308] Switch Times Square ingress to exec:notebook Same as Noteburst. 
--- applications/times-square/templates/ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/times-square/templates/ingress.yaml b/applications/times-square/templates/ingress.yaml index 8fd58c6eab..2ff6a6c077 100644 --- a/applications/times-square/templates/ingress.yaml +++ b/applications/times-square/templates/ingress.yaml @@ -8,7 +8,7 @@ config: baseUrl: {{ .Values.global.baseUrl | quote }} scopes: all: - - "exec:admin" + - "exec:notebook" loginRedirect: true template: metadata: From 80f5d894fd3c7966dc87405de9cad37928443f32 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 26 Jul 2023 12:57:04 -0400 Subject: [PATCH 264/308] Add an ingress for /times-square on Squareone This ingress ensures that anyone accessing /times-square is logged in, and redirects for login if necessary. The Times SQuaRE API service has its own ingress for the /times-square/api/ prefix. --- .../templates/ingress-times-square.yaml | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 applications/squareone/templates/ingress-times-square.yaml diff --git a/applications/squareone/templates/ingress-times-square.yaml b/applications/squareone/templates/ingress-times-square.yaml new file mode 100644 index 0000000000..1d09045cab --- /dev/null +++ b/applications/squareone/templates/ingress-times-square.yaml @@ -0,0 +1,31 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: {{ template "squareone.fullname" . }}-times-square + labels: + {{- include "squareone.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "exec:notebook" + loginRedirect: true +template: + metadata: + name: {{ template "squareone.fullname" . }}-times-square + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/times-square" + pathType: "Prefix" + backend: + service: + name: {{ template "squareone.fullname" . }} + port: + number: 80 From dc1590b9758ea36929341f503e85ee34f256094c Mon Sep 17 00:00:00 2001 From: Fritz Mueller Date: Wed, 26 Jul 2023 14:25:37 -0700 Subject: [PATCH 265/308] Redirect IDF ssotap to USDF; update sdm_schemas for new DP0.3 --- applications/ssotap/README.md | 2 +- applications/ssotap/values-idfdev.yaml | 3 +-- applications/ssotap/values-idfint.yaml | 3 +-- applications/ssotap/values-idfprod.yaml | 3 +-- applications/ssotap/values.yaml | 2 +- 5 files changed, 5 insertions(+), 8 deletions(-) diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md index 7eca13edcd..06aea4d46f 100644 --- a/applications/ssotap/README.md +++ b/applications/ssotap/README.md @@ -48,7 +48,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the mock QServ pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| -| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"2.1"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the mock QServ pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the mock QServ pod | | tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod | diff --git a/applications/ssotap/values-idfdev.yaml b/applications/ssotap/values-idfdev.yaml index 317f430926..866cac4322 100644 --- a/applications/ssotap/values-idfdev.yaml +++ b/applications/ssotap/values-idfdev.yaml @@ -20,6 +20,5 @@ pg: enabled: false database: "dp03_catalogs" # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23 - # host: "usdf-pg-catalogs.slac.stanford.edu:5432" - host: 104.197.78.194:5432 + host: "usdf-pg-catalogs.slac.stanford.edu:5432" username: "dp03" diff --git a/applications/ssotap/values-idfint.yaml b/applications/ssotap/values-idfint.yaml index 4f6ea30546..cdf447c971 100644 --- a/applications/ssotap/values-idfint.yaml +++ b/applications/ssotap/values-idfint.yaml @@ -22,6 +22,5 @@ pg: enabled: false database: "dp03_catalogs" # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23 - # host: "usdf-pg-catalogs.slac.stanford.edu:5432" - host: 104.197.78.194:5432 + host: "usdf-pg-catalogs.slac.stanford.edu:5432" username: "dp03" diff --git a/applications/ssotap/values-idfprod.yaml b/applications/ssotap/values-idfprod.yaml index 63f900e695..3ea3ff015b 100644 --- a/applications/ssotap/values-idfprod.yaml +++ b/applications/ssotap/values-idfprod.yaml @@ -22,8 +22,7 @@ pg: enabled: false database: "dp03_catalogs" # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23 - # host: "usdf-pg-catalogs.slac.stanford.edu:5432" - host: 104.197.78.194:5432 + host: "usdf-pg-catalogs.slac.stanford.edu:5432" username: "dp03" uws: diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml index e235b944fe..619108e8aa 100644 --- a/applications/ssotap/values.yaml +++ b/applications/ssotap/values.yaml @@ -130,7 +130,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "2.0.2" + tag: "2.1" # -- Resource limits and requests for the TAP schema database pod resources: {} From 1542b843364c6d20409e058267ac143b44c3a125 Mon Sep 17 00:00:00 2001 From: adam Date: Wed, 26 Jul 2023 16:42:35 -0700 Subject: [PATCH 266/308] Update jupyterlabcontroller version --- applications/nublado/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 5b186050a2..5f731c4266 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -6,7 +6,7 @@ sources: - https://github.com/lsst-sqre/jupyterlab-controller - https://github.com/lsst-sqre/rsp-restspawner home: https://github.com/lsst-sqre/jupyterlab-controller -appVersion: 0.6.1 +appVersion: 0.7.0 dependencies: - name: jupyterhub From ba04274cdab9894c2e52f00fb2b0c236bddefe68 Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Wed, 26 Jul 2023 17:34:47 -0700 Subject: [PATCH 267/308] argocd rbac --- applications/argocd/values-usdfdev.yaml | 2 ++ applications/argocd/values-usdfprod.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml index b4a5b3543d..fa44ff14a9 100644 --- a/applications/argocd/values-usdfdev.yaml +++ 
b/applications/argocd/values-usdfdev.yaml @@ -54,6 +54,8 @@ argo-cd: g, omullan@slac.stanford.edu, role:admin g, mreuter@slac.stanford.edu, role:admin g, rra@slac.stanford.edu, role:admin + g, fritzm@slac.stanford.edu, role:admin + g, cslater@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 01ad92dca1..86f3188b58 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -53,6 +53,8 @@ argo-cd: g, omullan@slac.stanford.edu, role:admin g, mreuter@slac.stanford.edu, role:admin g, rra@slac.stanford.edu, role:admin + g, fritzm@slac.stanford.edu, role:admin + g, cslater@slac.stanford.edu, role:admin scopes: "[email]" helm.repositories: | From 6149127206b51ca409f7f89afca23356f1021fc2 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 26 Jul 2023 13:10:19 -0400 Subject: [PATCH 268/308] Deploy Times Square 0.8.0 --- applications/times-square/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 98540e5d9d..7e605e00c4 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -8,7 +8,7 @@ sources: type: application # The default version tag of the times-square docker image -appVersion: "0.7.0" +appVersion: "0.8.0" dependencies: - name: redis From 25514e329a3f4ba1e202960072648f57086c6ed0 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Wed, 26 Jul 2023 13:18:08 -0400 Subject: [PATCH 269/308] Add config.githubOrgs value to Times Square This value configures what GitHub orgs can sync repos to Times Square (provided they've also installed the Times Square GitHub App). --- applications/times-square/README.md | 1 + applications/times-square/templates/configmap.yaml | 1 + applications/times-square/values.yaml | 3 +++ 3 files changed, 5 insertions(+) diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 5e16c0f22d..fac927192e 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -24,6 +24,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | | config.enableGitHubApp | string | `"False"` | Toggle to enable the GitHub App functionality | | config.githubAppId | string | `""` | GitHub application ID | +| config.githubOrgs | string | `"lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst"` | GitHub organizations that can sync repos to Times Square (comma-separated). | | config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | | config.name | string | `"times-square"` | Name of the service. 
| | config.profile | string | `"production"` | Run profile: "production" or "development" | diff --git a/applications/times-square/templates/configmap.yaml b/applications/times-square/templates/configmap.yaml index 4ee03962d6..739914e85b 100644 --- a/applications/times-square/templates/configmap.yaml +++ b/applications/times-square/templates/configmap.yaml @@ -15,3 +15,4 @@ data: TS_REDIS_QUEUE_URL: {{ required "config.redisQueueUrl must be set" .Values.config.redisQueueUrl | quote }} TS_ENABLE_GITHUB_APP: {{ .Values.config.enableGitHubApp | quote }} TS_GITHUB_APP_ID: {{ .Values.config.githubAppId | quote }} + TS_GITHUB_ORGS: {{ .Values.config.githubOrgs | quote }} diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index 640f3afbde..56260c16f3 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -113,6 +113,9 @@ config: # -- Toggle to enable the GitHub App functionality enableGitHubApp: "False" + # -- GitHub organizations that can sync repos to Times Square (comma-separated). + githubOrgs: "lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst" + cloudsql: # -- Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases # on Google Cloud From 500c114596319e029f5d060e57334cab441c35fa Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 12:20:17 -0700 Subject: [PATCH 270/308] Add monitoring app, enable in roundtable-dev --- applications/monitoring/Chart.yaml | 11 ++ applications/monitoring/README.md | 44 +++++ .../monitoring/templates/_helpers.tpl | 53 ++++++ .../monitoring/templates/cronjobs.yaml | 158 ++++++++++++++++++ .../monitoring/templates/ingress.yaml | 34 ++++ .../monitoring/templates/vault-secret.yaml | 9 + .../monitoring/values-roundtable-dev.yaml | 8 + applications/monitoring/values.yaml | 96 +++++++++++ environments/README.md | 1 + .../templates/monitoring-application.yaml | 37 ++++ environments/values-base.yaml | 2 + environments/values-ccin2p3.yaml | 2 + environments/values-idfdev.yaml | 2 + environments/values-idfint.yaml | 2 + environments/values-idfprod.yaml | 2 + environments/values-minikube.yaml | 2 + environments/values-roe.yaml | 2 + environments/values-roundtable-dev.yaml | 2 + environments/values-roundtable-prod.yaml | 2 + environments/values-summit.yaml | 2 + environments/values-tucson-teststand.yaml | 2 + environments/values-usdf-tel-rsp.yaml | 2 + environments/values-usdfdev.yaml | 2 + environments/values-usdfprod.yaml | 2 + environments/values.yaml | 2 + 25 files changed, 481 insertions(+) create mode 100644 applications/monitoring/Chart.yaml create mode 100644 applications/monitoring/README.md create mode 100644 applications/monitoring/templates/_helpers.tpl create mode 100644 applications/monitoring/templates/cronjobs.yaml create mode 100644 applications/monitoring/templates/ingress.yaml create mode 100644 applications/monitoring/templates/vault-secret.yaml create mode 100644 applications/monitoring/values-roundtable-dev.yaml create mode 100644 applications/monitoring/values.yaml create mode 100644 environments/templates/monitoring-application.yaml diff --git a/applications/monitoring/Chart.yaml b/applications/monitoring/Chart.yaml new file mode 100644 index 0000000000..a57d8d1979 --- /dev/null +++ b/applications/monitoring/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +name: monitoring +version: 0.0.1 +description: Chronograf-based UI for monitoring (data stored in InfluxDBv2) +sources: + - https://github.com/lsst-sqre/rubin-influx-tools +appVersion: 0.2.0 
+dependencies: + - name: chronograf + version: 1.2.5 + repository: https://helm.influxdata.com/ diff --git a/applications/monitoring/README.md b/applications/monitoring/README.md new file mode 100644 index 0000000000..dae5de8f29 --- /dev/null +++ b/applications/monitoring/README.md @@ -0,0 +1,44 @@ +# monitoring + +Chronograf-based UI for monitoring (data stored in InfluxDBv2) + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| chronograf.env.CUSTOM_AUTO_REFRESH | string | `"1s=1000"` | | +| chronograf.env.GH_CLIENT_ID | string | `""` | | +| chronograf.env.GH_ORGS | string | `"lsst-sqre"` | | +| chronograf.env.HOST_PAGE_DISABLED | bool | `true` | | +| chronograf.env.INFLUXDB_ORG | string | `"square"` | | +| chronograf.env.INFLUXDB_URL | string | `"https://monitoring.lsst.codes"` | | +| chronograf.envFromSecret | string | `"monitoring"` | | +| chronograf.image.pullPolicy | string | `"IfNotPresent"` | | +| chronograf.image.tag | string | `"1.9.4"` | | +| chronograf.ingress.enabled | bool | `false` | | +| chronograf.oauth.enabled | bool | `false` | | +| chronograf.resources.limits.cpu | int | `4` | | +| chronograf.resources.limits.memory | string | `"30Gi"` | | +| chronograf.resources.requests.cpu | int | `1` | | +| chronograf.resources.requests.memory | string | `"1024Mi"` | | +| chronograf.service.replicas | int | `1` | | +| chronograf.service.type | string | `"ClusterIP"` | | +| chronograf.updateStrategy.type | string | `"Recreate"` | | +| cronjob.debug | bool | `false` | set to true to enable debug logging | +| cronjob.image | object | `{"repository":"ghcr.io/lsst-sqre/rubin-influx-tools","tag":""}` | image for monitoring-related cronjobs | +| cronjob.image.repository | string | `"ghcr.io/lsst-sqre/rubin-influx-tools"` | repository for rubin-influx-tools | +| cronjob.image.tag | string | the appVersion of the chart | tag for rubin-influx-tools | +| cronjob.schedule | object | `{"bucketmaker":"*/15 * * * *","bucketmapper":"3-59/15 * * * *","taskmaker":"6-59/15 * * * *"}` | schedules for jobs | +| cronjob.schedule.bucketmaker | string | `"*/15 * * * *"` | bucketmaker schedule | +| cronjob.schedule.bucketmapper | string | `"3-59/15 * * * *"` | bucketmapper schedule | +| cronjob.schedule.taskmaker | string | `"6-59/15 * * * *"` | taskmaker schedule | +| global.influxdbOrg | string | `"square"` | InfluxDBv2 organization | +| global.influxdbUrl | string | `"https://monitoring.lsst.codes"` | URL for InfluxDBv2 instance | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| ingress.chronograf | object | `{"annotations":{},"hostname":""}` | ingress for Chronograf UI | +| ingress.chronograf.annotations | object | `{}` | Additional annotations to add to the ingress | +| ingress.chronograf.hostname | string | `""` | hostname for Chronograf UI @ default -- None, must be set by each individual instance | \ No newline at end of file diff --git a/applications/monitoring/templates/_helpers.tpl b/applications/monitoring/templates/_helpers.tpl new file mode 100644 index 0000000000..42400e8296 --- /dev/null +++ b/applications/monitoring/templates/_helpers.tpl @@ -0,0 +1,53 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "monitoring.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "monitoring.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "monitoring.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "monitoring.labels" -}} +app.kubernetes.io/name: {{ include "monitoring.name" . }} +helm.sh/chart: {{ include "monitoring.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "monitoring.selectorLabels" -}} +app.kubernetes.io/name: {{ include "monitoring.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/applications/monitoring/templates/cronjobs.yaml b/applications/monitoring/templates/cronjobs.yaml new file mode 100644 index 0000000000..19f0e25490 --- /dev/null +++ b/applications/monitoring/templates/cronjobs.yaml @@ -0,0 +1,158 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: bucketmaker +spec: + schedule: {{ .Values.cronjob.schedule.bucketmaker | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + automountServiceAccountToken: false + {{- with .Values.cronjob.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.cronjob.affinity }} + affinity: +{{ toYaml . | indent 12 }} + {{- end }} + containers: + - name: bucketmaker + image: "{{ .Values.cronjob.image.repository }}:{{ .Values.cronjob.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 405 + runAsGroup: 100 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "INFLUXDB_TOKEN" + valueFrom: + secretKeyRef: + name: "monitoring" + key: "influx-alert-token" + - name: "INFLUXDB_ORG" + value: {{ .Values.global.influxdbOrg | quote }} + - name: "INFLUXDB_URL" + value: {{ .Values.global.influxdbUrl | quote }} + {{- with .Values.cronjob.debug }} + - name: "DEBUG" + value: "true" + {{- end }} + command: [ "bucketmaker" ] + volumeMounts: + - mountPath: /tmp + name: tmpdir + volumes: + # We download the phalanx repo into here to determine what our + # active applications are. + - name: tmpdir + emptyDir: {} +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: bucketmapper +spec: + schedule: {{ .Values.cronjob.schedule.bucketmapper | quote }} + successfulJobsHistoryLimit: 1 + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + automountServiceAccountToken: false + {{- with .Values.cronjob.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.cronjob.affinity }} + affinity: +{{ toYaml . 
| indent 12 }} + {{- end }} + containers: + - name: bucketmapper + image: "{{ .Values.cronjob.image.repository }}:{{ .Values.cronjob.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 405 + runAsGroup: 100 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "INFLUXDB_TOKEN" + valueFrom: + secretKeyRef: + name: "monitoring" + # We should be able to do away with this level of + # privilege as recent Influx versions automatically + # create this mapping, but we would need to change + # our naming conventions to adapt. + key: "admin-token" + - name: "INFLUXDB_ORG" + value: {{ .Values.global.influxdbOrg | quote }} + - name: "INFLUXDB_URL" + value: {{ .Values.global.influxdbUrl | quote }} + {{- with .Values.cronjob.debug }} + - name: "DEBUG" + value: "true" + {{- end }} + command: [ "bucketmapper" ] +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: taskmaker +spec: + successfulJobsHistoryLimit: 1 + schedule: {{ .Values.cronjob.schedule.taskmaker | quote }} + jobTemplate: + spec: + template: + spec: + restartPolicy: Never + automountServiceAccountToken: false + {{- with .Values.cronjob.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.cronjob.affinity }} + affinity: +{{ toYaml . | indent 12 }} + {{- end }} + containers: + - name: taskmaker + image: "{{ .Values.cronjob.image.repository }}:{{ .Values.cronjob.image.tag | default .Chart.AppVersion }}" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + runAsUser: 405 + runAsGroup: 100 + capabilities: + drop: + - all + readOnlyRootFilesystem: true + env: + - name: "INFLUXDB_TOKEN" + valueFrom: + secretKeyRef: + name: "monitoring" + key: "influx-alert-token" + - name: "INFLUXDB_ORG" + value: {{ .Values.global.influxdbOrg | quote }} + - name: "INFLUXDB_URL" + value: {{ .Values.global.influxdbUrl | quote }} + {{- with .Values.cronjob.debug }} + - name: "DEBUG" + value: "true" + {{- end }} + command: [ "taskmaker" ] diff --git a/applications/monitoring/templates/ingress.yaml b/applications/monitoring/templates/ingress.yaml new file mode 100644 index 0000000000..c50f8f15a2 --- /dev/null +++ b/applications/monitoring/templates/ingress.yaml @@ -0,0 +1,34 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: "chronograf" + labels: + {{- include "monitoring.labels" . | nindent 4 }} + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + {{- with .Values.ingress.chronograf.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +config: + baseUrl: "https://{{ .Values.ingress.chronograf.hostname }}" + scopes: + anonymous: true # We will use Chronograf auth for now. +template: + metadata: + name: "chronograf" + spec: + tls: + - hosts: + - {{ .Values.ingress.chronograf.hostname | quote }} + secretName: tls + rules: + - host: {{ .Values.ingress.chronograf.hostname | quote }} + http: + paths: + - path: "/" + pathType: "Prefix" + backend: + service: + name: monitoring-chronograf + port: + number: 80 diff --git a/applications/monitoring/templates/vault-secret.yaml b/applications/monitoring/templates/vault-secret.yaml new file mode 100644 index 0000000000..fb3657e6fa --- /dev/null +++ b/applications/monitoring/templates/vault-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: {{ include "monitoring.fullname" . }} + labels: + {{- include "monitoring.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/monitoring" + type: Opaque diff --git a/applications/monitoring/values-roundtable-dev.yaml b/applications/monitoring/values-roundtable-dev.yaml new file mode 100644 index 0000000000..031de91c55 --- /dev/null +++ b/applications/monitoring/values-roundtable-dev.yaml @@ -0,0 +1,8 @@ +chronograf: + env: + GH_CLIENT_ID: "e85fe410b0021a251180" +cronjob: + debug: true +ingresses: + chronograf: + hostname: "monitoring-dev.lsst.cloud" diff --git a/applications/monitoring/values.yaml b/applications/monitoring/values.yaml new file mode 100644 index 0000000000..110d5b8c3f --- /dev/null +++ b/applications/monitoring/values.yaml @@ -0,0 +1,96 @@ +chronograf: + ## Image Settings + ## + image: + tag: 1.9.4 + pullPolicy: IfNotPresent + + ## Specify a service type + ## ClusterIP is default + ## ref: http://kubernetes.io/docs/user-guide/services/ + ## + service: + replicas: 1 + type: ClusterIP + + ## Configure resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + requests: + memory: 1024Mi + cpu: 1 + limits: + memory: 30Gi + cpu: 4 + + ## Use our own Gafaelfawr ingress. + ingress: + ## We will use Gafaelfawr ingresses instead + enabled: false + + ## Enable OAuth + oauth: + ## This is a lie: see below. + enabled: false + ## OAuth Settings for OAuth Providers + ## We do not set these here. What we do is set: + ## - TOKEN_SECRET + ## - GH_CLIENT_ID + ## - GH_CLIENT_SECRET + ## - GH_ORGS + ## in the environment. The secrets should come from the "monitoring" + ## secret, which is a VaultSecret, and the rest can just be injected + ## into the deployment env directly. + + ## Extra environment variables that will be passed onto deployment pods + env: + CUSTOM_AUTO_REFRESH: "1s=1000" + GH_CLIENT_ID: "" # Must be specified for each endpoint for the callback + GH_ORGS: "lsst-sqre" + HOST_PAGE_DISABLED: true + INFLUXDB_URL: "https://monitoring.lsst.codes" # Expect this to change + INFLUXDB_ORG: "square" + ## INFLUXDB_TOKEN should be in the monitoring secret as well as + ## TOKEN_SECRET and GH_CLIENT_SECRET. Note that INFLUX_TOKEN is for + ## InfluxDBv1 and INFLUXDB_TOKEN is for v2. 
+ envFromSecret: monitoring + updateStrategy: + type: Recreate + +cronjob: + # -- image for monitoring-related cronjobs + image: + # -- repository for rubin-influx-tools + repository: ghcr.io/lsst-sqre/rubin-influx-tools + # -- tag for rubin-influx-tools + # @default -- the appVersion of the chart + tag: "" + # -- set to true to enable debug logging + debug: false + # -- schedules for jobs + schedule: + # -- bucketmaker schedule + bucketmaker: "*/15 * * * *" + # -- bucketmapper schedule + bucketmapper: "3-59/15 * * * *" + # -- taskmaker schedule + taskmaker: "6-59/15 * * * *" + +ingress: + # -- ingress for Chronograf UI + chronograf: + # -- hostname for Chronograf UI + # @ default -- None, must be set by each individual instance + hostname: "" + # -- Additional annotations to add to the ingress + annotations: {} + +global: + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" + # -- URL for InfluxDBv2 instance + influxdbUrl: "https://monitoring.lsst.codes" # Expect this to change + # -- InfluxDBv2 organization + influxdbOrg: "square" diff --git a/environments/README.md b/environments/README.md index 483d6ca230..755e90de14 100644 --- a/environments/README.md +++ b/environments/README.md @@ -22,6 +22,7 @@ | livetap.enabled | bool | `false` | | | mobu.enabled | bool | `false` | | | moneypenny.enabled | bool | `false` | | +| monitoring.enabled | bool | `false` | | | narrativelog.enabled | bool | `false` | | | noteburst.enabled | bool | `false` | | | nublado.enabled | bool | `false` | | diff --git a/environments/templates/monitoring-application.yaml b/environments/templates/monitoring-application.yaml new file mode 100644 index 0000000000..a985fc3790 --- /dev/null +++ b/environments/templates/monitoring-application.yaml @@ -0,0 +1,37 @@ +{{- if .Values.monitoring.enabled -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "monitoring" +spec: + finalizers: + - "kubernetes" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "monitoring" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "monitoring" + server: "https://kubernetes.default.svc" + project: "default" + source: + path: "applications/monitoring" + repoURL: {{ .Values.repoURL | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.environment }}.yaml" +{{- end -}} diff --git a/environments/values-base.yaml b/environments/values-base.yaml index adbc29dbc1..b5aefd97cc 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -26,6 +26,8 @@ mobu: enabled: false moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: true noteburst: diff --git a/environments/values-ccin2p3.yaml b/environments/values-ccin2p3.yaml index e247c46898..3bf358195a 100644 --- a/environments/values-ccin2p3.yaml +++ b/environments/values-ccin2p3.yaml @@ -24,6 +24,8 @@ ingress-nginx: enabled: true kubernetes-replicator: enabled: false +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index 10479deee5..960271bd42 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -27,6 +27,8 
@@ mobu: enabled: true moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 66436d6da3..b3a9fb0790 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -29,6 +29,8 @@ mobu: enabled: true moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index af4ec443e2..9cd950d111 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -27,6 +27,8 @@ mobu: enabled: true moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml index b410f0003e..680818694d 100644 --- a/environments/values-minikube.yaml +++ b/environments/values-minikube.yaml @@ -26,6 +26,8 @@ mobu: enabled: true moneypenny: enabled: false +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-roe.yaml b/environments/values-roe.yaml index eae03f0fc3..8cee958cfa 100644 --- a/environments/values-roe.yaml +++ b/environments/values-roe.yaml @@ -26,6 +26,8 @@ mobu: enabled: true moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index 50a3a008ef..c3b346eb4e 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -28,6 +28,8 @@ mobu: enabled: false moneypenny: enabled: false +monitoring: + enabled: true narrativelog: enabled: false noteburst: diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index dc93d2d54f..fc12e79520 100644 --- a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -26,6 +26,8 @@ mobu: enabled: false moneypenny: enabled: false +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index 06820ef15a..4628e29617 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -26,6 +26,8 @@ mobu: enabled: false moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: true noteburst: diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml index f10f8bfb14..093415d77e 100644 --- a/environments/values-tucson-teststand.yaml +++ b/environments/values-tucson-teststand.yaml @@ -26,6 +26,8 @@ mobu: enabled: false moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: true noteburst: diff --git a/environments/values-usdf-tel-rsp.yaml b/environments/values-usdf-tel-rsp.yaml index 3ea6a51b91..c54bf09e0e 100644 --- a/environments/values-usdf-tel-rsp.yaml +++ b/environments/values-usdf-tel-rsp.yaml @@ -28,6 +28,8 @@ mobu: enabled: false moneypenny: enabled: false +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml index 7510d5e056..c95712319d 100644 --- a/environments/values-usdfdev.yaml +++ b/environments/values-usdfdev.yaml @@ -30,6 +30,8 @@ mobu: enabled: true moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git 
a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml index 419025cf4c..bde5f1f196 100644 --- a/environments/values-usdfprod.yaml +++ b/environments/values-usdfprod.yaml @@ -30,6 +30,8 @@ mobu: enabled: true moneypenny: enabled: true +monitoring: + enabled: false narrativelog: enabled: false noteburst: diff --git a/environments/values.yaml b/environments/values.yaml index de79c383d4..d8df8a7d06 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -46,6 +46,8 @@ mobu: enabled: false moneypenny: enabled: false +monitoring: + enabled: false narrativelog: enabled: false noteburst: From 0cc15c5b85f26117dd77067c98fc344d19d29ec7 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 12:57:00 -0700 Subject: [PATCH 271/308] Add monitoring docs --- docs/applications/index.rst | 1 + docs/applications/monitoring/index.rst | 19 +++++++++++++++++++ docs/applications/monitoring/values.md | 12 ++++++++++++ 3 files changed, 32 insertions(+) create mode 100644 docs/applications/monitoring/index.rst create mode 100644 docs/applications/monitoring/values.md diff --git a/docs/applications/index.rst b/docs/applications/index.rst index de95b55aa6..1b1ef2ca93 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -68,5 +68,6 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde giftless/index kubernetes-replicator/index + monitoring/index ook/index squarebot/index diff --git a/docs/applications/monitoring/index.rst b/docs/applications/monitoring/index.rst new file mode 100644 index 0000000000..133f031725 --- /dev/null +++ b/docs/applications/monitoring/index.rst @@ -0,0 +1,19 @@ +.. px-app:: monitoring + +######################## +Monitoring Chronograf UI +######################## + +Monitoring is an implementation of the Chronograf UI for monitoring the +health and resource usage of Phalanx applications. + +.. jinja:: monitoring + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/monitoring/values.md b/docs/applications/monitoring/values.md new file mode 100644 index 0000000000..cad9e5bc7f --- /dev/null +++ b/docs/applications/monitoring/values.md @@ -0,0 +1,12 @@ +```{px-app-values} monitoring +``` + +# Monitoring Helm values reference + +Helm values reference table for the {px-app}`monitoring` application. 
+ +```{include} ../../../applications/monitoring/README.md +--- +start-after: "## Values" +--- +``` From 59aa3b9e28a5dc328f0956ce32e8662f87a433d0 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 13:48:29 -0700 Subject: [PATCH 272/308] Fix merge conflicts --- applications/cachemachine/values-idfdev.yaml | 34 --- applications/cachemachine/values-idfint.yaml | 37 --- applications/mobu/values-idfdev.yaml | 18 -- applications/mobu/values-idfint.yaml | 17 -- applications/nublado2/values-idfdev.yaml | 62 ----- applications/nublado2/values-idfint.yaml | 246 ------------------- environments/values-idfdev.yaml | 6 +- environments/values-idfint.yaml | 4 +- 8 files changed, 5 insertions(+), 419 deletions(-) delete mode 100644 applications/cachemachine/values-idfdev.yaml delete mode 100644 applications/cachemachine/values-idfint.yaml delete mode 100644 applications/nublado2/values-idfdev.yaml delete mode 100644 applications/nublado2/values-idfint.yaml diff --git a/applications/cachemachine/values-idfdev.yaml b/applications/cachemachine/values-idfdev.yaml deleted file mode 100644 index cc7047215f..0000000000 --- a/applications/cachemachine/values-idfdev.yaml +++ /dev/null @@ -1,34 +0,0 @@ -serviceAccount: - annotations: { - iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-dev-7696.iam.gserviceaccount.com - } - -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoGar", - "registry_url": "us-central1-docker.pkg.dev", - "gar_repository": "sciplat", - "gar_image": "sciplat-lab", - "project_id": "rubin-shared-services-71ec", - "location": "us-central1", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - }, - { - "type": "SimpleRepoMan", - "images": [ - { - "image_url": "us-central1-docker.pkg.dev/rubin-shared-services-71ec/sciplat/sciplat-lab:w_2023_07", - "name": "Weekly 2023_07" - } - ] - } - ] - } diff --git a/applications/cachemachine/values-idfint.yaml b/applications/cachemachine/values-idfint.yaml deleted file mode 100644 index 0e80940198..0000000000 --- a/applications/cachemachine/values-idfint.yaml +++ /dev/null @@ -1,37 +0,0 @@ -image: - tag: "1.2.3" - -serviceAccount: - annotations: { - iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-int-dc5d.iam.gserviceaccount.com - } - -autostart: - jupyter: | - { - "name": "jupyter", - "labels": {}, - "repomen": [ - { - "type": "RubinRepoGar", - "registry_url": "us-central1-docker.pkg.dev", - "gar_repository": "sciplat", - "gar_image": "sciplat-lab", - "project_id": "rubin-shared-services-71ec", - "location": "us-central1", - "recommended_tag": "recommended", - "num_releases": 1, - "num_weeklies": 2, - "num_dailies": 3 - }, - { - "type": "SimpleRepoMan", - "images": [ - { - "image_url": "us-central1-docker.pkg.dev/rubin-shared-services-71ec/sciplat/sciplat-lab:w_2023_07", - "name": "Weekly 2023_07" - } - ] - } - ] - } diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 1ef44e63ab..645b79227f 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -1,24 +1,6 @@ config: debug: true autostart: - - name: "nublado2" - count: 1 - users: - - username: "bot-mobu-user" - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - image: - image_class: "latest-weekly" - repo_url: "https://github.com/lsst-sqre/system-test.git" - repo_branch: "prod" - url_prefix: "/n2" - restart: true 
- name: "weekly" count: 1 users: diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index 7dc005b4ca..a861a3d54b 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -1,22 +1,5 @@ config: autostart: - - name: "nublado2" - count: 1 - users: - - username: "bot-mobu-nublado2" - scopes: - - "exec:notebook" - - "exec:portal" - - "read:image" - - "read:tap" - business: - type: "NotebookRunner" - options: - repo_url: "https://github.com/lsst-sqre/system-test.git" - repo_branch: "prod" - max_executions: 1 - url_prefix: "/n2" - restart: true - name: "recommended" count: 1 users: diff --git a/applications/nublado2/values-idfdev.yaml b/applications/nublado2/values-idfdev.yaml deleted file mode 100644 index f63977381b..0000000000 --- a/applications/nublado2/values-idfdev.yaml +++ /dev/null @@ -1,62 +0,0 @@ -jupyterhub: - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - db: - upgrade: true - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["data-dev.lsst.cloud"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://data-dev.lsst.cloud/login" - -config: - base_url: "https://data-dev.lsst.cloud" - butler_secret_path: "secret/k8s_operator/data-dev.lsst.cloud/butler-secret" - pull_secret_path: "secret/k8s_operator/data-dev.lsst.cloud/pull-secret" - cachemachine_image_policy: "desired" - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - S3_ENDPOINT_URL: "https://storage.googleapis.com" - GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/butler-secret/butler-gcs-idf-creds.json" - DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_KERNEL_CONNECTED: "True" - CULL_KERNEL_INTERVAL: "300" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - CULL_TERMINAL_INTERVAL: "300" - volumes: - - name: home - nfs: - path: /share1/home - server: 10.87.86.26 - - name: project - nfs: - path: /share1/project - server: 10.87.86.26 - - name: scratch - nfs: - path: /share1/scratch - server: 10.87.86.26 - volume_mounts: - - name: home - mountPath: /home - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml deleted file mode 100644 index c5812aedb8..0000000000 --- a/applications/nublado2/values-idfint.yaml +++ /dev/null @@ -1,246 +0,0 @@ -jupyterhub: - hub: - baseUrl: "/n2" - config: - ServerApp: - shutdown_no_activity_timeout: 432000 - - cull: - enabled: true - users: false - removeNamedServers: false - timeout: 432000 - every: 300 - maxAge: 2160000 - - ingress: - hosts: ["data-int.lsst.cloud"] - annotations: - nginx.ingress.kubernetes.io/auth-signin: "https://data-int.lsst.cloud/login" -config: - base_url: "https://data-int.lsst.cloud" - butler_secret_path: "secret/k8s_operator/data-int.lsst.cloud/butler-secret" - pull_secret_path: 
"secret/k8s_operator/data-int.lsst.cloud/pull-secret" - cachemachine_image_policy: "desired" - lab_environment: - PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" - AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" - S3_ENDPOINT_URL: "https://storage.googleapis.com" - GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/butler-secret/butler-gcs-idf-creds.json" - DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" - AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks - AUTO_REPO_BRANCH: prod - AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod - PANDA_AUTH: oidc - PANDA_VERIFY_HOST: "off" - PANDA_AUTH_VO: Rubin - PANDA_URL_SSL: https://pandaserver-doma.cern.ch:25443/server/panda - PANDA_URL: http://pandaserver-doma.cern.ch:25080/server/panda - IDDS_CONFIG: /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template - PANDA_CONFIG_ROOT: "~" - NO_ACTIVITY_TIMEOUT: "432000" - CULL_KERNEL_IDLE_TIMEOUT: "432000" - CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" - sizes: - - name: Small - cpu: 1 - ram: 4096M - - name: Medium - cpu: 2 - ram: 8192M - - name: Large - cpu: 4 - ram: 16384M - - name: Huge - cpu: 8 - ram: 32768M - volumes: - - name: home - nfs: - path: /share1/home - server: 10.22.240.130 - - name: project - nfs: - path: /share1/project - server: 10.22.240.130 - - name: scratch - nfs: - path: /share1/scratch - server: 10.22.240.130 - volume_mounts: - - name: home - mountPath: /home - - name: project - mountPath: /project - - name: scratch - mountPath: /scratch - # Workaround to impose resource quotas at IDF - user_resources_template: | - - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ user_namespace }}" - - apiVersion: v1 - kind: ConfigMap - metadata: - name: group - namespace: "{{ user_namespace }}" - data: - group: | - root:x:0: - bin:x:1: - daemon:x:2: - sys:x:3: - adm:x:4: - tty:x:5: - disk:x:6: - lp:x:7: - mem:x:8: - kmem:x:9: - wheel:x:10: - cdrom:x:11: - mail:x:12: - man:x:15: - dialout:x:18: - floppy:x:19: - games:x:20: - tape:x:33: - video:x:39: - ftp:x:50: - lock:x:54: - audio:x:63: - nobody:x:99: - users:x:100: - utmp:x:22: - utempter:x:35: - input:x:999: - systemd-journal:x:190: - systemd-network:x:192: - dbus:x:81: - ssh_keys:x:998: - lsst_lcl:x:1000:{{ user }} - tss:x:59: - cgred:x:997: - screen:x:84: - jovyan:x:768:{{ user }}{% for g in groups %} - {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} - - apiVersion: v1 - kind: ConfigMap - metadata: - name: passwd - namespace: "{{ user_namespace }}" - data: - passwd: | - root:x:0:0:root:/root:/bin/bash - bin:x:1:1:bin:/bin:/sbin/nologin - daemon:x:2:2:daemon:/sbin:/sbin/nologin - adm:x:3:4:adm:/var/adm:/sbin/nologin - lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin - sync:x:5:0:sync:/sbin:/bin/sync - shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown - halt:x:7:0:halt:/sbin:/sbin/halt - mail:x:8:12:mail:/var/spool/mail:/sbin/nologin - operator:x:11:0:operator:/root:/sbin/nologin - games:x:12:100:games:/usr/games:/sbin/nologin - ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin - nobody:x:99:99:Nobody:/:/sbin/nologin - systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin - dbus:x:81:81:System message bus:/:/sbin/nologin - lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash - tss:x:59:59:Account used by the trousers package to sandbox the tcsd 
daemon:/dev/null:/sbin/nologin - {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user }}:/bin/bash - - apiVersion: v1 - kind: ConfigMap - metadata: - name: dask - namespace: "{{ user_namespace }}" - data: - dask_worker.yml: | - {{ dask_yaml | indent(6) }} - # When we break out the resources we should make this per-instance - # configurable. - - apiVersion: v1 - kind: ConfigMap - metadata: - name: idds-config - namespace: "{{ user_namespace }}" - data: - idds.cfg.client.template: | - # Licensed under the Apache License, Version 2.0 (the "License"); - # You may not use this file except in compliance with the License. - # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - # - # Authors: - # - Wen Guan, , 2020 - [common] - # if logdir is configured, idds will write to idds.log in this directory. - # else idds will go to stdout/stderr. - # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. - # logdir = /var/log/idds - loglevel = INFO - [rest] - host = https://iddsserver.cern.ch:443/idds - #url_prefix = /idds - #cacher_dir = /tmp - cacher_dir = /data/idds - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - imagePullSecrets: - - name: pull-secret - - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ user }}-role" - namespace: "{{ user_namespace }}" - rules: - # cf https://kubernetes.dask.org/en/latest/kubecluster.html - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get","list"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ user }}-rolebinding" - namespace: "{{ user_namespace }}" - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: "{{ user }}-role" - subjects: - - kind: ServiceAccount - name: "{{ user }}-serviceaccount" - namespace: "{{ user_namespace }}" - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: butler-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ butler_secret_path }}" - type: Opaque - - apiVersion: ricoberger.de/v1alpha1 - kind: VaultSecret - metadata: - name: pull-secret - namespace: "{{ user_namespace }}" - spec: - path: "{{ pull_secret_path }}" - type: kubernetes.io/dockerconfigjson - - apiVersion: v1 - kind: ResourceQuota - metadata: - name: user-quota - namespace: "{{ user_namespace }}" - spec: - hard: - limits.cpu: 9 - limits.memory: 27Gi diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index 960271bd42..bfdc4fcd34 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -8,7 +8,7 @@ alert-stream-broker: argo-workflows: enabled: true cachemachine: - enabled: true + enabled: false cert-manager: enabled: true datalinker: @@ -26,7 +26,7 @@ kubernetes-replicator: mobu: enabled: true moneypenny: - enabled: true + enabled: false monitoring: enabled: false narrativelog: @@ -36,7 +36,7 @@ noteburst: nublado: enabled: true nublado2: - enabled: true + enabled: false ook: enabled: false plot-navigator: diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index b3a9fb0790..956950ed6b 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -8,7 +8,7 
@@ alert-stream-broker: argo-workflows: enabled: false cachemachine: - enabled: true + enabled: false cert-manager: enabled: true datalinker: @@ -38,7 +38,7 @@ noteburst: nublado: enabled: true nublado2: - enabled: true + enabled: false ook: enabled: false plot-navigator: From 3669687953c0d16ad6ba23aebb64437b1cfc5802 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 13:17:57 -0700 Subject: [PATCH 273/308] Add nublado2 back to int --- applications/cachemachine/values-idfint.yaml | 37 +++ applications/mobu/values-idfint.yaml | 17 ++ applications/nublado2/values-idfint.yaml | 246 +++++++++++++++++++ environments/values-idfint.yaml | 4 +- 4 files changed, 302 insertions(+), 2 deletions(-) create mode 100644 applications/cachemachine/values-idfint.yaml create mode 100644 applications/nublado2/values-idfint.yaml diff --git a/applications/cachemachine/values-idfint.yaml b/applications/cachemachine/values-idfint.yaml new file mode 100644 index 0000000000..0e80940198 --- /dev/null +++ b/applications/cachemachine/values-idfint.yaml @@ -0,0 +1,37 @@ +image: + tag: "1.2.3" + +serviceAccount: + annotations: { + iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-int-dc5d.iam.gserviceaccount.com + } + +autostart: + jupyter: | + { + "name": "jupyter", + "labels": {}, + "repomen": [ + { + "type": "RubinRepoGar", + "registry_url": "us-central1-docker.pkg.dev", + "gar_repository": "sciplat", + "gar_image": "sciplat-lab", + "project_id": "rubin-shared-services-71ec", + "location": "us-central1", + "recommended_tag": "recommended", + "num_releases": 1, + "num_weeklies": 2, + "num_dailies": 3 + }, + { + "type": "SimpleRepoMan", + "images": [ + { + "image_url": "us-central1-docker.pkg.dev/rubin-shared-services-71ec/sciplat/sciplat-lab:w_2023_07", + "name": "Weekly 2023_07" + } + ] + } + ] + } diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml index a861a3d54b..7dc005b4ca 100644 --- a/applications/mobu/values-idfint.yaml +++ b/applications/mobu/values-idfint.yaml @@ -1,5 +1,22 @@ config: autostart: + - name: "nublado2" + count: 1 + users: + - username: "bot-mobu-nublado2" + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/lsst-sqre/system-test.git" + repo_branch: "prod" + max_executions: 1 + url_prefix: "/n2" + restart: true - name: "recommended" count: 1 users: diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml new file mode 100644 index 0000000000..c5812aedb8 --- /dev/null +++ b/applications/nublado2/values-idfint.yaml @@ -0,0 +1,246 @@ +jupyterhub: + hub: + baseUrl: "/n2" + config: + ServerApp: + shutdown_no_activity_timeout: 432000 + + cull: + enabled: true + users: false + removeNamedServers: false + timeout: 432000 + every: 300 + maxAge: 2160000 + + ingress: + hosts: ["data-int.lsst.cloud"] + annotations: + nginx.ingress.kubernetes.io/auth-signin: "https://data-int.lsst.cloud/login" +config: + base_url: "https://data-int.lsst.cloud" + butler_secret_path: "secret/k8s_operator/data-int.lsst.cloud/butler-secret" + pull_secret_path: "secret/k8s_operator/data-int.lsst.cloud/pull-secret" + cachemachine_image_policy: "desired" + lab_environment: + PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt" + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini" + S3_ENDPOINT_URL: "https://storage.googleapis.com" + 
GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/butler-secret/butler-gcs-idf-creds.json" + DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-int-repos.yaml" + AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks + AUTO_REPO_BRANCH: prod + AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod + PANDA_AUTH: oidc + PANDA_VERIFY_HOST: "off" + PANDA_AUTH_VO: Rubin + PANDA_URL_SSL: https://pandaserver-doma.cern.ch:25443/server/panda + PANDA_URL: http://pandaserver-doma.cern.ch:25080/server/panda + IDDS_CONFIG: /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template + PANDA_CONFIG_ROOT: "~" + NO_ACTIVITY_TIMEOUT: "432000" + CULL_KERNEL_IDLE_TIMEOUT: "432000" + CULL_TERMINAL_INACTIVE_TIMEOUT: "432000" + sizes: + - name: Small + cpu: 1 + ram: 4096M + - name: Medium + cpu: 2 + ram: 8192M + - name: Large + cpu: 4 + ram: 16384M + - name: Huge + cpu: 8 + ram: 32768M + volumes: + - name: home + nfs: + path: /share1/home + server: 10.22.240.130 + - name: project + nfs: + path: /share1/project + server: 10.22.240.130 + - name: scratch + nfs: + path: /share1/scratch + server: 10.22.240.130 + volume_mounts: + - name: home + mountPath: /home + - name: project + mountPath: /project + - name: scratch + mountPath: /scratch + # Workaround to impose resource quotas at IDF + user_resources_template: | + - apiVersion: v1 + kind: Namespace + metadata: + name: "{{ user_namespace }}" + - apiVersion: v1 + kind: ConfigMap + metadata: + name: group + namespace: "{{ user_namespace }}" + data: + group: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + tape:x:33: + video:x:39: + ftp:x:50: + lock:x:54: + audio:x:63: + nobody:x:99: + users:x:100: + utmp:x:22: + utempter:x:35: + input:x:999: + systemd-journal:x:190: + systemd-network:x:192: + dbus:x:81: + ssh_keys:x:998: + lsst_lcl:x:1000:{{ user }} + tss:x:59: + cgred:x:997: + screen:x:84: + jovyan:x:768:{{ user }}{% for g in groups %} + {{ g.name }}:x:{{ g.id }}:{{ user if g.id != gid else "" }}{% endfor %} + - apiVersion: v1 + kind: ConfigMap + metadata: + name: passwd + namespace: "{{ user_namespace }}" + data: + passwd: | + root:x:0:0:root:/root:/bin/bash + bin:x:1:1:bin:/bin:/sbin/nologin + daemon:x:2:2:daemon:/sbin:/sbin/nologin + adm:x:3:4:adm:/var/adm:/sbin/nologin + lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin + sync:x:5:0:sync:/sbin:/bin/sync + shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown + halt:x:7:0:halt:/sbin:/sbin/halt + mail:x:8:12:mail:/var/spool/mail:/sbin/nologin + operator:x:11:0:operator:/root:/sbin/nologin + games:x:12:100:games:/usr/games:/sbin/nologin + ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin + nobody:x:99:99:Nobody:/:/sbin/nologin + systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin + dbus:x:81:81:System message bus:/:/sbin/nologin + lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash + tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin + {{ user }}:x:{{ uid }}:{{ gid if gid else uid }}::/home/{{ user }}:/bin/bash + - apiVersion: v1 + kind: ConfigMap + metadata: + name: dask + namespace: "{{ user_namespace }}" + data: + dask_worker.yml: | + {{ dask_yaml | indent(6) }} + # When we break out the resources we should make this per-instance + # configurable. 
+ - apiVersion: v1 + kind: ConfigMap + metadata: + name: idds-config + namespace: "{{ user_namespace }}" + data: + idds.cfg.client.template: | + # Licensed under the Apache License, Version 2.0 (the "License"); + # You may not use this file except in compliance with the License. + # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + # + # Authors: + # - Wen Guan, , 2020 + [common] + # if logdir is configured, idds will write to idds.log in this directory. + # else idds will go to stdout/stderr. + # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. + # logdir = /var/log/idds + loglevel = INFO + [rest] + host = https://iddsserver.cern.ch:443/idds + #url_prefix = /idds + #cacher_dir = /tmp + cacher_dir = /data/idds + - apiVersion: v1 + kind: ServiceAccount + metadata: + name: "{{ user }}-serviceaccount" + namespace: "{{ user_namespace }}" + imagePullSecrets: + - name: pull-secret + - apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: "{{ user }}-role" + namespace: "{{ user_namespace }}" + rules: + # cf https://kubernetes.dask.org/en/latest/kubecluster.html + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["pods/log"] + verbs: ["get","list"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: "{{ user }}-rolebinding" + namespace: "{{ user_namespace }}" + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "{{ user }}-role" + subjects: + - kind: ServiceAccount + name: "{{ user }}-serviceaccount" + namespace: "{{ user_namespace }}" + - apiVersion: ricoberger.de/v1alpha1 + kind: VaultSecret + metadata: + name: butler-secret + namespace: "{{ user_namespace }}" + spec: + path: "{{ butler_secret_path }}" + type: Opaque + - apiVersion: ricoberger.de/v1alpha1 + kind: VaultSecret + metadata: + name: pull-secret + namespace: "{{ user_namespace }}" + spec: + path: "{{ pull_secret_path }}" + type: kubernetes.io/dockerconfigjson + - apiVersion: v1 + kind: ResourceQuota + metadata: + name: user-quota + namespace: "{{ user_namespace }}" + spec: + hard: + limits.cpu: 9 + limits.memory: 27Gi diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 956950ed6b..b3a9fb0790 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -8,7 +8,7 @@ alert-stream-broker: argo-workflows: enabled: false cachemachine: - enabled: false + enabled: true cert-manager: enabled: true datalinker: @@ -38,7 +38,7 @@ noteburst: nublado: enabled: true nublado2: - enabled: false + enabled: true ook: enabled: false plot-navigator: From abfab9b6e19c082c529aa10743b4c9b52a4d6a7e Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 13:37:19 -0700 Subject: [PATCH 274/308] Remove moneypenny/dev config, add it back to int --- applications/moneypenny/values-idfdev.yaml | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 applications/moneypenny/values-idfdev.yaml diff --git a/applications/moneypenny/values-idfdev.yaml b/applications/moneypenny/values-idfdev.yaml deleted file mode 100644 index 77b96cbe69..0000000000 --- a/applications/moneypenny/values-idfdev.yaml +++ /dev/null @@ -1,15 +0,0 @@ -orders: - commission: - - name: initcommission - image: lsstsqre/inituserhome - securityContext: - runAsUser: 0 - 
runAsNonRootUser: false - volumeMounts: - - mountPath: /homedirs - name: homedirs - volumes: - - name: homedirs - nfs: - server: 10.87.86.26 - path: /share1/home From a6aa426f2456178b3807a86bf41f2ea689653d01 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 27 Jul 2023 14:27:42 -0700 Subject: [PATCH 275/308] Change nginx logs to error nginx produces a lot of spurious warning messages about spooling large request bodies to disk, which we don't care about. --- applications/ingress-nginx/values-idfdev.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/applications/ingress-nginx/values-idfdev.yaml b/applications/ingress-nginx/values-idfdev.yaml index ce9e5c39ca..b7dfd83db6 100644 --- a/applications/ingress-nginx/values-idfdev.yaml +++ b/applications/ingress-nginx/values-idfdev.yaml @@ -1,4 +1,6 @@ ingress-nginx: controller: + config: + error-log-level: "error" service: loadBalancerIP: "35.225.112.77" From 2fea240890250acea74821870493436120e59c5b Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Fri, 2 Jun 2023 14:21:25 -0700 Subject: [PATCH 276/308] usdf-dev mobu configuration --- applications/gafaelfawr/values-usdfdev.yaml | 2 +- applications/mobu/values-usdfdev.yaml | 53 ++++++++++++++++++++- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index 6ffb339ba2..74d3c872c6 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -219,6 +219,6 @@ config: - "frossie" - "jonathansick" - "rra" - - "simonkrughoff" - "ytl" - "ppascual" + - "pav" diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml index 357c0bd2a3..c377f9b859 100644 --- a/applications/mobu/values-usdfdev.yaml +++ b/applications/mobu/values-usdfdev.yaml @@ -1,2 +1,53 @@ config: - slackAlerts: false + debug: true + autostart: + - name: "firefighter" + count: 1 + users: + - username: "bot-mobu01" + uidnumber: 45692 + gidnumber: 1126 + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/lsst-sqre/system-test.git" + repo_branch: "prod" + use_cachemachine: false + restart: true + - name: "weekly" + count: 1 + users: + - username: "bot-mobu02" + uidnumber: 45693 + gidnumber: 1126 + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + image: + image_class: "latest-weekly" + repo_url: "https://github.com/lsst-sqre/system-test.git" + repo_branch: "prod" + use_cachemachine: false + restart: true + - name: "tap" + count: 1 + users: + - username: "bot-mobu04" + uidnumber: 45695 + gidnumber: 1126 + scopes: ["read:tap"] + business: + type: "TAPQueryRunner" + options: + query_set: "dp0.2" + restart: true From ec67b3fa08190e1d3e1ce0804bbe871a0daed65a Mon Sep 17 00:00:00 2001 From: pav511 <38131208+pav511@users.noreply.github.com> Date: Fri, 21 Jul 2023 14:38:04 -0700 Subject: [PATCH 277/308] usdf mobu prod config --- applications/mobu/values-usdfdev.yaml | 4 ++-- applications/mobu/values-usdfprod.yaml | 33 +++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml index c377f9b859..5fca5d0a48 100644 --- a/applications/mobu/values-usdfdev.yaml +++ b/applications/mobu/values-usdfdev.yaml @@ -42,8 +42,8 @@ config: - name: "tap" 
count: 1 users: - - username: "bot-mobu04" - uidnumber: 45695 + - username: "bot-mobu03" + uidnumber: 45694 gidnumber: 1126 scopes: ["read:tap"] business: diff --git a/applications/mobu/values-usdfprod.yaml b/applications/mobu/values-usdfprod.yaml index 357c0bd2a3..3bd79a6fe1 100644 --- a/applications/mobu/values-usdfprod.yaml +++ b/applications/mobu/values-usdfprod.yaml @@ -1,2 +1,33 @@ config: - slackAlerts: false + debug: true + autostart: + - name: "firefighter" + count: 1 + users: + - username: "bot-mobu04" + uidnumber: 45695 + gidnumber: 1126 + scopes: + - "exec:notebook" + - "exec:portal" + - "read:image" + - "read:tap" + business: + type: "NotebookRunner" + options: + repo_url: "https://github.com/lsst-sqre/system-test.git" + repo_branch: "prod" + use_cachemachine: false + restart: true + - name: "tap" + count: 1 + users: + - username: "bot-mobu05" + uidnumber: 45696 + gidnumber: 1126 + scopes: ["read:tap"] + business: + type: "TAPQueryRunner" + options: + query_set: "dp0.2" + restart: true From 7edb2bd59f0e09da5dc1b3af38564c9a8cb0343b Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Thu, 27 Jul 2023 19:07:52 -0400 Subject: [PATCH 278/308] Deploy Times Square 0.9.0 --- applications/times-square/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 7e605e00c4..375745daa6 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -8,7 +8,7 @@ sources: type: application # The default version tag of the times-square docker image -appVersion: "0.8.0" +appVersion: "0.9.0" dependencies: - name: redis From 6e48e5a59de3b1e52b0768fa74a5ceed9bbf1354 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 14:51:38 -0700 Subject: [PATCH 279/308] Add schema checking for application secrets In preparation for adding new secrets.yaml files to each Phalanx application, add schema checking for those files using a JSON Schema. Put that schema into an extras directory for the documentation so that it will be installed into the constructed documentation tree by Sphinx and published. --- .pre-commit-config.yaml | 9 +++ docs/conf.py | 3 + docs/extras/schemas/secrets.json | 100 +++++++++++++++++++++++++++++++ 3 files changed, 112 insertions(+) create mode 100644 docs/extras/schemas/secrets.json diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1f47f14acc..275608fa85 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,6 +13,15 @@ repos: args: - -c=.yamllint.yml + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: 0.23.3 + hooks: + - id: check-jsonschema + files: "^applications/.*/secrets(-[^./-]+)?\\.yaml" + args: ["--schemafile", "docs/extras/schemas/secrets.json"] + - id: check-metaschema + files: "^docs/extras/schemas/.*\\.json" + - repo: https://github.com/norwoodj/helm-docs rev: v1.11.1 hooks: diff --git a/docs/conf.py b/docs/conf.py index 9d4279b46a..fe8a311691 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -32,3 +32,6 @@ linkcheck_exclude_documents = [ r"applications/.*/values", ] + +# Include JSON schemas in the documentation output tree.
+html_extra_path = ["extras"] diff --git a/docs/extras/schemas/secrets.json b/docs/extras/schemas/secrets.json new file mode 100644 index 0000000000..7cb33c9eb5 --- /dev/null +++ b/docs/extras/schemas/secrets.json @@ -0,0 +1,100 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://phalanx.lsst.io/schemas/secrets.json", + "title": "Application secrets", + "description": "Schema for secrets required by Phalanx applications", + "type": "object", + "patternProperties": { + "^.+$": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "if": { + "type": "string" + }, + "copy": { + "type": "object", + "properties": { + "if": { + "type": "string" + }, + "application": { + "type": "string" + }, + "key": { + "type": "string" + } + }, + "required": ["application", "key"], + "additionalProperties": false + }, + "generate": { + "type": "object", + "properties": { + "if": { + "type": "string" + }, + "type": { + "enum": [ + "password", + "gafaelfawr-token", + "fernet-key", + "rsa-private-key", + "bcrypt-password-hash", + "mtime" + ] + }, + "source": { + "type": "string" + } + }, + "required": ["type"], + "oneOf": [ + { + "type": "object", + "properties": { + "type": { + "enum": [ + "bcrypt-password-hash", + "mtime" + ] + }, + "source": { + "type": "string" + } + }, + "required": ["type", "source"] + }, + { + "type": "object", + "properties": { + "if": { + "type": "string" + }, + "type": { + "enum": [ + "password", + "gafaelfawr-token", + "fernet-key", + "rsa-private-key" + ] + } + }, + "additionalProperties": false + } + ], + "additionalProperties": false + }, + "value": { + "type": "string" + } + }, + "required": [ + "description" + ] + }, + "additionalProperties": false + } +} From 6b8f8b934a912f01443e2070672cfc63d380349b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 17 Jul 2023 17:03:21 -0700 Subject: [PATCH 280/308] Add definitions for Phalanx application secrets Following the specification in SQR-079, add specifications for the Phalanx application secrets. 
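The per-application secrets.yaml files added below are all instances of the JSON Schema introduced in the previous commit. As a rough sketch of what that validation covers (the repository itself relies on the check-jsonschema pre-commit hook shown above rather than on any code like this), a single file can be checked by hand with PyYAML and the jsonschema library, assuming both are installed:

```python
import json
from pathlib import Path

import jsonschema
import yaml

# Run from the root of a Phalanx checkout. The same schema is published at
# https://phalanx.lsst.io/schemas/secrets.json once the documentation is
# built, thanks to the html_extra_path setting added to docs/conf.py.
schema = json.loads(Path("docs/extras/schemas/secrets.json").read_text())
secrets = yaml.safe_load(Path("applications/gafaelfawr/secrets.yaml").read_text())

# Raises jsonschema.exceptions.ValidationError if, for example, an entry is
# missing its description or uses an unknown generate type.
jsonschema.validate(instance=secrets, schema=schema)
print(f"validated {len(secrets)} secret definitions")
```

The pre-commit hook performs the same check automatically for every file matching applications/*/secrets.yaml or applications/*/secrets-ENVIRONMENT.yaml.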
--- applications/argocd/secrets.yaml | 34 +++++++++ applications/cert-manager/secrets.yaml | 5 ++ applications/exposurelog/secrets.yaml | 4 ++ applications/gafaelfawr/secrets.yaml | 79 +++++++++++++++++++++ applications/ingress-nginx/secrets.yaml | 13 ++++ applications/livetap/secrets.yaml | 4 ++ applications/mobu/secrets.yaml | 13 ++++ applications/narrativelog/secrets.yaml | 4 ++ applications/nublado/secrets-idfdev.yaml | 15 ++++ applications/nublado/secrets.yaml | 23 ++++++ applications/nublado2/secrets-idfdev.yaml | 15 ++++ applications/nublado2/secrets.yaml | 17 +++++ applications/portal/secrets.yaml | 4 ++ applications/postgres/secrets.yaml | 45 ++++++++++++ applications/sherlock/secrets.yaml | 7 ++ applications/ssotap/secrets.yaml | 4 ++ applications/tap/secrets.yaml | 4 ++ applications/telegraf-ds/secrets.yaml | 12 ++++ applications/telegraf/secrets.yaml | 7 ++ applications/vo-cutouts/secrets-idfdev.yaml | 20 ++++++ applications/vo-cutouts/secrets.yaml | 13 ++++ 21 files changed, 342 insertions(+) create mode 100644 applications/argocd/secrets.yaml create mode 100644 applications/cert-manager/secrets.yaml create mode 100644 applications/exposurelog/secrets.yaml create mode 100644 applications/gafaelfawr/secrets.yaml create mode 100644 applications/ingress-nginx/secrets.yaml create mode 100644 applications/livetap/secrets.yaml create mode 100644 applications/mobu/secrets.yaml create mode 100644 applications/narrativelog/secrets.yaml create mode 100644 applications/nublado/secrets-idfdev.yaml create mode 100644 applications/nublado/secrets.yaml create mode 100644 applications/nublado2/secrets-idfdev.yaml create mode 100644 applications/nublado2/secrets.yaml create mode 100644 applications/portal/secrets.yaml create mode 100644 applications/postgres/secrets.yaml create mode 100644 applications/sherlock/secrets.yaml create mode 100644 applications/ssotap/secrets.yaml create mode 100644 applications/tap/secrets.yaml create mode 100644 applications/telegraf-ds/secrets.yaml create mode 100644 applications/telegraf/secrets.yaml create mode 100644 applications/vo-cutouts/secrets-idfdev.yaml create mode 100644 applications/vo-cutouts/secrets.yaml diff --git a/applications/argocd/secrets.yaml b/applications/argocd/secrets.yaml new file mode 100644 index 0000000000..9d16fb2843 --- /dev/null +++ b/applications/argocd/secrets.yaml @@ -0,0 +1,34 @@ +"argocd.admin.plaintext_password": + description: >- + Admin password for Argo CD. This password is normally not used because + Argo CD is configured to use Google or GitHub authentication, but it is + used by the installer (which cannot use external authentication) and is + useful as a fallback if external authentication is not working for some + reason. This secret can be changed at any time. + generate: + type: password +"admin.password": + description: >- + bcrypt hash of the admin password. This is the only version of the admin + password exposed to the running Argo CD pod. It will be updated + automatically if the admin password is changed. + generate: + type: bcrypt-password-hash + source: admin-password +"admin.passwordMtime": + description: "Last modification time for the admin password." + generate: + type: mtime + source: admin-password +"dex.clientSecret": + description: >- + OAuth 2 or OpenID Connect client secret, used to authenticate to GitHub or + Google as part of the authentication flow. This secret can be changed at + any time. +"server.secretkey": + description: >- + Key used to validate user session cookies. 
Argo CD will generate this + secret if it is missing, but we provide it because the Argo CD secret is + managed via a VaultSecret. + generate: + type: password diff --git a/applications/cert-manager/secrets.yaml b/applications/cert-manager/secrets.yaml new file mode 100644 index 0000000000..714939b88c --- /dev/null +++ b/applications/cert-manager/secrets.yaml @@ -0,0 +1,5 @@ +aws-secret-access-key: + description: >- + AWS credentials with write access to the appropriate Route 53 subdomain in + which Let's Encrypt challenges should be created. + if: config.createIssuer diff --git a/applications/exposurelog/secrets.yaml b/applications/exposurelog/secrets.yaml new file mode 100644 index 0000000000..b318a54aed --- /dev/null +++ b/applications/exposurelog/secrets.yaml @@ -0,0 +1,4 @@ +database-password: + description: "Password for the exposurelog database." + generate: + type: password diff --git a/applications/gafaelfawr/secrets.yaml b/applications/gafaelfawr/secrets.yaml new file mode 100644 index 0000000000..1c89746656 --- /dev/null +++ b/applications/gafaelfawr/secrets.yaml @@ -0,0 +1,79 @@ +bootstrap-token: + description: >- + Token with admin access, regardless of any other scopes or configuration, + which can be used to add new Gafaelfawr administrators and bootstrap + creation of other tokens with arbitrary scopes. To use this token, + retrieve it from the Kubernetes secret and then use it in API calls like + any other Gafaelfawr token. This secret can be changed at any time. + generate: + type: gafaelfawr-token +cilogon-client-secret: + description: >- + Secret used to authenticate to CILogon as part of the OpenID Connect login + protocol to obtain an identity token for the user. This secret can be + changed at any time. + if: config.cilogon.clientId +database-password: + description: >- + Password used to authenticate to the PostgreSQL database used to store + Gafaelfawr data. This password may be changed at any time. + generate: + if: config.internalDatabase + type: password +forgerock-password: + description: >- + Password used to authenticate to a ForgeRock Identity server using HTTP + Basic authentication to retrieve GID mappings for groups. + if: config.forgerock.url +github-client-secret: + description: >- + GitHub OAuth App secret used to authenticate to GitHub as part of the + OAuth 2 login protocol to obtain an identity token for the user. This + secret can be changed at any time. + if: config.github.clientId +ldap-keytab: + description: >- + Kerberos keytab used to authenticate to the LDAP server via GSSAPI binds + to retrieve user and group information. This keytab can be changed at any + time. + if: config.ldap.kerberosConfig +ldap-password: + description: >- + Password to authenticate to the LDAP server via simple binds to retrieve + user and group information. This password can be changed at any time. + if: config.ldap.userDn +oidc-client-secret: + description: >- + Secret used to authenticate to a remote OpenID Connect authentication + server. This secret can be changed at any time. + if: config.oidc.clientId +redis-password: + description: >- + Password used to authenticate Gafaelfawr to its internal Redis server, + deployed as part of the same Argo CD application. This secret can be + changed at any time, but both the Redis server and all Gafaelfawr + deployments will then have to be restarted to pick up the new value. 
+ generate: + type: password +session-secret: + description: >- + Encryption key used to encrypt the contents of Redis and the cookie data + stored in user web browsers that holds their session token and related + information. Changing this secret will invalidate all existing Redis data + and all user authentication cookies. +signing-key: + description: >- + RSA private key used to sign JWTs issued by Gafaelfawr when it acts as an + OpenID Connect server. Changing this secret will invalidate all existing + issued OpenID Connect JWTs. + if: config.oidcServer.enabled + generate: + type: rsa-private-key +slack-webhook: + description: >- + Slack web hook used to report internal errors to Slack. This secret may be + changed at any time. + if: config.slackAlerts + copy: + application: mobu + key: app-alert-webhook diff --git a/applications/ingress-nginx/secrets.yaml b/applications/ingress-nginx/secrets.yaml new file mode 100644 index 0000000000..d473eeb79a --- /dev/null +++ b/applications/ingress-nginx/secrets.yaml @@ -0,0 +1,13 @@ +"tls.key": + description: >- + Private key of the TLS certificate to use for all connections to the + Phalanx environment. + if: vaultCertificate.enabled +"tls.crt": + description: >- + Signed public TLS certificate, including any required chain certificates + tying it back to a root CA, to use for all connections to the Phalanx + environment. This certificate is used regardless of hostname, so it must + be valid for every hostname that will be used to connect to this Phalanx + environment. + if: vaultCertificate.enabled diff --git a/applications/livetap/secrets.yaml b/applications/livetap/secrets.yaml new file mode 100644 index 0000000000..4280c602a3 --- /dev/null +++ b/applications/livetap/secrets.yaml @@ -0,0 +1,4 @@ +"google_creds.json": + description: >- + Google service account credentials used to write async job output to + Google Cloud Storage. diff --git a/applications/mobu/secrets.yaml b/applications/mobu/secrets.yaml new file mode 100644 index 0000000000..696c08e670 --- /dev/null +++ b/applications/mobu/secrets.yaml @@ -0,0 +1,13 @@ +app-alert-webhook: + description: >- + Slack web hook to which to post internal application alerts. This secret + is not used directly by mobu, but is copied from here to all of the + applications that report internal problems to Slack. It should normally be + separate from mobu's own web hook, since the separate identities attached + to the messages help make the type of message clearer, but the same web + hook as mobu's own alerts can be used in a pinch. + if: config.slackAlerts +ALERT_HOOK: + description: >- + Slack web hook to which mobu should report failures and daily status. + if: config.slackAlerts diff --git a/applications/narrativelog/secrets.yaml b/applications/narrativelog/secrets.yaml new file mode 100644 index 0000000000..6d65b32bd6 --- /dev/null +++ b/applications/narrativelog/secrets.yaml @@ -0,0 +1,4 @@ +database-password: + description: "Password for the narrativelog database." + generate: + type: password diff --git a/applications/nublado/secrets-idfdev.yaml b/applications/nublado/secrets-idfdev.yaml new file mode 100644 index 0000000000..97d5af3ca8 --- /dev/null +++ b/applications/nublado/secrets-idfdev.yaml @@ -0,0 +1,15 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto.
+"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. +"butler-hmac-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the private + key syntax used for HMACs. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. diff --git a/applications/nublado/secrets.yaml b/applications/nublado/secrets.yaml new file mode 100644 index 0000000000..f0e2c52cee --- /dev/null +++ b/applications/nublado/secrets.yaml @@ -0,0 +1,23 @@ +cryptkeeper_key: + description: "Encryption key for internal key management." + generate: + type: password +crypto_key: + description: "Encryption key for JupyterHub stored state." + generate: + type: password +hub_db_password: + description: "Password to authenticate to the JupyterHub session database." + generate: + type: password + if: hub.internalDatabase +proxy_token: + description: "Token authenticating JupyterHub to the proxy server." + generate: + type: password +slack_webhook: + description: "Slack web hook to which to post alerts." + if: controller.slackAlerts + copy: + application: mobu + key: app-alert-webhook diff --git a/applications/nublado2/secrets-idfdev.yaml b/applications/nublado2/secrets-idfdev.yaml new file mode 100644 index 0000000000..97d5af3ca8 --- /dev/null +++ b/applications/nublado2/secrets-idfdev.yaml @@ -0,0 +1,15 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. +"butler-hmac-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the private + key syntax used for HMACs. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. diff --git a/applications/nublado2/secrets.yaml b/applications/nublado2/secrets.yaml new file mode 100644 index 0000000000..15d1b5eeba --- /dev/null +++ b/applications/nublado2/secrets.yaml @@ -0,0 +1,17 @@ +cryptkeeper_key: + description: "Encryption key for internal key management." + generate: + type: password +crypto_key: + description: "Encryption key for JupyterHub stored state." + generate: + type: password +hub_db_password: + description: "Password to authenticate to the JupyterHub session database." + generate: + type: password + if: config.internalDatabase +proxy_token: + description: "Token authenticating JupyterHub to the proxy server." + generate: + type: password diff --git a/applications/portal/secrets.yaml b/applications/portal/secrets.yaml new file mode 100644 index 0000000000..24576c9bcf --- /dev/null +++ b/applications/portal/secrets.yaml @@ -0,0 +1,4 @@ +"ADMIN_PASSWORD": + description: "Password used for authentication to internal Redis." + generate: + type: password diff --git a/applications/postgres/secrets.yaml b/applications/postgres/secrets.yaml new file mode 100644 index 0000000000..5e03d36d1a --- /dev/null +++ b/applications/postgres/secrets.yaml @@ -0,0 +1,45 @@ +exposurelog_password: + description: "Password for the exposurelog database." 
+ if: exposurelog_db + copy: + application: exposurelog + key: database-password +gafaelfawr_password: + description: "Password for the Gafaelfawr database." + if: gafaelfawr_db + copy: + application: gafaelfawr + key: database-password +jupyterhub_password: + description: "Password for the Nublado v2 JupyterHub session database." + if: jupyterhub_db + copy: + application: nublado2 + key: hub_db_password +lovelog_password: + description: "Password for the lovelog database." + if: lovelog_db + generate: + type: password +narrativelog_password: + description: "Password for the narrativelog database." + if: narrativelog_db + copy: + application: narrativelog + key: database-password +nublado3_password: + description: "Password for the Nublado v3 JupyterHub session database." + if: nublado3_db + copy: + application: nublado + key: hub_db_password +root_password: + description: "Administrator password for the whole PostgreSQL installation." + generate: + type: password +timessquare_password: + description: "Password for the times-square database." + if: timessquare_db + copy: + application: times-square + key: TS_DATABASE_PASSWORD diff --git a/applications/sherlock/secrets.yaml b/applications/sherlock/secrets.yaml new file mode 100644 index 0000000000..401e4d83a3 --- /dev/null +++ b/applications/sherlock/secrets.yaml @@ -0,0 +1,7 @@ +publish_key: + description: >- + Secret used to publish Sherlock data to the central collector service. + Whenever this secret is changed, it will need to be updated in the + central collector service. + generate: + type: password diff --git a/applications/ssotap/secrets.yaml b/applications/ssotap/secrets.yaml new file mode 100644 index 0000000000..4280c602a3 --- /dev/null +++ b/applications/ssotap/secrets.yaml @@ -0,0 +1,4 @@ +"google_creds.json": + description: >- + Google service account credentials used to write async job output to + Google Cloud Storage. diff --git a/applications/tap/secrets.yaml b/applications/tap/secrets.yaml new file mode 100644 index 0000000000..4280c602a3 --- /dev/null +++ b/applications/tap/secrets.yaml @@ -0,0 +1,4 @@ +"google_creds.json": + description: >- + Google service account credentials used to write async job output to + Google Cloud Storage. diff --git a/applications/telegraf-ds/secrets.yaml b/applications/telegraf-ds/secrets.yaml new file mode 100644 index 0000000000..e348603dd9 --- /dev/null +++ b/applications/telegraf-ds/secrets.yaml @@ -0,0 +1,12 @@ +influx-token: + description: >- + Authentication token used to send data to the central InfluxDB 2 database + for monitoring information. This secret can be changed at any time. + copy: + application: telegraf + key: influx-token +org-id: + description: "Organization identity to which monitoring data should be sent." + copy: + application: telegraf + key: org-id diff --git a/applications/telegraf/secrets.yaml b/applications/telegraf/secrets.yaml new file mode 100644 index 0000000000..15e3b21954 --- /dev/null +++ b/applications/telegraf/secrets.yaml @@ -0,0 +1,7 @@ +influx-token: + description: >- + Authentication token used to send data to the central InfluxDB 2 database + for monitoring information. This secret can be changed at any time. +org-id: + description: "Organization identity to which monitoring data should be sent."
+ value: "square" diff --git a/applications/vo-cutouts/secrets-idfdev.yaml b/applications/vo-cutouts/secrets-idfdev.yaml new file mode 100644 index 0000000000..57998942f8 --- /dev/null +++ b/applications/vo-cutouts/secrets-idfdev.yaml @@ -0,0 +1,20 @@ +aws-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. + copy: + application: nublado + key: "aws-credentials.ini" +google-credentials: + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +postgres-credentials: + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/vo-cutouts/secrets.yaml b/applications/vo-cutouts/secrets.yaml new file mode 100644 index 0000000000..197cd339df --- /dev/null +++ b/applications/vo-cutouts/secrets.yaml @@ -0,0 +1,13 @@ +database-password: + description: >- + Password used to authenticate to the PostgreSQL database used to store + vo-cutouts job information. This password may be changed at any time. +redis-password: + description: >- + Password used to authenticate vo-cutouts to its internal Redis server, + deployed as part of the same Argo CD application and used to manage the + work queue. This secret can be changed at any time, but both the Redis + server and all vo-cutouts deployments will then have to be restarted to + pick up the new value. + generate: + type: password From e0837f4ef772adb325ec0b686adb893298874dcc Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Wed, 19 Jul 2023 17:27:27 -0700 Subject: [PATCH 281/308] Add basic Phalanx secret command-line tool Lay the groundwork for automating Phalanx secret management by adding a command-line tool. Currently, the only command lists the secrets that are required for a given environment. Additional commands will be coming in the future. This introduces a new set of models separate from the Phalanx models used by the documentation. These will eventually be merged, but the models for secret management will require some iteration, so that merger will be easier once they're in their final form. 
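The new console script is registered as phalanx = "phalanx.cli:main" in pyproject.toml below, with the implementation split across src/phalanx/cli.py, the models, a secrets service, and a config storage layer. None of that code is reproduced in this summary, so the following click-based sketch is only an illustration of the kind of traversal the secret-listing command performs; the actual subcommand name, and the handling of if: conditions and of which applications are enabled for an environment, are assumptions here and live in the real service code:

```python
from pathlib import Path

import click
import yaml


@click.group()
def main() -> None:
    """Administrative command-line interface for Phalanx (sketch only)."""


@main.group()
def secrets() -> None:
    """Commands for Phalanx secret management (sketch only)."""


@secrets.command("list")
@click.argument("environment")
def secrets_list(environment: str) -> None:
    """Print application/key pairs for every declared static secret."""
    for app_dir in sorted(Path("applications").iterdir()):
        # Per-environment additions live in secrets-{environment}.yaml files
        # alongside the base secrets.yaml, as in the commits above.
        for name in ("secrets.yaml", f"secrets-{environment}.yaml"):
            path = app_dir / name
            if path.is_file():
                for key in yaml.safe_load(path.read_text()) or {}:
                    click.echo(f"{app_dir.name} {key}")


if __name__ == "__main__":
    main()
```

Once the pyproject.toml script entry is installed, an invocation would look something like `phalanx secrets list idfdev` (the exact subcommand name is an assumption); the expected output for the idfdev test environment is captured in the tests/data/output/idfdev/secrets-list fixture added for the CLI tests.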
--- pyproject.toml | 2 + requirements/dev.txt | 230 ++++----- requirements/main.in | 3 + requirements/main.txt | 122 +++++ src/phalanx/cli.py | 52 ++ src/phalanx/exceptions.py | 49 ++ src/phalanx/factory.py | 23 + src/phalanx/models/__init__.py | 0 src/phalanx/models/applications.py | 78 +++ src/phalanx/models/environments.py | 39 ++ src/phalanx/models/secrets.py | 237 +++++++++ src/phalanx/services/__init__.py | 0 src/phalanx/services/secrets.py | 273 +++++++++++ src/phalanx/storage/__init__.py | 0 src/phalanx/storage/config.py | 240 +++++++++ tests/cli/__init__.py | 0 tests/cli/secrets_test.py | 21 + .../input/applications/argocd/secrets.yaml | 34 ++ .../applications/argocd/values-idfdev.yaml | 61 +++ .../input/applications/argocd/values.yaml | 83 ++++ .../applications/gafaelfawr/secrets.yaml | 79 +++ .../gafaelfawr/values-idfdev.yaml | 72 +++ .../input/applications/gafaelfawr/values.yaml | 429 ++++++++++++++++ .../data/input/applications/mobu/secrets.yaml | 11 + .../data/input/applications/mobu/values.yaml | 67 +++ .../applications/nublado/secrets-idfdev.yaml | 15 + .../input/applications/nublado/secrets.yaml | 23 + .../applications/nublado/values-idfdev.yaml | 72 +++ .../input/applications/nublado/values.yaml | 462 ++++++++++++++++++ .../input/applications/postgres/secrets.yaml | 45 ++ .../applications/postgres/values-idfdev.yaml | 3 + .../input/applications/postgres/values.yaml | 37 ++ .../data/input/applications/tap/secrets.yaml | 4 + tests/data/input/applications/tap/values.yaml | 184 +++++++ .../input/environments/values-idfdev.yaml | 13 + tests/data/input/environments/values.yaml | 24 + tests/data/output/idfdev/secrets-list | 26 + tests/support/__init__.py | 0 tests/support/data.py | 43 ++ 39 files changed, 3042 insertions(+), 114 deletions(-) create mode 100644 src/phalanx/cli.py create mode 100644 src/phalanx/exceptions.py create mode 100644 src/phalanx/factory.py create mode 100644 src/phalanx/models/__init__.py create mode 100644 src/phalanx/models/applications.py create mode 100644 src/phalanx/models/environments.py create mode 100644 src/phalanx/models/secrets.py create mode 100644 src/phalanx/services/__init__.py create mode 100644 src/phalanx/services/secrets.py create mode 100644 src/phalanx/storage/__init__.py create mode 100644 src/phalanx/storage/config.py create mode 100644 tests/cli/__init__.py create mode 100644 tests/cli/secrets_test.py create mode 100644 tests/data/input/applications/argocd/secrets.yaml create mode 100644 tests/data/input/applications/argocd/values-idfdev.yaml create mode 100644 tests/data/input/applications/argocd/values.yaml create mode 100644 tests/data/input/applications/gafaelfawr/secrets.yaml create mode 100644 tests/data/input/applications/gafaelfawr/values-idfdev.yaml create mode 100644 tests/data/input/applications/gafaelfawr/values.yaml create mode 100644 tests/data/input/applications/mobu/secrets.yaml create mode 100644 tests/data/input/applications/mobu/values.yaml create mode 100644 tests/data/input/applications/nublado/secrets-idfdev.yaml create mode 100644 tests/data/input/applications/nublado/secrets.yaml create mode 100644 tests/data/input/applications/nublado/values-idfdev.yaml create mode 100644 tests/data/input/applications/nublado/values.yaml create mode 100644 tests/data/input/applications/postgres/secrets.yaml create mode 100644 tests/data/input/applications/postgres/values-idfdev.yaml create mode 100644 tests/data/input/applications/postgres/values.yaml create mode 100644 tests/data/input/applications/tap/secrets.yaml 
create mode 100644 tests/data/input/applications/tap/values.yaml create mode 100644 tests/data/input/environments/values-idfdev.yaml create mode 100644 tests/data/input/environments/values.yaml create mode 100644 tests/data/output/idfdev/secrets-list create mode 100644 tests/support/__init__.py create mode 100644 tests/support/data.py diff --git a/pyproject.toml b/pyproject.toml index f1fc6b3dcb..18e3f7377a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,6 +22,7 @@ requires-python = ">=3.11" [project.scripts] expand-charts = "phalanx.testing.expandcharts:main" +phalanx = "phalanx.cli:main" [project.urls] Homepage = "https://phalanx.lsst.io" @@ -129,6 +130,7 @@ ignore = [ "FBT003", # positional booleans are normal for Pydantic field defaults "G004", # forbidding logging f-strings is appealing, but not our style "PD011", # false positive with non-NumPY code that uses .values + "PLR0911", # way too strict of a function complexity constraint "PLR0913", # factory pattern uses constructors with many arguments "PLR2004", # too aggressive about magic values "RET505", # disagree that omitting else always makes code more readable diff --git a/requirements/dev.txt b/requirements/dev.txt index 215c25e2d6..084092f363 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -104,7 +104,9 @@ charset-normalizer==3.2.0 \ click==8.1.6 \ --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \ --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5 - # via documenteer + # via + # -c requirements/main.txt + # documenteer contourpy==1.1.0 \ --hash=sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e \ --hash=sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104 \ @@ -207,7 +209,7 @@ coverage[toml]==7.2.7 \ --hash=sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e \ --hash=sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850 \ --hash=sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3 - # via -r dev.in + # via -r requirements/dev.in cycler==0.11.0 \ --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \ --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f @@ -223,7 +225,7 @@ distlib==0.3.7 \ documenteer[guide]==0.8.3 \ --hash=sha256:e1514ca8dd96810a6d24d4b21f7b28458a3cf434217e46939ffab2c201791afc \ --hash=sha256:fbe3ad1740751da8fcc95d809b0a489dc7f14fcdb78b28df85860ae92011c9a2 - # via -r dev.in + # via -r requirements/dev.in docutils==0.19 \ --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc @@ -238,61 +240,61 @@ filelock==3.12.2 \ --hash=sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81 \ --hash=sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec # via virtualenv -fonttools==4.41.0 \ - --hash=sha256:0614b6348866092d00df3dfb37e037fc06412ca67087de361a2777ea5ed62c16 \ - --hash=sha256:06eac087ea55b3ebb2207d93b5ac56c847163899f05f5a77e1910f688fe10030 \ - --hash=sha256:19d461c801b8904d201c6c38a99bfcfef673bfdfe0c7f026f582ef78896434e0 \ - --hash=sha256:381558eafffc1432d08ca58063e71c7376ecaae48e9318354a90a1049a644845 \ - --hash=sha256:3ee75b8ca48f6c48af25e967dce995ef94e46872b35c7d454b983c62c9c7006d \ - --hash=sha256:415cf7c806a3f56fb280dadcf3c92c85c0415e75665ca957b4a2a2e39c17a5c9 \ - --hash=sha256:465d0f24bf4f75160f441793b55076b7a080a57d3a1f738390af2c20bee24fbb \ - 
--hash=sha256:4c654b1facf1f3b742e4d9b2dcdf0fa867b1f007b1b4981cc58a75ef5dca2a3c \ - --hash=sha256:50f8bdb421270f71b54695c62785e300fab4bb6127be40bf9f3084962a0c3adb \ - --hash=sha256:5448a87f6ed57ed844b64a05d3792827af584a8584613f6289867f4e77eb603b \ - --hash=sha256:560ea1a604c927399f36742abf342a4c5f3fee8e8e8a484b774dfe9630bd9a91 \ - --hash=sha256:5b1c2b21b40229166a864f2b0aec06d37f0a204066deb1734c93370e0c76339d \ - --hash=sha256:69178674505ec81adf4af2a3bbacd0cb9a37ba7831bc3fca307f80e48ab2767b \ - --hash=sha256:69dbe0154e15b68dd671441ea8f23dad87488b24a6e650d45958f4722819a443 \ - --hash=sha256:6faff25991dec48f8cac882055a09ae1a29fd15bc160bc3d663e789e994664c2 \ - --hash=sha256:72d40a32d6443871ea0d147813caad58394b48729dfa4fbc45dcaac54f9506f2 \ - --hash=sha256:7e22d0144d735f6c7df770509b8c0c33414bf460df0d5dddc98a159e5dbb10eb \ - --hash=sha256:841c491fa3e9c54e8f9cd5dae059e88f45e086aea090c28be9d42f59c8b99e01 \ - --hash=sha256:86edb95c4d1fe4fae2111d7e0c10c6e42b7790b377bcf1952303469eee5b52bb \ - --hash=sha256:8f602dd5bcde7e4241419924f23c6f0d66723dd5408a58c3a2f781745c693f45 \ - --hash=sha256:9387b09694fbf8ac7dcf887069068f81fb4124d05e09557ac7daabfbec1744bd \ - --hash=sha256:b329ae7ce971b5c4148d6cdb8119c0ce4587265b2330d4f2f3776ef851bee020 \ - --hash=sha256:ba2a367ff478cd108d5319c0dc4fd4eb4ea3476dbfc45b00c45718e889cd9463 \ - --hash=sha256:bc9e7b1e268be7a23fc66471b615c324e99c5db39ce8c49dd6dd8e962c7bc1b8 \ - --hash=sha256:c890061915e95b619c1d3cc3c107c6fb021406b701c0c24b03e74830d522f210 \ - --hash=sha256:cc3324e4159e6d1f55c3615b4c1c211f87cc96cc0cc7c946c8447dc1319f2e9d \ - --hash=sha256:d2dae84a3d0f76884a6102c62f2795b2d6602c2c95cfcce74c8a590b6200e533 \ - --hash=sha256:d45f28c20bb67dee0f4a4caae709f40b0693d764b7b2bf2d58890f36b1bfcef0 \ - --hash=sha256:e38bd91eae257f36c2b7245c0278e9cd9d754f3a66b8d2b548c623ba66e387b6 \ - --hash=sha256:e43f6c7f9ba4f9d29edee530e45f9aa162872ec9549398b85971477a99f2a806 \ - --hash=sha256:ea879afd1d6189fca02a85a7868560c9bb8415dccff6b7ae6d81e4f06b3ab30d \ - --hash=sha256:eb9dfa87152bd97019adc387b2f29ef6af601de4386f36570ca537ace96d96ed \ - --hash=sha256:efd59e83223cb77952997fb850c7a7c2a958c9af0642060f536722c2a9e9d53b \ - --hash=sha256:f3fe90dfb297bd8265238c06787911cd81c2cb89ac5b13e1c911928bdabfce0f +fonttools==4.41.1 \ + --hash=sha256:1df1b6f4c7c4bc8201eb47f3b268adbf2539943aa43c400f84556557e3e109c0 \ + --hash=sha256:2a22b2c425c698dcd5d6b0ff0b566e8e9663172118db6fd5f1941f9b8063da9b \ + --hash=sha256:33191f062549e6bb1a4782c22a04ebd37009c09360e2d6686ac5083774d06d95 \ + --hash=sha256:38cdecd8f1fd4bf4daae7fed1b3170dfc1b523388d6664b2204b351820aa78a7 \ + --hash=sha256:3ae64303ba670f8959fdaaa30ba0c2dabe75364fdec1caeee596c45d51ca3425 \ + --hash=sha256:3d1f9471134affc1e3b1b806db6e3e2ad3fa99439e332f1881a474c825101096 \ + --hash=sha256:4e3334d51f0e37e2c6056e67141b2adabc92613a968797e2571ca8a03bd64773 \ + --hash=sha256:4edc795533421e98f60acee7d28fc8d941ff5ac10f44668c9c3635ad72ae9045 \ + --hash=sha256:547ab36a799dded58a46fa647266c24d0ed43a66028cd1cd4370b246ad426cac \ + --hash=sha256:59eba8b2e749a1de85760da22333f3d17c42b66e03758855a12a2a542723c6e7 \ + --hash=sha256:704bccd69b0abb6fab9f5e4d2b75896afa48b427caa2c7988792a2ffce35b441 \ + --hash=sha256:73ef0bb5d60eb02ba4d3a7d23ada32184bd86007cb2de3657cfcb1175325fc83 \ + --hash=sha256:7763316111df7b5165529f4183a334aa24c13cdb5375ffa1dc8ce309c8bf4e5c \ + --hash=sha256:849ec722bbf7d3501a0e879e57dec1fc54919d31bff3f690af30bb87970f9784 \ + --hash=sha256:891cfc5a83b0307688f78b9bb446f03a7a1ad981690ac8362f50518bc6153975 \ + 
--hash=sha256:952cb405f78734cf6466252fec42e206450d1a6715746013f64df9cbd4f896fa \ + --hash=sha256:a7bbb290d13c6dd718ec2c3db46fe6c5f6811e7ea1e07f145fd8468176398224 \ + --hash=sha256:a9b3cc10dc9e0834b6665fd63ae0c6964c6bc3d7166e9bc84772e0edd09f9fa2 \ + --hash=sha256:aaaef294d8e411f0ecb778a0aefd11bb5884c9b8333cc1011bdaf3b58ca4bd75 \ + --hash=sha256:afce2aeb80be72b4da7dd114f10f04873ff512793d13ce0b19d12b2a4c44c0f0 \ + --hash=sha256:b0938ebbeccf7c80bb9a15e31645cf831572c3a33d5cc69abe436e7000c61b14 \ + --hash=sha256:b2d1ee95be42b80d1f002d1ee0a51d7a435ea90d36f1a5ae331be9962ee5a3f1 \ + --hash=sha256:b927e5f466d99c03e6e20961946314b81d6e3490d95865ef88061144d9f62e38 \ + --hash=sha256:bdd729744ae7ecd7f7311ad25d99da4999003dcfe43b436cf3c333d4e68de73d \ + --hash=sha256:c2071267deaa6d93cb16288613419679c77220543551cbe61da02c93d92df72f \ + --hash=sha256:cac73bbef7734e78c60949da11c4903ee5837168e58772371bd42a75872f4f82 \ + --hash=sha256:da2c2964bdc827ba6b8a91dc6de792620be4da3922c4cf0599f36a488c07e2b2 \ + --hash=sha256:e16a9449f21a93909c5be2f5ed5246420f2316e94195dbfccb5238aaa38f9751 \ + --hash=sha256:e5c2b0a95a221838991e2f0e455dec1ca3a8cc9cd54febd68cc64d40fdb83669 \ + --hash=sha256:ec453a45778524f925a8f20fd26a3326f398bfc55d534e37bab470c5e415caa1 \ + --hash=sha256:edee0900cf0eedb29d17c7876102d6e5a91ee333882b1f5abc83e85b934cadb5 \ + --hash=sha256:f14f3ccea4cc7dd1b277385adf3c3bf18f9860f87eab9c2fb650b0af16800f55 \ + --hash=sha256:f240d9adf0583ac8fc1646afe7f4ac039022b6f8fa4f1575a2cfa53675360b69 \ + --hash=sha256:f48602c0b3fd79cd83a34c40af565fe6db7ac9085c8823b552e6e751e3a5b8be # via matplotlib gitdb==4.0.10 \ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 # via - # -c main.txt + # -c requirements/main.txt # gitpython gitpython==3.1.32 \ --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \ --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f # via - # -c main.txt + # -c requirements/main.txt # documenteer graphviz==0.20.1 \ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \ --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8 # via diagrams -identify==2.5.25 \ - --hash=sha256:9df2489842707d431b38ce3410ef8df40da5b10a3e28a3fcac1a42523e956409 \ - --hash=sha256:db4de0e758c0db8f81996816cd2f3f2f8c5c8d49a7fd02f3b4109aac6fd80e29 +identify==2.5.26 \ + --hash=sha256:7243800bce2f58404ed41b7c002e53d4d22bcf3ae1b7900c2d7aefd95394bf7f \ + --hash=sha256:c22a8ead0d4ca11f1edd6c9418c3220669b3b7533ada0a0ffa6cc0ef85cf9b54 # via pre-commit idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ @@ -529,7 +531,7 @@ mypy==1.4.1 \ --hash=sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878 \ --hash=sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f \ --hash=sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b - # via -r dev.in + # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ --hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782 @@ -648,7 +650,7 @@ pluggy==1.2.0 \ pre-commit==3.3.3 \ --hash=sha256:10badb65d6a38caff29703362271d7dca483d01da88f9d7e05d0b97171c136cb \ --hash=sha256:a2256f489cd913d575c145132ae196fe335da32d91a8294b7afe6622335dd023 - # via -r dev.in + # via -r requirements/dev.in pybtex==0.24.0 \ 
--hash=sha256:818eae35b61733e5c007c3fcd2cfb75ed1bc8b4173c1f70b56cc4c0802d34755 \ --hash=sha256:e1e0c8c69998452fea90e9179aa2a98ab103f3eed894405b7264e517cc2fcc0f @@ -659,45 +661,45 @@ pybtex-docutils==1.0.2 \ --hash=sha256:43aa353b6d498fd5ac30f0073a98e332d061d34fe619d3d50d1761f8fd4aa016 \ --hash=sha256:6f9e3c25a37bcaac8c4f69513272706ec6253bb708a93d8b4b173f43915ba239 # via sphinxcontrib-bibtex -pydantic==1.10.11 \ - --hash=sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e \ - --hash=sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7 \ - --hash=sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb \ - --hash=sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151 \ - --hash=sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13 \ - --hash=sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d \ - --hash=sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e \ - --hash=sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6 \ - --hash=sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19 \ - --hash=sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713 \ - --hash=sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f \ - --hash=sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66 \ - --hash=sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e \ - --hash=sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b \ - --hash=sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248 \ - --hash=sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622 \ - --hash=sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae \ - --hash=sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629 \ - --hash=sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604 \ - --hash=sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c \ - --hash=sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f \ - --hash=sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b \ - --hash=sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e \ - --hash=sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999 \ - --hash=sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3 \ - --hash=sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847 \ - --hash=sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c \ - --hash=sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36 \ - --hash=sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216 \ - --hash=sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1 \ - --hash=sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303 \ - --hash=sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588 \ - --hash=sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f \ - --hash=sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528 \ - --hash=sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb \ - --hash=sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f +pydantic==1.10.12 \ + --hash=sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303 \ + 
--hash=sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe \ + --hash=sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47 \ + --hash=sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494 \ + --hash=sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33 \ + --hash=sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86 \ + --hash=sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d \ + --hash=sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c \ + --hash=sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a \ + --hash=sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565 \ + --hash=sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb \ + --hash=sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62 \ + --hash=sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62 \ + --hash=sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0 \ + --hash=sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523 \ + --hash=sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d \ + --hash=sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405 \ + --hash=sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f \ + --hash=sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b \ + --hash=sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718 \ + --hash=sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed \ + --hash=sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb \ + --hash=sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5 \ + --hash=sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc \ + --hash=sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942 \ + --hash=sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe \ + --hash=sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246 \ + --hash=sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350 \ + --hash=sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303 \ + --hash=sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09 \ + --hash=sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33 \ + --hash=sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8 \ + --hash=sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a \ + --hash=sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1 \ + --hash=sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6 \ + --hash=sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d # via - # -r dev.in + # -r requirements/dev.in # documenteer pydata-sphinx-theme==0.12.0 \ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ @@ -717,7 +719,7 @@ pyparsing==3.0.9 \ pytest==7.4.0 \ --hash=sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32 \ --hash=sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a - # via -r dev.in + # via -r requirements/dev.in python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 @@ -764,7 +766,7 @@ 
pyyaml==6.0.1 \ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f # via - # -c main.txt + # -c requirements/main.txt # documenteer # myst-parser # pre-commit @@ -775,25 +777,25 @@ requests==2.31.0 \ # via # documenteer # sphinx -ruff==0.0.278 \ - --hash=sha256:1078125123a3c68e92463afacedb7e41b15ccafc09e510c6c755a23087afc8de \ - --hash=sha256:1a90ebd8f2a554db1ee8d12b2f3aa575acbd310a02cd1a9295b3511a4874cf98 \ - --hash=sha256:1a9f1d925204cfba81b18368b7ac943befcfccc3a41e170c91353b674c6b7a66 \ - --hash=sha256:1cae4c07d334eb588f171f1363fa89a8911047eb93184276be11a24dbbc996c7 \ - --hash=sha256:2c62a0bde4d20d087cabce2fa8b012d74c2e985da86d00fb3359880469b90e31 \ - --hash=sha256:38ca1c0c8c1221fe64c0a66784c91501d09a8ed02a4dbfdc117c0ce32a81eefc \ - --hash=sha256:3ce0d620e257b4cad16e2f0c103b2f43a07981668a3763380542e8a131d11537 \ - --hash=sha256:666e739fb2685277b879d493848afe6933e3be30d40f41fe0e571ad479d57d77 \ - --hash=sha256:70d39f5599d8449082ab8ce542fa98e16413145eb411dd1dc16575b44565d52d \ - --hash=sha256:737a0cfb6c36aaa92d97a46957dfd5e55329299074ad06ed12663b98e0c6fc82 \ - --hash=sha256:7545bb037823cd63dca19280f75a523a68bd3e78e003de74609320d6822b5a52 \ - --hash=sha256:8cb380d2d6fdb60656a0b5fa78305535db513fc72ce11f4532cc1641204ef380 \ - --hash=sha256:a48621f5f372d5019662db5b3dbfc5f1450f927683d75f1153fe0ebf20eb9698 \ - --hash=sha256:c25b96602695a147d62a572865b753ef56aff1524abab13b9436724df30f9bd7 \ - --hash=sha256:d11149c7b186f224f2055e437a030cd83b164a43cc0211314c33ad1553ed9c4c \ - --hash=sha256:e131595ab7f4ce61a1650463bd2fe304b49e7d0deb0dfa664b92817c97cdba5f \ - --hash=sha256:ec8b0469b54315803aaf1fbf9a37162a3849424cab6182496f972ad56e0ea702 - # via -r dev.in +ruff==0.0.280 \ + --hash=sha256:2dae8f2d9c44c5c49af01733c2f7956f808db682a4193180dedb29dd718d7bbe \ + --hash=sha256:2e7c15828d09f90e97bea8feefcd2907e8c8ce3a1f959c99f9b4b3469679f33c \ + --hash=sha256:37359cd67d2af8e09110a546507c302cbea11c66a52d2a9b6d841d465f9962d4 \ + --hash=sha256:48ed5aca381050a4e2f6d232db912d2e4e98e61648b513c350990c351125aaec \ + --hash=sha256:4a7d52457b5dfcd3ab24b0b38eefaead8e2dca62b4fbf10de4cd0938cf20ce30 \ + --hash=sha256:581c43e4ac5e5a7117ad7da2120d960a4a99e68ec4021ec3cd47fe1cf78f8380 \ + --hash=sha256:5f972567163a20fb8c2d6afc60c2ea5ef8b68d69505760a8bd0377de8984b4f6 \ + --hash=sha256:7008fc6ca1df18b21fa98bdcfc711dad5f94d0fc3c11791f65e460c48ef27c82 \ + --hash=sha256:7784e3606352fcfb193f3cd22b2e2117c444cb879ef6609ec69deabd662b0763 \ + --hash=sha256:7a37dab70114671d273f203268f6c3366c035fe0c8056614069e90a65e614bfc \ + --hash=sha256:83e8f372fa5627eeda5b83b5a9632d2f9c88fc6d78cead7e2a1f6fb05728d137 \ + --hash=sha256:8ffa7347ad11643f29de100977c055e47c988cd6d9f5f5ff83027600b11b9189 \ + --hash=sha256:b7de5b8689575918e130e4384ed9f539ce91d067c0a332aedef6ca7188adac2d \ + --hash=sha256:bd58af46b0221efb95966f1f0f7576df711cb53e50d2fdb0e83c2f33360116a4 \ + --hash=sha256:d878370f7e9463ac40c253724229314ff6ebe4508cdb96cb536e1af4d5a9cd4f \ + --hash=sha256:ef6ee3e429fd29d6a5ceed295809e376e6ece5b0f13c7e703efaf3d3bcb30b96 \ + --hash=sha256:fe7118c1eae3fda17ceb409629c7f3b5a22dffa7caf1f6796776936dca1fe653 + # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 @@ -805,7 +807,7 @@ smmap==5.0.0 \ --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \ 
--hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936 # via - # -c main.txt + # -c requirements/main.txt # gitdb snowballstemmer==2.2.0 \ --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \ @@ -850,11 +852,11 @@ sphinx-design==0.4.1 \ sphinx-diagrams==0.4.0 \ --hash=sha256:3cf2e0179bdd9ccdb28164fcfcae9b167999a1abe40e159e0c26a225490074d1 \ --hash=sha256:4860291cb04d6361f898d20ba28dca7345f757cdc240caf144c8bf20c73067a0 - # via -r dev.in + # via -r requirements/dev.in sphinx-jinja==2.0.2 \ --hash=sha256:705ebeb9b7a6018ca3f93724315a7c1effa6ba3db44d630e7eaaa15e4ac081a8 \ --hash=sha256:c6232b59a894139770be1dc6d0b00a379e4288ce78157904e1f8473dea3e0718 - # via -r dev.in + # via -r requirements/dev.in sphinx-prompt==1.5.0 \ --hash=sha256:fa4e90d8088b5a996c76087d701fc7e31175f8b9dc4aab03a507e45051067162 # via documenteer @@ -941,10 +943,10 @@ typed-ast==1.5.5 \ --hash=sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a \ --hash=sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2 # via diagrams -types-pyyaml==6.0.12.10 \ - --hash=sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f \ - --hash=sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97 - # via -r dev.in +types-pyyaml==6.0.12.11 \ + --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \ + --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d + # via -r requirements/dev.in typing-extensions==4.7.1 \ --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \ --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2 @@ -959,9 +961,9 @@ urllib3==2.0.4 \ --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \ --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4 # via requests -virtualenv==20.24.0 \ - --hash=sha256:18d1b37fc75cc2670625702d76849a91ebd383768b4e91382a8d51be3246049e \ - --hash=sha256:e2a7cef9da880d693b933db7654367754f14e20650dc60e8ee7385571f8593a3 +virtualenv==20.24.2 \ + --hash=sha256:43a3052be36080548bdee0b42919c88072037d50d56c28bd3f853cbe92b953ff \ + --hash=sha256:fd8a78f46f6b99a67b7ec5cf73f92357891a7b3a40fd97637c27f854aae3b9e0 # via pre-commit # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/main.in b/requirements/main.in index 0f749b94a7..96ae05bf8c 100644 --- a/requirements/main.in +++ b/requirements/main.in @@ -6,5 +6,8 @@ # After editing, update requirements/main.txt by running: # make update-deps +bcrypt +click +cryptography GitPython PyYAML diff --git a/requirements/main.txt b/requirements/main.txt index 6d0c067fd7..ca05a71863 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -4,6 +4,124 @@ # # pip-compile --allow-unsafe --config=pyproject.toml --generate-hashes --output-file=requirements/main.txt requirements/main.in # +bcrypt==4.0.1 \ + --hash=sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535 \ + --hash=sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0 \ + --hash=sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410 \ + --hash=sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd \ + --hash=sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665 \ + --hash=sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab \ + 
--hash=sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71 \ + --hash=sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215 \ + --hash=sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b \ + --hash=sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda \ + --hash=sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9 \ + --hash=sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a \ + --hash=sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344 \ + --hash=sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f \ + --hash=sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d \ + --hash=sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c \ + --hash=sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c \ + --hash=sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2 \ + --hash=sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d \ + --hash=sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e \ + --hash=sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3 + # via -r requirements/main.in +cffi==1.15.1 \ + --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ + --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ + --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ + --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ + --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ + --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ + --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ + --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ + --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ + --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ + --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ + --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ + --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ + --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ + --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ + --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ + --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ + --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ + --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ + --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ + --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ + --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ + --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ + --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ + --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ + --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ + --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ + 
--hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ + --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ + --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ + --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ + --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ + --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ + --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ + --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ + --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ + --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ + --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ + --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ + --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ + --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ + --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ + --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ + --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ + --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ + --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ + --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ + --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ + --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ + --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ + --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ + --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ + --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ + --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ + --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ + --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ + --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ + --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ + --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ + --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ + --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ + --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ + --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ + --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 + # via cryptography +click==8.1.6 \ + --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \ + --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5 + # via -r requirements/main.in +cryptography==41.0.2 \ + --hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \ + --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \ + 
--hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \ + --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \ + --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \ + --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \ + --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \ + --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \ + --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \ + --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \ + --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \ + --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \ + --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \ + --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \ + --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \ + --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \ + --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \ + --hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \ + --hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \ + --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \ + --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \ + --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \ + --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14 + # via -r requirements/main.in gitdb==4.0.10 \ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 @@ -12,6 +130,10 @@ gitpython==3.1.32 \ --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \ --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f # via -r requirements/main.in +pycparser==2.21 \ + --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ + --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 + # via cffi pyyaml==6.0.1 \ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py new file mode 100644 index 0000000000..a73010b429 --- /dev/null +++ b/src/phalanx/cli.py @@ -0,0 +1,52 @@ +"""Phalanx command-line interface.""" + +from __future__ import annotations + +import click + +from phalanx.factory import Factory + +__all__ = [ + "help", + "secrets_list", +] + + +@click.group(context_settings={"help_option_names": ["-h", "--help"]}) +@click.version_option(message="%(version)s") +def main() -> None: + """Administrative command-line interface for gafaelfawr.""" + + +@main.command() +@click.argument("topic", default=None, required=False, nargs=1) +@click.pass_context +def help(ctx: click.Context, topic: str | None) -> None: + """Show help for any command.""" + # The help command implementation is taken from + # https://www.burgundywall.com/post/having-click-help-subcommand + if topic: + if topic in main.commands: + click.echo(main.commands[topic].get_help(ctx)) + else: + raise click.UsageError(f"Unknown help topic 
{topic}", ctx)
+    else:
+        if not ctx.parent:
+            raise RuntimeError("help called without topic or parent")
+        click.echo(ctx.parent.get_help())
+
+
+@main.group()
+def secrets() -> None:
+    """Secret manipulation commands."""
+
+
+@secrets.command("list")
+@click.argument("environment")
+def secrets_list(environment: str) -> None:
+    """List all secrets required for a given environment."""
+    factory = Factory()
+    secrets_service = factory.create_secrets_service()
+    secrets = secrets_service.list_secrets(environment)
+    for secret in secrets:
+        print(secret.application, secret.key)
diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py
new file mode 100644
index 0000000000..3d867a5f69
--- /dev/null
+++ b/src/phalanx/exceptions.py
@@ -0,0 +1,49 @@
+"""Exceptions for the Phalanx command-line tool."""
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+
+from .models.secrets import RequiredSecret, Secret
+
+__all__ = [
+    "InvalidEnvironmentConfigError",
+    "InvalidSecretConfigError",
+    "UnknownEnvironmentError",
+    "UnresolvedSecretsError",
+]
+
+
+class InvalidEnvironmentConfigError(Exception):
+    """Configuration for an environment is invalid."""
+
+    def __init__(self, name: str, error: str) -> None:
+        msg = f"Invalid configuration for environment {name}: {error}"
+        super().__init__(msg)
+
+
+class InvalidSecretConfigError(Exception):
+    """Secret configuration is invalid."""
+
+    def __init__(self, config: Secret | RequiredSecret, error: str) -> None:
+        name = f"{config.application}/{config.key}"
+        msg = f"Invalid configuration for secret {name}: {error}"
+        super().__init__(msg)
+
+
+class UnresolvedSecretsError(Exception):
+    """Some secrets could not be resolved."""
+
+    def __init__(self, secrets: Iterable[RequiredSecret]) -> None:
+        names = [f"{u.application}/{u.key}" for u in secrets]
+        names_str = ", ".join(names)
+        msg = f"Some secrets could not be resolved: {names_str}"
+        super().__init__(msg)
+
+
+class UnknownEnvironmentError(Exception):
+    """No configuration found for an environment name."""
+
+    def __init__(self, name: str) -> None:
+        msg = f"No configuration found for environment {name}"
+        super().__init__(msg)
diff --git a/src/phalanx/factory.py b/src/phalanx/factory.py
new file mode 100644
index 0000000000..f48269d5de
--- /dev/null
+++ b/src/phalanx/factory.py
@@ -0,0 +1,23 @@
+"""Factory for Phalanx support code components."""
+
+from __future__ import annotations
+
+from .services.secrets import SecretsService
+from .storage.config import ConfigStorage
+
+__all__ = ["Factory"]
+
+
+class Factory:
+    """Factory to create Phalanx components."""
+
+    def create_secrets_service(self) -> SecretsService:
+        """Create service for manipulating Phalanx secrets.
+
+        Returns
+        -------
+        SecretsService
+            Service for manipulating secrets.
+ """ + config_storage = ConfigStorage() + return SecretsService(config_storage) diff --git a/src/phalanx/models/__init__.py b/src/phalanx/models/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/phalanx/models/applications.py b/src/phalanx/models/applications.py new file mode 100644 index 0000000000..c1274b79b9 --- /dev/null +++ b/src/phalanx/models/applications.py @@ -0,0 +1,78 @@ +"""Pydantic models for Phalanx applications.""" + +from __future__ import annotations + +from typing import Any + +from pydantic import BaseModel + +from .secrets import RequiredSecret, Secret + +__all__ = [ + "Application", + "ApplicationInstance", +] + + +class Application(BaseModel): + """A Phalanx application.""" + + name: str + """Name of the application.""" + + values: dict[str, Any] + """Base Helm chart values.""" + + environment_values: dict[str, dict[str, Any]] + """Per-environment Helm chart overrides by environment name.""" + + secrets: list[Secret] + """Base secret configuration for the application.""" + + environment_secrets: dict[str, list[Secret]] + """Per-environment secrets for the application.""" + + +class ApplicationInstance(BaseModel): + """A Phalanx application as configured for a specific environment.""" + + name: str + """Name of the application.""" + + environment: str + """Name of the environment for which the application is configured.""" + + values: dict[str, Any] + """Merged Helm values for the application in this environment.""" + + secrets: list[RequiredSecret] = [] + """Secrets required for this application in this environment.""" + + def is_condition_met(self, condition: str | None) -> bool: + """Determine whether a secret condition has been met. + + Conditions are used both for the secret as a whole and for the + ``copy`` and ``generate`` sections. The condition is met if it either + is `None` or if it is a string pointing to a values parameter for the + application instance that is set to a true value. + + Parameters + ---------- + condition + Condition to check. + + Returns + ------- + bool + `True` if the condition was met or does not exist, `False` + otherwise. + """ + if not condition: + return True + path = condition.split(".") + values = self.values + for key in path: + if key not in values: + return False + values = values[key] + return bool(values) diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py new file mode 100644 index 0000000000..9c49ff7c85 --- /dev/null +++ b/src/phalanx/models/environments.py @@ -0,0 +1,39 @@ +"""Pydantic models for Phalanx environments.""" + +from __future__ import annotations + +from pydantic import BaseModel + +from .applications import ApplicationInstance + +__all__ = [ + "Environment", + "EnvironmentConfig", +] + + +class EnvironmentConfig(BaseModel): + """Configuration for a Phalanx environment. + + This is a partial model for the environment :file:`values.yaml` file. 
+ """ + + environment: str + """Name of the environment.""" + + applications: list[str] + """List of enabled applications.""" + + +class Environment(BaseModel): + """A Phalanx environment and its associated settings.""" + + name: str + """Name of the environment.""" + + applications: dict[str, ApplicationInstance] + """Applications enabled for that environment, by name.""" + + def all_applications(self) -> list[ApplicationInstance]: + """Return enabled applications in sorted order.""" + return sorted(self.applications.values(), key=lambda a: a.name) diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py new file mode 100644 index 0000000000..6cce3329cf --- /dev/null +++ b/src/phalanx/models/secrets.py @@ -0,0 +1,237 @@ +"""Pydantic models for Phalanx application secrets.""" + +from __future__ import annotations + +from enum import Enum +from typing import Any, Self + +from pydantic import BaseModel, Extra, Field, SecretStr, validator + +__all__ = [ + "RequiredSecret", + "RequiredSecretConfig", + "ResolvedSecret", + "Secret", + "SecretConfig", + "SecretCopyRules", + "SecretGenerateRules", + "SecretGenerateType", +] + + +class SecretCopyRules(BaseModel): + """Rules for copying a secret value from another secret.""" + + application: str + """Application from which the secret should be copied.""" + + key: str + """Secret key from which the secret should be copied.""" + + condition: str | None = Field( + None, + description=( + "Helm chart value that, if set, indicates the secret should be" + " copied" + ), + alias="if", + ) + + class Config: + allow_population_by_field_name = True + extra = Extra.forbid + + +class SecretGenerateType(Enum): + """Type of secret for generated secrets.""" + + password = "password" + gafaelfawr_token = "gafaelfawr-token" + fernet_key = "fernet-key" + rsa_private_key = "rsa-private-key" + bcrypt_password_hash = "bcrypt-password-hash" + mtime = "mtime" + + +class SecretGenerateRules(BaseModel): + """Rules for generating a secret value.""" + + type: SecretGenerateType + """Type of secret.""" + + source: str | None = None + """Key of secret on which this secret is based. + + This may only be set by secrets of type ``bcrypt-password-hash`` or + ``mtime``. + """ + + condition: str | None = Field( + None, + description=( + "Helm chart value that, if set, indicates the secret should be" + " generated" + ), + alias="if", + ) + + class Config: + allow_population_by_field_name = True + extra = Extra.forbid + + @validator("source") + def _validate_source( + cls, v: str | None, values: dict[str, Any] + ) -> str | None: + secret_type = values["type"] + want_value = secret_type in ( + SecretGenerateType.bcrypt_password_hash, + SecretGenerateType.mtime, + ) + if v is None and want_value: + msg = f"source not set for secret of type {secret_type}" + raise ValueError(msg) + if v is not None and not want_value: + msg = f"source not allowed for secret of type {secret_type}" + raise ValueError(msg) + return v + + +class RequiredSecretConfig(BaseModel): + """Specification for an application secret after checking ``if``. + + The general class for secret configuration is `SecretConfig`. This is the + same model except without the ``if`` (``condition``) attribute and is used + for secrets that have already been filtered for whether they are required + by a given application instance. 
+ """ + + description: str + """Description of the secret.""" + + copy_rules: SecretCopyRules | None = Field( + None, + description="Rules for where the secret should be copied from", + alias="copy", + ) + + generate: SecretGenerateRules | None = None + """Rules for how the secret should be generated.""" + + value: SecretStr | None = None + """Secret value.""" + + class Config: + allow_population_by_field_name = True + extra = Extra.forbid + + @validator("generate") + def _validate_generate( + cls, v: SecretGenerateRules | None, values: dict[str, Any] + ) -> SecretGenerateRules | None: + has_copy = "copy" in values and "condition" not in values["copy"] + if v and has_copy: + msg = "both copy and generate may not be set for the same secret" + raise ValueError(msg) + return v + + @validator("value") + def _validate_value( + cls, v: SecretStr | None, values: dict[str, Any] + ) -> SecretStr | None: + has_copy = values.get("copy") and "condition" not in values["copy"] + has_generate = ( + values.get("generate") and "condition" not in values["generate"] + ) + if v and (has_copy or has_generate): + msg = "value may not be set if copy or generate is set" + raise ValueError(msg) + return v + + +class SecretConfig(RequiredSecretConfig): + """Specification for an application secret. + + Represents the on-disk schema for secret configurations. + """ + + condition: str | None = Field( + None, + description=( + "Helm chart value that, if set, indicates the secret should be" + " generated" + ), + alias="if", + ) + + +class Secret(SecretConfig): + """An application secret. + + The same as `SecretConfig` except augmented with the secret application + and key for internal convenience. + """ + + key: str + """Key of the secret.""" + + application: str + """Application of the secret.""" + + +class RequiredSecret(SecretConfig): + """An application secret required for this instance. + + This represents the secret configuration for an application instance after + filtering out secrets that are not relevant to the instance's environment + and adding the secret application and key information from context. + """ + + key: str + """Key of the secret.""" + + application: str + """Application of the secret.""" + + @classmethod + def from_secret(cls, secret: Secret) -> Self: + """Convert from a `Secret` assuming its condition was met. + + Parameters + ---------- + secret + Secret with a condition that has been met. + + Returns + ------- + RequiredSecret + Secret with no top-level condition, since it has been satisfied. + """ + attrs = secret.dict() + if "condition" in attrs: + del attrs["condition"] + return cls(**attrs) + + +class ResolvedSecret(BaseModel): + """A secret that has been resolved for a given application instance. + + Secret resolution means that the configuration has been translated into + either a secret value or knowledge that the secret is a static secret that + must come from elsewhere. + """ + + key: str + """Key of the secret.""" + + application: str + """Application for which the secret is required.""" + + value: SecretStr | None = None + """Value of the secret if known.""" + + static: bool = False + """Whether this is a static secret. + + Static secrets are those whose values come from an external source. 
+ """ diff --git a/src/phalanx/services/__init__.py b/src/phalanx/services/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py new file mode 100644 index 0000000000..099a121059 --- /dev/null +++ b/src/phalanx/services/secrets.py @@ -0,0 +1,273 @@ +"""Service to manipulate Phalanx secrets.""" + +from __future__ import annotations + +import os +import secrets +from base64 import urlsafe_b64encode +from collections import defaultdict +from dataclasses import dataclass +from datetime import UTC, datetime +from enum import Enum + +import bcrypt +from cryptography.fernet import Fernet +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from pydantic import SecretStr + +from ..exceptions import InvalidSecretConfigError, UnresolvedSecretsError +from ..models.applications import ApplicationInstance +from ..models.environments import Environment +from ..models.secrets import ( + RequiredSecret, + ResolvedSecret, + SecretGenerateRules, + SecretGenerateType, +) +from ..storage.config import ConfigStorage + +__all__ = ["SecretsService"] + + +class _SecretStatus(Enum): + """Status of a secret resolution.""" + + DROP = "DROP" + KEEP = "KEEP" + PENDING = "PENDING" + + +@dataclass +class _SecretResolution: + """Status of the resolution of a secret.""" + + status: _SecretStatus + """Status of the secret.""" + + secret: ResolvedSecret | None = None + """Resolved secret, if status is ``KEEP``.""" + + +class SecretsService: + """Service to manipulate Phalanx secrets. + + Parameters + ---------- + config_storage + Storage object for the Phalanx configuration. + """ + + def __init__(self, config_storage: ConfigStorage) -> None: + self._config = config_storage + + def list_secrets(self, environment_name: str) -> list[ResolvedSecret]: + """List all required secrets for the given environment. + + Parameters + ---------- + environment_name + Name of the environment. + + Returns + ------- + list of ResolvedSecret + Secrets required for the given environment. + """ + environment = self._config.load_environment(environment_name) + secrets = [] + for application in environment.all_applications(): + secrets.extend(application.secrets) + return self._resolve_secrets(secrets, environment) + + def _generate_secret( + self, config: SecretGenerateRules, source: SecretStr | None = None + ) -> SecretStr: + """Generate the value of a secret. + + Parameters + ---------- + config + Rules for generating the secret. + source + Secret on which this secret is based. + + Returns + ------- + SecretStr + Newly-generated secret. 
+ """ + match config.type: + case SecretGenerateType.password: + return SecretStr(secrets.token_hex(32)) + case SecretGenerateType.gafaelfawr_token: + key = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") + secret = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") + return SecretStr(f"gt-{key}.{secret}") + case SecretGenerateType.fernet_key: + return SecretStr(Fernet.generate_key().decode()) + case SecretGenerateType.rsa_private_key: + private_key = rsa.generate_private_key( + backend=default_backend(), + public_exponent=65537, + key_size=2048, + ) + private_key_bytes = private_key.private_bytes( + serialization.Encoding.PEM, + serialization.PrivateFormat.PKCS8, + serialization.NoEncryption(), + ) + return SecretStr(private_key_bytes.decode()) + case SecretGenerateType.bcrypt_password_hash: + if not source: + raise RuntimeError("bcrypt-password-hash with no source") + password_hash = bcrypt.hashpw( + source.get_secret_value().encode(), + bcrypt.gensalt(rounds=15), + ) + return SecretStr(password_hash.decode()) + case SecretGenerateType.mtime: + date = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") + return SecretStr(date) + + def _resolve_secrets( + self, secrets: list[RequiredSecret], environment: Environment + ) -> list[ResolvedSecret]: + """Resolve the secrets for a Phalanx environment. + + Resolving secrets is the process where the secret configuration is + resolved using per-environment Helm chart values to generate the list + of secrets required for a given environment and their values. + + Parameters + ---------- + secrets + Secret configuration by application and key. + environment + Phalanx environment for which to resolve secrets. + + Returns + ------- + list of ResolvedSecret + Resolved secrets by application and secret key. + + Raises + ------ + UnresolvedSecretsError + Raised if some secrets could not be resolved. + """ + resolved: defaultdict[str, dict[str, ResolvedSecret]] + resolved = defaultdict(dict) + unresolved = list(secrets) + left = len(unresolved) + while unresolved: + secrets = unresolved + unresolved = [] + for config in secrets: + instance = environment.applications[config.application] + resolution = self._resolve_secret(config, instance, resolved) + if resolution.status == _SecretStatus.KEEP: + secret = resolution.secret + if not secret: + raise RuntimeError("Resolved secret with no secret") + resolved[secret.application][secret.key] = secret + if resolution.status == _SecretStatus.PENDING: + unresolved.append(config) + if len(unresolved) >= left: + raise UnresolvedSecretsError(unresolved) + left = len(unresolved) + return sorted( + [s for sl in resolved.values() for s in sl.values()], + key=lambda s: (s.application, s.key), + ) + + def _resolve_secret( + self, + config: RequiredSecret, + instance: ApplicationInstance, + resolved: dict[str, dict[str, ResolvedSecret]], + ) -> _SecretResolution: + """Resolve a single secret. + + Parameters + ---------- + config + Configuration of the secret. + instance + Application instance owning this secret. + resolved + Other secrets for that environment that have already been + resolved. + + Returns + ------- + SecretResolution + Results of attempting to resolve this secret. + + Raises + ------ + InvalidSecretConfigError + Raised if the secret configuration has conflicting rules. + """ + # If a value was already provided, this is the easy case. 
+ if config.value: + return _SecretResolution( + status=_SecretStatus.KEEP, + secret=ResolvedSecret( + key=config.key, + application=config.application, + value=config.value, + ), + ) + + # See if either generate or copy were configured for this secret. + should_copy = False + if config.copy_rules: + condition = config.copy_rules.condition + should_copy = instance.is_condition_met(condition) + should_generate = False + if config.generate: + condition = config.generate.condition + should_generate = instance.is_condition_met(condition) + if should_copy and should_generate: + msg = "Copy and generate rules conflict" + raise InvalidSecretConfigError(config, msg) + + # Do the copying or generation. + if should_copy and config.copy_rules: + application = config.copy_rules.application + other = resolved.get(application, {}).get(config.copy_rules.key) + if not other: + return _SecretResolution(status=_SecretStatus.PENDING) + return _SecretResolution( + status=_SecretStatus.KEEP, + secret=ResolvedSecret( + key=config.key, + application=config.application, + value=other.value, + ), + ) + elif should_generate and config.generate: + if config.generate.source: + other_key = config.generate.source + other = resolved.get(config.application, {}).get(other_key) + if not other: + return _SecretResolution(status=_SecretStatus.PENDING) + value = self._generate_secret(config.generate, other.value) + else: + value = self._generate_secret(config.generate) + return _SecretResolution( + status=_SecretStatus.KEEP, + secret=ResolvedSecret( + key=config.key, + application=config.application, + value=value, + ), + ) + + # The remaining case is that the secret is a static secret. + secret = ResolvedSecret( + key=config.key, application=config.application, static=True + ) + return _SecretResolution(status=_SecretStatus.KEEP, secret=secret) diff --git a/src/phalanx/storage/__init__.py b/src/phalanx/storage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py new file mode 100644 index 0000000000..72abe6a0ec --- /dev/null +++ b/src/phalanx/storage/config.py @@ -0,0 +1,240 @@ +"""Parsing and analysis of Phalanx configuration.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any + +import yaml + +from ..exceptions import UnknownEnvironmentError +from ..models.applications import Application, ApplicationInstance +from ..models.environments import Environment, EnvironmentConfig +from ..models.secrets import RequiredSecret, Secret, SecretConfig + +__all__ = ["ConfigStorage"] + + +def _merge_overrides( + base: dict[str, Any], overrides: dict[str, Any] +) -> dict[str, Any]: + """Merge values settings with overrides. + + Parameters + ---------- + base + Base settings. + overrides + Overrides that should take precedence. + + Returns + ------- + dict + Merged dictionary. + """ + for key, value in overrides.items(): + if key in base: + if isinstance(base[key], dict) and isinstance(value, dict): + _merge_overrides(base[key], value) + else: + base[key] = value + else: + base[key] = value + return base + + +class ConfigStorage: + """Analyze Phalanx configuration and convert it to models.""" + + def __init__(self) -> None: + self._path = Path.cwd() + + def load_environment(self, environment_name: str) -> Environment: + """Load the configuration of a Phalanx environment from disk. + + Parameters + ---------- + environment_name + Name of the environment. 
+ + Returns + ------- + Environment + Environment configuration. + + Raises + ------ + UnknownEnvironmentError + Raised if the named environment has no configuration. + """ + config = self._load_environment_config(environment_name) + applications = [self._load_application(a) for a in config.applications] + instances = { + a.name: self._resolve_application(a, environment_name) + for a in applications + } + return Environment(name=config.environment, applications=instances) + + def _load_application(self, name: str) -> Application: + """Load the configuration for an application from disk. + + Parameters + ---------- + name + Name of the application. + + Returns + ------- + Application + Application data. + """ + base_path = Path.cwd() / "applications" / name + + # Load main values file. + values_path = base_path / "values.yaml" + if values_path.exists(): + with values_path.open("r") as fh: + values = yaml.safe_load(fh) + else: + values = {} + + # Load environment-specific values files. + environment_values = {} + for path in base_path.glob("values-*.yaml"): + env_name = path.stem[len("values-") :] + with path.open("r") as fh: + environment_values[env_name] = yaml.safe_load(fh) + + # Load the secrets configuration. + secrets_path = base_path / "secrets.yaml" + secrets = [] + if secrets_path.exists(): + with secrets_path.open("r") as fh: + raw_secrets = yaml.safe_load(fh) + for key, raw_config in raw_secrets.items(): + config = SecretConfig.parse_obj(raw_config) + secret = Secret(key=key, application=name, **config.dict()) + secrets.append(secret) + + # Load the environment-specific secrets configuration. + environment_secrets = {} + for path in base_path.glob("secrets-*.yaml"): + env_name = path.stem[len("secrets-") :] + with path.open("r") as fh: + raw_secrets = yaml.safe_load(fh) + env_secrets = [] + for key, raw_config in raw_secrets.items(): + config = SecretConfig.parse_obj(raw_config) + secret = Secret(key=key, application=name, **config.dict()) + env_secrets.append(secret) + environment_secrets[env_name] = env_secrets + + # Return the resulting application. + return Application( + name=name, + values=values, + environment_values=environment_values, + secrets=secrets, + environment_secrets=environment_secrets, + ) + + def _load_environment_config( + self, environment_name: str + ) -> EnvironmentConfig: + """Load the configuration for a Phalanx environment. + + Parameters + ---------- + environment_name + Name of the environent. + + Returns + ------- + Environment + Loaded environment. + + Raises + ------ + InvalidEnvironmentConfigError + Raised if the configuration for an environment is invalid. + UnknownEnvironmentError + Raised if the named environment has no configuration. + """ + values_name = f"values-{environment_name}.yaml" + values_path = Path.cwd() / "environments" / values_name + if not values_path.exists(): + raise UnknownEnvironmentError(environment_name) + with values_path.open() as fh: + values = yaml.safe_load(fh) + + # Eventually this will have more structure, but for now assume any + # key whose value is a dictionary with an enabled key is indicating an + # application that is or is not enabled. + applications = [] + for key, value in values.items(): + if isinstance(value, dict) and "enabled" in value: + if value["enabled"]: + applications.append(key) + + # For now, this is hard-coded, but we'll eventually figure it out from + # the Argo CD Application resource templates. + applications.append("argocd") + + # Return the configuration. 
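Before that final return, the loop above has turned the environment values file into a list of enabled applications, with argocd always appended because it has no `enabled` flag of its own. A minimal sketch of the same heuristic against a values fragment modelled loosely on the idfdev test data (the `enabled: false` entry is added here purely to show the filtering):

```python
import yaml

# Environment values resembling tests/data/input/environments/values-idfdev.yaml.
values_yaml = """
environment: idfdev
fqdn: data-dev.lsst.cloud
gafaelfawr:
  enabled: true
mobu:
  enabled: true
nublado:
  enabled: true
postgres:
  enabled: false
"""

values = yaml.safe_load(values_yaml)

# Any top-level mapping containing an "enabled" key is treated as an
# application toggle; argocd is always appended since it is never disabled.
applications = [
    key
    for key, value in values.items()
    if isinstance(value, dict) and value.get("enabled")
]
applications.append("argocd")

print(sorted(applications))  # ['argocd', 'gafaelfawr', 'mobu', 'nublado']
```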
+ return EnvironmentConfig( + environment=environment_name, applications=sorted(applications) + ) + + def _resolve_application( + self, application: Application, environment_name: str + ) -> ApplicationInstance: + """Resolve an application to its environment-specific configuration. + + Parameters + ---------- + application + Application to resolve. + environment_name + Name of the environment the application should be configured for. + + Returns + ------- + ApplicationInstance + Resolved application. + """ + # Merge values with any environment overrides. + values = application.values + if environment_name in application.environment_values: + env_values = application.environment_values[environment_name] + values = _merge_overrides(values, env_values) + + # Merge secrets with any environment secrets. + if environment_name in application.environment_secrets: + env_secrets = application.environment_secrets[environment_name] + extra_secrets = {s.key: s for s in env_secrets} + secrets = [] + for secret in application.secrets: + if secret.key in extra_secrets: + secrets.append(extra_secrets[secret.key]) + del extra_secrets[secret.key] + else: + secrets.append(secret) + secrets.extend(extra_secrets.values()) + else: + secrets = application.secrets + + # Create an initial application instance without secrets so that we + # can use its class methods. + instance = ApplicationInstance( + name=application.name, + environment=environment_name, + values=values, + ) + + # Filter out the secrets that don't apply to this instance. + instance.secrets = [ + RequiredSecret.from_secret(s) + for s in secrets + if instance.is_condition_met(s.condition) + ] + return instance diff --git a/tests/cli/__init__.py b/tests/cli/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py new file mode 100644 index 0000000000..547c7bb7d9 --- /dev/null +++ b/tests/cli/secrets_test.py @@ -0,0 +1,21 @@ +"""Tests for the secrets command-line subcommand.""" + +from __future__ import annotations + +import os + +from click.testing import CliRunner +from phalanx.cli import main + +from ..support.data import phalanx_test_path, read_output_data + + +def test_list() -> None: + input_path = phalanx_test_path() + os.chdir(str(input_path)) + runner = CliRunner() + result = runner.invoke( + main, ["secrets", "list", "idfdev"], catch_exceptions=False + ) + assert result.exit_code == 0 + assert result.output == read_output_data("idfdev", "secrets-list") diff --git a/tests/data/input/applications/argocd/secrets.yaml b/tests/data/input/applications/argocd/secrets.yaml new file mode 100644 index 0000000000..8c32f231ab --- /dev/null +++ b/tests/data/input/applications/argocd/secrets.yaml @@ -0,0 +1,34 @@ +"admin.plaintext_password": + description: >- + Admin password for Argo CD. This password is normally not used because + Argo CD is configured to use Google or GitHub authentication, but it is + used by the installer (which cannot use external authentication) and is + useful as a fallback if external authentication is not working for some + reason. This secret can be changed at any time. + generate: + type: password +"admin.password": + description: >- + bcrypt hash of the admin password. This is the only version of the admin + password exposed to the running Argo CD pod. It will be updated + automatically if the admin password is changed. 
+ generate: + type: bcrypt-password-hash + source: admin.plaintext_password +"admin.passwordMtime": + description: "Last modification time for the admin password." + generate: + type: mtime + source: admin.plaintext_password +"dex.clientSecret": + description: >- + OAuth 2 or OpenID Connect client secret, used to authenticate to GitHub or + Google as part of the authentication flow. This secret can be changed at + any time. +"server.secretkey": + description: >- + Key used to validate user session cookies. Argo CD will generate this + secret if it is missing, but we provide it because the Argo CD secret is + managed via a VaultSecret. + generate: + type: password diff --git a/tests/data/input/applications/argocd/values-idfdev.yaml b/tests/data/input/applications/argocd/values-idfdev.yaml new file mode 100644 index 0000000000..8586477a32 --- /dev/null +++ b/tests/data/input/applications/argocd/values-idfdev.yaml @@ -0,0 +1,61 @@ +argo-cd: + # Some time we may want to play with this more, but currently we're + # just using GafaelfawrIngress to protect Argo Workflows and requiring + # 'exec:admin' scope. It is theoretically possible to piggyback + # Workflows off of Dex SSO, but how to actually hook up the RBAC is + # going to need a lot of experimentation, creating service tokens, etc. + + #dex: + # env: + # - name: ARGO_WORKFLOWS_SSO_CLIENT_SECRET + # valueFrom: + # secretKeyRef: + # name: argo-sso-secret + # key: client-secret + + server: + ingress: + hosts: + - "data-dev.lsst.cloud" + + config: + url: "https://data-dev.lsst.cloud/argo-cd" + dex.config: | + connectors: + # Auth using Google. + # See https://dexidp.io/docs/connectors/google/ + - type: google + id: google + name: Google + config: + clientID: 176818997517-o2tu9978r099fnsnh1acd608gkmopfhu.apps.googleusercontent.com + clientSecret: $dex.clientSecret + hostedDomains: + - lsst.cloud + redirectURI: https://data-dev.lsst.cloud/argo-cd/api/dex/callback + + # Again, change this if we want to use SSO + + # staticClients: + # - id: argo-workflows-sso + # name: Argo Workflow + # redirectURIs: + # - https://data-dev-workflows.lsst.cloud/oauth2/callback + # secretEnv: ARGO_WORKFLOWS_SSO_CLIENT_SECRET + + rbacConfig: + policy.csv: | + g, adam@lsst.cloud, role:admin + g, afausti@lsst.cloud, role:admin + g, christine@lsst.cloud, role:admin + g, dspeck@lsst.cloud, role:admin + g, frossie@lsst.cloud, role:admin + g, jsick@lsst.cloud, role:admin + g, krughoff@lsst.cloud, role:admin + g, rra@lsst.cloud, role:admin + g, gpdf@lsst.cloud, role:admin + g, loi@lsst.cloud, role:admin + g, roby@lsst.cloud, role:admin + g, kkoehler@lsst.cloud, role:admin + g, fritzm@lsst.cloud, role:admin + scopes: "[email]" diff --git a/tests/data/input/applications/argocd/values.yaml b/tests/data/input/applications/argocd/values.yaml new file mode 100644 index 0000000000..dcb51f1c8c --- /dev/null +++ b/tests/data/input/applications/argocd/values.yaml @@ -0,0 +1,83 @@ +# Argo CD configuration +# https://github.com/argoproj/argo-helm/blob/main/charts/argo-cd/values.yaml + +argo-cd: + global: + logging: + # -- Set the global logging format. 
Either: `text` or `json` + format: "json" + + redis: + metrics: + # -- Enable Redis metrics service + enabled: true + + controller: + metrics: + # -- Enable controller metrics service + enabled: true + + applicationLabels: + # -- Enable adding additional labels to `argocd_app_labels` metric + enabled: true + + # -- Labels to add to `argocd_app_labels` metric + labels: ["name", "instance"] + + repoServer: + metrics: + # -- Enable repo server metrics service + enabled: true + + notifications: + metrics: + # -- Enable notifications metrics service + enabled: true + + server: + metrics: + # -- Enable server metrics service + enabled: true + + ingress: + # -- Create an ingress for the Argo CD server + enabled: true + + # -- Additional annotations to add to the Argo CD ingress + # @default -- Rewrite requests to remove `/argo-cd/` prefix + annotations: + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + + # -- Ingress class to use for Argo CD ingress + ingressClassName: "nginx" + + # -- Paths to route to Argo CD + paths: + - "/argo-cd(/|$)(.*)" + + # -- Type of path expression for Argo CD ingress + pathType: "ImplementationSpecific" + + configs: + cm: + # -- Configure resource comparison + resource.compareoptions: | + ignoreAggregatedRoles: true + + params: + # -- Do not use TLS (this is terminated at the ingress) + server.insecure: true + + # -- Base href for `index.html` when running under a reverse proxy + server.basehref: "/argo-cd" + + secret: + # -- Create the Argo CD secret (we manage this with Vault) + createSecret: false + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/tests/data/input/applications/gafaelfawr/secrets.yaml b/tests/data/input/applications/gafaelfawr/secrets.yaml new file mode 100644 index 0000000000..1c89746656 --- /dev/null +++ b/tests/data/input/applications/gafaelfawr/secrets.yaml @@ -0,0 +1,79 @@ +bootstrap-token: + description: >- + Token with admin access, regardless of any other scopes or configuration, + which can be used to add new Gafaelfawr administrators and bootstrap + creation of other tokens with arbitrary scopes. To use this token, + retrieve it from the Kubernetes secret and then use it in API calls like + any other Gafaelfawr token. This secret can be changed at any time. + generate: + type: gafaelfawr-token +cilogon-client-secret: + description: >- + Secret used to authenticate to CILogon as part of the OpenID Connect login + protocol to obtain an identity token for the user. This secret can be + changed at any time. + if: config.cilogon.clientId +database-password: + description: >- + Password used to authenticate to the PostgreSQL database used to store + Gafaelfawr data. This password may be changed at any time. + generate: + if: config.internalDatabase + type: password +forgerock-password: + description: >- + Password used to authenticate to a ForgeRock Identity server using HTTP + Basic authentication to retrieve GID mappings for groups. + if: config.forgerock.url +github-client-secret: + description: >- + GitHub OAuth App secret used to authenticate to GitHub as part of the + OAuth 2 login protocol to obtain an identity token for the user. This + secret can be changed at any time. 
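Several of the entries above, such as `cilogon-client-secret` and `database-password`, are conditional: they only apply when an `if:` condition over the chart's values holds. The actual check is `ApplicationInstance.is_condition_met`, which lives in the models and is not shown in this patch excerpt; the sketch below is a plausible reading of it, assuming the condition is a dotted path into the resolved Helm values and that truthiness of the value at that path decides the outcome:

```python
from typing import Any


def is_condition_met(condition: str | None, values: dict[str, Any]) -> bool:
    """Hypothetical check: follow the dotted path and test truthiness."""
    if not condition:
        return True  # Unconditional secrets always apply.
    node: Any = values
    for part in condition.split("."):
        if not isinstance(node, dict) or part not in node:
            return False
        node = node[part]
    return bool(node)


# Illustrative values for an environment that uses CILogon but not GitHub
# and an external database.
values = {
    "config": {
        "internalDatabase": False,
        "cilogon": {"clientId": "cilogon:/client_id/example"},
        "github": {"clientId": ""},
    }
}

print(is_condition_met("config.cilogon.clientId", values))  # True
print(is_condition_met("config.github.clientId", values))   # False
print(is_condition_met("config.internalDatabase", values))  # False
print(is_condition_met(None, values))                       # True
```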
+ if: config.github.clientId +ldap-keytab: + description: >- + Kerberos keytab used to authenticate to the LDAP server via GSSAPI binds + to retrieve user and group information. This keytab can be changed at any + time. + if: config.ldap.kerberosConfig +ldap-password: + description: >- + Password to authenticate to the LDAP server via simple binds to retrieve + user and group information. This password can be changed at any time. + if: config.ldap.userDn +oidc-client-secret: + description: >- + Secret used to authenticate to a remote OpenID Connect authentication + server. This secret can be changed at any time. + if: config.oidc.clientId +redis-password: + description: >- + Password used to authenticate Gafaelfawr to its internal Redis server, + deployed as part of the same Argo CD application. This secret can be + changed at any time, but both the Redis server and all Gafaelfawr + deployments will then have to be restarted to pick up the new value. + generate: + type: password +session-secret: + description: >- + Encryption key used to encrypt the contents of Redis and the cookie data + stored in user web browsers that holds their session token and related + information. Changing this secret will invalidate all existing Redis data + and all user authentication cookies. +signing-key: + description: >- + RSA private key used to sign JWTs issued by Gafaelfawr when it acts as an + OpenID Connect server. Changing this secret will invalidate all existing + issued OpenID Connect JWTs. + if: config.oidcServer.enabled + generate: + type: rsa-private-key +slack-webhook: + description: >- + Slack web hook used to report internal errors to Slack. This secret may be + changed at any time. + if: config.slackAlerts + copy: + application: mobu + key: app-alert-webhook diff --git a/tests/data/input/applications/gafaelfawr/values-idfdev.yaml b/tests/data/input/applications/gafaelfawr/values-idfdev.yaml new file mode 100644 index 0000000000..fc371da3c2 --- /dev/null +++ b/tests/data/input/applications/gafaelfawr/values-idfdev.yaml @@ -0,0 +1,72 @@ +# Use the CSI storage class so that we can use snapshots. +redis: + persistence: + storageClass: "standard-rwo" + +config: + logLevel: "DEBUG" + slackAlerts: true + + cilogon: + clientId: "cilogon:/client_id/46f9ae932fd30e9fb1b246972a3c0720" + enrollmentUrl: "https://id-dev.lsst.cloud/registry/co_petitions/start/coef:6" + test: true + usernameClaim: "username" + + ldap: + url: "ldaps://ldap-test.cilogon.org" + userDn: "uid=readonly_user,ou=system,o=LSST,o=CO,dc=lsst_dev,dc=org" + groupBaseDn: "ou=groups,o=LSST,o=CO,dc=lsst_dev,dc=org" + groupObjectClass: "eduMember" + groupMemberAttr: "hasMember" + userBaseDn: "ou=people,o=LSST,o=CO,dc=lsst_dev,dc=org" + userSearchAttr: "voPersonApplicationUID" + addUserGroup: true + + firestore: + project: "rsp-firestore-dev-31c4" + + # Support OpenID Connect clients like Chronograf. + oidcServer: + enabled: true + + # User quota settings for services. 
+ quota: + default: + notebook: + cpu: 4.0 + memory: 16 + + groupMapping: + "admin:jupyterlab": + - "g_admins" + "admin:provision": + - "g_admins" + "exec:admin": + - "g_admins" + "exec:internal-tools": + - "g_users" + "exec:notebook": + - "g_users" + "exec:portal": + - "g_users" + "read:image": + - "g_users" + "read:tap": + - "g_users" + "write:sasquatch": + - "g_admins" + + initialAdmins: + - "adam" + - "afausti" + - "cbanek" + - "frossie" + - "jsick" + - "rra" + - "simonkrughoff" + +cloudsql: + enabled: true + instanceConnectionName: "science-platform-dev-7696:us-central1:science-platform-dev-e9e11de2" + serviceAccount: "gafaelfawr@science-platform-dev-7696.iam.gserviceaccount.com" diff --git a/tests/data/input/applications/gafaelfawr/values.yaml b/tests/data/input/applications/gafaelfawr/values.yaml new file mode 100644 index 0000000000..42bb9e5064 --- /dev/null +++ b/tests/data/input/applications/gafaelfawr/values.yaml @@ -0,0 +1,429 @@ +# Default values for Gafaelfawr. + +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "" + +# -- Number of web frontend pods to start +replicaCount: 1 + +image: + # -- Gafaelfawr image to use + repository: "ghcr.io/lsst-sqre/gafaelfawr" + + # -- Pull policy for the Gafaelfawr image + pullPolicy: "IfNotPresent" + + # -- Tag of Gafaelfawr image to use + # @default -- The appVersion of the chart + tag: "" + +# -- Resource limits and requests for the Gafaelfawr frontend pod +resources: {} + +# -- Annotations for the Gafaelfawr frontend pod +podAnnotations: {} + +# -- Node selector rules for the Gafaelfawr frontend pod +nodeSelector: {} + +# -- Tolerations for the Gafaelfawr frontend pod +tolerations: [] + +# -- Affinity rules for the Gafaelfawr frontend pod +affinity: {} + +config: + # -- Whether to use the PostgreSQL server internal to the Kubernetes cluster + internalDatabase: false + + # -- URL for the PostgreSQL database + # @default -- None, must be set if neither `cloudsql.enabled` nor + # `config.internalDatabase` are true + databaseUrl: "" + + # -- Choose from the text form of Python logging levels + logLevel: "INFO" + + # -- Session length and token expiration (in minutes) + # @default -- `43200` (30 days) + tokenLifetimeMinutes: 43200 + + # -- List of netblocks used for internal Kubernetes IP addresses, used to + # determine the true client IP for logging + # @default -- [`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`] + proxies: + - "10.0.0.0/8" + - "172.16.0.0/12" + - "192.168.0.0/16" + + # -- HTML footer to add to any login error page (will be enclosed in a
<p>
+ # tag). + errorFooter: "" + + # -- Whether to send certain serious alerts to Slack. If `true`, the + # `slack-webhook` secret must also be set. + slackAlerts: false + + github: + # -- GitHub client ID. One and only one of this, `config.cilogon.clientId`, + # or `config.oidc.clientId` must be set. + clientId: "" + + cilogon: + # -- CILogon client ID. One and only one of this, + # `config.github.clientId`, or `config.oidc.clientId` must be set. + clientId: "" + + # -- Where to send the user if their username cannot be found in LDAP + # @default -- Login fails with an error + enrollmentUrl: "" + + # -- Whether to use the test instance of CILogon + test: false + + # -- Additional parameters to add + loginParams: + skin: "LSST" + + # -- Claim from which to get the username + # @default -- `"uid"` + usernameClaim: "" + + # -- Claim from which to get the numeric UID (only used if not retrieved + # from LDAP or Firestore) + # @default -- `"uidNumber"` + uidClaim: "" + + # -- Claim from which to get the primary GID (only used if not retrieved + # from LDAP or Firestore) + # @default -- Do not set a primary GID + gidClaim: "" + + # -- Claim from which to get the group membership (only used if not + # retrieved from LDAP) + # @default -- `"isMemberOf"` + groupsClaim: "" + + oidc: + # -- Client ID for generic OpenID Connect support. One and only one of + # this, `config.cilogon.clientId`, or `config.github.clientId` must be set. + clientId: "" + + # -- Audience for the JWT token + # @default -- Value of `config.oidc.clientId` + audience: "" + + # -- URL to which to redirect the user for authorization + # @default -- None, must be set + loginUrl: "" + + # -- Additional parameters to add to the login request + loginParams: {} + + # -- URL from which to retrieve the token for the user + # @default -- None, must be set + tokenUrl: "" + + # -- Where to send the user if their username cannot be found in LDAP + # @default -- Login fails with an error + enrollmentUrl: "" + + # -- Issuer for the JWT token + # @default -- None, must be set + issuer: "" + + # -- Scopes to request from the OpenID Connect provider + scopes: + - "openid" + + # -- Claim from which to get the username + # @default -- `"sub"` + usernameClaim: "" + + # -- Claim from which to get the numeric UID (only used if not retrieved + # from LDAP or Firestore) + # @default -- `"uidNumber"` + uidClaim: "" + + # -- Claim from which to get the primary GID (only used if not retrieved + # from LDAP or Firestore) + # @default -- Do not set a primary GID + gidClaim: "" + + # -- Claim from which to get the group membership (only used if not + # retrieved from LDAP) + # @default -- `"isMemberOf"` + groupsClaim: "" + + ldap: + # -- LDAP server URL from which to retrieve user group information + # @default -- Do not use LDAP + url: "" + + # -- Bind DN for simple bind authentication. If set, `ldap-secret` must be + # set in the Gafaelfawr Vault secret. Set this or `kerberosConfig`, not + # both. + # @default -- Use anonymous binds + userDn: "" + + # -- Enable GSSAPI (Kerberos) binds to LDAP using this `krb5.conf` file. + # If set, `ldap-keytab` must be set in the Gafaelfawr Vault secret. Set + # either this or `userDn`, not both. + # @default -- Use anonymous binds + kerberosConfig: "" + + # -- Base DN for the LDAP search to find a user's groups + # @default -- None, must be set + groupBaseDn: "" + + # -- Object class containing group information + groupObjectClass: "posixGroup" + + # -- Member attribute of the object class. 
Values must match the username + # returned in the token from the OpenID Connect authentication server. + groupMemberAttr: "member" + + # -- Base DN for the LDAP search to find a user's entry + # @default -- Get user metadata from the upstream authentication provider + userBaseDn: "" + + # -- Search attribute containing the user's username + userSearchAttr: "uid" + + # -- Attribute containing the user's full name + nameAttr: "displayName" + + # -- Attribute containing the user's email address + emailAttr: "mail" + + # -- Attribute containing the user's UID number (set to `uidNumber` for + # most LDAP servers) + # @default -- Get UID from upstream authentication provider + uidAttr: "" + + # -- Attribute containing the user's primary GID (set to `gidNumber` for + # most LDAP servers) + # @default -- Use GID of user private group + gidAttr: "" + + # -- Whether to synthesize a user private group for each user with a GID + # equal to their UID + addUserGroup: false + + firestore: + # -- If set, assign UIDs and GIDs using Google Firestore in the given + # project. Cloud SQL must be enabled and the Cloud SQL service account + # must have read/write access to that Firestore instance. + # @default -- Firestore support is disabled + project: "" + + forgerock: + # -- If set, obtain the GIDs for groups from this ForgeRock Identity + # Management server. + # @default -- ForgeRock Identity Management support is disabled + url: "" + + # -- Username to use for HTTP Basic authentication to ForgeRock Identity + # Managemnt. The corresponding password must be in the + # `forgerock-passsword` key of the Gafaelfawr Vault secret. + # @default -- None, must be set if `config.forgerock.url` is set + username: "" + + oidcServer: + # -- Whether to support OpenID Connect clients. If set to true, + # `oidc-server-secrets` must be set in the Gafaelfawr secret. + enabled: false + + # -- Quota settings (see + # [Quotas](https://gafaelfawr.lsst.io/user-guide/helm.html#quotas)). + quota: {} + + # -- Usernames to add as administrators when initializing a new database. + # Used only if there are no administrators. + initialAdmins: [] + + # -- Names and descriptions of all scopes in use. This is used to populate + # the new token creation page. Only scopes listed here will be options when + # creating a new token. See [DMTN-235](https://dmtn-235.lsst.io/). + # @default -- See the `values.yaml` file + knownScopes: + "admin:jupyterlab": >- + Can create and destroy labs for any user + "admin:token": >- + Can create and modify tokens for any user + "admin:provision": >- + Can perform privileged user provisioning + "exec:admin": >- + Administrative access to all APIs + "exec:internal-tools": >- + Use project-internal tools. + "exec:notebook": >- + Use the Notebook Aspect + "exec:portal": >- + Use the Portal Aspect + "read:alertdb": >- + Retrieve alert packets and schemas from the alert archive database + "read:image": >- + Retrieve images from project datasets + "read:tap": >- + Execute SELECT queries in the TAP interface on project datasets + "write:sasquatch": >- + "Write access to the Sasquatch telemetry service" + "user:token": >- + Can create and modify user tokens + + # -- Defines a mapping of scopes to groups that provide that scope. See + # [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes. + groupMapping: {} + +ingress: + # -- Defines additional FQDNs for Gafaelfawr. This doesn't work for + # cookie or browser authentication, but for token-based services like + # git-lfs or the webdav server it does. 
+ additionalHosts: [] + +cloudsql: + # -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google + # Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as + # a separate service (behind a `NetworkPolicy`) for other, lower-traffic + # services. + enabled: false + + image: + # -- Cloud SQL Auth Proxy image to use + repository: "gcr.io/cloudsql-docker/gce-proxy" + + # -- Cloud SQL Auth Proxy tag to use + tag: "1.33.8" + + # -- Pull policy for Cloud SQL Auth Proxy images + pullPolicy: "IfNotPresent" + + # -- Instance connection name for a CloudSQL PostgreSQL instance + # @default -- None, must be set if Cloud SQL Auth Proxy is enabled + instanceConnectionName: "" + + # -- The Google service account that has an IAM binding to the `gafaelfawr` + # Kubernetes service account and has the `cloudsql.client` role + # @default -- None, must be set if Cloud SQL Auth Proxy is enabled + serviceAccount: "" + + # -- Resource limits and requests for the Cloud SQL Proxy pod + resources: {} + + # -- Annotations for the Cloud SQL Proxy pod + podAnnotations: {} + + # -- Node selection rules for the Cloud SQL Proxy pod + nodeSelector: {} + + # -- Tolerations for the Cloud SQL Proxy pod + tolerations: [] + + # -- Affinity rules for the Cloud SQL Proxy pod + affinity: {} + +maintenance: + # -- Cron schedule string for Gafaelfawr data consistency audit (in UTC) + auditSchedule: "30 3 * * *" + + # -- Cron schedule string for Gafaelfawr periodic maintenance (in UTC) + maintenanceSchedule: "5 * * * *" + + # -- Resource limits and requests for Gafaelfawr maintenance and audit pods + resources: {} + + # -- Annotations for Gafaelfawr maintenance and audit pods + podAnnotations: {} + + # -- Node selection rules for Gafaelfawr maintenance and audit pods + nodeSelector: {} + + # -- Tolerations for Gafaelfawr maintenance and audit pods + tolerations: [] + + # -- Affinity rules for Gafaelfawr maintenance and audit pods + affinity: {} + +operator: + # -- Resource limits and requests for the Gafaelfawr Kubernetes operator + resources: {} + + # -- Annotations for the token management pod + podAnnotations: {} + + # -- Node selection rules for the token management pod + nodeSelector: {} + + # -- Tolerations for the token management pod + tolerations: [] + + # -- Affinity rules for the token management pod + affinity: {} + +redis: + config: + # -- Name of secret containing Redis password (may require changing if + # fullnameOverride is set) + secretName: "gafaelfawr-secret" + + # -- Key inside secret from which to get the Redis password (do not + # change) + secretKey: "redis-password" + + persistence: + # -- Whether to persist Redis storage and thus tokens. Setting this to + # false will use `emptyDir` and reset all tokens on every restart. Only + # use this for a test deployment. + enabled: true + + # -- Amount of persistent storage to request + size: "1Gi" + + # -- Class of storage to request + storageClass: "" + + # -- Access mode of storage to request + accessMode: "ReadWriteOnce" + + # -- Use an existing PVC, not dynamic provisioning. If this is set, the + # size, storageClass, and accessMode settings are ignored. 
+ volumeClaimName: "" + + # -- Resource limits and requests for the Redis pod + # @default -- See `values.yaml` + resources: + limits: + cpu: "1" + requests: + cpu: "100m" + + # -- Pod annotations for the Redis pod + podAnnotations: {} + + # -- Node selection rules for the Redis pod + nodeSelector: {} + + # -- Tolerations for the Redis pod + tolerations: [] + + # -- Affinity rules for the Redis pod + affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/tests/data/input/applications/mobu/secrets.yaml b/tests/data/input/applications/mobu/secrets.yaml new file mode 100644 index 0000000000..87f8c046fd --- /dev/null +++ b/tests/data/input/applications/mobu/secrets.yaml @@ -0,0 +1,11 @@ +app-alert-webhook: + description: >- + Slack web hook to which to post internal application alerts. This secret + is not used directly by mobu, but is copied from here to all of the + applications that report internal problems to Slack. It should normally be + separate from mobu's own web hook, since the separate identities attached + to the messages helps make the type of mesasge clearer, but the same web + hook as mobu's own alerts can be used in a pinch. +ALERT_HOOK: + description: >- + Slack web hook to which mobu should report failures and daily status. diff --git a/tests/data/input/applications/mobu/values.yaml b/tests/data/input/applications/mobu/values.yaml new file mode 100644 index 0000000000..241d73d661 --- /dev/null +++ b/tests/data/input/applications/mobu/values.yaml @@ -0,0 +1,67 @@ +# Default values for mobu. + +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "" + +image: + # -- mobu image to use + repository: "ghcr.io/lsst-sqre/mobu" + + # -- Pull policy for the mobu image + pullPolicy: "IfNotPresent" + + # -- Tag of mobu image to use + # @default -- The appVersion of the chart + tag: "" + +ingress: + # -- Additional annotations to add to the ingress + annotations: {} + +config: + # -- Autostart specification. Must be a list of mobu flock specifications. + # Each flock listed will be automatically started when mobu is started. + autostart: [] + + # -- If set to true, include the output from all flocks in the main mobu log + # and disable structured JSON logging. + debug: false + + # -- If set to true, do not configure mobu to send alerts to Slack. + disableSlackAlerts: false + + # -- Prefix for mobu's API routes. + pathPrefix: "/mobu" + +# -- Resource limits and requests for the mobu frontend pod +resources: {} + +# -- Annotations for the mobu frontend pod +podAnnotations: {} + +# -- Node selector rules for the mobu frontend pod +nodeSelector: {} + +# -- Tolerations for the mobu frontend pod +tolerations: [] + +# -- Affinity rules for the mobu frontend pod +affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/tests/data/input/applications/nublado/secrets-idfdev.yaml b/tests/data/input/applications/nublado/secrets-idfdev.yaml new file mode 100644 index 0000000000..97d5af3ca8 --- /dev/null +++ b/tests/data/input/applications/nublado/secrets-idfdev.yaml @@ -0,0 +1,15 @@ +"aws-credentials.ini": + description: >- + Google Cloud Storage credentials to the Butler data store, formatted using + AWS syntax for use with boto. +"butler-gcs-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. +"butler-hmac-idf-creds.json": + description: >- + Google Cloud Storage credentials to the Butler data store in the private + key syntax used for HMACs. +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. diff --git a/tests/data/input/applications/nublado/secrets.yaml b/tests/data/input/applications/nublado/secrets.yaml new file mode 100644 index 0000000000..f0e2c52cee --- /dev/null +++ b/tests/data/input/applications/nublado/secrets.yaml @@ -0,0 +1,23 @@ +cryptkeeper_key: + description: "Encryption key for internal key management." + generate: + type: password +crypto_key: + description: "Encryption key for JupyterHub stored state." + generate: + type: password +hub_db_password: + description: "Password to authenticate to the JupyterHub session database." + generate: + type: password + if: hub.internalDatabase +proxy_token: + description: "Token authenticating JupyterHub to the proxy server." + generate: + type: password +slack_webhook: + description: "Slack web hook to which to post alerts." 
+ if: controller.slackAlerts + copy: + application: mobu + key: app-alert-webhook diff --git a/tests/data/input/applications/nublado/values-idfdev.yaml b/tests/data/input/applications/nublado/values-idfdev.yaml new file mode 100644 index 0000000000..d886967228 --- /dev/null +++ b/tests/data/input/applications/nublado/values-idfdev.yaml @@ -0,0 +1,72 @@ +controller: + googleServiceAccount: "nublado-controller@science-platform-dev-7696.iam.gserviceaccount.com" + slackAlerts: true + config: + safir: + logLevel: "DEBUG" + fileserver: + enabled: true + timeout: 43200 + images: + source: + type: "google" + location: "us-central1" + projectId: "rubin-shared-services-71ec" + repository: "sciplat" + image: "sciplat-lab" + recommendedTag: "recommended" + numReleases: 1 + numWeeklies: 2 + numDailies: 3 + lab: + env: + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod" + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" + PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt" + DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" + GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json" + S3_ENDPOINT_URL: "https://storage.googleapis.com" + initContainers: + - name: "initdir" + image: "ghcr.io/lsst-sqre/initdir:0.0.4" + privileged: true + volumes: + - containerPath: "/home" + mode: "rw" + source: + type: nfs + serverPath: "/share1/home" + server: "10.87.86.26" + + secrets: + - secretName: "nublado-lab-secret" + secretKey: "aws-credentials.ini" + - secretName: "nublado-lab-secret" + secretKey: "butler-gcs-idf-creds.json" + - secretName: "nublado-lab-secret" + secretKey: "butler-hmac-idf-creds.json" + - secretName: "nublado-lab-secret" + secretKey: "postgres-credentials.txt" + volumes: + - containerPath: "/home" + mode: "rw" + source: + type: nfs + serverPath: "/share1/home" + server: "10.87.86.26" + - containerPath: "/project" + mode: "rw" + source: + type: nfs + serverPath: "/share1/project" + server: "10.87.86.26" + - containerPath: "/scratch" + mode: "rw" + source: + type: nfs + serverPath: "/share1/scratch" + server: "10.87.86.26" +jupyterhub: + hub: + db: + url: "postgresql://nublado3@postgres.postgres/nublado3" diff --git a/tests/data/input/applications/nublado/values.yaml b/tests/data/input/applications/nublado/values.yaml new file mode 100644 index 0000000000..0f89b35a56 --- /dev/null +++ b/tests/data/input/applications/nublado/values.yaml @@ -0,0 +1,462 @@ +# Default values for Nublado. 
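nublado is the one application in this test data with both a base `secrets.yaml` and an environment-specific `secrets-idfdev.yaml` (the Butler credential files a few hunks above). In `ConfigStorage._resolve_application`, per-environment entries replace base entries with the same key and any remaining ones are appended. A minimal sketch of that layering over plain dictionaries; the descriptions here are abbreviated and the overlapping key is invented to show the replacement case:

```python
# Base secrets as in nublado/secrets.yaml, plus idfdev additions, reduced
# to key -> description for brevity.
base = {
    "cryptkeeper_key": "Encryption key for internal key management.",
    "proxy_token": "Token authenticating JupyterHub to the proxy server.",
}
env_overrides = {
    "postgres-credentials.txt": "PostgreSQL pgpass file for the Butler database.",
    "proxy_token": "An environment-specific replacement wins over the base entry.",
}


def layer_secrets(base: dict[str, str], env: dict[str, str]) -> dict[str, str]:
    """Environment entries replace same-key base entries; extras are appended."""
    merged = dict(base)
    merged.update(env)
    return merged


for key, description in layer_secrets(base, env_overrides).items():
    print(f"{key}: {description}")
```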
+ +controller: + image: + # -- nublado image to use + repository: ghcr.io/lsst-sqre/jupyterlab-controller + + # -- Pull policy for the nublado image + pullPolicy: IfNotPresent + + # -- Tag of nublado image to use + # @default -- The appVersion of the chart + tag: "" + + # -- Affinity rules for the lab controller pod + affinity: {} + + # -- Node selector rules for the lab controller pod + nodeSelector: {} + + # -- Annotations for the lab controller pod + podAnnotations: {} + + # -- Resource limits and requests for the lab controller pod + resources: {} + + # -- Tolerations for the lab controller pod + tolerations: [] + + ingress: + # -- Additional annotations to add for the lab controller pod ingress + annotations: {} + + # -- If Google Artifact Registry is used as the image source, the Google + # service account that has an IAM binding to the `nublado-controller` + # Kubernetes service account and has the Artifact Registry reader role + # @default -- None, must be set when using Google Artifact Registry + googleServiceAccount: "" + + # -- Whether to enable Slack alerts. If set to true, `slack_webhook` must be + # set in the corresponding Nublado Vault secret. + slackAlerts: false + + # Passed as YAML to the lab controller. + config: + fileserver: + # -- Enable fileserver management + enabled: false + + # -- Image for fileserver container + image: ghcr.io/lsst-sqre/worblehat + + # -- Tag for fileserver container + tag: 0.1.0 + + # -- Pull policy for fileserver container + pullPolicy: IfNotPresent + + # -- Timeout for user fileservers, in seconds + timeout: 3600 + + # -- Namespace for user fileservers + namespace: fileservers + + images: + # -- Source for prepulled images. For Docker, set `type` to `docker`, + # `registry` to the hostname and `repository` to the name of the + # repository. For Google Artifact Repository, set `type` to `google`, + # `location` to the region, `projectId` to the Google project, + # `repository` to the name of the repository, and `image` to the name of + # the image. + # @default -- None, must be specified + source: {} + + # -- Tag marking the recommended image (shown first in the menu) + recommendedTag: "recommended" + + # -- Number of most-recent releases to prepull. + numReleases: 1 + + # -- Number of most-recent weeklies to prepull. + numWeeklies: 2 + + # -- Number of most-recent dailies to prepull. + numDailies: 3 + + # -- Restrict images to this SAL cycle, if given. + cycle: null + + # -- List of additional image tags to prepull. Listing the image tagged + # as recommended here is recommended when using a Docker image source to + # ensure its name can be expanded properly in the menu. + pin: [] + + # -- Additional tags besides `recommendedTag` that should be recognized + # as aliases. + aliasTags: [] + + lab: + # -- Environment variables to set for every user lab. + # @default -- See `values.yaml` + env: + API_ROUTE: "/api" + AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod" + CULL_KERNEL_IDLE_TIMEOUT: "432000" # These might be set from group? + CULL_KERNEL_CONNECTED: "True" + CULL_KERNEL_INTERVAL: "300" + FIREFLY_ROUTE: "/portal/app" + HUB_ROUTE: "/nb/hub" + NO_ACTIVITY_TIMEOUT: "432000" # Also from group? + TAP_ROUTE: "/api/tap" + + # -- Containers run as init containers with each user pod. Each should + # set `name`, `image` (a Docker image reference), and `privileged`, and + # may contain `volumes` (similar to the main `volumes` + # configuration). 
If `privileged` is true, the container will run as + # root with `allowPrivilegeEscalation` true. Otherwise it will, run as + # UID 1000. + initcontainers: [] + + # -- Pull secret to use for labs. Set to the string `pull-secret` to use + # the normal pull secret from Vault. + # @default -- Do not use a pull secret + pullSecret: null + + # -- Secrets to set in the user pods. Each should have a `secretKey` key + # pointing to a secret in the same namespace as the controller + # (generally `nublado-secret`) and `secretRef` pointing to a field in + # that key. + secrets: [] + + # -- Available lab sizes. Names must be chosen from `fine`, + # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`, + # `gargantuan`, and `colossal` in that order. Each should specify the + # maximum CPU equivalents and memory. SI prefixes for memory are + # supported. + # @default -- See `values.yaml` (specifies `small`, `medium`, and + # `large`) + sizes: + small: + cpu: 1.0 + memory: 4Gi + medium: + cpu: 2.0 + memory: 8Gi + large: + cpu: 4.0 + memory: 16Gi + + # -- Volumes that should be mounted in lab pods. This supports NFS, + # HostPath, and PVC volume types (differentiated in source.type) + volumes: [] + # volumes: + # - containerPath: "/project" + # mode: "rw" + # source: + # type: nfs + # serverPath: "/share1/project" + # server: "10.87.86.26" + + # -- Files to be mounted as ConfigMaps inside the user lab pod. + # `contents` contains the file contents. Set `modify` to true to make + # the file writable in the pod. + # @default -- See `values.yaml` + files: + /etc/passwd: + modify: true + contents: | + root:x:0:0:root:/root:/bin/bash + bin:x:1:1:bin:/bin:/sbin/nologin + daemon:x:2:2:daemon:/sbin:/sbin/nologin + adm:x:3:4:adm:/var/adm:/sbin/nologin + lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin + sync:x:5:0:sync:/sbin:/bin/sync + shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown + halt:x:7:0:halt:/sbin:/sbin/halt + mail:x:8:12:mail:/var/spool/mail:/sbin/nologin + operator:x:11:0:operator:/root:/sbin/nologin + games:x:12:100:games:/usr/games:/sbin/nologin + ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin + tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin + dbus:x:81:81:System message bus:/:/sbin/nologin + nobody:x:99:99:Nobody:/:/sbin/nologin + systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin + lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash + /etc/group: + modify: true + contents: | + root:x:0: + bin:x:1: + daemon:x:2: + sys:x:3: + adm:x:4: + tty:x:5: + disk:x:6: + lp:x:7: + mem:x:8: + kmem:x:9: + wheel:x:10: + cdrom:x:11: + mail:x:12: + man:x:15: + dialout:x:18: + floppy:x:19: + games:x:20: + utmp:x:22: + tape:x:33: + utempter:x:35: + video:x:39: + ftp:x:50: + lock:x:54: + tss:x:59: + audio:x:63: + dbus:x:81: + screen:x:84: + nobody:x:99: + users:x:100: + systemd-journal:x:190: + systemd-network:x:192: + cgred:x:997: + ssh_keys:x:998: + input:x:999: + /opt/lsst/software/jupyterlab/lsst_dask.yml: + modify: false + contents: | + # No longer used, but preserves compatibility with runlab.sh + dask_worker.yml: | + enabled: false + /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template: + modify: false + contents: | + # Licensed under the Apache License, Version 2.0 (the "License"); + # You may not use this file except in compliance with the License. 
+ # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + # + # Authors: + # - Wen Guan, , 2020 + [common] + # if logdir is configured, idds will write to idds.log in this directory. + # else idds will go to stdout/stderr. + # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs. + # logdir = /var/log/idds + loglevel = INFO + [rest] + host = https://iddsserver.cern.ch:443/idds + #url_prefix = /idds + #cacher_dir = /tmp + cacher_dir = /data/idds + + safir: + # -- Level of Python logging + logLevel: "INFO" + + # -- Path prefix that will be routed to the controller + pathPrefix: "/nublado" + +# JupyterHub configuration handled directly by this chart rather than by Zero +# to JupyterHub. +hub: + # -- Whether to use the cluster-internal PostgreSQL server instead of an + # external server. This is not used directly by the Nublado chart, but + # controls how the database password is managed. + internalDatabase: true + + timeout: + # -- Timeout for the Kubernetes spawn process in seconds. (Allow long + # enough to pull uncached images if needed.) + spawn: 600 + + # -- Timeout for JupyterLab to start. Currently this sometimes takes over + # 60 seconds for reasons we don't understand. + startup: 90 + +# JupyterHub proxy configuration handled directly by this chart rather than by +# Zero to JupyterHub. +proxy: + ingress: + # -- Additional annotations to add to the proxy ingress (also used to talk + # to JupyterHub and all user labs) + # @default -- Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m + annotations: + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + +# Configuration for the Zero to JupyterHub subchart. 
+jupyterhub: + hub: + # -- Whether to require metrics requests to be authenticated + authenticatePrometheus: false + + image: + # -- Image to use for JupyterHub + name: ghcr.io/lsst-sqre/rsp-restspawner + + # -- Tag of image to use for JupyterHub + tag: 0.3.2 + + # -- Resource limits and requests + resources: + limits: + cpu: 900m + memory: 1Gi # Should support about 200 users + + db: + # -- Type of database to use + type: "postgres" + + # -- Database password (not used) + # @default -- Comes from nublado-secret + password: "true" + + # -- URL of PostgreSQL server + # @default -- Use the in-cluster PostgreSQL installed by Phalanx + url: "postgresql://jovyan@postgres.postgres/jupyterhub" + + # -- Security context for JupyterHub container + containerSecurityContext: + runAsUser: 768 + runAsGroup: 768 + allowPrivilegeEscalation: false + + # -- Base URL on which JupyterHub listens + baseUrl: "/nb" + + # -- Existing secret to use for private keys + existingSecret: "nublado-secret" + + # -- Additional environment variables to set + # @default -- Gets `JUPYTERHUB_CRYPT_KEY` from `nublado-secret` + extraEnv: + JUPYTERHUB_CRYPT_KEY: + valueFrom: + secretKeyRef: + name: "nublado-secret" + key: "hub.config.CryptKeeper.keys" + + # -- Additional volumes to make available to JupyterHub + # @default -- The `hub-config` `ConfigMap` and the Gafaelfawr token + extraVolumes: + - name: "hub-config" + configMap: + name: "hub-config" + - name: "hub-gafaelfawr-token" + secret: + secretName: "hub-gafaelfawr-token" + + # -- Additional volume mounts for JupyterHub + # @default -- `hub-config` and the Gafaelfawr token + extraVolumeMounts: + - name: "hub-config" + mountPath: "/usr/local/etc/jupyterhub/jupyterhub_config.d" + - name: "hub-gafaelfawr-token" + mountPath: "/etc/gafaelfawr" + + networkPolicy: + # -- Whether to enable the default `NetworkPolicy` (currently, the + # upstream one does not work correctly) + enabled: false + + loadRoles: + server: + # -- Default scopes for the user's lab, overridden to allow the lab to + # delete itself (which we use for our added menu items) + scopes: ["self"] + + prePuller: + continuous: + # -- Whether to run the JupyterHub continuous prepuller (the Nublado + # controller does its own prepulling) + enabled: false + + hook: + # -- Whether to run the JupyterHub hook prepuller (the Nublado + # controller does its own prepulling) + enabled: false + + singleuser: + cloudMetadata: + # -- Whether to configure iptables to block cloud metadata endpoints. + # This is unnecessary in our environments (they are blocked by cluster + # configuration) and thus is disabled to reduce complexity. + blockWithIptables: false + + # -- Start command for labs + cmd: "/opt/lsst/software/jupyterlab/runlab.sh" + + # -- Default URL prefix for lab endpoints + defaultUrl: "/lab" + + proxy: + service: + # -- Only expose the proxy to the cluster, overriding the default of + # exposing the proxy directly to the Internet + type: ClusterIP + + chp: + networkPolicy: + # -- Enable access to the proxy from other namespaces, since we put + # each user's lab environment in its own namespace + interNamespaceAccessLabels: accept + + # This currently causes Minikube deployment in GH-actions to fail. 
+ # We want it sometime but it's not critical; it will help with + # scale-down + # pdb: + # enabled: true + # minAvailable: 1 + + # Rather than using the JupyterHub-provided ingress, which requires us to + # repeat the global host name and manually configure authentication, we + # instead install our own GafaelfawrIngress. + ingress: + # -- Whether to enable the default ingress + enabled: false + + cull: + # -- Enable the lab culler. + enabled: true + + # -- Default idle timeout before the lab is automatically deleted in + # seconds + # @default -- 2592000 (30 days) + timeout: 2592000 + + # -- How frequently to check for idle labs in seconds + # @default -- 600 (10 minutes) + every: 600 + + # -- Whether to log out the server when culling their lab + users: true + + # -- Whether to remove named servers when culling their lab + removeNamedServers: true + + # -- Maximum age of a lab regardless of activity + # @default -- 5184000 (60 days) + maxAge: 5184000 + + scheduling: + userScheduler: + # -- Whether the user scheduler should be enabled + enabled: false + + userPlaceholder: + # -- Whether to spawn placeholder pods representing fake users to force + # autoscaling in advance of running out of resources + enabled: false + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/tests/data/input/applications/postgres/secrets.yaml b/tests/data/input/applications/postgres/secrets.yaml new file mode 100644 index 0000000000..5e03d36d1a --- /dev/null +++ b/tests/data/input/applications/postgres/secrets.yaml @@ -0,0 +1,45 @@ +exposurelog_password: + description: "Password for the exposurelog database." + if: exposurelog_db + copy: + application: exposurelog + key: exposurelog_password +gafaelfawr_password: + description: "Password for the Gafaelfawr database." + if: gafaelfawr_db + copy: + application: gafaelfawr + key: database-password +jupyterhub_password: + description: "Password for the Nublado v2 JupyterHub session database." + if: jupyterhub_db + copy: + application: nublado2 + key: hub_db_password +lovelog_password: + description: "Password for the lovelog database." + if: lovelog_db + generate: + type: password +narrativelog_password: + description: "Password for the narrativelog database." + if: narrativelog_db + copy: + application: narrativelog + key: narrativelog_password +nublado3_password: + description: "Password for the Nublado v3 JupyterHub session database." + if: nublado3_db + copy: + application: nublado + key: hub_db_password +root_password: + description: "Administrator password for the whole PostgreSQL installation." + generate: + type: password +timessquare_password: + description: "Password for the times-square database." 
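Most of the postgres entries above are copy rules: the password is owned by the consuming application (gafaelfawr, nublado, and so on) and postgres simply mirrors it. This is why `_resolve_secrets` loops over the work list: on the first pass a copy may be `PENDING` because its source has not been generated yet, and it resolves on a later pass; if a pass makes no progress, the remaining secrets are reported as unresolvable. A compact sketch of that behaviour with two hypothetical secrets, deliberately ordered so the copy is attempted before its source exists:

```python
import secrets

# (application, key) -> resolved value
resolved: dict[tuple[str, str], str] = {}

pending = [
    ("postgres", "gafaelfawr_password", ("copy", ("gafaelfawr", "database-password"))),
    ("gafaelfawr", "database-password", ("generate", None)),
]

while pending:
    still_pending = []
    for app, key, (rule, source) in pending:
        if rule == "generate":
            resolved[(app, key)] = secrets.token_hex(32)
        elif rule == "copy":
            if source in resolved:
                resolved[(app, key)] = resolved[source]
            else:
                # Source not available yet; retry on the next pass.
                still_pending.append((app, key, (rule, source)))
    if len(still_pending) >= len(pending):
        raise RuntimeError(f"Unresolvable secrets: {still_pending}")
    pending = still_pending

assert (
    resolved[("postgres", "gafaelfawr_password")]
    == resolved[("gafaelfawr", "database-password")]
)
print("resolved in multiple passes:", sorted(resolved))
```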
+ if: timessquare_db + copy: + application: times-square + key: TS_DATABASE_PASSWORD diff --git a/tests/data/input/applications/postgres/values-idfdev.yaml b/tests/data/input/applications/postgres/values-idfdev.yaml new file mode 100644 index 0000000000..20c336e86a --- /dev/null +++ b/tests/data/input/applications/postgres/values-idfdev.yaml @@ -0,0 +1,3 @@ +nublado3_db: + user: "nublado3" + db: "nublado3" diff --git a/tests/data/input/applications/postgres/values.yaml b/tests/data/input/applications/postgres/values.yaml new file mode 100644 index 0000000000..ded4248a51 --- /dev/null +++ b/tests/data/input/applications/postgres/values.yaml @@ -0,0 +1,37 @@ +# Default values for fileserver. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Set to non-empty to enable debugging output +debug: "" + +image: + # -- postgres image to use + repository: "lsstsqre/lsp-postgres" + + # -- Pull policy for the postgres image + pullPolicy: "IfNotPresent" + + # -- Tag of postgres image to use + # @default -- The appVersion of the chart + tag: "" + +# -- Volume size for postgres. It can generally be very small +postgresVolumeSize: "1Gi" + +# -- Storage class for postgres volume. Set to appropriate value for your +# deployment: at GKE, "standard" (if you want SSD, "premium-rwo", but if you +# want a good database maybe it's better to use a cloud database?), on Rubin +# Observatory Rancher, "rook-ceph-block", elsewhere probably "standard" +postgresStorageClass: "standard" + +# -- Volume name for postgres, if you use an existing volume that isn't +# automatically created from the PVC by the storage driver. +volumeName: "" + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/tests/data/input/applications/tap/secrets.yaml b/tests/data/input/applications/tap/secrets.yaml new file mode 100644 index 0000000000..4280c602a3 --- /dev/null +++ b/tests/data/input/applications/tap/secrets.yaml @@ -0,0 +1,4 @@ +"google_creds.json": + description: >- + Google service account credentials used to write async job output to + Google Cloud Storage. diff --git a/tests/data/input/applications/tap/values.yaml b/tests/data/input/applications/tap/values.yaml new file mode 100644 index 0000000000..9132115129 --- /dev/null +++ b/tests/data/input/applications/tap/values.yaml @@ -0,0 +1,184 @@ +# Default values for cadc-tap. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "cadc-tap" + +# -- Number of pods to start +replicaCount: 1 + +image: + # -- tap image to use + repository: "ghcr.io/lsst-sqre/lsst-tap-service" + + # -- Pull policy for the tap image + pullPolicy: "IfNotPresent" + + # -- Tag of tap image to use + # @default -- The appVersion of the chart + tag: "" + +# Settings for the ingress rules. 
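The postgres files a few hunks above show the values-override pattern in miniature: `values-idfdev.yaml` layers a `nublado3_db` block on top of the chart's `values.yaml`. `ConfigStorage._merge_overrides` applies that merge recursively, with the override winning on conflicts. A standalone sketch of the same behaviour; the `postgresStorageClass` override below is added only to show a conflicting scalar, it is not in the test data:

```python
from typing import Any

import yaml


def merge_overrides(base: dict[str, Any], overrides: dict[str, Any]) -> dict[str, Any]:
    """Recursively merge overrides into base, with overrides winning on conflicts."""
    for key, value in overrides.items():
        if key in base and isinstance(base[key], dict) and isinstance(value, dict):
            merge_overrides(base[key], value)
        else:
            base[key] = value
    return base


base = yaml.safe_load("""
debug: ""
image:
  repository: lsstsqre/lsp-postgres
  pullPolicy: IfNotPresent
postgresStorageClass: standard
""")

overrides = yaml.safe_load("""
postgresStorageClass: standard-rwo
nublado3_db:
  user: nublado3
  db: nublado3
""")

merged = merge_overrides(base, overrides)
print(merged["postgresStorageClass"])  # standard-rwo (override wins)
print(merged["nublado3_db"]["user"])   # nublado3 (new block added)
print(merged["image"]["repository"])   # unchanged chart default
```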
+ingress: + # -- Additional annotations to use for endpoints that allow anonymous + # access, such as `/capabilities` and `/availability` + anonymousAnnotations: {} + + # -- Additional annotations to use for endpoints that are authenticated, + # such as `/sync`, `/async`, and `/tables` + authenticatedAnnotations: {} + +# -- Resource limits and requests for the Gafaelfawr frontend pod +resources: {} + +# -- Annotations for the Gafaelfawr frontend pod +podAnnotations: {} + +# -- Node selector rules for the Gafaelfawr frontend pod +nodeSelector: {} + +# -- Tolerations for the Gafaelfawr frontend pod +tolerations: [] + +# -- Affinity rules for the Gafaelfawr frontend pod +affinity: {} + +# -- Path to the Vault secret (`secret/k8s_operator//tap`, for example) +# @default -- None, must be set +vaultSecretsPath: "" + +config: + # -- Address to a MySQL database containing TAP schema data + tapSchemaAddress: "cadc-tap-schema-db:3306" + + # -- Datalink payload URL + datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip" + + # -- Gafaelfawr hostname to get user information from a token + # @default -- Value of `ingress.host` + gafaelfawrHost: "" + + # -- Name of GCS bucket in which to store results + # @default -- None, must be set + gcsBucket: "" + + # -- Base URL for results stored in GCS bucket + # @default -- None, must be set + gcsBucketUrl: "" + + # -- GCS bucket type (GCS or S3) + # @default -- GCS + gcsBucketType: "GCS" + + # -- Java heap size, which will set the maximum size of the heap. Otherwise + # Java would determine it based on how much memory is available and black + # maths. + jvmMaxHeapSize: 4G + +qserv: + # -- QServ hostname:port to connect to + # @default -- `"mock-qserv:3306"` (the mock QServ) + host: "mock-qserv:3306" + + mock: + # -- Spin up a container to pretend to be QServ. + enabled: true + + image: + # -- Mock QServ image to use + repository: "ghcr.io/lsst-sqre/lsst-tap-mock-qserv" + + # -- Pull policy for the mock QServ image + pullPolicy: "IfNotPresent" + + # -- Tag of mock QServ image to use + # @default -- The appVersion of the chart + tag: "" + + # -- Resource limits and requests for the mock QServ pod + resources: {} + + # -- Annotations for the mock QServ pod + podAnnotations: {} + + # -- Node selection rules for the mock QServ pod + nodeSelector: {} + + # -- Tolerations for the mock QServ pod + tolerations: [] + + # -- Affinity rules for the mock QServ pod + affinity: {} + +tapSchema: + image: + # -- TAP schema image to ue. This must be overridden by each environment + # with the TAP schema for that environment. 
+ repository: "lsstsqre/tap-schema-mock" + + # -- Pull policy for the TAP schema image + pullPolicy: "IfNotPresent" + + # -- Tag of TAP schema image + tag: "2.0.2" + + # -- Resource limits and requests for the TAP schema database pod + resources: {} + + # -- Annotations for the mock QServ pod + podAnnotations: {} + + # -- Node selection rules for the mock QServ pod + nodeSelector: {} + + # -- Tolerations for the mock QServ pod + tolerations: [] + + # -- Affinity rules for the mock QServ pod + affinity: {} + +uws: + image: + # -- UWS database image to use + repository: "ghcr.io/lsst-sqre/lsst-tap-uws-db" + + # -- Pull policy for the UWS database image + pullPolicy: "IfNotPresent" + + # -- Tag of UWS database image to use + # @default -- The appVersion of the chart + tag: "" + + # -- Resource limits and requests for the UWS database pod + resources: {} + + # -- Annotations for the UWS databse pod + podAnnotations: {} + + # -- Node selection rules for the UWS database pod + nodeSelector: {} + + # -- Tolerations for the UWS database pod + tolerations: [] + + # -- Affinity rules for the UWS database pod + affinity: {} + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. +global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: "" + + # -- Host name for ingress + # @default -- Set by Argo CD + host: "" + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: "" diff --git a/tests/data/input/environments/values-idfdev.yaml b/tests/data/input/environments/values-idfdev.yaml new file mode 100644 index 0000000000..5ac78c010a --- /dev/null +++ b/tests/data/input/environments/values-idfdev.yaml @@ -0,0 +1,13 @@ +environment: idfdev +fqdn: data-dev.lsst.cloud +vaultPathPrefix: secret/k8s_operator/data-dev.lsst.cloud +butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" + +gafaelfawr: + enabled: true +mobu: + enabled: true +nublado: + enabled: true +postgres: + enabled: true diff --git a/tests/data/input/environments/values.yaml b/tests/data/input/environments/values.yaml new file mode 100644 index 0000000000..1739532a1f --- /dev/null +++ b/tests/data/input/environments/values.yaml @@ -0,0 +1,24 @@ +# These four settings should be set in each environment values-*.yaml file. 
+ +# -- Name of the environment +# @default -- None, must be set +environment: "" + +# -- Fully-qualified domain name where the environment is running +# @default -- None, must be set +fqdn: "" + +# -- Prefix for Vault secrets for this environment +# @default -- None, must be set +vaultPathPrefix: "" + +# -- Butler repository index to use for this environment +# @default -- None, must be set +butlerRepositoryIndex: "" + +gafaelfawr: + enabled: false +nublado: + enabled: false +postgres: + enabled: false diff --git a/tests/data/output/idfdev/secrets-list b/tests/data/output/idfdev/secrets-list new file mode 100644 index 0000000000..1acf5b3628 --- /dev/null +++ b/tests/data/output/idfdev/secrets-list @@ -0,0 +1,26 @@ +argocd admin.password +argocd admin.passwordMtime +argocd admin.plaintext_password +argocd dex.clientSecret +argocd server.secretkey +gafaelfawr bootstrap-token +gafaelfawr cilogon-client-secret +gafaelfawr database-password +gafaelfawr ldap-password +gafaelfawr redis-password +gafaelfawr session-secret +gafaelfawr signing-key +gafaelfawr slack-webhook +mobu ALERT_HOOK +mobu app-alert-webhook +nublado aws-credentials.ini +nublado butler-gcs-idf-creds.json +nublado butler-hmac-idf-creds.json +nublado cryptkeeper_key +nublado crypto_key +nublado hub_db_password +nublado postgres-credentials.txt +nublado proxy_token +nublado slack_webhook +postgres nublado3_password +postgres root_password diff --git a/tests/support/__init__.py b/tests/support/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/support/data.py b/tests/support/data.py new file mode 100644 index 0000000000..8dcedf8873 --- /dev/null +++ b/tests/support/data.py @@ -0,0 +1,43 @@ +"""Utilities for managing test data.""" + +from __future__ import annotations + +from pathlib import Path + +__all__ = [ + "phalanx_test_path", + "read_output_data", +] + + +def phalanx_test_path() -> Path: + """Return path to Phalanx test data. + + Returns + ------- + Path + Path to test input data. The directory will contain test data in the + layout of a Phalanx repository to test information gathering and + analysis. + """ + return Path(__file__).parent.parent / "data" / "input" + + +def read_output_data(environment: str, filename: str) -> str: + """Read test output data and return it. + + Parameters + ---------- + environment + Name of the environment under :filename:`data/output` that the test + output is for. + filename + File containing the output data. + + Returns + ------- + str + Contents of the file. + """ + base_path = Path(__file__).parent.parent / "data" / "output" + return (base_path / environment / filename).read_text() From e39a2348610f9cee9f4b924a823a0a6770e00dc7 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 27 Jul 2023 12:55:16 -0700 Subject: [PATCH 282/308] Rework the secret models The secret model inheritance was confusing and required some copies that were unnecessary for the type system. We were also resolving the top-level secret condition during environment loading, but not resolving conditions on copy and generate until secret resolution, but there's no reason not to resolve all the conditions at the same time. Restructure the models to have Conditional versions of not only the secret but also the copy and generate rules, and resolve all of the conditions during configuration loading. Rely on the type system to catch any use of the condition after configuration loading. 
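
For orientation, the condition handling reduces to a small lookup that
can be sketched on its own (a minimal, self-contained illustration, not
part of the patch; the real logic lives on
ApplicationInstance.is_values_setting_true and in the configuration
loader): a condition names a Helm values setting, the dotted path is
walked through the instance's merged values, and the secret or its
copy/generate rule is kept only when the lookup succeeds and the value
is true.

    def is_values_setting_true(values: dict, setting: str) -> bool:
        """Walk a dotted path through merged Helm values (sketch)."""
        node = values
        for key in setting.split("."):
            if key not in node:
                return False
            node = node[key]
        return bool(node)

    # A secret guarded by "if: gafaelfawr_db" (as in the postgres
    # secrets.yaml test data) applies only when that block is present.
    values = {"gafaelfawr_db": {"user": "gafaelfawr", "db": "gafaelfawr"}}
    print(is_values_setting_true(values, "gafaelfawr_db"))   # True
    print(is_values_setting_true(values, "exposurelog_db"))  # False
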
--- src/phalanx/exceptions.py | 48 +++++++++--- src/phalanx/models/applications.py | 35 ++++----- src/phalanx/models/secrets.py | 105 +++++++++++--------------- src/phalanx/services/secrets.py | 30 ++------ src/phalanx/storage/config.py | 116 ++++++++++++++++++++--------- 5 files changed, 187 insertions(+), 147 deletions(-) diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py index 3d867a5f69..acb5322d3a 100644 --- a/src/phalanx/exceptions.py +++ b/src/phalanx/exceptions.py @@ -4,7 +4,7 @@ from collections.abc import Iterable -from .models.secrets import RequiredSecret, Secret +from .models.secrets import Secret __all__ = [ "InvalidEnvironmentConfigError", @@ -15,7 +15,15 @@ class InvalidEnvironmentConfigError(Exception): - """Configuration for an environment is invalid.""" + """Configuration for an environment is invalid. + + Parameters + ---------- + name + Name of the environment. + error + Error message. + """ def __init__(self, name: str, error: str) -> None: msg = "Invalid configuration for environment {name}: {error}" @@ -23,18 +31,34 @@ def __init__(self, name: str, error: str) -> None: class InvalidSecretConfigError(Exception): - """Secret configuration is invalid.""" - - def __init__(self, config: Secret | RequiredSecret, error: str) -> None: - name = f"{config.application}/{config.key}" + """Secret configuration is invalid. + + Parameters + ---------- + application + Name of the application. + key + Secret key. + error + Error message. + """ + + def __init__(self, application: str, key: str, error: str) -> None: + name = f"{application}/{key}" msg = f"Invalid configuration for secret {name}: {error}" super().__init__(msg) class UnresolvedSecretsError(Exception): - """Some secrets could not be resolved.""" + """Some secrets could not be resolved. - def __init__(self, secrets: Iterable[RequiredSecret]) -> None: + Parameters + ---------- + secrets + Secrets that could not be resolved. + """ + + def __init__(self, secrets: Iterable[Secret]) -> None: names = [f"{u.application}/{u.key}" for u in secrets] names_str = ", ".join(names) msg = f"Some secrets could not be resolved: {names_str}" @@ -42,7 +66,13 @@ def __init__(self, secrets: Iterable[RequiredSecret]) -> None: class UnknownEnvironmentError(Exception): - """No configuration found for an environment name.""" + """No configuration found for an environment name. + + Parameters + ---------- + name + Name of the environment. 
+ """ def __init__(self, name: str) -> None: msg = f"No configuration found for environment {name}" diff --git a/src/phalanx/models/applications.py b/src/phalanx/models/applications.py index c1274b79b9..65ef052758 100644 --- a/src/phalanx/models/applications.py +++ b/src/phalanx/models/applications.py @@ -6,7 +6,7 @@ from pydantic import BaseModel -from .secrets import RequiredSecret, Secret +from .secrets import ConditionalSecretConfig, Secret __all__ = [ "Application", @@ -26,11 +26,11 @@ class Application(BaseModel): environment_values: dict[str, dict[str, Any]] """Per-environment Helm chart overrides by environment name.""" - secrets: list[Secret] - """Base secret configuration for the application.""" + secrets: dict[str, ConditionalSecretConfig] + """Secrets for the application, by secret key.""" - environment_secrets: dict[str, list[Secret]] - """Per-environment secrets for the application.""" + environment_secrets: dict[str, dict[str, ConditionalSecretConfig]] + """Per-environment secrets for the application, by secret key.""" class ApplicationInstance(BaseModel): @@ -45,31 +45,28 @@ class ApplicationInstance(BaseModel): values: dict[str, Any] """Merged Helm values for the application in this environment.""" - secrets: list[RequiredSecret] = [] + secrets: list[Secret] = [] """Secrets required for this application in this environment.""" - def is_condition_met(self, condition: str | None) -> bool: - """Determine whether a secret condition has been met. + def is_values_setting_true(self, setting: str) -> bool: + """Determine whether a given Helm values setting is true. - Conditions are used both for the secret as a whole and for the - ``copy`` and ``generate`` sections. The condition is met if it either - is `None` or if it is a string pointing to a values parameter for the - application instance that is set to a true value. + The values setting is considered true if the corresponding values + parameter is present and set to a true value (a non-empty array or + dictionary or a string, number, or boolean value that evaluates to + true in Python). Parameters ---------- - condition - Condition to check. + setting + Setting to check. Returns ------- bool - `True` if the condition was met or does not exist, `False` - otherwise. + `True` if the setting was set to a true value, `False` otherwise. 
""" - if not condition: - return True - path = condition.split(".") + path = setting.split(".") values = self.values for key in path: if key not in values: diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index 6cce3329cf..6e1332d99f 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -3,13 +3,14 @@ from __future__ import annotations from enum import Enum -from typing import Any, Self +from typing import Any from pydantic import BaseModel, Extra, Field, SecretStr, validator __all__ = [ - "RequiredSecret", - "RequiredSecretConfig", + "ConditionalSecretConfig", + "ConditionalSecretCopyRules", + "ConditionalSecretGenerateRules", "ResolvedSecret", "Secret", "SecretConfig", @@ -28,6 +29,14 @@ class SecretCopyRules(BaseModel): key: str """Secret key from which the secret should be copied.""" + class Config: + allow_population_by_field_name = True + extra = Extra.forbid + + +class ConditionalSecretCopyRules(SecretCopyRules): + """Possibly conditional rules for copying a secret value from another.""" + condition: str | None = Field( None, description=( @@ -37,10 +46,6 @@ class SecretCopyRules(BaseModel): alias="if", ) - class Config: - allow_population_by_field_name = True - extra = Extra.forbid - class SecretGenerateType(Enum): """Type of secret for generated secrets.""" @@ -66,15 +71,6 @@ class SecretGenerateRules(BaseModel): ``mtime``. """ - condition: str | None = Field( - None, - description=( - "Helm chart value that, if set, indicates the secret should be" - " generated" - ), - alias="if", - ) - class Config: allow_population_by_field_name = True extra = Extra.forbid @@ -97,14 +93,21 @@ def _validate_source( return v -class RequiredSecretConfig(BaseModel): - """Specification for an application secret after checking ``if``. +class ConditionalSecretGenerateRules(SecretGenerateRules): + """Possibly conditional rules for generating a secret value.""" - The general class for secret configuration is `SecretConfig`. This is the - same model except without the ``if`` (``condition``) attribute and is used - for secrets that have already been filtered for whether they are required - by a given application instance. - """ + condition: str | None = Field( + None, + description=( + "Helm chart value that, if set, indicates the secret should be" + " generated" + ), + alias="if", + ) + + +class SecretConfig(BaseModel): + """Specification for an application secret.""" description: str """Description of the secret.""" @@ -149,10 +152,15 @@ def _validate_value( return v -class SecretConfig(RequiredSecretConfig): - """Specification for an application secret. +class ConditionalSecretConfig(SecretConfig): + """Possibly conditional specification for an application secret. + + This class represents the on-disk schema for secret configurations, which + may include conditions on the secret itself and on its copy and generate + rules. Those conditions cannot be evaluated until the configuration of an + application for a specific environment is known. - Represents the on-disk schema for secret configurations. + The equivalent class with the conditions evaluated is `SecretConfig`. 
""" condition: str | None = Field( @@ -164,9 +172,18 @@ class SecretConfig(RequiredSecretConfig): alias="if", ) + copy_rules: ConditionalSecretCopyRules | None = Field( + None, + description="Rules for where the secret should be copied from", + alias="copy", + ) + + generate: ConditionalSecretGenerateRules | None = None + """Rules for how the secret should be generated.""" + class Secret(SecretConfig): - """An application secret. + """Specification for an application secret for a specific environment. The same as `SecretConfig` except augmented with the secret application and key for internal convenience. @@ -179,40 +196,6 @@ class Secret(SecretConfig): """Application of the secret.""" -class RequiredSecret(SecretConfig): - """An application secret required for this instance. - - This represents the secret configuration for an application instance after - filtering out secrets that are not relevant to the instance's environment - and adding the secret application and key information from context. - """ - - key: str - """Key of the secret.""" - - application: str - """Application of the secret.""" - - @classmethod - def from_secret(cls, secret: Secret) -> Self: - """Convert from a `Secret` assuming its condition was met. - - Parameters - ---------- - secret - Secret with a condition that has been met. - - Returns - ------- - RequiredSecret - Secret with no top-level condition, since it has been satisfied. - """ - attrs = secret.dict() - if "condition" in attrs: - del attrs["condition"] - return cls(**attrs) - - class ResolvedSecret(BaseModel): """A secret that has been resolved for a given application instance. diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 099a121059..7b196b3551 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -17,12 +17,12 @@ from cryptography.hazmat.primitives.asymmetric import rsa from pydantic import SecretStr -from ..exceptions import InvalidSecretConfigError, UnresolvedSecretsError +from ..exceptions import UnresolvedSecretsError from ..models.applications import ApplicationInstance from ..models.environments import Environment from ..models.secrets import ( - RequiredSecret, ResolvedSecret, + Secret, SecretGenerateRules, SecretGenerateType, ) @@ -132,7 +132,7 @@ def _generate_secret( return SecretStr(date) def _resolve_secrets( - self, secrets: list[RequiredSecret], environment: Environment + self, secrets: list[Secret], environment: Environment ) -> list[ResolvedSecret]: """Resolve the secrets for a Phalanx environment. @@ -184,7 +184,7 @@ def _resolve_secrets( def _resolve_secret( self, - config: RequiredSecret, + config: Secret, instance: ApplicationInstance, resolved: dict[str, dict[str, ResolvedSecret]], ) -> _SecretResolution: @@ -204,11 +204,6 @@ def _resolve_secret( ------- SecretResolution Results of attempting to resolve this secret. - - Raises - ------ - InvalidSecretConfigError - Raised if the secret configuration has conflicting rules. """ # If a value was already provided, this is the easy case. if config.value: @@ -221,21 +216,8 @@ def _resolve_secret( ), ) - # See if either generate or copy were configured for this secret. - should_copy = False + # Do copying or generation if configured. 
if config.copy_rules: - condition = config.copy_rules.condition - should_copy = instance.is_condition_met(condition) - should_generate = False - if config.generate: - condition = config.generate.condition - should_generate = instance.is_condition_met(condition) - if should_copy and should_generate: - msg = "Copy and generate rules conflict" - raise InvalidSecretConfigError(config, msg) - - # Do the copying or generation. - if should_copy and config.copy_rules: application = config.copy_rules.application other = resolved.get(application, {}).get(config.copy_rules.key) if not other: @@ -248,7 +230,7 @@ def _resolve_secret( value=other.value, ), ) - elif should_generate and config.generate: + if config.generate: if config.generate.source: other_key = config.generate.source other = resolved.get(config.application, {}).get(other_key) diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 72abe6a0ec..813215377e 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -7,10 +7,10 @@ import yaml -from ..exceptions import UnknownEnvironmentError +from ..exceptions import InvalidSecretConfigError, UnknownEnvironmentError from ..models.applications import Application, ApplicationInstance from ..models.environments import Environment, EnvironmentConfig -from ..models.secrets import RequiredSecret, Secret, SecretConfig +from ..models.secrets import ConditionalSecretConfig, Secret __all__ = ["ConfigStorage"] @@ -75,6 +75,33 @@ def load_environment(self, environment_name: str) -> Environment: } return Environment(name=config.environment, applications=instances) + def _is_condition_satisfied( + self, instance: ApplicationInstance, condition: str | None + ) -> bool: + """Evaluate a secret condition on an application instance. + + This is a convenience wrapper around + `ApplicationInstance.is_is_values_setting_true` that also treats a + `None` condition parameter as true. + + Parameters + ---------- + instance + Application instance for a specific environment. + condition + Condition, or `None` if there is no condition. + + Returns + ------- + bool + `True` if condition is `None` or corresponds to a values setting + whose value is true, `False` otherwise. + """ + if not condition: + return True + else: + return instance.is_values_setting_true(condition) + def _load_application(self, name: str) -> Application: """Load the configuration for an application from disk. @@ -107,14 +134,14 @@ def _load_application(self, name: str) -> Application: # Load the secrets configuration. secrets_path = base_path / "secrets.yaml" - secrets = [] + secrets = {} if secrets_path.exists(): with secrets_path.open("r") as fh: raw_secrets = yaml.safe_load(fh) - for key, raw_config in raw_secrets.items(): - config = SecretConfig.parse_obj(raw_config) - secret = Secret(key=key, application=name, **config.dict()) - secrets.append(secret) + secrets = { + k: ConditionalSecretConfig.parse_obj(s) + for k, s in raw_secrets.items() + } # Load the environment-specific secrets configuration. 
environment_secrets = {} @@ -122,12 +149,10 @@ def _load_application(self, name: str) -> Application: env_name = path.stem[len("secrets-") :] with path.open("r") as fh: raw_secrets = yaml.safe_load(fh) - env_secrets = [] - for key, raw_config in raw_secrets.items(): - config = SecretConfig.parse_obj(raw_config) - secret = Secret(key=key, application=name, **config.dict()) - env_secrets.append(secret) - environment_secrets[env_name] = env_secrets + environment_secrets[env_name] = { + k: ConditionalSecretConfig.parse_obj(s) + for k, s in raw_secrets.items() + } # Return the resulting application. return Application( @@ -201,6 +226,11 @@ def _resolve_application( ------- ApplicationInstance Resolved application. + + Raises + ------ + InvalidSecretConfigError + Raised if the secret configuration has conflicting rules. """ # Merge values with any environment overrides. values = application.values @@ -208,21 +238,6 @@ def _resolve_application( env_values = application.environment_values[environment_name] values = _merge_overrides(values, env_values) - # Merge secrets with any environment secrets. - if environment_name in application.environment_secrets: - env_secrets = application.environment_secrets[environment_name] - extra_secrets = {s.key: s for s in env_secrets} - secrets = [] - for secret in application.secrets: - if secret.key in extra_secrets: - secrets.append(extra_secrets[secret.key]) - del extra_secrets[secret.key] - else: - secrets.append(secret) - secrets.extend(extra_secrets.values()) - else: - secrets = application.secrets - # Create an initial application instance without secrets so that we # can use its class methods. instance = ApplicationInstance( @@ -231,10 +246,43 @@ def _resolve_application( values=values, ) - # Filter out the secrets that don't apply to this instance. - instance.secrets = [ - RequiredSecret.from_secret(s) - for s in secrets - if instance.is_condition_met(s.condition) - ] + # Merge secrets with any environment secrets. + secrets = application.secrets + if environment_name in application.environment_secrets: + secrets = application.secrets.copy() + secrets.update(application.environment_secrets[environment_name]) + + # Evaluate the conditions on all of the secrets. Both the top-level + # condition and any conditions on the copy and generate rules will be + # resolved, so that any subsequent processing based on the instance no + # longer needs to worry about conditions. + required_secrets = [] + for key, config in secrets.items(): + if not self._is_condition_satisfied(instance, config.condition): + continue + copy = config.copy_rules + if copy: + condition = copy.condition + if not self._is_condition_satisfied(instance, condition): + copy = None + generate = config.generate + if generate: + condition = generate.condition + if not self._is_condition_satisfied(instance, condition): + generate = None + if copy and generate: + msg = "Copy and generate rules conflict" + raise InvalidSecretConfigError(instance.name, key, msg) + secret = Secret( + application=application.name, + key=key, + description=config.description, + copy_rules=copy, + generate=generate, + value=config.value, + ) + required_secrets.append(secret) + + # Add the secrets to the new instance and return it. 
+ instance.secrets = required_secrets return instance From 3c4b1d1d9be41a2f8cef8cc9fd690db3a0379776 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Thu, 27 Jul 2023 16:57:36 -0700 Subject: [PATCH 283/308] Fix some secret description bugs Ensure that secrets can be listed for the IDF environments and for minikube. The T&S environments are not yet working because they want to take the Slack alert webhook from mobu and mobu isn't enabled for those environments. This will be fixed eventually by enabling mobu without any monkeys. --- applications/argocd/secrets.yaml | 6 +++--- applications/postgres/values-minikube.yaml | 9 --------- src/phalanx/storage/config.py | 4 +++- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/applications/argocd/secrets.yaml b/applications/argocd/secrets.yaml index 9d16fb2843..8c32f231ab 100644 --- a/applications/argocd/secrets.yaml +++ b/applications/argocd/secrets.yaml @@ -1,4 +1,4 @@ -"argocd.admin.plaintext_password": +"admin.plaintext_password": description: >- Admin password for Argo CD. This password is normally not used because Argo CD is configured to use Google or GitHub authentication, but it is @@ -14,12 +14,12 @@ automatically if the admin password is changed. generate: type: bcrypt-password-hash - source: admin-password + source: admin.plaintext_password "admin.passwordMtime": description: "Last modification time for the admin password." generate: type: mtime - source: admin-password + source: admin.plaintext_password "dex.clientSecret": description: >- OAuth 2 or OpenID Connect client secret, used to authenticate to GitHub or diff --git a/applications/postgres/values-minikube.yaml b/applications/postgres/values-minikube.yaml index 1dc388a3ea..e048b69fac 100644 --- a/applications/postgres/values-minikube.yaml +++ b/applications/postgres/values-minikube.yaml @@ -1,14 +1,5 @@ debug: "true" -jupyterhub_db: - user: "jovyan" - db: "jupyterhub" -exposurelog_db: - user: "exposurelog" - db: "exposurelog" gafaelfawr_db: user: "gafaelfawr" db: "gafaelfawr" -narrativelog_db: - user: "narrativelog" - db: "narrativelog" postgresStorageClass: "standard" diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 813215377e..bea5005a8e 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -130,7 +130,9 @@ def _load_application(self, name: str) -> Application: for path in base_path.glob("values-*.yaml"): env_name = path.stem[len("values-") :] with path.open("r") as fh: - environment_values[env_name] = yaml.safe_load(fh) + env_values = yaml.safe_load(fh) + if env_values: + environment_values[env_name] = env_values # Load the secrets configuration. secrets_path = base_path / "secrets.yaml" From 4d3c1d429ae41df51ae0ba7d4f3ade8fa9b3494b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 28 Jul 2023 10:32:40 -0700 Subject: [PATCH 284/308] Rework the secret generator models Move the code to generate a secret into a method on the secret generator rules model, since there is no business logic there. Add the understanding that certain generation types require a source key directly to the model so that Pydantic will do model checking. The second change unfortunately makes the model hierarchy more complex because of conditions. Move the definition of the condition attribute to a mixin to avoid repeating so much code, and add a couple of type aliases to simplify things. This unfortunately still requires an isinstance check when generating secrets, but it keeps the YAML describing a secret fairly simple. 
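
As a rough usage sketch (assuming the phalanx package built from this
patch is importable; the source key shown is the argocd admin password
key used elsewhere in this repository), simple rules generate a value
directly, while source-based rules carry a required source key and take
that secret's value, which is what the remaining isinstance check
distinguishes:

    from phalanx.models.secrets import (
        SecretGenerateType,
        SimpleSecretGenerateRules,
        SourceSecretGenerateRules,
    )

    # Simple types need no source and generate a value directly.
    simple = SimpleSecretGenerateRules(type=SecretGenerateType.password)
    password = simple.generate()  # SecretStr holding a random token

    # Source-based types require a source key and take that secret's
    # value when generating.
    based = SourceSecretGenerateRules(
        type=SecretGenerateType.mtime, source="admin.plaintext_password"
    )
    mtime = based.generate(password)  # timestamp as a SecretStr
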
--- src/phalanx/models/secrets.py | 204 ++++++++++++++++++++------------ src/phalanx/services/secrets.py | 76 +----------- 2 files changed, 132 insertions(+), 148 deletions(-) diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index 6e1332d99f..3ac95fcfd5 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -2,24 +2,51 @@ from __future__ import annotations +import os +import secrets +from base64 import urlsafe_b64encode +from datetime import UTC, datetime from enum import Enum -from typing import Any +from typing import Any, Literal +import bcrypt +from cryptography.fernet import Fernet +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa from pydantic import BaseModel, Extra, Field, SecretStr, validator __all__ = [ + "ConditionalMixin", "ConditionalSecretConfig", "ConditionalSecretCopyRules", "ConditionalSecretGenerateRules", + "ConditionalSimpleSecretGenerateRules", + "ConditionalSourceSecretGenerateRules", "ResolvedSecret", "Secret", "SecretConfig", "SecretCopyRules", "SecretGenerateRules", "SecretGenerateType", + "SimpleSecretGenerateRules", + "SourceSecretGenerateRules", ] +class ConditionalMixin(BaseModel): + """Mix-in class for elements that may have a condition.""" + + condition: str | None = Field( + None, + description=( + "Helm chart value that, if set, indicates the secret should be" + " copied" + ), + alias="if", + ) + + class SecretCopyRules(BaseModel): """Rules for copying a secret value from another secret.""" @@ -34,20 +61,11 @@ class Config: extra = Extra.forbid -class ConditionalSecretCopyRules(SecretCopyRules): +class ConditionalSecretCopyRules(SecretCopyRules, ConditionalMixin): """Possibly conditional rules for copying a secret value from another.""" - condition: str | None = Field( - None, - description=( - "Helm chart value that, if set, indicates the secret should be" - " copied" - ), - alias="if", - ) - -class SecretGenerateType(Enum): +class SecretGenerateType(str, Enum): """Type of secret for generated secrets.""" password = "password" @@ -58,52 +76,91 @@ class SecretGenerateType(Enum): mtime = "mtime" -class SecretGenerateRules(BaseModel): - """Rules for generating a secret value.""" +class SimpleSecretGenerateRules(BaseModel): + """Rules for generating a secret value with no source information.""" + + type: Literal[ + SecretGenerateType.password, + SecretGenerateType.gafaelfawr_token, + SecretGenerateType.fernet_key, + SecretGenerateType.rsa_private_key, + ] + """Type of secret.""" + + class Config: + allow_population_by_field_name = True + extra = Extra.forbid - type: SecretGenerateType + def generate(self) -> SecretStr: + """Generate a new secret following these rules.""" + match self.type: + case SecretGenerateType.password: + return SecretStr(secrets.token_hex(32)) + case SecretGenerateType.gafaelfawr_token: + key = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") + secret = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") + return SecretStr(f"gt-{key}.{secret}") + case SecretGenerateType.fernet_key: + return SecretStr(Fernet.generate_key().decode()) + case SecretGenerateType.rsa_private_key: + private_key = rsa.generate_private_key( + backend=default_backend(), + public_exponent=65537, + key_size=2048, + ) + private_key_bytes = private_key.private_bytes( + serialization.Encoding.PEM, + serialization.PrivateFormat.PKCS8, + serialization.NoEncryption(), + ) + return 
SecretStr(private_key_bytes.decode()) + + +class ConditionalSimpleSecretGenerateRules( + SimpleSecretGenerateRules, ConditionalMixin +): + """Conditional rules for generating a secret value with no source.""" + + +class SourceSecretGenerateRules(BaseModel): + """Rules for generating a secret from another secret.""" + + type: Literal[ + SecretGenerateType.bcrypt_password_hash, + SecretGenerateType.mtime, + ] """Type of secret.""" - source: str | None = None + source: str """Key of secret on which this secret is based. This may only be set by secrets of type ``bcrypt-password-hash`` or ``mtime``. """ - class Config: - allow_population_by_field_name = True - extra = Extra.forbid + def generate(self, source: SecretStr) -> SecretStr: + match self.type: + case SecretGenerateType.bcrypt_password_hash: + password_hash = bcrypt.hashpw( + source.get_secret_value().encode(), + bcrypt.gensalt(rounds=15), + ) + return SecretStr(password_hash.decode()) + case SecretGenerateType.mtime: + date = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") + return SecretStr(date) - @validator("source") - def _validate_source( - cls, v: str | None, values: dict[str, Any] - ) -> str | None: - secret_type = values["type"] - want_value = secret_type in ( - SecretGenerateType.bcrypt_password_hash, - SecretGenerateType.mtime, - ) - if v is None and want_value: - msg = f"source not set for secret of type {secret_type}" - raise ValueError(msg) - if v is not None and not want_value: - msg = f"source not allowed for secret of type {secret_type}" - raise ValueError(msg) - return v +class ConditionalSourceSecretGenerateRules( + SourceSecretGenerateRules, ConditionalMixin +): + """Conditional rules for generating a secret from another secret.""" -class ConditionalSecretGenerateRules(SecretGenerateRules): - """Possibly conditional rules for generating a secret value.""" - condition: str | None = Field( - None, - description=( - "Helm chart value that, if set, indicates the secret should be" - " generated" - ), - alias="if", - ) +SecretGenerateRules = SimpleSecretGenerateRules | SourceSecretGenerateRules +ConditionalSecretGenerateRules = ( + ConditionalSimpleSecretGenerateRules | ConditionalSourceSecretGenerateRules +) class SecretConfig(BaseModel): @@ -128,10 +185,33 @@ class Config: allow_population_by_field_name = True extra = Extra.forbid + +class ConditionalSecretConfig(SecretConfig, ConditionalMixin): + """Possibly conditional specification for an application secret. + + This class represents the on-disk schema for secret configurations, which + may include conditions on the secret itself and on its copy and generate + rules. Those conditions cannot be evaluated until the configuration of an + application for a specific environment is known. + + The equivalent class with the conditions evaluated is `SecretConfig`. 
+ """ + + copy_rules: ConditionalSecretCopyRules | None = Field( + None, + description="Rules for where the secret should be copied from", + alias="copy", + ) + + generate: ConditionalSecretGenerateRules | None = None + """Rules for how the secret should be generated.""" + @validator("generate") def _validate_generate( - cls, v: SecretGenerateRules | None, values: dict[str, Any] - ) -> SecretGenerateRules | None: + cls, + v: ConditionalSecretGenerateRules | None, + values: dict[str, Any], + ) -> ConditionalSecretGenerateRules | None: has_copy = "copy" in values and "condition" not in values["copy"] if v and has_copy: msg = "both copy and generate may not be set for the same secret" @@ -152,36 +232,6 @@ def _validate_value( return v -class ConditionalSecretConfig(SecretConfig): - """Possibly conditional specification for an application secret. - - This class represents the on-disk schema for secret configurations, which - may include conditions on the secret itself and on its copy and generate - rules. Those conditions cannot be evaluated until the configuration of an - application for a specific environment is known. - - The equivalent class with the conditions evaluated is `SecretConfig`. - """ - - condition: str | None = Field( - None, - description=( - "Helm chart value that, if set, indicates the secret should be" - " generated" - ), - alias="if", - ) - - copy_rules: ConditionalSecretCopyRules | None = Field( - None, - description="Rules for where the secret should be copied from", - alias="copy", - ) - - generate: ConditionalSecretGenerateRules | None = None - """Rules for how the secret should be generated.""" - - class Secret(SecretConfig): """Specification for an application secret for a specific environment. diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 7b196b3551..01a4b5f6b1 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -2,30 +2,14 @@ from __future__ import annotations -import os -import secrets -from base64 import urlsafe_b64encode from collections import defaultdict from dataclasses import dataclass -from datetime import UTC, datetime from enum import Enum -import bcrypt -from cryptography.fernet import Fernet -from cryptography.hazmat.backends import default_backend -from cryptography.hazmat.primitives import serialization -from cryptography.hazmat.primitives.asymmetric import rsa -from pydantic import SecretStr - from ..exceptions import UnresolvedSecretsError from ..models.applications import ApplicationInstance from ..models.environments import Environment -from ..models.secrets import ( - ResolvedSecret, - Secret, - SecretGenerateRules, - SecretGenerateType, -) +from ..models.secrets import ResolvedSecret, Secret, SourceSecretGenerateRules from ..storage.config import ConfigStorage __all__ = ["SecretsService"] @@ -81,56 +65,6 @@ def list_secrets(self, environment_name: str) -> list[ResolvedSecret]: secrets.extend(application.secrets) return self._resolve_secrets(secrets, environment) - def _generate_secret( - self, config: SecretGenerateRules, source: SecretStr | None = None - ) -> SecretStr: - """Generate the value of a secret. - - Parameters - ---------- - config - Rules for generating the secret. - source - Secret on which this secret is based. - - Returns - ------- - SecretStr - Newly-generated secret. 
- """ - match config.type: - case SecretGenerateType.password: - return SecretStr(secrets.token_hex(32)) - case SecretGenerateType.gafaelfawr_token: - key = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") - secret = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=") - return SecretStr(f"gt-{key}.{secret}") - case SecretGenerateType.fernet_key: - return SecretStr(Fernet.generate_key().decode()) - case SecretGenerateType.rsa_private_key: - private_key = rsa.generate_private_key( - backend=default_backend(), - public_exponent=65537, - key_size=2048, - ) - private_key_bytes = private_key.private_bytes( - serialization.Encoding.PEM, - serialization.PrivateFormat.PKCS8, - serialization.NoEncryption(), - ) - return SecretStr(private_key_bytes.decode()) - case SecretGenerateType.bcrypt_password_hash: - if not source: - raise RuntimeError("bcrypt-password-hash with no source") - password_hash = bcrypt.hashpw( - source.get_secret_value().encode(), - bcrypt.gensalt(rounds=15), - ) - return SecretStr(password_hash.decode()) - case SecretGenerateType.mtime: - date = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ") - return SecretStr(date) - def _resolve_secrets( self, secrets: list[Secret], environment: Environment ) -> list[ResolvedSecret]: @@ -231,14 +165,14 @@ def _resolve_secret( ), ) if config.generate: - if config.generate.source: + if isinstance(config.generate, SourceSecretGenerateRules): other_key = config.generate.source other = resolved.get(config.application, {}).get(other_key) - if not other: + if not (other and other.value): return _SecretResolution(status=_SecretStatus.PENDING) - value = self._generate_secret(config.generate, other.value) + value = config.generate.generate(other.value) else: - value = self._generate_secret(config.generate) + value = config.generate.generate() return _SecretResolution( status=_SecretStatus.KEEP, secret=ResolvedSecret( From a3e69ec3d3493021edd6124b643e5606aa39586a Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 28 Jul 2023 10:38:22 -0700 Subject: [PATCH 285/308] Stop resolving secrets when generating a list Now that we resolve all conditions while loading the environment, we don't need to generate new secret values for secrets when just returning a list of configured secrets. Keep the generation code, since we'll use it later, but simplify how lists of secrets are returned. --- src/phalanx/services/secrets.py | 4 ++-- src/phalanx/storage/config.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 01a4b5f6b1..1e5b455bd9 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -46,7 +46,7 @@ class SecretsService: def __init__(self, config_storage: ConfigStorage) -> None: self._config = config_storage - def list_secrets(self, environment_name: str) -> list[ResolvedSecret]: + def list_secrets(self, environment_name: str) -> list[Secret]: """List all required secrets for the given environment. 
Parameters @@ -63,7 +63,7 @@ def list_secrets(self, environment_name: str) -> list[ResolvedSecret]: secrets = [] for application in environment.all_applications(): secrets.extend(application.secrets) - return self._resolve_secrets(secrets, environment) + return secrets def _resolve_secrets( self, secrets: list[Secret], environment: Environment diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index bea5005a8e..632a7c86f2 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -286,5 +286,7 @@ def _resolve_application( required_secrets.append(secret) # Add the secrets to the new instance and return it. - instance.secrets = required_secrets + instance.secrets = sorted( + required_secrets, key=lambda s: (s.application, s.key) + ) return instance From 389e4b277496d895c9f1fd1697bbfd4b558741d6 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 28 Jul 2023 12:13:26 -0700 Subject: [PATCH 286/308] Generate secrets schema from Pydantic Rather than hand-writing a secrets schema, generate one from the Pydantic models, which should be more maintainable and not repeat the same information. Still commit it to the source tree, since that makes pre-commit schema checking easier, but add a test that the current schema is up-to-date. --- docs/extras/schemas/secrets.json | 185 ++++++++++++++++++------------- src/phalanx/cli.py | 38 ++++++- src/phalanx/models/secrets.py | 15 +-- tests/cli/secrets_test.py | 15 +++ 4 files changed, 166 insertions(+), 87 deletions(-) diff --git a/docs/extras/schemas/secrets.json b/docs/extras/schemas/secrets.json index 7cb33c9eb5..3aebc02f71 100644 --- a/docs/extras/schemas/secrets.json +++ b/docs/extras/schemas/secrets.json @@ -1,100 +1,135 @@ { - "$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://phalanx.lsst.io/schemas/secrets.json", - "title": "Application secrets", - "description": "Schema for secrets required by Phalanx applications", + "title": "Phalanx application secret definitions", "type": "object", - "patternProperties": { - "^.+$": { + "additionalProperties": { + "$ref": "#/definitions/ConditionalSecretConfig" + }, + "definitions": { + "ConditionalSecretCopyRules": { + "title": "ConditionalSecretCopyRules", + "description": "Possibly conditional rules for copying a secret value from another.", "type": "object", "properties": { - "description": { + "if": { + "title": "Condition", + "description": "Configuration only applies if this Helm chart setting is set to a true value", + "type": "string" + }, + "application": { + "title": "Application", "type": "string" }, + "key": { + "title": "Key", + "type": "string" + } + }, + "required": [ + "application", + "key" + ], + "additionalProperties": false + }, + "ConditionalSimpleSecretGenerateRules": { + "title": "ConditionalSimpleSecretGenerateRules", + "description": "Conditional rules for generating a secret value with no source.", + "type": "object", + "properties": { "if": { + "title": "Condition", + "description": "Configuration only applies if this Helm chart setting is set to a true value", + "type": "string" + }, + "type": { + "title": "Type", + "enum": [ + "password", + "gafaelfawr-token", + "fernet-key", + "rsa-private-key" + ], + "type": "string" + } + }, + "required": [ + "type" + ], + "additionalProperties": false + }, + "ConditionalSourceSecretGenerateRules": { + "title": "ConditionalSourceSecretGenerateRules", + "description": "Conditional rules for generating a secret from another secret.", + "type": "object", + "properties": { + 
"if": { + "title": "Condition", + "description": "Configuration only applies if this Helm chart setting is set to a true value", + "type": "string" + }, + "type": { + "title": "Type", + "enum": [ + "bcrypt-password-hash", + "mtime" + ], + "type": "string" + }, + "source": { + "title": "Source", + "type": "string" + } + }, + "required": [ + "type", + "source" + ] + }, + "ConditionalSecretConfig": { + "title": "ConditionalSecretConfig", + "description": "Possibly conditional specification for an application secret.", + "type": "object", + "properties": { + "if": { + "title": "Condition", + "description": "Configuration only applies if this Helm chart setting is set to a true value", + "type": "string" + }, + "description": { + "title": "Description", "type": "string" }, "copy": { - "type": "object", - "properties": { - "if": { - "type": "string" - }, - "application": { - "type": "string" - }, - "key": { - "type": "string" + "title": "Copy", + "description": "Rules for where the secret should be copied from", + "allOf": [ + { + "$ref": "#/definitions/ConditionalSecretCopyRules" } - }, - "required": ["application", "key"], - "additionalProperties": false + ] }, "generate": { - "type": "object", - "properties": { - "if": { - "type": "string" - }, - "type": { - "enum": [ - "password", - "gafaelfawr-token", - "fernet-key", - "rsa-private-key", - "bcrypt-password-hash", - "mtime" - ] - }, - "source": { - "type": "string" - } - }, - "required": ["type"], - "oneOf": [ + "title": "Generate", + "anyOf": [ { - "type": "object", - "properties": { - "type": { - "enum": [ - "bcrypt-password-hash", - "mtime" - ] - }, - "source": { - "type": "string" - } - }, - "required": ["type", "source"] + "$ref": "#/definitions/ConditionalSimpleSecretGenerateRules" }, { - "type": "object", - "properties": { - "if": { - "type": "string" - }, - "type": { - "enum": [ - "password", - "gafaelfawr-token", - "fernet-key", - "rsa-private-key" - ] - } - }, - "additionalProperties": false + "$ref": "#/definitions/ConditionalSourceSecretGenerateRules" } - ], - "additionalProperties": false + ] }, "value": { - "type": "string" + "title": "Value", + "type": "string", + "writeOnly": true, + "format": "password" } }, "required": [ "description" - ] - }, - "additionalProperties": false + ], + "additionalProperties": false + } } -} +} \ No newline at end of file diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index a73010b429..76cf032db2 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -2,13 +2,20 @@ from __future__ import annotations +import json +import sys +from pathlib import Path + import click +from pydantic.tools import schema_of -from phalanx.factory import Factory +from .factory import Factory +from .models.secrets import ConditionalSecretConfig __all__ = [ "help", "secrets_list", + "secrets_schema", ] @@ -50,3 +57,32 @@ def secrets_list(environment: str) -> None: secrets = secrets_service.list_secrets(environment) for secret in secrets: print(secret.application, secret.key) + + +@secrets.command("schema") +@click.option( + "-o", + "--output", + type=click.Path(path_type=Path), + default=None, + help="Path to which to write schema.", +) +def secrets_schema(*, output: Path | None) -> None: + """Generate schema for application secret definition.""" + schema = schema_of( + dict[str, ConditionalSecretConfig], + title="Phalanx application secret definitions", + ) + + # Pydantic v1 doesn't have any way that I can find to add attributes to + # the top level of a schema that isn't generated from a model, and the + # 
top-level secrets schema is a dict, so manually add in the $id attribute + # pointing to the canonical URL. Do this in a slightly odd way so that the + # $id attribute will be at the top of the file, not at the bottom. + schema = {"$id": "https://phalanx.lsst.io/schemas/secrets.json", **schema} + + json_schema = json.dumps(schema, indent=2) + if output: + output.write_text(json_schema) + else: + sys.stdout.write(json_schema) diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index 3ac95fcfd5..c0b8834041 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -40,9 +40,10 @@ class ConditionalMixin(BaseModel): condition: str | None = Field( None, description=( - "Helm chart value that, if set, indicates the secret should be" - " copied" + "Configuration only applies if this Helm chart setting is set to a" + " true value" ), + title="Condition", alias="if", ) @@ -187,15 +188,7 @@ class Config: class ConditionalSecretConfig(SecretConfig, ConditionalMixin): - """Possibly conditional specification for an application secret. - - This class represents the on-disk schema for secret configurations, which - may include conditions on the secret itself and on its copy and generate - rules. Those conditions cannot be evaluated until the configuration of an - application for a specific environment is known. - - The equivalent class with the conditions evaluated is `SecretConfig`. - """ + """Possibly conditional specification for an application secret.""" copy_rules: ConditionalSecretCopyRules | None = Field( None, diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index 547c7bb7d9..0d628cc3e8 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -3,6 +3,7 @@ from __future__ import annotations import os +from pathlib import Path from click.testing import CliRunner from phalanx.cli import main @@ -10,6 +11,20 @@ from ..support.data import phalanx_test_path, read_output_data +def test_generate_schema() -> None: + runner = CliRunner() + result = runner.invoke(main, ["secrets", "schema"], catch_exceptions=False) + assert result.exit_code == 0 + current = ( + Path(__file__).parent.parent.parent + / "docs" + / "extras" + / "schemas" + / "secrets.json" + ) + assert result.output == current.read_text() + + def test_list() -> None: input_path = phalanx_test_path() os.chdir(str(input_path)) From ab2d6d0ee5d1d06366fbaffeac4021b4f11cd988 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 28 Jul 2023 13:38:30 -0700 Subject: [PATCH 287/308] Add support for static secret template Add a phalanx secrets static-template command that generates a YAML template for the static secrets for an environment. 
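
A usage sketch mirroring the test added below (the idfdev environment
name comes from the test data; run this from the top of a Phalanx
checkout so the environment configuration can be found):

    from click.testing import CliRunner

    from phalanx.cli import main

    runner = CliRunner()
    result = runner.invoke(
        main, ["secrets", "static-template", "idfdev"]
    )
    # The output is a YAML template listing each static secret with its
    # description and a null value to be filled in by hand.
    print(result.output)
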
--- applications/gafaelfawr/secrets.yaml | 2 + docs/about/introduction.rst | 2 +- src/phalanx/cli.py | 10 ++++ src/phalanx/services/secrets.py | 33 +++++++++++ src/phalanx/yaml.py | 32 ++++++++++ tests/cli/secrets_test.py | 19 ++++-- .../applications/gafaelfawr/secrets.yaml | 2 + tests/data/output/idfdev/static-secrets.yaml | 58 +++++++++++++++++++ tests/support/data.py | 24 ++++++++ tox.ini | 2 +- 10 files changed, 178 insertions(+), 6 deletions(-) create mode 100644 src/phalanx/yaml.py create mode 100644 tests/data/output/idfdev/static-secrets.yaml diff --git a/applications/gafaelfawr/secrets.yaml b/applications/gafaelfawr/secrets.yaml index 1c89746656..972698fe99 100644 --- a/applications/gafaelfawr/secrets.yaml +++ b/applications/gafaelfawr/secrets.yaml @@ -61,6 +61,8 @@ session-secret: stored in user web browsers that holds their session token and related information. Changing this secret will invalidate all existing Redis data and all user authentication cookies. + generate: + type: fernet-key signing-key: description: >- RSA private key used to sign JWTs issued by Gafaelfawr when it acts as an diff --git a/docs/about/introduction.rst b/docs/about/introduction.rst index 186eda79bc..83be13fdbd 100644 --- a/docs/about/introduction.rst +++ b/docs/about/introduction.rst @@ -65,7 +65,7 @@ In Phalanx, the word *application* specifically refers to a Helm chart located i That Helm chart directory includes the Kubernetes templates and Docker image references to deploy the application, as well as values files to configure the application for each environment. Argo CD -======= +======== `Argo CD`_ manages the Kubernetes deployments of each application's Helm chart from the Phalanx repository. Each environment runs its own instance of Argo CD (as Argo CD is itself an application in Phalanx). diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index 76cf032db2..dadf32ae19 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -16,6 +16,7 @@ "help", "secrets_list", "secrets_schema", + "secrets_static_template", ] @@ -86,3 +87,12 @@ def secrets_schema(*, output: Path | None) -> None: output.write_text(json_schema) else: sys.stdout.write(json_schema) + + +@secrets.command("static-template") +@click.argument("environment") +def secrets_static_template(environment: str) -> None: + """Generate a template for providing static secrets for an environment.""" + factory = Factory() + secrets_service = factory.create_secrets_service() + sys.stdout.write(secrets_service.generate_static_template(environment)) diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 1e5b455bd9..a1046bf8d1 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -6,11 +6,14 @@ from dataclasses import dataclass from enum import Enum +import yaml + from ..exceptions import UnresolvedSecretsError from ..models.applications import ApplicationInstance from ..models.environments import Environment from ..models.secrets import ResolvedSecret, Secret, SourceSecretGenerateRules from ..storage.config import ConfigStorage +from ..yaml import YAMLFoldedString __all__ = ["SecretsService"] @@ -46,6 +49,36 @@ class SecretsService: def __init__(self, config_storage: ConfigStorage) -> None: self._config = config_storage + def generate_static_template(self, environment_name: str) -> str: + """Generate a template for providing static secrets. + + The template provides space for all static secrets required for a + given environment. 
The resulting file, once the values have been + added, can be used as input to other secret commands instead of an + external secret source such as 1Password. + + Parameters + ---------- + environment_name + Name of the environment. + + Returns + ------- + dict + YAML template the user can fill out, as a string. + """ + secrets = self.list_secrets(environment_name) + template: defaultdict[str, dict[str, dict[str, str | None]]] + template = defaultdict(dict) + for secret in secrets: + static = not (secret.copy_rules or secret.generate or secret.value) + if static: + template[secret.application][secret.key] = { + "description": YAMLFoldedString(secret.description), + "value": None, + } + return yaml.dump(template, width=72) + def list_secrets(self, environment_name: str) -> list[Secret]: """List all required secrets for the given environment. diff --git a/src/phalanx/yaml.py b/src/phalanx/yaml.py new file mode 100644 index 0000000000..3ff6dc3091 --- /dev/null +++ b/src/phalanx/yaml.py @@ -0,0 +1,32 @@ +"""Utility functions for manipulating YAML. + +In several places in the Phalanx code, we want to be able to wrap long strings +to make them more readable or be able to dump `collections.defaultdict` +objects without adding special object tagging. This module collects utility +functions to make this easier. +""" + +from __future__ import annotations + +from collections import defaultdict + +import yaml +from yaml.representer import Representer + +__all__ = ["YAMLFoldedString"] + + +class YAMLFoldedString(str): + """A string that will be folded when encoded in YAML.""" + + __slots__ = () + + +def _folded_string_representer( + dumper: yaml.Dumper, data: YAMLFoldedString +) -> yaml.Node: + return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=">") + + +yaml.add_representer(YAMLFoldedString, _folded_string_representer) +yaml.add_representer(defaultdict, Representer.represent_dict) diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index 0d628cc3e8..28084c96b2 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -11,7 +11,18 @@ from ..support.data import phalanx_test_path, read_output_data -def test_generate_schema() -> None: +def test_list() -> None: + input_path = phalanx_test_path() + os.chdir(str(input_path)) + runner = CliRunner() + result = runner.invoke( + main, ["secrets", "list", "idfdev"], catch_exceptions=False + ) + assert result.exit_code == 0 + assert result.output == read_output_data("idfdev", "secrets-list") + + +def test_schema() -> None: runner = CliRunner() result = runner.invoke(main, ["secrets", "schema"], catch_exceptions=False) assert result.exit_code == 0 @@ -25,12 +36,12 @@ def test_generate_schema() -> None: assert result.output == current.read_text() -def test_list() -> None: +def test_static_template() -> None: input_path = phalanx_test_path() os.chdir(str(input_path)) runner = CliRunner() result = runner.invoke( - main, ["secrets", "list", "idfdev"], catch_exceptions=False + main, ["secrets", "static-template", "idfdev"], catch_exceptions=False ) assert result.exit_code == 0 - assert result.output == read_output_data("idfdev", "secrets-list") + assert result.output == read_output_data("idfdev", "static-secrets.yaml") diff --git a/tests/data/input/applications/gafaelfawr/secrets.yaml b/tests/data/input/applications/gafaelfawr/secrets.yaml index 1c89746656..972698fe99 100644 --- a/tests/data/input/applications/gafaelfawr/secrets.yaml +++ b/tests/data/input/applications/gafaelfawr/secrets.yaml @@ -61,6 +61,8 @@ 
session-secret: stored in user web browsers that holds their session token and related information. Changing this secret will invalidate all existing Redis data and all user authentication cookies. + generate: + type: fernet-key signing-key: description: >- RSA private key used to sign JWTs issued by Gafaelfawr when it acts as an diff --git a/tests/data/output/idfdev/static-secrets.yaml b/tests/data/output/idfdev/static-secrets.yaml new file mode 100644 index 0000000000..b111f7740f --- /dev/null +++ b/tests/data/output/idfdev/static-secrets.yaml @@ -0,0 +1,58 @@ +argocd: + dex.clientSecret: + description: >- + OAuth 2 or OpenID Connect client secret, used to authenticate to GitHub + or Google as part of the authentication flow. This secret can be changed + at any time. + value: null +gafaelfawr: + cilogon-client-secret: + description: >- + Secret used to authenticate to CILogon as part of the OpenID Connect + login protocol to obtain an identity token for the user. This secret + can be changed at any time. + value: null + database-password: + description: >- + Password used to authenticate to the PostgreSQL database used to store + Gafaelfawr data. This password may be changed at any time. + value: null + ldap-password: + description: >- + Password to authenticate to the LDAP server via simple binds to retrieve + user and group information. This password can be changed at any time. + value: null +mobu: + ALERT_HOOK: + description: >- + Slack web hook to which mobu should report failures and daily status. + value: null + app-alert-webhook: + description: >- + Slack web hook to which to post internal application alerts. This secret + is not used directly by mobu, but is copied from here to all of the + applications that report internal problems to Slack. It should normally + be separate from mobu's own web hook, since the separate identities + attached to the messages helps make the type of mesasge clearer, but + the same web hook as mobu's own alerts can be used in a pinch. + value: null +nublado: + aws-credentials.ini: + description: >- + Google Cloud Storage credentials to the Butler data store, formatted + using AWS syntax for use with boto. + value: null + butler-gcs-idf-creds.json: + description: >- + Google Cloud Storage credentials to the Butler data store in the native + Google syntax, containing the private asymmetric key. + value: null + butler-hmac-idf-creds.json: + description: >- + Google Cloud Storage credentials to the Butler data store in the private + key syntax used for HMACs. + value: null + postgres-credentials.txt: + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. + value: null diff --git a/tests/support/data.py b/tests/support/data.py index 8dcedf8873..eb6c71b0cf 100644 --- a/tests/support/data.py +++ b/tests/support/data.py @@ -3,6 +3,9 @@ from __future__ import annotations from pathlib import Path +from typing import Any + +import yaml __all__ = [ "phalanx_test_path", @@ -41,3 +44,24 @@ def read_output_data(environment: str, filename: str) -> str: """ base_path = Path(__file__).parent.parent / "data" / "output" return (base_path / environment / filename).read_text() + + +def read_output_yaml(environment: str, filename: str) -> dict[str, Any]: + """Read test output data as YAML and return the parsed format. + + Parameters + ---------- + environment + Name of the environment under :filename:`data/output` that the test + output is for. + filename + File containing the output data. 
+ + Returns + ------- + dict + Parsed version of the YAML. + """ + base_path = Path(__file__).parent.parent / "data" / "output" + with (base_path / environment / filename).open() as fh: + return yaml.safe_load(fh) diff --git a/tox.ini b/tox.ini index 733e4a0dc2..f6f7632211 100644 --- a/tox.ini +++ b/tox.ini @@ -45,7 +45,7 @@ commands = neophile update {posargs} [testenv:py] description = Run pytest commands = - coverage run -m pytest {posargs} + coverage run -m pytest -vvv {posargs} [testenv:typing] description = Run mypy. From 45f0c07aa88e181a89e009ff942fbc0191a7294c Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 28 Jul 2023 13:48:42 -0700 Subject: [PATCH 288/308] Ignore links to new private USDF deployment usdf-tel-rsp.slac.stanford.edu is a new environment whose links are not publicly accessible, so exclude it from the documentation link check similar to our other environments. --- docs/documenteer.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/documenteer.toml b/docs/documenteer.toml index 3a72dd9d42..68c4b5ae13 100644 --- a/docs/documenteer.toml +++ b/docs/documenteer.toml @@ -31,6 +31,7 @@ ignore = [ '^https://roundtable.lsst.cloud', '^https://usdf-rsp.slac.stanford.edu', '^https://usdf-rsp-dev.slac.stanford.edu', + '^https://usdf-tel-rsp.slac.stanford.edu', '^https://github.com/lsst-sqre/phalanx/blob/main/applications/strimzi/values.yaml', '^https://github.com/orgs/', '^https://ook.lsst.io/', # FIXME readd when Ook docs are published From f0a791676b098a00c82079f6322aff355a112a36 Mon Sep 17 00:00:00 2001 From: Brianna Smart Date: Fri, 28 Jul 2023 15:59:50 -0700 Subject: [PATCH 289/308] Update Kafka version Currently the IP addresses for brokers 3-5 are not permanently provisioned on IDF. Temp lower replicas to 3 to get around not having provisioned IPs. --- applications/alert-stream-broker/README.md | 2 +- .../charts/alert-stream-broker/README.md | 2 +- .../charts/alert-stream-broker/values.yaml | 2 +- .../alert-stream-broker/values-idfint.yaml | 16 ++++++++-------- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index 1a82e13b0c..a2a9ca8599 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -67,7 +67,7 @@ Alert transmission to community brokers | alert-stream-broker.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | -| alert-stream-broker.kafka.version | string | `"3.2.3"` | Version of Kafka to deploy. | +| alert-stream-broker.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | | alert-stream-broker.nameOverride | string | `""` | | | alert-stream-broker.strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | | alert-stream-broker.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. 
| diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md index 5ed52e1292..8b61066caf 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md @@ -25,7 +25,7 @@ Kafka broker cluster for distributing alerts | kafka.replicas | int | `3` | Number of Kafka broker replicas to run. | | kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. | | kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. | -| kafka.version | string | `"3.2.3"` | Version of Kafka to deploy. | +| kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. | | nameOverride | string | `""` | | | strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. | | superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. | diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml index 35e107ae7a..5803b74943 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml @@ -9,7 +9,7 @@ cluster: kafka: # -- Version of Kafka to deploy. - version: 3.2.3 + version: 3.4.0 # -- Encoding version for messages, see # https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. 
logMessageFormatVersion: 3.2 diff --git a/applications/alert-stream-broker/values-idfint.yaml b/applications/alert-stream-broker/values-idfint.yaml index b60dc8a259..3b94677a96 100644 --- a/applications/alert-stream-broker/values-idfint.yaml +++ b/applications/alert-stream-broker/values-idfint.yaml @@ -18,14 +18,14 @@ alert-stream-broker: host: alert-stream-int-broker-1.lsst.cloud - ip: "35.238.84.221" host: alert-stream-int-broker-2.lsst.cloud - - ip: "35.188.93.220" - host: alert-stream-int-broker-3.lsst.cloud - - ip: "35.224.219.71" - host: alert-stream-int-broker-4.lsst.cloud - - ip: "35.232.51.105" - host: alert-stream-int-broker-5.lsst.cloud - - replicas: 6 + # - ip: "35.184.182.182" + # host: alert-stream-int-broker-3.lsst.cloud + # - ip: "35.232.191.72" + # host: alert-stream-int-broker-4.lsst.cloud + # - ip: "34.27.122.46" + # host: alert-stream-int-broker-5.lsst.cloud + + replicas: 3 storage: size: 1500Gi From 3abf759f2dc27d2591d6000344f9c33f8673925b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 07:51:43 +0000 Subject: [PATCH 290/308] Update Helm release argo-workflows to v0.32.1 --- applications/argo-workflows/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 9dd85ab07b..72a59f0a91 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.32.0 + version: 0.32.1 repository: https://argoproj.github.io/argo-helm From 2912cf174813512ad20c78332bbb3fc8ebea5532 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 07:51:48 +0000 Subject: [PATCH 291/308] Update Helm release cert-manager to v1.12.3 --- applications/cert-manager/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml index 115fb6d946..38b8d24d30 100644 --- a/applications/cert-manager/Chart.yaml +++ b/applications/cert-manager/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/cert-manager/cert-manager dependencies: - name: cert-manager - version: v1.12.2 + version: v1.12.3 repository: https://charts.jetstack.io From a71f88bac892d1145ef9727fba7e7be558c12121 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 10:27:19 +0000 Subject: [PATCH 292/308] Update Helm release vault-secrets-operator to v2.5.1 --- applications/vault-secrets-operator/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml index 49372ad751..b85db7d0ca 100644 --- a/applications/vault-secrets-operator/Chart.yaml +++ b/applications/vault-secrets-operator/Chart.yaml @@ -5,7 +5,7 @@ sources: - https://github.com/ricoberger/vault-secrets-operator dependencies: - name: vault-secrets-operator - version: 2.5.0 + version: 2.5.1 repository: https://ricoberger.github.io/helm-charts/ annotations: phalanx.lsst.io/docs: | From 52d896e6192acbd3bcb469953ed153515a4706f8 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 31 Jul 2023 10:27:24 +0000 Subject: [PATCH 293/308] Update medyagh/setup-minikube action to v0.0.14 --- 
.github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 97e493fd3c..ab222cac1a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -104,7 +104,7 @@ jobs: - name: Setup Minikube if: steps.filter.outputs.minikube == 'true' - uses: medyagh/setup-minikube@v0.0.13 + uses: medyagh/setup-minikube@v0.0.14 with: kubernetes-version: "v1.27.3" cpus: max From 0f590ab248c6df1d59ea6c9db57a91f0178c60d0 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 14:17:58 -0700 Subject: [PATCH 294/308] Correct config key name --- applications/monitoring/values-roundtable-dev.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/monitoring/values-roundtable-dev.yaml b/applications/monitoring/values-roundtable-dev.yaml index 031de91c55..ec948c5cce 100644 --- a/applications/monitoring/values-roundtable-dev.yaml +++ b/applications/monitoring/values-roundtable-dev.yaml @@ -3,6 +3,6 @@ chronograf: GH_CLIENT_ID: "e85fe410b0021a251180" cronjob: debug: true -ingresses: +ingress: chronograf: hostname: "monitoring-dev.lsst.cloud" From be24cf9bdcc641e0b2f17f1df4b2f8083f193e79 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 14:20:12 -0700 Subject: [PATCH 295/308] Correct ingress definition --- applications/monitoring/templates/ingress.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/applications/monitoring/templates/ingress.yaml b/applications/monitoring/templates/ingress.yaml index c50f8f15a2..6fbe0a7427 100644 --- a/applications/monitoring/templates/ingress.yaml +++ b/applications/monitoring/templates/ingress.yaml @@ -29,6 +29,6 @@ template: pathType: "Prefix" backend: service: - name: monitoring-chronograf - port: - number: 80 + name: monitoring-chronograf + port: + number: 80 From 15794d7ce48ce8214f2bbcb2249619d2b6d5c47f Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 14:49:56 -0700 Subject: [PATCH 296/308] Correct ingress indentation --- .../monitoring/templates/ingress.yaml | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/applications/monitoring/templates/ingress.yaml b/applications/monitoring/templates/ingress.yaml index 6fbe0a7427..e8d7a66d19 100644 --- a/applications/monitoring/templates/ingress.yaml +++ b/applications/monitoring/templates/ingress.yaml @@ -16,17 +16,17 @@ config: template: metadata: name: "chronograf" - spec: - tls: - - hosts: - - {{ .Values.ingress.chronograf.hostname | quote }} - secretName: tls - rules: - - host: {{ .Values.ingress.chronograf.hostname | quote }} - http: - paths: - - path: "/" - pathType: "Prefix" + spec: + tls: + - hosts: + - {{ .Values.ingress.chronograf.hostname | quote }} + secretName: tls + rules: + - host: {{ .Values.ingress.chronograf.hostname | quote }} + http: + paths: + - path: "/" + pathType: "Prefix" backend: service: name: monitoring-chronograf From 96842051c0f0b4aa466956b5f1c0db79f959c589 Mon Sep 17 00:00:00 2001 From: adam Date: Thu, 27 Jul 2023 14:51:31 -0700 Subject: [PATCH 297/308] Correct ingress indentation --- applications/monitoring/templates/ingress.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/monitoring/templates/ingress.yaml b/applications/monitoring/templates/ingress.yaml index e8d7a66d19..5c5b641f3c 100644 --- a/applications/monitoring/templates/ingress.yaml +++ b/applications/monitoring/templates/ingress.yaml @@ -27,8 +27,8 @@ template: paths: - path: "/" pathType: 
"Prefix" - backend: - service: - name: monitoring-chronograf - port: - number: 80 + backend: + service: + name: monitoring-chronograf + port: + number: 80 From 7ffee46475a1289fb844fa1050472cd96678197f Mon Sep 17 00:00:00 2001 From: adam Date: Fri, 28 Jul 2023 10:26:38 -0700 Subject: [PATCH 298/308] Add monitoring-dev to GF for anonymous auth --- applications/gafaelfawr/values-roundtable-dev.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/applications/gafaelfawr/values-roundtable-dev.yaml b/applications/gafaelfawr/values-roundtable-dev.yaml index 8af02989fb..98dc65924b 100644 --- a/applications/gafaelfawr/values-roundtable-dev.yaml +++ b/applications/gafaelfawr/values-roundtable-dev.yaml @@ -47,3 +47,4 @@ ingress: additionalHosts: - "git-lfs-dev.lsst.cloud" - "git-lfs-dev-rw.lsst.cloud" + - "monitoring-dev.lsst.cloud" From a60ee560ea9f5fafca3060d9653107c8a2415e6b Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 31 Jul 2023 12:14:48 -0700 Subject: [PATCH 299/308] fix giftless annotations --- applications/giftless/templates/ingress.yaml | 23 ++++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml index 40e18393a5..b990ead3b7 100644 --- a/applications/giftless/templates/ingress.yaml +++ b/applications/giftless/templates/ingress.yaml @@ -4,17 +4,17 @@ metadata: name: {{ include "giftless.fullname" . }} labels: {{- include "giftless.labels" . | nindent 4 }} - annotations: - cert-manager.io/cluster-issuer: "letsencrypt-dns" - {{- with .Values.ingress.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} config: baseUrl: "https://{{ .Values.ingress.hostname.readonly }}" scopes: anonymous: true template: metadata: + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 6 }} + {{- end }} name: {{ include "giftless.fullname" . }} spec: tls: @@ -36,15 +36,9 @@ template: apiVersion: gafaelfawr.lsst.io/v1alpha1 kind: GafaelfawrIngress metadata: - name: "giftless-rw" name: {{ template "giftless.fullname" . }}-rw labels: - {{- include "giftless-rw.labels" . | nindent 4 }} - annotations: - cert-manager.io/cluster-issuer: "letsencrypt-dns" - {{- with .Values.ingress.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} + {{- include "giftless.labels" . | nindent 4 }} config: baseUrl: "https://{{ .Values.ingress.hostname.readwrite }}" scopes: @@ -53,6 +47,11 @@ config: template: metadata: name: {{ template "giftless.fullname" . }}-rw + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 6 }} + {{- end }} spec: tls: - hosts: From 08017bb01c7f286f2f377fa267f6a7a4c4064f36 Mon Sep 17 00:00:00 2001 From: adam Date: Mon, 31 Jul 2023 12:15:56 -0700 Subject: [PATCH 300/308] Fix monitoring ingress annotations --- applications/monitoring/templates/ingress.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/applications/monitoring/templates/ingress.yaml b/applications/monitoring/templates/ingress.yaml index 5c5b641f3c..0cfac115df 100644 --- a/applications/monitoring/templates/ingress.yaml +++ b/applications/monitoring/templates/ingress.yaml @@ -4,11 +4,6 @@ metadata: name: "chronograf" labels: {{- include "monitoring.labels" . | nindent 4 }} - annotations: - cert-manager.io/cluster-issuer: "letsencrypt-dns" - {{- with .Values.ingress.chronograf.annotations }} - {{- toYaml . 
| nindent 4 }} - {{- end }} config: baseUrl: "https://{{ .Values.ingress.chronograf.hostname }}" scopes: @@ -16,6 +11,11 @@ config: template: metadata: name: "chronograf" + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-dns" + {{- with .Values.ingress.chronograf.annotations }} + {{- toYaml . | nindent 6 }} + {{- end }} spec: tls: - hosts: From 5608b9bf58664ee160225d5e10bbf611bed9e797 Mon Sep 17 00:00:00 2001 From: Jonathan Sick Date: Mon, 31 Jul 2023 17:43:07 -0400 Subject: [PATCH 301/308] Deploy Times Square 0.9.1 See https://github.com/lsst-sqre/times-square/pull/56 --- applications/times-square/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 375745daa6..17c186befe 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -8,7 +8,7 @@ sources: type: application # The default version tag of the times-square docker image -appVersion: "0.9.0" +appVersion: "0.9.1" dependencies: - name: redis From c187b3012cabbc82ad49ce90f0f34eb98c09339d Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Tue, 1 Aug 2023 08:38:12 -0700 Subject: [PATCH 302/308] Expand influxdb-source PV to 10TB at base - This is required for restoring historical EFD data --- applications/sasquatch/values-base.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml index 3fa00a46de..13393be76c 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -64,7 +64,7 @@ source-influxdb: enabled: true persistence: storageClass: rook-ceph-block - size: 5Ti + size: 10Ti ingress: enabled: true hostname: base-lsp.lsst.codes From 905c2934d488d829aa9d19c445e1273b96bfc2be Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Fri, 28 Jul 2023 17:25:54 -0700 Subject: [PATCH 303/308] Add basic Vault support to the Phalanx CLI Add secrets vault-secrets and secrets audit commands. The former generates JSON versions of the Vault contents of a given environment and saves them to disk, used primarily for testing and debugging. The latter checks the list of secrets required by a given environment against the list of secrets in Vault, and reports any cases where secrets are missing, incorrect, or unexpectedly present. 
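
For example (a sketch, assuming the command-line entry point is installed
as "phalanx" and that a Vault token is available via the VAULT_TOKEN
environment variable or a ~/.vault-token file; the environment name and
output directory below are only illustrative):

    phalanx secrets audit idfdev
    phalanx secrets vault-secrets idfdev /tmp/idfdev-vault-secrets

The second command writes one JSON file per application with secrets to
the output directory, using null for any secret value that is not yet
known.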
--- requirements/dev.txt | 31 ++- requirements/main.in | 2 + requirements/main.txt | 212 +++++++++++++++- src/phalanx/cli.py | 26 ++ src/phalanx/factory.py | 16 +- src/phalanx/models/environments.py | 22 +- src/phalanx/models/secrets.py | 6 - src/phalanx/services/secrets.py | 232 ++++++++++++------ src/phalanx/storage/config.py | 20 +- src/phalanx/storage/vault.py | 73 ++++++ tests/cli/secrets_test.py | 56 +++++ tests/conftest.py | 15 ++ .../input/environments/values-idfdev.yaml | 1 + tests/data/input/environments/values.yaml | 4 + tests/data/input/vault/idfdev/argocd.json | 6 + tests/data/input/vault/idfdev/gafaelfawr.json | 8 + tests/data/input/vault/idfdev/mobu.json | 4 + tests/data/input/vault/idfdev/nublado.json | 6 + tests/data/input/vault/idfdev/postgres.json | 4 + tests/data/output/idfdev/secrets-audit | 14 ++ tests/support/vault.py | 87 +++++++ 21 files changed, 742 insertions(+), 103 deletions(-) create mode 100644 src/phalanx/storage/vault.py create mode 100644 tests/conftest.py create mode 100644 tests/data/input/vault/idfdev/argocd.json create mode 100644 tests/data/input/vault/idfdev/gafaelfawr.json create mode 100644 tests/data/input/vault/idfdev/mobu.json create mode 100644 tests/data/input/vault/idfdev/nublado.json create mode 100644 tests/data/input/vault/idfdev/postgres.json create mode 100644 tests/data/output/idfdev/secrets-audit create mode 100644 tests/support/vault.py diff --git a/requirements/dev.txt b/requirements/dev.txt index 084092f363..e6ad8ddedb 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -19,7 +19,9 @@ beautifulsoup4==4.12.2 \ certifi==2023.7.22 \ --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 - # via requests + # via + # -c requirements/main.txt + # requests cfgv==3.3.1 \ --hash=sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426 \ --hash=sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736 @@ -100,7 +102,9 @@ charset-normalizer==3.2.0 \ --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \ --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \ --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa - # via requests + # via + # -c requirements/main.txt + # requests click==8.1.6 \ --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \ --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5 @@ -222,9 +226,9 @@ distlib==0.3.7 \ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -documenteer[guide]==0.8.3 \ - --hash=sha256:e1514ca8dd96810a6d24d4b21f7b28458a3cf434217e46939ffab2c201791afc \ - --hash=sha256:fbe3ad1740751da8fcc95d809b0a489dc7f14fcdb78b28df85860ae92011c9a2 +documenteer[guide]==0.8.4 \ + --hash=sha256:c92a0766766bcdcbbbd3b06fbb251b5c2dbad41f81be37677cc61fbd58604594 \ + --hash=sha256:f54553006cc2416613163644a93b20c7f1934e9026de08d52a7e35055cb37e19 # via -r requirements/dev.in docutils==0.19 \ --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ @@ -299,7 +303,9 @@ identify==2.5.26 \ idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 - # via requests + # via + # -c requirements/main.txt + # 
requests imagesize==1.4.1 \ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a @@ -699,6 +705,7 @@ pydantic==1.10.12 \ --hash=sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6 \ --hash=sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d # via + # -c requirements/main.txt # -r requirements/dev.in # documenteer pydata-sphinx-theme==0.12.0 \ @@ -775,6 +782,7 @@ requests==2.31.0 \ --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 # via + # -c requirements/main.txt # documenteer # sphinx ruff==0.0.280 \ @@ -845,9 +853,9 @@ sphinx-copybutton==0.5.2 \ --hash=sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd \ --hash=sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e # via documenteer -sphinx-design==0.4.1 \ - --hash=sha256:23bf5705eb31296d4451f68b0222a698a8a84396ffe8378dfd9319ba7ab8efd9 \ - --hash=sha256:5b6418ba4a2dc3d83592ea0ff61a52a891fe72195a4c3a18b2fa1c7668ce4708 +sphinx-design==0.5.0 \ + --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \ + --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00 # via documenteer sphinx-diagrams==0.4.0 \ --hash=sha256:3cf2e0179bdd9ccdb28164fcfcae9b167999a1abe40e159e0c26a225490074d1 \ @@ -951,6 +959,7 @@ typing-extensions==4.7.1 \ --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \ --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2 # via + # -c requirements/main.txt # mypy # pydantic uc-micro-py==1.0.2 \ @@ -960,7 +969,9 @@ uc-micro-py==1.0.2 \ urllib3==2.0.4 \ --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \ --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4 - # via requests + # via + # -c requirements/main.txt + # requests virtualenv==20.24.2 \ --hash=sha256:43a3052be36080548bdee0b42919c88072037d50d56c28bd3f853cbe92b953ff \ --hash=sha256:fd8a78f46f6b99a67b7ec5cf73f92357891a7b3a40fd97637c27f854aae3b9e0 diff --git a/requirements/main.in b/requirements/main.in index 96ae05bf8c..f17684e983 100644 --- a/requirements/main.in +++ b/requirements/main.in @@ -10,4 +10,6 @@ bcrypt click cryptography GitPython +hvac PyYAML +safir diff --git a/requirements/main.txt b/requirements/main.txt index ca05a71863..1a626f61f2 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -4,6 +4,12 @@ # # pip-compile --allow-unsafe --config=pyproject.toml --generate-hashes --output-file=requirements/main.txt requirements/main.in # +anyio==3.7.1 \ + --hash=sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780 \ + --hash=sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5 + # via + # httpcore + # starlette bcrypt==4.0.1 \ --hash=sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535 \ --hash=sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0 \ @@ -27,6 +33,13 @@ bcrypt==4.0.1 \ --hash=sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e \ --hash=sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3 # via -r requirements/main.in +certifi==2023.7.22 \ + --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ + 
--hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 + # via + # httpcore + # httpx + # requests cffi==1.15.1 \ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ @@ -93,6 +106,83 @@ cffi==1.15.1 \ --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 # via cryptography +charset-normalizer==3.2.0 \ + --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \ + --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \ + --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \ + --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \ + --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \ + --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \ + --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \ + --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \ + --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \ + --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \ + --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \ + --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \ + --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \ + --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \ + --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \ + --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \ + --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \ + --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \ + --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \ + --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \ + --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \ + --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \ + --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \ + --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \ + --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \ + --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \ + --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \ + --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \ + --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \ + --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \ + --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \ + --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \ + --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \ + --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \ + --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \ + --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \ + 
--hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \ + --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \ + --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \ + --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \ + --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \ + --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \ + --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \ + --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \ + --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \ + --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \ + --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \ + --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \ + --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \ + --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \ + --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \ + --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \ + --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \ + --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \ + --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \ + --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \ + --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \ + --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \ + --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \ + --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \ + --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \ + --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \ + --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \ + --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \ + --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \ + --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \ + --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \ + --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \ + --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \ + --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \ + --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \ + --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \ + --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \ + --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \ + --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa + # via requests click==8.1.6 \ --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \ --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5 @@ -121,7 +211,18 @@ cryptography==41.0.2 \ --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \ 
--hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \ --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14 - # via -r requirements/main.in + # via + # -r requirements/main.in + # pyjwt + # safir +fastapi==0.100.1 \ + --hash=sha256:522700d7a469e4a973d92321ab93312448fbe20fca9c8da97effc7e7bc56df23 \ + --hash=sha256:ec6dd52bfc4eff3063cfcd0713b43c87640fefb2687bbbe3d8a08d94049cdf32 + # via safir +gidgethub==5.3.0 \ + --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ + --hash=sha256:9ece7d37fbceb819b80560e7ed58f936e48a65d37ec5f56db79145156b426a25 + # via safir gitdb==4.0.10 \ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 @@ -130,10 +231,80 @@ gitpython==3.1.32 \ --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \ --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f # via -r requirements/main.in +h11==0.14.0 \ + --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \ + --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761 + # via httpcore +httpcore==0.17.3 \ + --hash=sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888 \ + --hash=sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87 + # via httpx +httpx==0.24.1 \ + --hash=sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd \ + --hash=sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd + # via safir +hvac==1.1.1 \ + --hash=sha256:466e883665b4082933106b292649f9fba3bc0709a1ec1729e9e35b29477164b3 \ + --hash=sha256:f9dbcc46b98b250c785eb1050aa11ee34a0c8b6616b75218cf1346a9817992f9 + # via -r requirements/main.in +idna==3.4 \ + --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ + --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 + # via + # anyio + # httpx + # requests pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi +pydantic==1.10.12 \ + --hash=sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303 \ + --hash=sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe \ + --hash=sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47 \ + --hash=sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494 \ + --hash=sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33 \ + --hash=sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86 \ + --hash=sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d \ + --hash=sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c \ + --hash=sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a \ + --hash=sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565 \ + --hash=sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb \ + --hash=sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62 \ + --hash=sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62 \ + --hash=sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0 \ + --hash=sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523 \ + 
--hash=sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d \ + --hash=sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405 \ + --hash=sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f \ + --hash=sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b \ + --hash=sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718 \ + --hash=sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed \ + --hash=sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb \ + --hash=sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5 \ + --hash=sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc \ + --hash=sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942 \ + --hash=sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe \ + --hash=sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246 \ + --hash=sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350 \ + --hash=sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303 \ + --hash=sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09 \ + --hash=sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33 \ + --hash=sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8 \ + --hash=sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a \ + --hash=sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1 \ + --hash=sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6 \ + --hash=sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d + # via + # fastapi + # safir +pyhcl==0.4.4 \ + --hash=sha256:2d9b9dcdf1023d812bfed561ba72c99104c5b3f52e558d595130a44ce081b003 + # via hvac +pyjwt[crypto]==2.8.0 \ + --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ + --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 + # via gidgethub pyyaml==6.0.1 \ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \ @@ -176,7 +347,46 @@ pyyaml==6.0.1 \ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f # via -r requirements/main.in +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 + # via hvac +safir==4.3.1 \ + --hash=sha256:6d1fcb7aba10e02fd456076d29e38aaa8699574f52b0fc2f326a9ee3958b41ea \ + --hash=sha256:da473520785428ae3b9da80406403054d46c089a34d0beceeb88c4cb78925cd3 + # via -r requirements/main.in smmap==5.0.0 \ --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \ --hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936 # via gitdb +sniffio==1.3.0 \ + --hash=sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101 \ + --hash=sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384 + # via + # anyio + # httpcore + # httpx +starlette==0.27.0 \ + --hash=sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75 \ + --hash=sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91 + # via + # fastapi + # safir +structlog==23.1.0 \ + 
--hash=sha256:270d681dd7d163c11ba500bc914b2472d2b50a8ef00faa999ded5ff83a2f906b \ + --hash=sha256:79b9e68e48b54e373441e130fa447944e6f87a05b35de23138e475c05d0f7e0e + # via safir +typing-extensions==4.7.1 \ + --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \ + --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2 + # via + # fastapi + # pydantic +uritemplate==4.1.1 \ + --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \ + --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e + # via gidgethub +urllib3==2.0.4 \ + --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \ + --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4 + # via requests diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index dadf32ae19..d438923abd 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -14,9 +14,11 @@ __all__ = [ "help", + "secrets_audit", "secrets_list", "secrets_schema", "secrets_static_template", + "secrets_vault_secrets", ] @@ -49,6 +51,15 @@ def secrets() -> None: """Secret manipulation commands.""" +@secrets.command("audit") +@click.argument("environment") +def secrets_audit(environment: str) -> None: + """Audit the secrets for the given environment for inconsistencies.""" + factory = Factory() + secrets_service = factory.create_secrets_service() + sys.stdout.write(secrets_service.audit(environment)) + + @secrets.command("list") @click.argument("environment") def secrets_list(environment: str) -> None: @@ -96,3 +107,18 @@ def secrets_static_template(environment: str) -> None: factory = Factory() secrets_service = factory.create_secrets_service() sys.stdout.write(secrets_service.generate_static_template(environment)) + + +@secrets.command("vault-secrets") +@click.argument("environment") +@click.argument("output", type=click.Path(path_type=Path)) +def secrets_vault_secrets(environment: str, output: Path) -> None: + """Write the Vault secrets for the given environment. + + One JSON file per application with secrets will be created in the output + directory, containing the secrets for that application. If the value of a + secret is not known, it will be written as null. + """ + factory = Factory() + secrets_service = factory.create_secrets_service() + secrets_service.generate_vault_secrets(environment, output) diff --git a/src/phalanx/factory.py b/src/phalanx/factory.py index f48269d5de..b3cd820534 100644 --- a/src/phalanx/factory.py +++ b/src/phalanx/factory.py @@ -4,6 +4,7 @@ from .services.secrets import SecretsService from .storage.config import ConfigStorage +from .storage.vault import VaultStorage __all__ = ["Factory"] @@ -11,6 +12,16 @@ class Factory: """Factory to create Phalanx components.""" + def create_config_storage(self) -> ConfigStorage: + """Create storage layer for the Phalanx configuration. + + Returns + ------- + ConfigStorage + Storage service for loading the Phalanx configuration. + """ + return ConfigStorage() + def create_secrets_service(self) -> SecretsService: """Create service for manipulating Phalanx secrets. @@ -19,5 +30,6 @@ def create_secrets_service(self) -> SecretsService: SecretsService Service for manipulating secrets. 
""" - config_storage = ConfigStorage() - return SecretsService(config_storage) + config_storage = self.create_config_storage() + vault_storage = VaultStorage() + return SecretsService(config_storage, vault_storage) diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index 9c49ff7c85..59ef5896fb 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -2,7 +2,8 @@ from __future__ import annotations -from pydantic import BaseModel +from pydantic import BaseModel, Field +from safir.pydantic import CamelCaseModel from .applications import ApplicationInstance @@ -12,7 +13,7 @@ ] -class EnvironmentConfig(BaseModel): +class EnvironmentConfig(CamelCaseModel): """Configuration for a Phalanx environment. This is a partial model for the environment :file:`values.yaml` file. @@ -21,8 +22,15 @@ class EnvironmentConfig(BaseModel): environment: str """Name of the environment.""" - applications: list[str] - """List of enabled applications.""" + vault_url: str + """URL of Vault server for this environment.""" + + vault_path_prefix: str + """Prefix of Vault paths, including the Kv2 mount point.""" + + applications: list[str] = Field( + [], description="List of enabled applications" + ) class Environment(BaseModel): @@ -31,6 +39,12 @@ class Environment(BaseModel): name: str """Name of the environment.""" + vault_url: str + """URL of Vault server for this environment.""" + + vault_path_prefix: str + """Prefix of Vault paths, including the Kv2 mount point.""" + applications: dict[str, ApplicationInstance] """Applications enabled for that environment, by name.""" diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py index c0b8834041..14b295bae0 100644 --- a/src/phalanx/models/secrets.py +++ b/src/phalanx/models/secrets.py @@ -255,9 +255,3 @@ class ResolvedSecret(BaseModel): value: SecretStr | None = None """Value of the secret if known.""" - - static: bool = False - """Whether this is a static secret. - - Static secrets are those whose values come from an external source. - """ diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index a1046bf8d1..1a81a95acb 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -2,41 +2,24 @@ from __future__ import annotations +import json from collections import defaultdict -from dataclasses import dataclass -from enum import Enum +from pathlib import Path import yaml +from pydantic import SecretStr from ..exceptions import UnresolvedSecretsError from ..models.applications import ApplicationInstance from ..models.environments import Environment from ..models.secrets import ResolvedSecret, Secret, SourceSecretGenerateRules from ..storage.config import ConfigStorage +from ..storage.vault import VaultStorage from ..yaml import YAMLFoldedString __all__ = ["SecretsService"] -class _SecretStatus(Enum): - """Status of a secret resolution.""" - - DROP = "DROP" - KEEP = "KEEP" - PENDING = "PENDING" - - -@dataclass -class _SecretResolution: - """Status of the resolution of a secret.""" - - status: _SecretStatus - """Status of the secret.""" - - secret: ResolvedSecret | None = None - """Resolved secret, if status is ``KEEP``.""" - - class SecretsService: """Service to manipulate Phalanx secrets. @@ -44,12 +27,77 @@ class SecretsService: ---------- config_storage Storage object for the Phalanx configuration. + vault_storage + Storage object for Vault. 
""" - def __init__(self, config_storage: ConfigStorage) -> None: + def __init__( + self, config_storage: ConfigStorage, vault_storage: VaultStorage + ) -> None: self._config = config_storage + self._vault = vault_storage + + def audit(self, env_name: str) -> str: + """Compare existing secrets to configuration and report problems. - def generate_static_template(self, environment_name: str) -> str: + Parameters + ---------- + env_name + Name of the environment to audit. + + Returns + ------- + str + Audit report as a text document. + """ + environment = self._config.load_environment(env_name) + vault_client = self._vault.get_vault_client(environment) + + # Retrieve all the current secrets from Vault and resolve all of the + # secrets. + secrets = [] + vault_secrets = {} + for application in environment.all_applications(): + secrets.extend(application.secrets) + name = application.name + vault_secret = vault_client.get_application_secrets(name) + vault_secrets[name] = vault_secret + resolved = self._resolve_secrets(secrets, environment, vault_secrets) + + # Compare the resolved secrets to the Vault data. + missing = [] + mismatch = [] + unknown = [] + for app_name, values in resolved.items(): + for key, value in values.items(): + if key in vault_secrets[app_name]: + if value.value: + expected = value.value.get_secret_value() + else: + expected = None + vault = vault_secrets[app_name][key].get_secret_value() + if expected != vault: + import logging + + logging.error("mismatch %s %s", expected, vault) + mismatch.append(f"{app_name} {key}") + del vault_secrets[app_name][key] + else: + missing.append(f"{app_name} {key}") + unknown = [f"{a} {k}" for a, lv in vault_secrets.items() for k in lv] + + # Generate the textual report. + report = "" + if missing: + report += "Missing secrets:\n• " + "\n• ".join(missing) + "\n" + if mismatch: + report += "Incorrect secrets:\n• " + "\n• ".join(mismatch) + "\n" + if unknown: + unknown_str = "\n ".join(unknown) + report += "Unknown secrets in Vault:\n• " + unknown_str + "\n" + return report + + def generate_static_template(self, env_name: str) -> str: """Generate a template for providing static secrets. The template provides space for all static secrets required for a @@ -59,7 +107,7 @@ def generate_static_template(self, environment_name: str) -> str: Parameters ---------- - environment_name + env_name Name of the environment. Returns @@ -67,7 +115,7 @@ def generate_static_template(self, environment_name: str) -> str: dict YAML template the user can fill out, as a string. """ - secrets = self.list_secrets(environment_name) + secrets = self.list_secrets(env_name) template: defaultdict[str, dict[str, dict[str, str | None]]] template = defaultdict(dict) for secret in secrets: @@ -79,12 +127,48 @@ def generate_static_template(self, environment_name: str) -> str: } return yaml.dump(template, width=72) - def list_secrets(self, environment_name: str) -> list[Secret]: + def generate_vault_secrets(self, env_name: str, path: Path) -> None: + """Generate JSON files containing the Vault secrets for an environment. + + One file per application with secrets will be written to the provided + path. Each file will be named after the application with ``.json`` + appended, and will contain the secret values for that application. + Secrets that are required but have no known value will be written as + null. + + Parameters + ---------- + env_name + Name of the environment. + path + Output path. 
+ """ + environment = self._config.load_environment(env_name) + vault_client = self._vault.get_vault_client(environment) + secrets = [] + vault_secrets = {} + for application in environment.all_applications(): + secrets.extend(application.secrets) + name = application.name + vault_secret = vault_client.get_application_secrets(name) + vault_secrets[name] = vault_secret + resolved = self._resolve_secrets(secrets, environment, vault_secrets) + for app_name, values in resolved.items(): + app_secrets: dict[str, str | None] = {} + for key, secret in values.items(): + if secret.value: + app_secrets[key] = secret.value.get_secret_value() + else: + app_secrets[key] = None + with (path / f"{app_name}.json").open("w") as fh: + json.dump(app_secrets, fh, indent=2) + + def list_secrets(self, env_name: str) -> list[Secret]: """List all required secrets for the given environment. Parameters ---------- - environment_name + env_name Name of the environment. Returns @@ -92,15 +176,18 @@ def list_secrets(self, environment_name: str) -> list[Secret]: list of ResolvedSecret Secrets required for the given environment. """ - environment = self._config.load_environment(environment_name) + environment = self._config.load_environment(env_name) secrets = [] for application in environment.all_applications(): secrets.extend(application.secrets) return secrets def _resolve_secrets( - self, secrets: list[Secret], environment: Environment - ) -> list[ResolvedSecret]: + self, + secrets: list[Secret], + environment: Environment, + vault_secrets: dict[str, dict[str, SecretStr]], + ) -> dict[str, dict[str, ResolvedSecret]]: """Resolve the secrets for a Phalanx environment. Resolving secrets is the process where the secret configuration is @@ -113,10 +200,13 @@ def _resolve_secrets( Secret configuration by application and key. environment Phalanx environment for which to resolve secrets. + vault_secrets + Current values from Vault. These will be used if compatible with + the secret definitions. Returns ------- - list of ResolvedSecret + dict Resolved secrets by application and secret key. Raises @@ -132,29 +222,30 @@ def _resolve_secrets( secrets = unresolved unresolved = [] for config in secrets: - instance = environment.applications[config.application] - resolution = self._resolve_secret(config, instance, resolved) - if resolution.status == _SecretStatus.KEEP: - secret = resolution.secret - if not secret: - raise RuntimeError("Resolved secret with no secret") + vault_values = vault_secrets[config.application] + secret = self._resolve_secret( + config=config, + instance=environment.applications[config.application], + resolved=resolved, + current_value=vault_values.get(config.key), + ) + if secret: resolved[secret.application][secret.key] = secret - if resolution.status == _SecretStatus.PENDING: + else: unresolved.append(config) if len(unresolved) >= left: raise UnresolvedSecretsError(unresolved) left = len(unresolved) - return sorted( - [s for sl in resolved.values() for s in sl.values()], - key=lambda s: (s.application, s.key), - ) + return resolved def _resolve_secret( self, + *, config: Secret, instance: ApplicationInstance, resolved: dict[str, dict[str, ResolvedSecret]], - ) -> _SecretResolution: + current_value: SecretStr | None, + ) -> ResolvedSecret | None: """Resolve a single secret. Parameters @@ -166,21 +257,22 @@ def _resolve_secret( resolved Other secrets for that environment that have already been resolved. + current_value + Current secret value in Vault, if known. 
Returns ------- - SecretResolution - Results of attempting to resolve this secret. + ResolvedSecret or None + Resolved value of the secret, or `None` if the secret cannot yet + be resolved (because, for example, the secret from which it is + copied has not yet been resolved). """ # If a value was already provided, this is the easy case. if config.value: - return _SecretResolution( - status=_SecretStatus.KEEP, - secret=ResolvedSecret( - key=config.key, - application=config.application, - value=config.value, - ), + return ResolvedSecret( + key=config.key, + application=config.application, + value=config.value, ) # Do copying or generation if configured. @@ -188,35 +280,29 @@ def _resolve_secret( application = config.copy_rules.application other = resolved.get(application, {}).get(config.copy_rules.key) if not other: - return _SecretResolution(status=_SecretStatus.PENDING) - return _SecretResolution( - status=_SecretStatus.KEEP, - secret=ResolvedSecret( - key=config.key, - application=config.application, - value=other.value, - ), + return None + return ResolvedSecret( + key=config.key, + application=config.application, + value=other.value, ) - if config.generate: + if config.generate and not current_value: if isinstance(config.generate, SourceSecretGenerateRules): other_key = config.generate.source other = resolved.get(config.application, {}).get(other_key) if not (other and other.value): - return _SecretResolution(status=_SecretStatus.PENDING) + return None value = config.generate.generate(other.value) else: value = config.generate.generate() - return _SecretResolution( - status=_SecretStatus.KEEP, - secret=ResolvedSecret( - key=config.key, - application=config.application, - value=value, - ), + return ResolvedSecret( + key=config.key, + application=config.application, + value=value, ) - # The remaining case is that the secret is a static secret. - secret = ResolvedSecret( - key=config.key, application=config.application, static=True + # The remaining case is that the secret is a static secret or a + # generated secret for which we already have a value. + return ResolvedSecret( + key=config.key, application=config.application, value=current_value ) - return _SecretResolution(status=_SecretStatus.KEEP, secret=secret) diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py index 632a7c86f2..6bc838f06a 100644 --- a/src/phalanx/storage/config.py +++ b/src/phalanx/storage/config.py @@ -73,7 +73,12 @@ def load_environment(self, environment_name: str) -> Environment: a.name: self._resolve_application(a, environment_name) for a in applications } - return Environment(name=config.environment, applications=instances) + return Environment( + name=config.environment, + vault_url=config.vault_url, + vault_path_prefix=config.vault_path_prefix, + applications=instances, + ) def _is_condition_satisfied( self, instance: ApplicationInstance, condition: str | None @@ -193,10 +198,12 @@ def _load_environment_config( raise UnknownEnvironmentError(environment_name) with values_path.open() as fh: values = yaml.safe_load(fh) + environment = EnvironmentConfig.parse_obj(values) - # Eventually this will have more structure, but for now assume any - # key whose value is a dictionary with an enabled key is indicating an - # application that is or is not enabled. + # Eventually this will have more structure and will be parsed directly + # by Pydantic, but for now assume any key whose value is a dictionary + # with an enabled key is indicating an application that is or is not + # enabled. 
applications = [] for key, value in values.items(): if isinstance(value, dict) and "enabled" in value: @@ -208,9 +215,8 @@ def _load_environment_config( applications.append("argocd") # Return the configuration. - return EnvironmentConfig( - environment=environment_name, applications=sorted(applications) - ) + environment.applications = sorted(applications) + return environment def _resolve_application( self, application: Application, environment_name: str diff --git a/src/phalanx/storage/vault.py b/src/phalanx/storage/vault.py new file mode 100644 index 0000000000..3bf7efb007 --- /dev/null +++ b/src/phalanx/storage/vault.py @@ -0,0 +1,73 @@ +"""Store, retrieve, and manipulate data stored in Vault.""" + +from __future__ import annotations + +import hvac +from pydantic import SecretStr + +from ..models.environments import Environment + +__all__ = ["VaultClient", "VaultStorage"] + + +class VaultClient: + """Store, retrieve, and manipulate data stored in Vault. + + The Vault authentication token is taken from either the ``VAULT_TOKEN`` + environment variable or a :file:`.vault-token` file in the user's home + directory. + + Parameters + ---------- + url + URL of the Vault server. + path + Path within that Vault server where secrets for an environment are + stored. + """ + + def __init__(self, url: str, path: str) -> None: + mount, path = path.split("/", 1) + self._vault = hvac.Client(url) + self._vault.secrets.kv.default_kv_version = 2 + self._path = path + + def get_application_secrets( + self, application: str + ) -> dict[str, SecretStr]: + """Get the secrets for an application currently stored in Vault. + + Parameters + ---------- + application + Name of the application. + + Returns + ------- + dict of pydantic.SecretStr + Mapping from secret key to its Vault from vault. + """ + path = f"{self._path}/{application}" + r = self._vault.secrets.kv.read_secret( + path=path, raise_on_deleted_version=True + ) + return {k: SecretStr(v) for k, v in r["data"]["data"].items()} + + +class VaultStorage: + """Create Vault clients for specific environments.""" + + def get_vault_client(self, env: Environment) -> VaultClient: + """Return a Vault client configured for the given environment. + + Parameters + ---------- + env + Phalanx environment. + + Returns + ------- + VaultClient + Vault client configured to manage secrets for that environment. 
+ """ + return VaultClient(env.vault_url, env.vault_path_prefix) diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index 28084c96b2..aa0ad36b76 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -2,13 +2,34 @@ from __future__ import annotations +import json import os +import re from pathlib import Path from click.testing import CliRunner from phalanx.cli import main +from phalanx.factory import Factory from ..support.data import phalanx_test_path, read_output_data +from ..support.vault import MockVaultClient + + +def test_audit(mock_vault: MockVaultClient) -> None: + input_path = phalanx_test_path() + os.chdir(str(input_path)) + input_path / "vault" / "idfdev" + factory = Factory() + config_storage = factory.create_config_storage() + environment = config_storage.load_environment("idfdev") + mock_vault.load_test_data(environment.vault_path_prefix, "idfdev") + + runner = CliRunner() + result = runner.invoke( + main, ["secrets", "audit", "idfdev"], catch_exceptions=False + ) + assert result.exit_code == 0 + assert result.output == read_output_data("idfdev", "secrets-audit") def test_list() -> None: @@ -45,3 +66,38 @@ def test_static_template() -> None: ) assert result.exit_code == 0 assert result.output == read_output_data("idfdev", "static-secrets.yaml") + + +def test_vault_secrets(tmp_path: Path, mock_vault: MockVaultClient) -> None: + input_path = phalanx_test_path() + vault_input_path = input_path / "vault" / "idfdev" + os.chdir(str(input_path)) + factory = Factory() + config_storage = factory.create_config_storage() + environment = config_storage.load_environment("idfdev") + mock_vault.load_test_data(environment.vault_path_prefix, "idfdev") + + runner = CliRunner() + result = runner.invoke( + main, + ["secrets", "vault-secrets", "idfdev", str(tmp_path)], + catch_exceptions=False, + ) + assert result.exit_code == 0 + assert result.output == "" + + expected_files = {p.name for p in vault_input_path.iterdir()} + output_files = {p.name for p in tmp_path.iterdir()} + assert expected_files == output_files + + # The output files will contain generated secrets that were missing from + # the input paths. Spot-check just one of those to see if it's correct. + # More comprehensive testing of secret generation will be done elsewhere. 
+ with (vault_input_path / "argocd.json").open() as fh: + expected_argocd = json.load(fh) + with (tmp_path / "argocd.json").open() as fh: + output_argocd = json.load(fh) + assert output_argocd["server.secretkey"] + assert re.match("^[0-9a-f]{64}$", output_argocd["server.secretkey"]) + del output_argocd["server.secretkey"] + assert expected_argocd == output_argocd diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000000..cb8acf4222 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,15 @@ +"""Test fixtures.""" + +from __future__ import annotations + +from collections.abc import Iterator + +import pytest + +from .support.vault import MockVaultClient, patch_vault + + +@pytest.fixture +def mock_vault() -> Iterator[MockVaultClient]: + """Mock out the HVAC Vault client API.""" + yield from patch_vault() diff --git a/tests/data/input/environments/values-idfdev.yaml b/tests/data/input/environments/values-idfdev.yaml index 5ac78c010a..52573351d0 100644 --- a/tests/data/input/environments/values-idfdev.yaml +++ b/tests/data/input/environments/values-idfdev.yaml @@ -1,5 +1,6 @@ environment: idfdev fqdn: data-dev.lsst.cloud +vaultUrl: https://vault.lsst.codes/ vaultPathPrefix: secret/k8s_operator/data-dev.lsst.cloud butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml" diff --git a/tests/data/input/environments/values.yaml b/tests/data/input/environments/values.yaml index 1739532a1f..00e05a072e 100644 --- a/tests/data/input/environments/values.yaml +++ b/tests/data/input/environments/values.yaml @@ -8,6 +8,10 @@ environment: "" # @default -- None, must be set fqdn: "" +# -- URL of Vault server for this environment +# @default -- None, must be set +vaultUrl: "" + # -- Prefix for Vault secrets for this environment # @default -- None, must be set vaultPathPrefix: "" diff --git a/tests/data/input/vault/idfdev/argocd.json b/tests/data/input/vault/idfdev/argocd.json new file mode 100644 index 0000000000..2c771b39cc --- /dev/null +++ b/tests/data/input/vault/idfdev/argocd.json @@ -0,0 +1,6 @@ +{ + "admin.password": "$2b$15$kRdR2PlZ.tV5tavXViDlv.QB7FYk0HH8IMTiGk9spSashzIqgmcJ.", + "admin.passwordMtime": "2021-10-08T21:31:31Z", + "admin.plaintext_password": "6f80a0863ae18da19d53e99b9ce67f82f54bc6164f368b6863471002992f0063", + "dex.clientSecret": "some-random-secret" +} diff --git a/tests/data/input/vault/idfdev/gafaelfawr.json b/tests/data/input/vault/idfdev/gafaelfawr.json new file mode 100644 index 0000000000..4341934715 --- /dev/null +++ b/tests/data/input/vault/idfdev/gafaelfawr.json @@ -0,0 +1,8 @@ +{ + "bootstrap-token": "gt-xESy7hFgaLI3t7Cg8TMy8Q.gev0rVktVpL6GGZs6kv5fg", + "cilogon": "y", + "cilogon-client-secret": "some-cilogon-password", + "database-password": "some-database-password", + "ldap-password": "some-ldap-password", + "redis-password": "3b17413a28b5be73cc963a98558cb50eadf337e78d422476810fd562d9538492" +} diff --git a/tests/data/input/vault/idfdev/mobu.json b/tests/data/input/vault/idfdev/mobu.json new file mode 100644 index 0000000000..8f8391d71a --- /dev/null +++ b/tests/data/input/vault/idfdev/mobu.json @@ -0,0 +1,4 @@ +{ + "ALERT_HOOK": "https://hooks.slack.com/mobu-slack-hook", + "app-alert-webhook": "https://hooks.slack.com/app-slack-hook" +} diff --git a/tests/data/input/vault/idfdev/nublado.json b/tests/data/input/vault/idfdev/nublado.json new file mode 100644 index 0000000000..4e9c9f89f6 --- /dev/null +++ b/tests/data/input/vault/idfdev/nublado.json @@ -0,0 +1,6 @@ +{ + "cryptkeeper_key": 
"411820d5a6cc6c3bcd73c1fbd61f9c9eb06454272825345ba0cbe0304fef4168", + "crypto_key": "59d636b6428cde68710166611187371e58ea3ff7a9e07e75b2286fc08f7763c8", + "hub_db_password": "2e188c579b159d59f83e478203261c91fe2f5db3858111bc318f0f9d5dbe055e", + "slack_webhook": "https://hooks.slack.com/app-slack-hook" +} diff --git a/tests/data/input/vault/idfdev/postgres.json b/tests/data/input/vault/idfdev/postgres.json new file mode 100644 index 0000000000..55729b763b --- /dev/null +++ b/tests/data/input/vault/idfdev/postgres.json @@ -0,0 +1,4 @@ +{ + "nublado3_password": "e1e4cde6276b8612837ca0a0ef74b16796004d91388480aee8843d9cc21079a6", + "root_password": "a7605f445f47bfabdb3a35f5a4eca85ee57a74ff3b266bdb6c11c69c4451e8d2f23574f13879bba2c4520f454f5034ece24f642278315931ca2dad1be384534a" +} diff --git a/tests/data/output/idfdev/secrets-audit b/tests/data/output/idfdev/secrets-audit new file mode 100644 index 0000000000..6fdfe22865 --- /dev/null +++ b/tests/data/output/idfdev/secrets-audit @@ -0,0 +1,14 @@ +Missing secrets: +• argocd server.secretkey +• gafaelfawr session-secret +• gafaelfawr signing-key +• gafaelfawr slack-webhook +• nublado aws-credentials.ini +• nublado butler-gcs-idf-creds.json +• nublado butler-hmac-idf-creds.json +• nublado postgres-credentials.txt +• nublado proxy_token +Incorrect secrets: +• postgres nublado3_password +Unknown secrets in Vault: +• gafaelfawr cilogon diff --git a/tests/support/vault.py b/tests/support/vault.py new file mode 100644 index 0000000000..d8648329c3 --- /dev/null +++ b/tests/support/vault.py @@ -0,0 +1,87 @@ +"""Mock Vault API for testing.""" + +from __future__ import annotations + +import json +from collections import defaultdict +from collections.abc import Iterator +from typing import Any +from unittest.mock import patch + +import hvac + +from .data import phalanx_test_path + +__all__ = [ + "MockVaultClient", + "patch_vault", +] + + +class MockVaultClient: + """Mock Vault client for testing.""" + + def __init__(self) -> None: + self.secrets = self + self.kv = self + self._data: defaultdict[str, dict[str, dict[str, str]]] + self._data = defaultdict(dict) + self._paths: dict[str, str] = {} + + def load_test_data(self, path: str, environment: str) -> None: + """Load Vault test data for the given environment. + + This method is not part of the Vault API. It is intended for use by + the test suite to set up a test. + + Parameters + ---------- + path + Path to the environment data in Vault. + environment + Name of the environment for which to load Vault test data. + """ + _, app_path = path.split("/", 1) + self._paths[app_path] = environment + data_path = phalanx_test_path() / "vault" / environment + for app_data_path in data_path.iterdir(): + application = app_data_path.stem + with app_data_path.open() as fh: + self._data[environment][application] = json.load(fh) + + def read_secret( + self, path: str, raise_on_deleted_version: bool | None = None + ) -> dict[str, Any]: + """Read a secret from Vault. + + Parameters + ---------- + path + Vault path to the secret. + raise_on_deleted_version + Whether to raise an exception if the most recent version is + deleted (required to be `True`). + + Returns + ------- + dict + Reply matching the Vault client reply structure. 
+ """ + assert raise_on_deleted_version + base_path, application = path.rsplit("/", 1) + environment = self._paths[base_path] + values = self._data[environment][application] + return {"data": {"data": values}} + + +def patch_vault() -> Iterator[MockVaultClient]: + """Replace the HVAC Vault client with a mock class. + + Yields + ------ + MockVaultClient + Mock HVAC Vault client. + """ + mock_vault = MockVaultClient() + with patch.object(hvac, "Client", return_value=mock_vault): + yield mock_vault From 60aad193f9ba3ac5e86dca905b0a35528691a451 Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 31 Jul 2023 17:16:04 -0700 Subject: [PATCH 304/308] Move collecting secrets into the model Rather than duplicating the code to create a list of all application secrets for an environment, move it into the model. --- src/phalanx/models/environments.py | 14 ++++++++++++++ src/phalanx/services/secrets.py | 13 ++++--------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py index 59ef5896fb..4d15824f49 100644 --- a/src/phalanx/models/environments.py +++ b/src/phalanx/models/environments.py @@ -6,6 +6,7 @@ from safir.pydantic import CamelCaseModel from .applications import ApplicationInstance +from .secrets import Secret __all__ = [ "Environment", @@ -51,3 +52,16 @@ class Environment(BaseModel): def all_applications(self) -> list[ApplicationInstance]: """Return enabled applications in sorted order.""" return sorted(self.applications.values(), key=lambda a: a.name) + + def all_secrets(self) -> list[Secret]: + """Return a list of all secrets regardless of application. + + Returns + ------- + list of Secret + All secrets from all applications. + """ + secrets = [] + for application in self.all_applications(): + secrets.extend(application.secrets) + return secrets diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 1a81a95acb..b145013f13 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -55,10 +55,9 @@ def audit(self, env_name: str) -> str: # Retrieve all the current secrets from Vault and resolve all of the # secrets. - secrets = [] + secrets = environment.all_secrets() vault_secrets = {} for application in environment.all_applications(): - secrets.extend(application.secrets) name = application.name vault_secret = vault_client.get_application_secrets(name) vault_secrets[name] = vault_secret @@ -145,10 +144,9 @@ def generate_vault_secrets(self, env_name: str, path: Path) -> None: """ environment = self._config.load_environment(env_name) vault_client = self._vault.get_vault_client(environment) - secrets = [] + secrets = environment.all_secrets() vault_secrets = {} for application in environment.all_applications(): - secrets.extend(application.secrets) name = application.name vault_secret = vault_client.get_application_secrets(name) vault_secrets[name] = vault_secret @@ -173,14 +171,11 @@ def list_secrets(self, env_name: str) -> list[Secret]: Returns ------- - list of ResolvedSecret + list of Secret Secrets required for the given environment. 
""" environment = self._config.load_environment(env_name) - secrets = [] - for application in environment.all_applications(): - secrets.extend(application.secrets) - return secrets + return environment.all_secrets() def _resolve_secrets( self, From d88deed2dc904152eaf0d382a0899443b6528b7b Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Mon, 31 Jul 2023 17:20:43 -0700 Subject: [PATCH 305/308] Add method to get all secrets for an environment Avoid some repeated code by adding a new method to VaultClient to retrieve all Vault secrets for a given environment. --- src/phalanx/services/secrets.py | 12 ++---------- src/phalanx/storage/vault.py | 23 ++++++++++++++++++++++- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index b145013f13..8d40b784ad 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -56,11 +56,7 @@ def audit(self, env_name: str) -> str: # Retrieve all the current secrets from Vault and resolve all of the # secrets. secrets = environment.all_secrets() - vault_secrets = {} - for application in environment.all_applications(): - name = application.name - vault_secret = vault_client.get_application_secrets(name) - vault_secrets[name] = vault_secret + vault_secrets = vault_client.get_environment_secrets(environment) resolved = self._resolve_secrets(secrets, environment, vault_secrets) # Compare the resolved secrets to the Vault data. @@ -145,11 +141,7 @@ def generate_vault_secrets(self, env_name: str, path: Path) -> None: environment = self._config.load_environment(env_name) vault_client = self._vault.get_vault_client(environment) secrets = environment.all_secrets() - vault_secrets = {} - for application in environment.all_applications(): - name = application.name - vault_secret = vault_client.get_application_secrets(name) - vault_secrets[name] = vault_secret + vault_secrets = vault_client.get_environment_secrets(environment) resolved = self._resolve_secrets(secrets, environment, vault_secrets) for app_name, values in resolved.items(): app_secrets: dict[str, str | None] = {} diff --git a/src/phalanx/storage/vault.py b/src/phalanx/storage/vault.py index 3bf7efb007..20266ce221 100644 --- a/src/phalanx/storage/vault.py +++ b/src/phalanx/storage/vault.py @@ -45,7 +45,7 @@ def get_application_secrets( Returns ------- dict of pydantic.SecretStr - Mapping from secret key to its Vault from vault. + Mapping from secret key to its secret from Vault. """ path = f"{self._path}/{application}" r = self._vault.secrets.kv.read_secret( @@ -53,6 +53,27 @@ def get_application_secrets( ) return {k: SecretStr(v) for k, v in r["data"]["data"].items()} + def get_environment_secrets( + self, environment: Environment + ) -> dict[str, dict[str, SecretStr]]: + """Get the secrets for an environment currently stored in Vault. + + Parameters + ---------- + environment + Name of the environment. + + Returns + ------- + dict of dict + Mapping from application to secret key to its secret from Vault. 
+ """ + vault_secrets = {} + for application in environment.all_applications(): + vault_secret = self.get_application_secrets(application.name) + vault_secrets[application.name] = vault_secret + return vault_secrets + class VaultStorage: """Create Vault clients for specific environments.""" From 420b43388c3c2d44b416197c9cc4cfabb941e14a Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 1 Aug 2023 09:58:45 -0700 Subject: [PATCH 306/308] Switch coverage tracking methods Rather than generating separate coverage files that have to be combined, use the pytest-cov plugin to store the coverage in only one file. This reduces the extra files created in the working directory while iterating on the py test. Update Python dependencies. --- requirements/dev.in | 1 + requirements/dev.txt | 104 +++++++++++++++++++++++-------------------- tox.ini | 3 +- 3 files changed, 58 insertions(+), 50 deletions(-) diff --git a/requirements/dev.in b/requirements/dev.in index 4ecd8c51e5..5f81da8931 100644 --- a/requirements/dev.in +++ b/requirements/dev.in @@ -12,6 +12,7 @@ coverage[toml] mypy pre-commit pytest +pytest-cov ruff types-PyYAML diff --git a/requirements/dev.txt b/requirements/dev.txt index e6ad8ddedb..da6822d4d1 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -213,7 +213,9 @@ coverage[toml]==7.2.7 \ --hash=sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e \ --hash=sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850 \ --hash=sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3 - # via -r requirements/dev.in + # via + # -r requirements/dev.in + # pytest-cov cycler==0.11.0 \ --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \ --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f @@ -550,32 +552,32 @@ nodeenv==1.8.0 \ --hash=sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2 \ --hash=sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec # via pre-commit -numpy==1.25.1 \ - --hash=sha256:012097b5b0d00a11070e8f2e261128c44157a8689f7dedcf35576e525893f4fe \ - --hash=sha256:0d3fe3dd0506a28493d82dc3cf254be8cd0d26f4008a417385cbf1ae95b54004 \ - --hash=sha256:0def91f8af6ec4bb94c370e38c575855bf1d0be8a8fbfba42ef9c073faf2cf19 \ - --hash=sha256:1a180429394f81c7933634ae49b37b472d343cccb5bb0c4a575ac8bbc433722f \ - --hash=sha256:1d5d3c68e443c90b38fdf8ef40e60e2538a27548b39b12b73132456847f4b631 \ - --hash=sha256:20e1266411120a4f16fad8efa8e0454d21d00b8c7cee5b5ccad7565d95eb42dd \ - --hash=sha256:247d3ffdd7775bdf191f848be8d49100495114c82c2bd134e8d5d075fb386a1c \ - --hash=sha256:35a9527c977b924042170a0887de727cd84ff179e478481404c5dc66b4170009 \ - --hash=sha256:38eb6548bb91c421261b4805dc44def9ca1a6eef6444ce35ad1669c0f1a3fc5d \ - --hash=sha256:3d7abcdd85aea3e6cdddb59af2350c7ab1ed764397f8eec97a038ad244d2d105 \ - --hash=sha256:41a56b70e8139884eccb2f733c2f7378af06c82304959e174f8e7370af112e09 \ - --hash=sha256:4a90725800caeaa160732d6b31f3f843ebd45d6b5f3eec9e8cc287e30f2805bf \ - --hash=sha256:6b82655dd8efeea69dbf85d00fca40013d7f503212bc5259056244961268b66e \ - --hash=sha256:6c6c9261d21e617c6dc5eacba35cb68ec36bb72adcff0dee63f8fbc899362588 \ - --hash=sha256:77d339465dff3eb33c701430bcb9c325b60354698340229e1dff97745e6b3efa \ - --hash=sha256:791f409064d0a69dd20579345d852c59822c6aa087f23b07b1b4e28ff5880fcb \ - --hash=sha256:9a3a9f3a61480cc086117b426a8bd86869c213fc4072e606f01c4e4b66eb92bf \ - --hash=sha256:c1516db588987450b85595586605742879e50dcce923e8973f79529651545b57 \ - 
--hash=sha256:c40571fe966393b212689aa17e32ed905924120737194b5d5c1b20b9ed0fb171 \ - --hash=sha256:d412c1697c3853c6fc3cb9751b4915859c7afe6a277c2bf00acf287d56c4e625 \ - --hash=sha256:d5154b1a25ec796b1aee12ac1b22f414f94752c5f94832f14d8d6c9ac40bcca6 \ - --hash=sha256:d736b75c3f2cb96843a5c7f8d8ccc414768d34b0a75f466c05f3a739b406f10b \ - --hash=sha256:e8f6049c4878cb16960fbbfb22105e49d13d752d4d8371b55110941fb3b17800 \ - --hash=sha256:f76aebc3358ade9eacf9bc2bb8ae589863a4f911611694103af05346637df1b7 \ - --hash=sha256:fd67b306320dcadea700a8f79b9e671e607f8696e98ec255915c0c6d6b818503 +numpy==1.25.2 \ + --hash=sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2 \ + --hash=sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55 \ + --hash=sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf \ + --hash=sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01 \ + --hash=sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca \ + --hash=sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901 \ + --hash=sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d \ + --hash=sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4 \ + --hash=sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf \ + --hash=sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380 \ + --hash=sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044 \ + --hash=sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545 \ + --hash=sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f \ + --hash=sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f \ + --hash=sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3 \ + --hash=sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364 \ + --hash=sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9 \ + --hash=sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418 \ + --hash=sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f \ + --hash=sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295 \ + --hash=sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3 \ + --hash=sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187 \ + --hash=sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926 \ + --hash=sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357 \ + --hash=sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760 # via # contourpy # matplotlib @@ -645,9 +647,9 @@ pillow==10.0.0 \ --hash=sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51 \ --hash=sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90 # via matplotlib -platformdirs==3.9.1 \ - --hash=sha256:1b42b450ad933e981d56e59f1b97495428c9bd60698baab9f3eb3d00d5822421 \ - --hash=sha256:ad8291ae0ae5072f66c16945166cb11c63394c7a3ad1b1bc9828ca3162da8c2f +platformdirs==3.10.0 \ + --hash=sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d \ + --hash=sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d # via virtualenv pluggy==1.2.0 \ --hash=sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849 \ @@ -726,6 +728,12 @@ pyparsing==3.0.9 \ pytest==7.4.0 \ --hash=sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32 \ 
--hash=sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a + # via + # -r requirements/dev.in + # pytest-cov +pytest-cov==4.1.0 \ + --hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \ + --hash=sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a # via -r requirements/dev.in python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ @@ -785,24 +793,24 @@ requests==2.31.0 \ # -c requirements/main.txt # documenteer # sphinx -ruff==0.0.280 \ - --hash=sha256:2dae8f2d9c44c5c49af01733c2f7956f808db682a4193180dedb29dd718d7bbe \ - --hash=sha256:2e7c15828d09f90e97bea8feefcd2907e8c8ce3a1f959c99f9b4b3469679f33c \ - --hash=sha256:37359cd67d2af8e09110a546507c302cbea11c66a52d2a9b6d841d465f9962d4 \ - --hash=sha256:48ed5aca381050a4e2f6d232db912d2e4e98e61648b513c350990c351125aaec \ - --hash=sha256:4a7d52457b5dfcd3ab24b0b38eefaead8e2dca62b4fbf10de4cd0938cf20ce30 \ - --hash=sha256:581c43e4ac5e5a7117ad7da2120d960a4a99e68ec4021ec3cd47fe1cf78f8380 \ - --hash=sha256:5f972567163a20fb8c2d6afc60c2ea5ef8b68d69505760a8bd0377de8984b4f6 \ - --hash=sha256:7008fc6ca1df18b21fa98bdcfc711dad5f94d0fc3c11791f65e460c48ef27c82 \ - --hash=sha256:7784e3606352fcfb193f3cd22b2e2117c444cb879ef6609ec69deabd662b0763 \ - --hash=sha256:7a37dab70114671d273f203268f6c3366c035fe0c8056614069e90a65e614bfc \ - --hash=sha256:83e8f372fa5627eeda5b83b5a9632d2f9c88fc6d78cead7e2a1f6fb05728d137 \ - --hash=sha256:8ffa7347ad11643f29de100977c055e47c988cd6d9f5f5ff83027600b11b9189 \ - --hash=sha256:b7de5b8689575918e130e4384ed9f539ce91d067c0a332aedef6ca7188adac2d \ - --hash=sha256:bd58af46b0221efb95966f1f0f7576df711cb53e50d2fdb0e83c2f33360116a4 \ - --hash=sha256:d878370f7e9463ac40c253724229314ff6ebe4508cdb96cb536e1af4d5a9cd4f \ - --hash=sha256:ef6ee3e429fd29d6a5ceed295809e376e6ece5b0f13c7e703efaf3d3bcb30b96 \ - --hash=sha256:fe7118c1eae3fda17ceb409629c7f3b5a22dffa7caf1f6796776936dca1fe653 +ruff==0.0.282 \ + --hash=sha256:01b76309ddab16eb258dabc5e86e73e6542f59f3ea6b4ab886ecbcfc80ce062c \ + --hash=sha256:0710ea2cadc504b96c1d94c414a7802369d0fff2ab7c94460344bba69135cb40 \ + --hash=sha256:1f05f5e6d6df6f8b1974c08f963c33f0a4d8cfa15cba12d35ca3ece8e9be5b1f \ + --hash=sha256:2ca52536e1c7603fe4cbb5ad9dc141df47c3200df782f5ec559364716ea27f96 \ + --hash=sha256:3f30c9958ab9cb02bf0c574c629e87c19454cbbdb82750e49e3d1559a5a8f216 \ + --hash=sha256:47a7a9366ab8e4ee20df9339bef172eec7b2e9e123643bf3ede005058f5b114e \ + --hash=sha256:5374b40b6d860d334d28678a53a92f0bf04b53acdf0395900361ad54ce71cd1d \ + --hash=sha256:826e4de98e91450a6fe699a4e4a7cf33b9a90a2c5c270dc5b202241c37359ff8 \ + --hash=sha256:aab9ed5bfba6b0a2242a7ec9a72858c802ceeaf0076fe72b2ad455639275f22c \ + --hash=sha256:bd25085c42ebaffe336ed7bda8a0ae7b6c454a5f386ec8b2299503f79bd12bdf \ + --hash=sha256:d1ccbceb44e94fe2205b63996166e98a513a19ed23ec01d7193b7494b94ba30d \ + --hash=sha256:d99758f8bbcb8f8da99acabf711ffad5e7a015247adf27211100b3586777fd56 \ + --hash=sha256:e177cbb6dc0b1dbef5e999900d798b73e33602abf9b6c62d5d2cbe101026d931 \ + --hash=sha256:eee9c8c50bc77eb9c0811c91d9d67ff39fe4f394c2f44ada37dac6d45e50c9f1 \ + --hash=sha256:ef677c26bae756e4c98af6d8972da83caea550bc92ffef97a6e939ca5b24ad06 \ + --hash=sha256:f03fba9621533d67d7ab995847467d78b9337e3697779ef2cea6f1deaee5fbef \ + --hash=sha256:f51bbb64f8f29e444c16d21b269ba82e25f8d536beda3df7c9fe1816297e508e # via -r requirements/dev.in six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ diff --git a/tox.ini b/tox.ini index 
f6f7632211..f34398c283 100644 --- a/tox.ini +++ b/tox.ini @@ -25,7 +25,6 @@ deps = coverage[toml]>=5.0.2 depends = py commands = - coverage combine coverage report [testenv:lint] @@ -45,7 +44,7 @@ commands = neophile update {posargs} [testenv:py] description = Run pytest commands = - coverage run -m pytest -vvv {posargs} + pytest -vv --cov=phalanx --cov-branch --cov-report= {posargs} [testenv:typing] description = Run mypy. From 9766ef2ce295ea46a324119979f0b4373b1ec88d Mon Sep 17 00:00:00 2001 From: Russ Allbery Date: Tue, 1 Aug 2023 13:56:18 -0700 Subject: [PATCH 307/308] Don't resolve secrets when saving Vault secrets To simplify the semantics of the phalanx secrets vault-secrets command, just store the Vault data as it is present in Vault without going through the secret resolution code. Previously, secrets were generated if missing, which confused the semantics of the command. --- src/phalanx/cli.py | 2 +- src/phalanx/services/secrets.py | 42 ++++++++++++++++----------------- tests/cli/secrets_test.py | 18 ++++---------- 3 files changed, 26 insertions(+), 36 deletions(-) diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py index d438923abd..6585058b67 100644 --- a/src/phalanx/cli.py +++ b/src/phalanx/cli.py @@ -121,4 +121,4 @@ def secrets_vault_secrets(environment: str, output: Path) -> None: """ factory = Factory() secrets_service = factory.create_secrets_service() - secrets_service.generate_vault_secrets(environment, output) + secrets_service.save_vault_secrets(environment, output) diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py index 8d40b784ad..ead4b40138 100644 --- a/src/phalanx/services/secrets.py +++ b/src/phalanx/services/secrets.py @@ -122,7 +122,23 @@ def generate_static_template(self, env_name: str) -> str: } return yaml.dump(template, width=72) - def generate_vault_secrets(self, env_name: str, path: Path) -> None: + def list_secrets(self, env_name: str) -> list[Secret]: + """List all required secrets for the given environment. + + Parameters + ---------- + env_name + Name of the environment. + + Returns + ------- + list of Secret + Secrets required for the given environment. + """ + environment = self._config.load_environment(env_name) + return environment.all_secrets() + + def save_vault_secrets(self, env_name: str, path: Path) -> None: """Generate JSON files containing the Vault secrets for an environment. One file per application with secrets will be written to the provided @@ -140,35 +156,17 @@ def generate_vault_secrets(self, env_name: str, path: Path) -> None: """ environment = self._config.load_environment(env_name) vault_client = self._vault.get_vault_client(environment) - secrets = environment.all_secrets() vault_secrets = vault_client.get_environment_secrets(environment) - resolved = self._resolve_secrets(secrets, environment, vault_secrets) - for app_name, values in resolved.items(): + for app_name, values in vault_secrets.items(): app_secrets: dict[str, str | None] = {} for key, secret in values.items(): - if secret.value: - app_secrets[key] = secret.value.get_secret_value() + if secret: + app_secrets[key] = secret.get_secret_value() else: app_secrets[key] = None with (path / f"{app_name}.json").open("w") as fh: json.dump(app_secrets, fh, indent=2) - def list_secrets(self, env_name: str) -> list[Secret]: - """List all required secrets for the given environment. - - Parameters - ---------- - env_name - Name of the environment. - - Returns - ------- - list of Secret - Secrets required for the given environment. 
- """ - environment = self._config.load_environment(env_name) - return environment.all_secrets() - def _resolve_secrets( self, secrets: list[Secret], diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py index aa0ad36b76..ddc5555265 100644 --- a/tests/cli/secrets_test.py +++ b/tests/cli/secrets_test.py @@ -4,7 +4,6 @@ import json import os -import re from pathlib import Path from click.testing import CliRunner @@ -89,15 +88,8 @@ def test_vault_secrets(tmp_path: Path, mock_vault: MockVaultClient) -> None: expected_files = {p.name for p in vault_input_path.iterdir()} output_files = {p.name for p in tmp_path.iterdir()} assert expected_files == output_files - - # The output files will contain generated secrets that were missing from - # the input paths. Spot-check just one of those to see if it's correct. - # More comprehensive testing of secret generation will be done elsewhere. - with (vault_input_path / "argocd.json").open() as fh: - expected_argocd = json.load(fh) - with (tmp_path / "argocd.json").open() as fh: - output_argocd = json.load(fh) - assert output_argocd["server.secretkey"] - assert re.match("^[0-9a-f]{64}$", output_argocd["server.secretkey"]) - del output_argocd["server.secretkey"] - assert expected_argocd == output_argocd + for expected_path in vault_input_path.iterdir(): + with expected_path.open() as fh: + expected = json.load(fh) + with (tmp_path / expected_path.name).open() as fh: + assert expected == json.load(fh) From 3ac354c08452d2aa642952dbef39d500d2c9b89d Mon Sep 17 00:00:00 2001 From: Angelo Fausti Date: Wed, 2 Aug 2023 18:47:56 -0700 Subject: [PATCH 308/308] Uniformize influxdb resource requests across all instances --- applications/sasquatch/README.md | 8 ++++---- applications/sasquatch/values.yaml | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index 31cb6b186d..cf927ae99c 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -31,8 +31,8 @@ Rubin Observatory's telemetry service. | influxdb-staging.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments | | influxdb-staging.resources.limits.cpu | int | `8` | | | influxdb-staging.resources.limits.memory | string | `"96Gi"` | | -| influxdb-staging.resources.requests.cpu | int | `1` | | -| influxdb-staging.resources.requests.memory | string | `"1Gi"` | | +| influxdb-staging.resources.requests.cpu | int | `8` | | +| influxdb-staging.resources.requests.memory | string | `"96Gi"` | | | influxdb-staging.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. | | influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config | | influxdb.enabled | bool | `true` | Enable InfluxDB. | @@ -70,8 +70,8 @@ Rubin Observatory's telemetry service. | influxdb2.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments. 
| | influxdb2.resources.limits.cpu | int | `8` | | | influxdb2.resources.limits.memory | string | `"96Gi"` | | -| influxdb2.resources.requests.cpu | int | `1` | | -| influxdb2.resources.requests.memory | string | `"1Gi"` | | +| influxdb2.resources.requests.cpu | int | `8` | | +| influxdb2.resources.requests.memory | string | `"16Gi"` | | | kafdrop.enabled | bool | `true` | Enable Kafdrop. | | kafka-connect-manager | object | `{}` | Override kafka-connect-manager configuration. | | kapacitor.enabled | bool | `true` | Enable Kapacitor. | diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index 70ed6e5d1a..2ae3a8e006 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -42,6 +42,7 @@ influxdb: config: data: cache-max-memory-size: 0 + # Duration a write will wait before fsyncing. This is useful for slower disks or when WAL write contention is present. wal-fsync-delay: "100ms" trace-logging-enabled: true http: @@ -129,8 +130,8 @@ influxdb-staging: # init.iql: |+ resources: requests: - memory: 1Gi - cpu: 1 + memory: 96Gi + cpu: 8 limits: memory: 96Gi cpu: 8 @@ -198,7 +199,6 @@ source-influxdb: memory: 96Gi cpu: 8 - influxdb2: enabled: false image: @@ -243,8 +243,8 @@ influxdb2: influx bucket create --name telegraf-kafka-consumer --org default resources: requests: - memory: 1Gi - cpu: 1 + memory: 16Gi + cpu: 8 limits: memory: 96Gi cpu: 8
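
The secrets patches above leave the service layer with three public entry points: list_secrets, audit, and save_vault_secrets. Below is a minimal sketch of driving those entry points directly from Python instead of through the CLI; it assumes a Phalanx checkout as the working directory, a Vault token in the VAULT_TOKEN environment variable, and uses "idfdev" plus a local output directory purely as placeholder values.

from pathlib import Path

from phalanx.factory import Factory

# Build the service the same way cli.py does (sketch: assumes the current
# working directory is a Phalanx checkout so environment config can load).
factory = Factory()
secrets_service = factory.create_secrets_service()

# Enumerate the secrets required by every enabled application.
for secret in secrets_service.list_secrets("idfdev"):
    print(secret.application, secret.key)

# Compare configured secrets against Vault and print the resulting report
# (missing, incorrect, and unknown secrets).
print(secrets_service.audit("idfdev"))

# Dump current Vault contents, one JSON file per application, into a
# placeholder output directory.
secrets_service.save_vault_secrets("idfdev", Path("/tmp/idfdev-vault"))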