diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index e53b746bf5..b5610d35ce 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -40,6 +40,7 @@ jobs: - "applications/argocd/values-*.yaml" - "applications/gafaelfawr/values-*.yaml" - "environments/values-*.yaml" + - "requirements/*.txt" - "src/phalanx/**" docsSpecific: - "docs/**" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c684835a13..a5a2f68946 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-merge-conflict - id: check-toml @@ -14,7 +14,7 @@ repos: - -c=.yamllint.yml - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.29.2 + rev: 0.29.4 hooks: - id: check-jsonschema files: ^applications/.*/secrets(-[^./-]+)?\.yaml @@ -46,14 +46,14 @@ repos: - --template-files=../helm-docs.md.gotmpl - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.7 + rev: v0.7.4 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] - id: ruff-format - repo: https://github.com/adamchainz/blacken-docs - rev: 1.18.0 + rev: 1.19.1 hooks: - id: blacken-docs additional_dependencies: [black==23.7.0] diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md index c8043c152d..9a4ecb14b1 100644 --- a/applications/alert-stream-broker/README.md +++ b/applications/alert-stream-broker/README.md @@ -32,7 +32,6 @@ Alert transmission to community brokers | alert-database.ingester.serviceAccountName | string | `"alert-database-ingester"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database ingester. 
| | alert-database.ingress.annotations | object | `{}` | | | alert-database.ingress.enabled | bool | `true` | Whether to create an ingress | -| alert-database.ingress.gafaelfawrAuthQuery | string | `"scope=read:alertdb"` | Query string for Gafaelfawr to authorize access | | alert-database.ingress.host | string | None, must be set if the ingress is enabled | Hostname for the ingress | | alert-database.ingress.path | string | `"/alertdb"` | Subpath to host the alert database application under the ingress | | alert-database.ingress.tls | list | `[]` | Configures TLS for the ingress if needed. If multiple ingresses share the same hostname, only one of them needs a TLS configuration. | diff --git a/applications/alert-stream-broker/charts/alert-database/README.md b/applications/alert-stream-broker/charts/alert-database/README.md index eca25aabf3..04107a4e53 100644 --- a/applications/alert-stream-broker/charts/alert-database/README.md +++ b/applications/alert-stream-broker/charts/alert-database/README.md @@ -23,7 +23,6 @@ Archival database of alerts sent through the alert stream. | ingester.serviceAccountName | string | `"alert-database-ingester"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database ingester. | | ingress.annotations | object | `{}` | | | ingress.enabled | bool | `true` | Whether to create an ingress | -| ingress.gafaelfawrAuthQuery | string | `"scope=read:alertdb"` | Query string for Gafaelfawr to authorize access | | ingress.host | string | None, must be set if the ingress is enabled | Hostname for the ingress | | ingress.path | string | `"/alertdb"` | Subpath to host the alert database application under the ingress | | ingress.tls | list | `[]` | Configures TLS for the ingress if needed. If multiple ingresses share the same hostname, only one of them needs a TLS configuration. 
| diff --git a/applications/alert-stream-broker/charts/alert-database/templates/ingress.yaml b/applications/alert-stream-broker/charts/alert-database/templates/ingress.yaml index 083c96d39b..774dadf5a1 100644 --- a/applications/alert-stream-broker/charts/alert-database/templates/ingress.yaml +++ b/applications/alert-stream-broker/charts/alert-database/templates/ingress.yaml @@ -1,38 +1,45 @@ {{- if .Values.ingress.enabled -}} -apiVersion: networking.k8s.io/v1 -kind: Ingress +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress metadata: - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/auth-method: "GET" - nginx.ingress.kubernetes.io/auth-url: "http://gafaelfawr.gafaelfawr.svc.cluster.local:8080/auth?{{ required "ingress.gafaelfawrAuthQuery must be set" .Values.ingress.gafaelfawrAuthQuery }}" - {{- with .Values.ingress.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} name: {{ template "alertDatabase.fullname" . }} labels: {{- include "alertDatabase.labels" . | nindent 4 }} -spec: - rules: - - host: {{ required "ingress.host must be set" .Values.ingress.host | quote }} - http: - paths: - - path: "{{ .Values.ingress.path }}(/|$)(.*)" - pathType: Prefix - backend: - service: - name: {{ template "alertDatabase.fullname" . }} - port: - name: http - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:alertdb" +template: + metadata: + name: {{ template "alertDatabase.fullname" . }} + annotations: + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 6 }} + {{- end }} + labels: + {{- include "alertDatabase.labels" . 
| nindent 4 }} + spec: + rules: + - host: {{ required "ingress.host must be set" .Values.ingress.host | quote }} + http: + paths: + - path: "{{ .Values.ingress.path }}(/|$)(.*)" + pathType: ImplementationSpecific + backend: + service: + name: {{ template "alertDatabase.fullname" . }} + port: + name: http + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} {{- end }} - {{- end }} {{- end }} diff --git a/applications/alert-stream-broker/charts/alert-database/values.yaml b/applications/alert-stream-broker/charts/alert-database/values.yaml index 867d8c2c0c..45362837da 100644 --- a/applications/alert-stream-broker/charts/alert-database/values.yaml +++ b/applications/alert-stream-broker/charts/alert-database/values.yaml @@ -101,6 +101,3 @@ ingress: # -- Subpath to host the alert database application under the ingress path: "/alertdb" - - # -- Query string for Gafaelfawr to authorize access - gafaelfawrAuthQuery: "scope=read:alertdb" diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml index 26c74abe19..74b3e6ea15 100644 --- a/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml +++ b/applications/alert-stream-broker/charts/alert-stream-broker/templates/kafka-topics.yaml @@ -93,4 +93,24 @@ spec: # timestamp instead message.timestamp.type: 'LogAppendTime' partitions: {{ .Values.latissTopicPartitions }} - replicas: {{ .Values.latissTopicReplicas }} \ No newline at end of file + replicas: {{ .Values.latissTopicReplicas }} +--- +apiVersion: "kafka.strimzi.io/{{ .Values.strimziAPIVersion }}" +kind: KafkaTopic +metadata: + labels: + strimzi.io/cluster: "{{ .Values.clusterName }}" + name: "{{ .Values.comcamTopicName}}" +spec: + config: + cleanup.policy: "delete" + retention.ms: 
{{ .Values.maxMillisecondsRetained }} # 7 days + retention.bytes: {{ .Values.maxBytesRetained }} + compression.type: {{ .Values.topicCompression }} + # The default timestamp is the creation time of the alert. + # To get the ingestion rate, we need this to be the log + # append time, and the header will contain the producer + # timestamp instead + message.timestamp.type: 'LogAppendTime' + partitions: {{ .Values.comcamTopicPartitions }} + replicas: {{ .Values.comcamTopicReplicas }} diff --git a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml index 0f56055671..3792a879b1 100644 --- a/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml +++ b/applications/alert-stream-broker/values-usdfdev-alert-stream-broker.yaml @@ -17,7 +17,7 @@ alert-stream-broker: enabled: false bootstrap: host: usdf-alert-stream-dev.lsst.cloud - ip: "" + ip: "134.79.23.185" annotations: metallb.universe.tf/address-pool: 'sdf-dmz' brokers: @@ -121,6 +121,10 @@ alert-stream-broker: latissTopicPartitions: 45 latissTopicReplicas: 1 + comcamTopicName: lsstcomcam-alerts + comcamTopicPartitions: 45 + comcamTopicReplicas: 1 + # Compression set to snappy to balance alert packet compression speed and size. 
topicCompression: snappy diff --git a/applications/alert-stream-broker/values-usdfdev.yaml b/applications/alert-stream-broker/values-usdfdev.yaml index 77b08617ac..0e01ef35e5 100644 --- a/applications/alert-stream-broker/values-usdfdev.yaml +++ b/applications/alert-stream-broker/values-usdfdev.yaml @@ -110,7 +110,6 @@ alert-database: ingress: enabled: true host: "usdf-rsp-dev.slac.stanford.edu" - gafaelfawrAuthQuery: "scope=read:alertdb" storage: gcp: diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml index 28c6a47cac..01ae908e43 100644 --- a/applications/argo-workflows/Chart.yaml +++ b/applications/argo-workflows/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-workflows - version: 0.42.3 + version: 0.42.7 repository: https://argoproj.github.io/argo-helm diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml index 56c9e07f2f..11b5e03524 100644 --- a/applications/argocd/Chart.yaml +++ b/applications/argocd/Chart.yaml @@ -8,5 +8,5 @@ sources: - https://github.com/argoproj/argo-helm dependencies: - name: argo-cd - version: 7.6.1 + version: 7.7.3 repository: https://argoproj.github.io/argo-helm diff --git a/applications/argocd/values-usdf-cm-dev.yaml b/applications/argocd/values-usdf-cm-dev.yaml index fd33f69cf3..cd292535bc 100644 --- a/applications/argocd/values-usdf-cm-dev.yaml +++ b/applications/argocd/values-usdf-cm-dev.yaml @@ -29,6 +29,7 @@ argo-cd: g, fritzm@slac.stanford.edu, role:admin g, cslater@slac.stanford.edu, role:admin g, yusra@slac.stanford.edu, role:admin + g, rra@slac.stanford.edu, role:admin scopes: "[email]" server: diff --git a/applications/argocd/values-usdfint.yaml b/applications/argocd/values-usdfint.yaml index 12ba88dd96..db73444c20 100644 --- a/applications/argocd/values-usdfint.yaml +++ b/applications/argocd/values-usdfint.yaml @@ -58,6 +58,7 @@ argo-cd: g, spothi@slac.stanford.edu, role:developer g, 
bbrond@slac.stanford.edu, role:developer g, vbecker@slac.stanford.edu, role:developer + g, saranda@slac.stanford.edu, role:developer scopes: "[email]" server: diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml index 9c5fdf1734..b39289de10 100644 --- a/applications/argocd/values-usdfprod.yaml +++ b/applications/argocd/values-usdfprod.yaml @@ -34,6 +34,7 @@ argo-cd: g, afausti@slac.stanford.edu, role:admin g, athor@slac.stanford.edu, role:admin + g, danfuchs@slac.stanford.edu, role:admin g, dspeck@slac.stanford.edu, role:admin g, frossie@slac.stanford.edu, role:admin g, jsick@slac.stanford.edu, role:admin @@ -54,6 +55,7 @@ argo-cd: g, spothi@slac.stanford.edu, role:developer g, bbrond@slac.stanford.edu, role:developer g, vbecker@slac.stanford.edu, role:developer + g, saranda@slac.stanford.edu, role:developer scopes: "[email]" server: diff --git a/applications/butler/Chart.yaml b/applications/butler/Chart.yaml index 9d3b40a094..9bae1440de 100644 --- a/applications/butler/Chart.yaml +++ b/applications/butler/Chart.yaml @@ -4,4 +4,4 @@ version: 1.0.0 description: Server for Butler data abstraction service sources: - https://github.com/lsst/daf_butler -appVersion: server-2.1.0 +appVersion: server-2.3.0 diff --git a/applications/butler/README.md b/applications/butler/README.md index a3d2d49811..0f3f968ca2 100644 --- a/applications/butler/README.md +++ b/applications/butler/README.md @@ -15,13 +15,13 @@ Server for Butler data abstraction service | autoscaling.maxReplicas | int | `100` | Maximum number of butler deployment pods | | autoscaling.minReplicas | int | `1` | Minimum number of butler deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of butler deployment pods | -| config.additionalS3ProfileName | string | No second S3 profile is available. | Profile name identifying a second S3 endpoint and set of credentials to use for accessing files in the datastore. 
| +| config.additionalS3EndpointUrls | object | No additional URLs | Endpoint URLs for additional S3 services used by the Butler, as a mapping from profile name to URL. | | config.dp02ClientServerIsDefault | bool | `false` | True if the 'dp02' Butler repository alias should use client/server Butler. False if it should use DirectButler. | | config.dp02PostgresUri | string | No configuration file for DP02 will be generated. | Postgres connection string pointing to the registry database hosting Data Preview 0.2 data. | | config.pathPrefix | string | `"/api/butler"` | The prefix of the path portion of the URL where the Butler service will be exposed. For example, if the service should be exposed at `https://data.lsst.cloud/api/butler`, this should be set to `/api/butler` | | config.pguser | string | Use values specified in per-repository Butler config files. | Postgres username used to connect to the Butler DB | | config.repositories | object | `{}` | Mapping from Butler repository label to Butler configuration URI for repositories which will be hosted by this server. | -| config.s3EndpointUrl | string | `""` | URL for the S3 service where files for datasets are stored by Butler. | +| config.s3EndpointUrl | string | `""` | URL for the primary S3 service where files for datasets are stored by Butler. | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | diff --git a/applications/butler/secrets.yaml b/applications/butler/secrets.yaml index 23ee59d217..1b2d88511e 100644 --- a/applications/butler/secrets.yaml +++ b/applications/butler/secrets.yaml @@ -18,9 +18,3 @@ copy: application: nublado key: "postgres-credentials.txt" -"additional-s3-profile": - description: >- - Credentials and endpoint for a second S3 profile to use, in addition to the - default endpoint. 
For docs on format see - https://github.com/lsst/resources/blob/a34598e125919799d3db4bd8a2363087c3de434e/python/lsst/resources/s3utils.py#L201 - if: config.additionalS3ProfileName diff --git a/applications/butler/templates/configmap.yaml b/applications/butler/templates/configmap.yaml index 8529fa2ba0..5bb161ba12 100644 --- a/applications/butler/templates/configmap.yaml +++ b/applications/butler/templates/configmap.yaml @@ -13,6 +13,12 @@ data: datastore_constraints: # One entry per datastore in datastores section # Use empty `-` if no constraint override required + - constraints: + reject: + - all + - constraints: + reject: + - all - constraints: reject: - all @@ -24,6 +30,22 @@ data: name: FileDatastore@s3://butler-us-central1-panda-dev/dc2 cls: lsst.daf.butler.datastores.fileDatastore.FileDatastore root: s3://butler-us-central1-panda-dev/dc2 + - datastore: + # Datasets of type 'raw' are stored in a separate bucket for + # historical reasons. + name: FileDatastore@s3://curation-us-central1-desc-dc2-run22i + cls: lsst.daf.butler.datastores.fileDatastore.FileDatastore + root: s3://curation-us-central1-desc-dc2-run22i/ + records: + table: raw_datastore_records + - datastore: + # Also for historical reasons, some files that originated in DP01 + # are kept in a separate bucket. 
+ name: FileDatastore@s3://butler-us-central1-dp01-desc-dr6 + cls: lsst.daf.butler.datastores.fileDatastore.FileDatastore + root: s3://butler-us-central1-dp01-desc-dr6/ + records: + table: dp01_datastore_records - datastore: name: FileDatastore@s3://butler-us-central1-dp02-user cls: lsst.daf.butler.datastores.fileDatastore.FileDatastore diff --git a/applications/butler/templates/deployment.yaml b/applications/butler/templates/deployment.yaml index c7e3f06b4c..ebbe277955 100644 --- a/applications/butler/templates/deployment.yaml +++ b/applications/butler/templates/deployment.yaml @@ -52,6 +52,10 @@ spec: value: "/opt/lsst/butler/secrets/butler-gcs-creds.json" - name: S3_ENDPOINT_URL value: {{ .Values.config.s3EndpointUrl | quote }} + {{- range $name, $url := .Values.config.additionalS3EndpointUrls }} + - name: LSST_RESOURCES_S3_PROFILE_{{ $name }} + value: {{ $url }} + {{ end }} - name: DAF_BUTLER_REPOSITORIES value: {{ .Values.config.repositories | toJson | quote }} # Serve the configuration files generated by configmap.yaml via @@ -65,13 +69,6 @@ spec: - name: PGUSER value: {{ .Values.config.pguser | quote }} {{ end }} - {{ if .Values.config.additionalS3ProfileName }} - - name: LSST_RESOURCES_S3_PROFILE_{{ .Values.config.additionalS3ProfileName }} - valueFrom: - secretKeyRef: - name: {{ include "butler.fullname" . 
}} - key: additional-s3-profile - {{ end }} volumeMounts: - name: "butler-secrets" mountPath: "/opt/lsst/butler/secrets" diff --git a/applications/butler/values-idfdev.yaml b/applications/butler/values-idfdev.yaml index 92cc0e6897..4b1a531e4a 100644 --- a/applications/butler/values-idfdev.yaml +++ b/applications/butler/values-idfdev.yaml @@ -5,7 +5,5 @@ config: dp02ClientServerIsDefault: true dp02PostgresUri: postgresql://postgres@sqlproxy-butler-int.sqlproxy-cross-project:5432/dp02 s3EndpointUrl: "https://storage.googleapis.com" - additionalS3ProfileName: "ir2" repositories: dp02: "file:///opt/lsst/butler/config/dp02.yaml" - ir2: "s3://butler-us-central1-panda-dev/ir2/butler-ir2.yaml" diff --git a/applications/butler/values-idfprod.yaml b/applications/butler/values-idfprod.yaml index 4e65f438d3..128891095e 100644 --- a/applications/butler/values-idfprod.yaml +++ b/applications/butler/values-idfprod.yaml @@ -1,4 +1,5 @@ config: + dp02ClientServerIsDefault: true dp02PostgresUri: postgresql://postgres@10.163.0.3/idfdp02 s3EndpointUrl: "https://storage.googleapis.com" repositories: diff --git a/applications/butler/values-usdfdev.yaml b/applications/butler/values-usdfdev.yaml index 68098547ab..8ce6a0660a 100644 --- a/applications/butler/values-usdfdev.yaml +++ b/applications/butler/values-usdfdev.yaml @@ -2,4 +2,6 @@ config: pguser: "rubin" s3EndpointUrl: "https://s3dfrgw.slac.stanford.edu" repositories: - embargo: "s3://rubin-summit-users/butler.yaml" + embargo: s3://embargo@rubin-summit-users/butler.yaml + additionalS3EndpointUrls: + embargo: "https://sdfembs3.sdf.slac.stanford.edu" diff --git a/applications/butler/values-usdfint.yaml b/applications/butler/values-usdfint.yaml index 68098547ab..8ce6a0660a 100644 --- a/applications/butler/values-usdfint.yaml +++ b/applications/butler/values-usdfint.yaml @@ -2,4 +2,6 @@ config: pguser: "rubin" s3EndpointUrl: "https://s3dfrgw.slac.stanford.edu" repositories: - embargo: "s3://rubin-summit-users/butler.yaml" + embargo: 
s3://embargo@rubin-summit-users/butler.yaml + additionalS3EndpointUrls: + embargo: "https://sdfembs3.sdf.slac.stanford.edu" diff --git a/applications/butler/values-usdfprod.yaml b/applications/butler/values-usdfprod.yaml index 68098547ab..8ce6a0660a 100644 --- a/applications/butler/values-usdfprod.yaml +++ b/applications/butler/values-usdfprod.yaml @@ -2,4 +2,6 @@ config: pguser: "rubin" s3EndpointUrl: "https://s3dfrgw.slac.stanford.edu" repositories: - embargo: "s3://rubin-summit-users/butler.yaml" + embargo: s3://embargo@rubin-summit-users/butler.yaml + additionalS3EndpointUrls: + embargo: "https://sdfembs3.sdf.slac.stanford.edu" diff --git a/applications/butler/values.yaml b/applications/butler/values.yaml index 51ec757201..3ea128cf97 100644 --- a/applications/butler/values.yaml +++ b/applications/butler/values.yaml @@ -93,13 +93,13 @@ config: # @default -- Use values specified in per-repository Butler config files. pguser: "" - # -- URL for the S3 service where files for datasets are stored by Butler. + # -- URL for the primary S3 service where files for datasets are stored by Butler. s3EndpointUrl: "" - # -- Profile name identifying a second S3 endpoint and set of credentials - # to use for accessing files in the datastore. - # @default -- No second S3 profile is available. - additionalS3ProfileName: "" + # -- Endpoint URLs for additional S3 services used by the Butler, as a + # mapping from profile name to URL. + # @default -- No additional URLs + additionalS3EndpointUrls: {} # -- The prefix of the path portion of the URL where the Butler service will # be exposed. 
For example, if the service should be exposed at diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml index 4961f9c5a3..a1e9214962 100644 --- a/applications/cert-manager/Chart.yaml +++ b/applications/cert-manager/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/cert-manager/cert-manager dependencies: - name: cert-manager - version: v1.15.3 + version: v1.16.1 repository: https://charts.jetstack.io diff --git a/applications/checkerboard/Chart.yaml b/applications/checkerboard/Chart.yaml index 5afe4f9433..827a971e39 100644 --- a/applications/checkerboard/Chart.yaml +++ b/applications/checkerboard/Chart.yaml @@ -8,5 +8,5 @@ appVersion: 0.4.5 dependencies: - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/checkerboard/README.md b/applications/checkerboard/README.md index 3a3a245f1f..95c9e31cc3 100644 --- a/applications/checkerboard/README.md +++ b/applications/checkerboard/README.md @@ -11,9 +11,10 @@ Identity mapping service | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the checkerboard frontend pod | -| config | object | `{"logLevel":"INFO","profile":"production"}` | Configuration for checkerboard server | +| config | object | See `values.yaml` | Configuration for checkerboard server | | config.logLevel | string | `"INFO"` | Choose from the text form of Python logging levels | | config.profile | string | `"production"` | application Safir profile ("production" or "development") | +| config.slackProfileField | string | `"GitHub username"` | name of Slack profile field for GitHub username (case-sensitive) | | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | diff --git 
a/applications/checkerboard/templates/deployment.yaml b/applications/checkerboard/templates/deployment.yaml index 113a430ef3..66a083a1fa 100644 --- a/applications/checkerboard/templates/deployment.yaml +++ b/applications/checkerboard/templates/deployment.yaml @@ -25,6 +25,8 @@ spec: containers: - name: "checkerboard" env: + - name: "CHECKERBOARD_PROFILE_FIELD" + value: {{ .Values.config.slackProfileField | quote }} - name: "CHECKERBOARD_REDIS_PASSWORD" valueFrom: secretKeyRef: diff --git a/applications/checkerboard/values-roundtable-dev.yaml b/applications/checkerboard/values-roundtable-dev.yaml index 4bea0d3028..96752b4003 100644 --- a/applications/checkerboard/values-roundtable-dev.yaml +++ b/applications/checkerboard/values-roundtable-dev.yaml @@ -3,5 +3,3 @@ redis: storageClass: "standard-rwo" config: logLevel: "DEBUG" -image: - pullPolicy: "Always" diff --git a/applications/checkerboard/values.yaml b/applications/checkerboard/values.yaml index 8488bfab11..844ab9c8b4 100644 --- a/applications/checkerboard/values.yaml +++ b/applications/checkerboard/values.yaml @@ -101,6 +101,7 @@ redis: # -- Configuration for checkerboard server +# @default -- See `values.yaml` config: # -- Choose from the text form of Python logging levels logLevel: "INFO" @@ -108,6 +109,9 @@ config: # -- application Safir profile ("production" or "development") profile: "production" + # -- name of Slack profile field for GitHub username (case-sensitive) + slackProfileField: "GitHub username" + global: # -- Base URL for the environment # @default -- Set by Argo CD diff --git a/applications/consdb/Chart.yaml b/applications/consdb/Chart.yaml index 5bc9793fc7..a6202030c4 100644 --- a/applications/consdb/Chart.yaml +++ b/applications/consdb/Chart.yaml @@ -4,7 +4,7 @@ version: 1.0.0 description: Consolidated Database of Image Metadata type: application appVersion: 1.1.0 -home: consdb.lsst.io +home: https://consdb.lsst.io/ sources: - https://github.com/lsst-dm/consdb annotations: diff --git 
a/applications/consdb/README.md b/applications/consdb/README.md index 47eaf0d7aa..6a4a392fad 100644 --- a/applications/consdb/README.md +++ b/applications/consdb/README.md @@ -2,7 +2,7 @@ Consolidated Database of Image Metadata -**Homepage:** +**Homepage:** ## Source Code diff --git a/applications/consdb/secrets-base.yaml b/applications/consdb/secrets-base.yaml new file mode 100644 index 0000000000..2695612b18 --- /dev/null +++ b/applications/consdb/secrets-base.yaml @@ -0,0 +1,12 @@ +lfa-password: + description: >- + LFA password, used for retrieving Header Service objects. + copy: + application: auxtel + key: aws-secret-access-key +lfa-key: + description: >- + LFA key, used for retrieving Header Service objects. + copy: + application: auxtel + key: aws-access-key-id diff --git a/applications/consdb/secrets-summit.yaml b/applications/consdb/secrets-summit.yaml new file mode 100644 index 0000000000..f7baafb1ec --- /dev/null +++ b/applications/consdb/secrets-summit.yaml @@ -0,0 +1,6 @@ +lfa-password: + description: >- + LFA password, used for retrieving Header Service objects. +lfa-key: + description: >- + LFA key, used for retrieving Header Service objects. diff --git a/applications/consdb/secrets-tucson-teststand.yaml b/applications/consdb/secrets-tucson-teststand.yaml new file mode 100644 index 0000000000..f7baafb1ec --- /dev/null +++ b/applications/consdb/secrets-tucson-teststand.yaml @@ -0,0 +1,6 @@ +lfa-password: + description: >- + LFA password, used for retrieving Header Service objects. +lfa-key: + description: >- + LFA key, used for retrieving Header Service objects. diff --git a/applications/consdb/secrets.yaml b/applications/consdb/secrets.yaml index 99a8f6ba13..f8c76e5445 100644 --- a/applications/consdb/secrets.yaml +++ b/applications/consdb/secrets.yaml @@ -1,17 +1,15 @@ consdb-password: description: >- - Kafka password for consdb user + Kafka password for consdb user, used to get EFD data. 
copy: application: sasquatch key: consdb-password -oods-password: - description: >- - PostgreSQL password for the OODS user Butler database. -lfa-password: - description: >- - LFA password exposurelog-password: - description: "Password for the TTS where we use exposurelog database." + description: >- + PostgreSQL password for the exposurelog user exposurelog database, used to write to ConsDB schemas. copy: - application: exposure-log + application: exposurelog key: exposurelog_password +oods-password: + description: >- + PostgreSQL password for the OODS user in the Butler database. diff --git a/applications/consdb/templates/hinfo-deployment.yaml b/applications/consdb/templates/hinfo-deployment.yaml index a47a2327f3..aeb9495e36 100644 --- a/applications/consdb/templates/hinfo-deployment.yaml +++ b/applications/consdb/templates/hinfo-deployment.yaml @@ -47,7 +47,7 @@ spec: valueFrom: secretKeyRef: name: consdb - key: "oods-password" + key: "{{ .Values.db.passwordkey }}" - name: "DB_USER" value: "{{ .Values.db.user }}" - name: "DB_NAME" @@ -73,12 +73,22 @@ spec: - name: "KAFKA_PASSWORD" valueFrom: secretKeyRef: - name: sasquatch + name: consdb key: "consdb-password" - name: "KAFKA_GROUP_ID" - value: "{{ .Values.kafka.group_id }}" + value: "{{ .Values.kafka.group_id }}-latiss" - name: "SCHEMA_URL" value: "{{ .Values.kafka.schema_url }}" + volumeMounts: + - name: "tmp" + mountPath: "/tmp" + - name: "astropy" + mountPath: "/home/lsst/.astropy" + volumes: + - name: "tmp" + emptyDir: {} + - name: "astropy" + emptyDir: {} securityContext: runAsNonRoot: true runAsUser: 1000 @@ -145,7 +155,7 @@ spec: valueFrom: secretKeyRef: name: consdb - key: "oods-password" + key: "{{ .Values.db.passwordkey }}" - name: "DB_USER" value: "{{ .Values.db.user }}" - name: "DB_NAME" @@ -171,12 +181,22 @@ spec: - name: "KAFKA_PASSWORD" valueFrom: secretKeyRef: - name: sasquatch + name: consdb key: "consdb-password" - name: "KAFKA_GROUP_ID" - value: "{{ .Values.kafka.group_id }}" + value: "{{ 
.Values.kafka.group_id }}-lsstcomcam" - name: "SCHEMA_URL" value: "{{ .Values.kafka.schema_url }}" + volumeMounts: + - name: "tmp" + mountPath: "/tmp" + - name: "astropy" + mountPath: "/home/lsst/.astropy" + volumes: + - name: "tmp" + emptyDir: {} + - name: "astropy" + emptyDir: {} securityContext: runAsNonRoot: true runAsUser: 1000 @@ -243,7 +263,7 @@ spec: valueFrom: secretKeyRef: name: consdb - key: "oods-password" + key: "{{ .Values.db.passwordkey }}" - name: "DB_USER" value: "{{ .Values.db.user }}" - name: "DB_NAME" @@ -269,12 +289,22 @@ spec: - name: "KAFKA_PASSWORD" valueFrom: secretKeyRef: - name: sasquatch + name: consdb key: "consdb-password" - name: "KAFKA_GROUP_ID" - value: "{{ .Values.kafka.group_id }}" + value: "{{ .Values.kafka.group_id }}-lsstcam" - name: "SCHEMA_URL" value: "{{ .Values.kafka.schema_url }}" + volumeMounts: + - name: "tmp" + mountPath: "/tmp" + - name: "astropy" + mountPath: "/home/lsst/.astropy" + volumes: + - name: "tmp" + emptyDir: {} + - name: "astropy" + emptyDir: {} securityContext: runAsNonRoot: true runAsUser: 1000 diff --git a/applications/consdb/templates/pq-deployment.yaml b/applications/consdb/templates/pq-deployment.yaml index 23ebb7104c..1a4c25e9c6 100644 --- a/applications/consdb/templates/pq-deployment.yaml +++ b/applications/consdb/templates/pq-deployment.yaml @@ -49,7 +49,7 @@ spec: valueFrom: secretKeyRef: name: consdb - key: "oods-password" + key: "{{ .Values.db.passwordkey }}" - name: "DB_USER" value: "{{ .Values.db.user }}" - name: "DB_NAME" diff --git a/applications/consdb/templates/vault-secrets.yaml b/applications/consdb/templates/vault-secrets.yaml index f45a4a8a59..a8f56439e3 100644 --- a/applications/consdb/templates/vault-secrets.yaml +++ b/applications/consdb/templates/vault-secrets.yaml @@ -4,16 +4,7 @@ metadata: name: consdb namespace: consdb spec: - path: {{ .Values.global.vaultSecretsPath }}/consdb - type: Opaque ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: sasquatch 
- namespace: consdb -spec: - path: {{ .Values.global.vaultSecretsPath }}/sasquatch + path: "{{ .Values.global.vaultSecretsPath }}/consdb" type: Opaque --- apiVersion: ricoberger.de/v1alpha1 diff --git a/applications/consdb/values-base.yaml b/applications/consdb/values-base.yaml index d7e208c1b6..ff9e972974 100644 --- a/applications/consdb/values-base.yaml +++ b/applications/consdb/values-base.yaml @@ -1,5 +1,6 @@ db: user: "oods" + passwordkey: "oods-password" host: "postgresdb01.ls.lsst.org" database: "butler" lfa: diff --git a/applications/consdb/values-summit.yaml b/applications/consdb/values-summit.yaml index 2c17d584a7..438f1fbb22 100644 --- a/applications/consdb/values-summit.yaml +++ b/applications/consdb/values-summit.yaml @@ -1,5 +1,6 @@ db: user: "oods" + passwordkey: "oods-password" host: "postgresdb01.cp.lsst.org" database: "exposurelog" lfa: @@ -7,15 +8,15 @@ lfa: hinfo: latiss: enable: true - tag: "tickets-DM-44551" + tag: "24.10.3" logConfig: "consdb.hinfo=DEBUG" lsstcomcam: enable: true - tag: "tickets-DM-44551" + tag: "24.10.3" logConfig: "consdb.hinfo=DEBUG" lsstcam: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" pq: image: - tag: "main" + tag: "24.10.3" diff --git a/applications/consdb/values-tucson-teststand.yaml b/applications/consdb/values-tucson-teststand.yaml index 21997de89d..956b148fce 100644 --- a/applications/consdb/values-tucson-teststand.yaml +++ b/applications/consdb/values-tucson-teststand.yaml @@ -1,5 +1,6 @@ db: - user: "oods" + user: "exposurelog" + passwordkey: "exposurelog-password" host: "postgresdb01.tu.lsst.org" database: "exposurelog" lfa: @@ -7,15 +8,24 @@ lfa: hinfo: latiss: enable: true - tag: "tickets-DM-44551" + tag: "24.10.3" logConfig: "consdb.hinfo=DEBUG" lsstcomcam: enable: true - tag: "tickets-DM-44551" + tag: "24.10.3" logConfig: "consdb.hinfo=DEBUG" lsstcam: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" + pq: image: - tag: "main" + tag: "24.10.3" + +resources: + requests: + cpu: 200m + 
memory: 20Gi + limits: + cpu: 500m + memory: 20Gi diff --git a/applications/consdb/values-usdfdev.yaml b/applications/consdb/values-usdfdev.yaml index 9ae4a6a14f..203e10f75b 100644 --- a/applications/consdb/values-usdfdev.yaml +++ b/applications/consdb/values-usdfdev.yaml @@ -1,17 +1,18 @@ db: user: "usdf" + passwordkey: "oods-password" host: "usdf-summitdb.slac.stanford.edu" database: "exposurelog" hinfo: latiss: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" lsstcomcam: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" lsstcam: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" pq: image: - tag: "main" + tag: "24.10.3" diff --git a/applications/consdb/values-usdfprod.yaml b/applications/consdb/values-usdfprod.yaml index 9ae4a6a14f..203e10f75b 100644 --- a/applications/consdb/values-usdfprod.yaml +++ b/applications/consdb/values-usdfprod.yaml @@ -1,17 +1,18 @@ db: user: "usdf" + passwordkey: "oods-password" host: "usdf-summitdb.slac.stanford.edu" database: "exposurelog" hinfo: latiss: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" lsstcomcam: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" lsstcam: enable: false - tag: "tickets-DM-44551" + tag: "24.10.3" pq: image: - tag: "main" + tag: "24.10.3" diff --git a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml index f6b91b7fac..7158c26804 100644 --- a/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/imaging-workflow.yaml @@ -14,6 +14,8 @@ spec: - name: testreports persistentVolumeClaim: claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: pull-secret podMetadata: labels: argocd.argoproj.io/instance: {{ .Values.jobLabelName }} @@ -41,6 +43,8 @@ spec: value: "-A Test_Report_AuxTel_Housekeeping.list" 
- name: jobname value: auxtel-housekeeping + - name: reportname + value: athousekeeping.xml - name: maintel-housekeeping depends: call-cleanup-reports templateRef: @@ -52,6 +56,8 @@ spec: value: "-A Test_Report_MainTel_Housekeeping.list" - name: jobname value: maintel-housekeeping + - name: reportname + value: mthousekeeping.xml - name: auxtel-image-verification depends: auxtel-housekeeping templateRef: @@ -63,6 +69,8 @@ spec: value: "-A Test_Report_AuxTel_Image_Verification.list" - name: jobname value: auxtel-image-verification + - name: reportname + value: at_image_verify.xml - name: auxtel-latiss-daytime-checkout depends: auxtel-image-verification templateRef: @@ -74,6 +82,8 @@ spec: value: "-A Test_Report_AuxTel_LATISS_Checkout.list" - name: jobname value: auxtel-latiss-daytime-checkout + - name: reportname + value: at_latiss_checkout.xml - name: auxtel-telescope-dome-daytime-checkout depends: auxtel-latiss-daytime-checkout templateRef: @@ -85,6 +95,8 @@ spec: value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" - name: jobname value: auxtel-telescope-dome-daytime-checkout + - name: reportname + value: at_tel_dome_checkout.xml - name: auxtel-telescope-slew-take-image-daytime-checkout depends: auxtel-telescope-dome-daytime-checkout templateRef: @@ -96,6 +108,8 @@ spec: value: "-A Test_Report_AuxTel_Slew_and_Take_Image_Checkout.list" - name: jobname value: auxtel-telescope-slew-take-image-daytime-checkout + - name: reportname + value: at_slew_take_image_checkout.xml - name: auxtel-prep-flat depends: auxtel-telescope-slew-take-image-daytime-checkout templateRef: @@ -107,6 +121,8 @@ spec: value: "-A Test_Report_AuxTel_Prep_Flat.list" - name: jobname value: auxtel-prep-flat + - name: reportname + value: at_prep_flat.xml - name: auxtel-flat-calibrations depends: auxtel-prep-flat templateRef: @@ -118,6 +134,8 @@ spec: value: "-A Test_Report_AuxTel_Flat_Calibrations.list" - name: jobname value: auxtel-flat-calibrations + - name: reportname + value: 
at_flat_calib.xml - name: auxtel-ptc-calibrations depends: auxtel-flat-calibrations templateRef: @@ -129,6 +147,8 @@ spec: value: "-A Test_Report_AuxTel_PTC_Calibrations.list" - name: jobname value: auxtel-ptc-calibrations + - name: reportname + value: at_ptc_calib.xml - name: auxtel-prep-onsky depends: auxtel-ptc-calibrations templateRef: @@ -140,6 +160,8 @@ spec: value: "-A Test_Report_AuxTel_Prep_Onsky.list" - name: jobname value: auxtel-prep-onsky + - name: reportname + value: at_prep_onsky.xml - name: auxtel-wep-align depends: auxtel-prep-onsky templateRef: @@ -151,6 +173,8 @@ spec: value: "-A Test_Report_AuxTel_WEP_Align.list" - name: jobname value: auxtel-wep-align + - name: reportname + value: at_wep_align.xml - name: auxtel-acq-take-seq-pointing depends: auxtel-wep-align templateRef: @@ -162,6 +186,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_and_Take_Seq_POINTING.list" - name: jobname value: auxtel-acq-take-seq-pointing + - name: reportname + value: at_acq_take_seq_pointing.xml - name: auxtel-acq-take-seq-verify depends: auxtel-acq-take-seq-pointing templateRef: @@ -173,6 +199,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_Take_Seq_VERIFY.list" - name: jobname value: auxtel-acq-take-seq-verify + - name: reportname + value: at_acq_take_seq_verify.xml - name: auxtel-acq-take-seq-test depends: auxtel-acq-take-seq-verify templateRef: @@ -184,6 +212,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_Take_Seq_TEST.list" - name: jobname value: auxtel-acq-take-seq-test + - name: reportname + value: at_acq_take_seq_test.xml - name: auxtel-acq-take-seq-nominal depends: auxtel-acq-take-seq-test templateRef: @@ -195,6 +225,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_Take_Seq_NOMINAL.list" - name: jobname value: auxtel-acq-take-seq-nominal + - name: reportname + value: at_acq_take_seq_nominal.xml - name: auxtel-stop depends: auxtel-acq-take-seq-nominal templateRef: @@ -206,6 +238,8 @@ spec: value: "-A Test_Report_AuxTel_Stop.list" - name: jobname value: auxtel-stop + - name: 
reportname + value: at_stop.xml - name: auxtel-shutdown depends: auxtel-stop templateRef: @@ -217,6 +251,8 @@ spec: value: "-A Test_Report_AuxTel_Shutdown.list" - name: jobname value: auxtel-shutdown + - name: reportname + value: at_shutdown.xml - name: enable-atcs depends: auxtel-shutdown templateRef: @@ -228,6 +264,8 @@ spec: value: "-A Test_Report_Enable_ATCS.list" - name: jobname value: enable-atcs + - name: reportname + value: enable_atcs.xml - name: bigcam-image-verification depends: maintel-housekeeping templateRef: @@ -239,6 +277,8 @@ spec: value: "-A Test_Report_BigCamera_Image_Verification.list" - name: jobname value: bigcam-image-verification + - name: reportname + value: bigcam_image_verify.xml - name: bigcam-calibrations depends: bigcam-image-verification templateRef: @@ -250,8 +290,5 @@ spec: value: "-A Test_Report_BigCamera_Calibrations.list" - name: jobname value: bigcam-calibrations - - name: call-save-reports - depends: bigcam-calibrations && enable-atcs - templateRef: - name: save-reports-workflow - template: save-reports + - name: reportname + value: bigcam_calib.xml diff --git a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml index 7300085d61..3f02750a3a 100644 --- a/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/job-workflow-template.yaml @@ -60,6 +60,13 @@ spec: secretKeyRef: name: control-system-test key: ts-salkafka-password + resources: + limits: + cpu: 4 + memory: 4Gi + requests: + cpu: 1 + memory: 1Gi volumeMounts: - name: testreports mountPath: {{ .Values.reportLocation }} diff --git a/applications/control-system-test/charts/integration-testing/templates/love-stress-test-workflow.yaml 
b/applications/control-system-test/charts/integration-testing/templates/love-stress-test-workflow.yaml index c67033f844..219df45e9d 100644 --- a/applications/control-system-test/charts/integration-testing/templates/love-stress-test-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/love-stress-test-workflow.yaml @@ -14,6 +14,8 @@ spec: - name: testreports persistentVolumeClaim: claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: pull-secret podMetadata: labels: argocd.argoproj.io/instance: {{ .Values.jobLabelName }} @@ -41,8 +43,5 @@ spec: value: "-A Test_Report_LOVE_Stress_Test.list" - name: jobname value: love-stress-test - - name: call-save-reports - depends: love-stress-test - templateRef: - name: save-reports-workflow - template: save-reports + - name: reportname + value: love_stress_test.xml diff --git a/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml index 61137b5a1d..8fe44bbd4e 100644 --- a/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/restart-workflow.yaml @@ -14,6 +14,8 @@ spec: - name: testreports persistentVolumeClaim: claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: pull-secret podMetadata: labels: argocd.argoproj.io/instance: {{ .Values.jobLabelName }} @@ -41,6 +43,8 @@ spec: value: "-A Test_Report_Offline.list" - name: jobname value: cameras-offline + - name: reportname + value: offline.xml - name: standby depends: cameras-offline templateRef: @@ -52,6 +56,8 @@ spec: value: "-A Test_Report_Standby.list" - name: jobname value: standby + - name: reportname + value: standby.xml - name: disabled depends: standby templateRef: @@ -63,6 +69,8 @@ spec: value: "-A Test_Report_Disabled.list" - name: 
jobname value: disabled + - name: reportname + value: disabled.xml - name: enabled depends: disabled templateRef: @@ -74,6 +82,8 @@ spec: value: "-A Test_Report_Enabled.list" - name: jobname value: enabled + - name: reportname + value: enabled.xml - name: auxtel-housekeeping depends: enabled templateRef: @@ -85,6 +95,8 @@ spec: value: "-A Test_Report_AuxTel_Housekeeping.list" - name: jobname value: auxtel-housekeeping + - name: reportname + value: athousekeeping.xml - name: maintel-housekeeping depends: enabled templateRef: @@ -96,6 +108,8 @@ spec: value: "-A Test_Report_MainTel_Housekeeping.list" - name: jobname value: maintel-housekeeping + - name: reportname + value: mthousekeeping.xml - name: auxtel-image-verification depends: auxtel-housekeeping templateRef: @@ -107,6 +121,8 @@ spec: value: "-A Test_Report_AuxTel_Image_Verification.list" - name: jobname value: auxtel-image-verification + - name: reportname + value: at_image_verify.xml - name: bigcam-image-verification depends: maintel-housekeeping templateRef: @@ -118,6 +134,8 @@ spec: value: "-A Test_Report_BigCamera_Image_Verification.list" - name: jobname value: bigcam-image-verification + - name: reportname + value: bigcam_image_verify.xml - name: love-stress-test depends: auxtel-image-verification && bigcam-image-verification templateRef: @@ -129,8 +147,5 @@ spec: value: "-A Test_Report_LOVE_Stress_Test.list" - name: jobname value: love-stress-test - - name: call-save-reports - depends: love-stress-test - templateRef: - name: save-reports-workflow - template: save-reports + - name: reportname + value: love_stress_test.xml diff --git a/applications/control-system-test/charts/integration-testing/templates/sensor-csc-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/sensor-csc-workflow.yaml new file mode 100644 index 0000000000..44798c9998 --- /dev/null +++ b/applications/control-system-test/charts/integration-testing/templates/sensor-csc-workflow.yaml @@ -0,0 +1,47 @@ 
+apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: sensor-cscs-workflow + labels: + workflows.argoproj.io/type: "integration-test" + argocd.argoproj.io/instance: integration-testing +spec: + serviceAccountName: {{ .Values.serviceAccount }} + artifactRepositoryRef: + configMap: integration-test-controller-configmap + key: artifactRepository + volumes: + - name: testreports + persistentVolumeClaim: + claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: pull-secret + podMetadata: + labels: + argocd.argoproj.io/instance: integration-testing + arguments: + parameters: + - name: date-key + value: "20240725" + entrypoint: run-tests + templates: + - name: run-tests + dag: + tasks: + - name: call-cleanup-reports + templateRef: + name: cleanup-reports-workflow + template: cleanup-reports + - name: sensor-cscs + depends: call-cleanup-reports + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_SensorCSCs.list" + - name: jobname + value: sensor-cscs + - name: reportname + value: sensor_cscs.xml diff --git a/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml index f300ca5feb..5ae3e86256 100644 --- a/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/shutdown-workflow.yaml @@ -14,6 +14,8 @@ spec: - name: testreports persistentVolumeClaim: claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: pull-secret podMetadata: labels: argocd.argoproj.io/instance: {{ .Values.jobLabelName }} @@ -41,8 +43,5 @@ spec: value: "-A Test_Report_Shutdown.list" - name: jobname value: shutdown - - name: call-save-reports - depends: shutdown - templateRef: - name: 
save-reports-workflow - template: save-reports + - name: reportname + value: shutdown.xml diff --git a/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml index aeb703d910..149873d03e 100644 --- a/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/simple-workflow.yaml @@ -31,15 +31,15 @@ spec: templateRef: name: cleanup-reports-workflow template: cleanup-reports - - - name: standby + - - name: offline templateRef: name: integration-test-job-template template: inttest-template arguments: parameters: - name: integrationtest - value: "-A Test_Report_Standby.list" + value: "-A Test_Report_Offline.list" - name: jobname - value: simple-standby + value: simple-offline - name: reportname - value: standby.xml + value: offline.xml diff --git a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml index e59c9db73c..a1e0ca2323 100644 --- a/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml +++ b/applications/control-system-test/charts/integration-testing/templates/testing-workflow.yaml @@ -14,6 +14,8 @@ spec: - name: testreports persistentVolumeClaim: claimName: {{ .Values.persistentVolume.claimName }} + imagePullSecrets: + - name: pull-secret podMetadata: labels: argocd.argoproj.io/instance: {{ .Values.jobLabelName }} @@ -41,6 +43,8 @@ spec: value: "-A Test_Report_Offline.list" - name: jobname value: cameras-offline + - name: reportname + value: offline.xml - name: standby depends: cameras-offline templateRef: @@ -52,6 +56,8 @@ spec: value: "-A Test_Report_Standby.list" - name: jobname value: standby + - name: reportname + value: standby.xml - name: disabled 
depends: standby templateRef: @@ -63,6 +69,8 @@ spec: value: "-A Test_Report_Disabled.list" - name: jobname value: disabled + - name: reportname + value: disabled.xml - name: enabled depends: disabled templateRef: @@ -74,6 +82,21 @@ spec: value: "-A Test_Report_Enabled.list" - name: jobname value: enabled + - name: reportname + value: enabled.xml + - name: sensor-cscs + depends: enabled + templateRef: + name: integration-test-job-template + template: inttest-template + arguments: + parameters: + - name: integrationtest + value: "-A Test_Report_SensorCSCs.list" + - name: jobname + value: sensor-cscs + - name: reportname + value: sensor_cscs.xml - name: auxtel-housekeeping depends: enabled templateRef: @@ -85,6 +108,8 @@ spec: value: "-A Test_Report_AuxTel_Housekeeping.list" - name: jobname value: auxtel-housekeeping + - name: reportname + value: athousekeeping.xml - name: maintel-housekeeping depends: enabled templateRef: @@ -96,6 +121,8 @@ spec: value: "-A Test_Report_MainTel_Housekeeping.list" - name: jobname value: maintel-housekeeping + - name: reportname + value: mthousekeeping.xml - name: auxtel-image-verification depends: auxtel-housekeeping templateRef: @@ -107,6 +134,8 @@ spec: value: "-A Test_Report_AuxTel_Image_Verification.list" - name: jobname value: auxtel-image-verification + - name: reportname + value: at_image_verify.xml - name: auxtel-latiss-daytime-checkout depends: auxtel-image-verification templateRef: @@ -118,6 +147,8 @@ spec: value: "-A Test_Report_AuxTel_LATISS_Checkout.list" - name: jobname value: auxtel-latiss-daytime-checkout + - name: reportname + value: at_latiss_checkout.xml - name: auxtel-telescope-dome-daytime-checkout depends: auxtel-latiss-daytime-checkout templateRef: @@ -129,6 +160,8 @@ spec: value: "-A Test_Report_AuxTel_Telescope_Dome_Checkout.list" - name: jobname value: auxtel-telescope-dome-daytime-checkout + - name: reportname + value: at_tel_dome_checkout.xml - name: auxtel-telescope-slew-take-image-daytime-checkout 
depends: auxtel-telescope-dome-daytime-checkout templateRef: @@ -140,6 +173,8 @@ spec: value: "-A Test_Report_AuxTel_Slew_and_Take_Image_Checkout.list" - name: jobname value: auxtel-telescope-slew-take-image-daytime-checkout + - name: reportname + value: at_slew_take_image_checkout.xml - name: auxtel-prep-flat depends: auxtel-telescope-slew-take-image-daytime-checkout templateRef: @@ -151,6 +186,8 @@ spec: value: "-A Test_Report_AuxTel_Prep_Flat.list" - name: jobname value: auxtel-prep-flat + - name: reportname + value: at_prep_flat.xml - name: auxtel-flat-calibrations depends: auxtel-prep-flat templateRef: @@ -162,6 +199,8 @@ spec: value: "-A Test_Report_AuxTel_Flat_Calibrations.list" - name: jobname value: auxtel-flat-calibrations + - name: reportname + value: at_flat_calib.xml - name: auxtel-ptc-calibrations depends: auxtel-flat-calibrations templateRef: @@ -173,6 +212,8 @@ spec: value: "-A Test_Report_AuxTel_PTC_Calibrations.list" - name: jobname value: auxtel-ptc-calibrations + - name: reportname + value: at_ptc_calib.xml - name: auxtel-prep-onsky depends: auxtel-ptc-calibrations templateRef: @@ -184,6 +225,8 @@ spec: value: "-A Test_Report_AuxTel_Prep_Onsky.list" - name: jobname value: auxtel-prep-onsky + - name: reportname + value: at_prep_onsky.xml - name: auxtel-wep-align depends: auxtel-prep-onsky templateRef: @@ -195,6 +238,8 @@ spec: value: "-A Test_Report_AuxTel_WEP_Align.list" - name: jobname value: auxtel-wep-align + - name: reportname + value: at_wep_align.xml - name: auxtel-acq-take-seq-pointing depends: auxtel-wep-align templateRef: @@ -206,6 +251,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_and_Take_Seq_POINTING.list" - name: jobname value: auxtel-acq-take-seq-pointing + - name: reportname + value: at_acq_take_seq_pointing.xml - name: auxtel-acq-take-seq-verify depends: auxtel-acq-take-seq-pointing templateRef: @@ -217,6 +264,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_Take_Seq_VERIFY.list" - name: jobname value: auxtel-acq-take-seq-verify + - 
name: reportname + value: at_acq_take_seq_verify.xml - name: auxtel-acq-take-seq-test depends: auxtel-acq-take-seq-verify templateRef: @@ -228,6 +277,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_Take_Seq_TEST.list" - name: jobname value: auxtel-acq-take-seq-test + - name: reportname + value: at_acq_take_seq_test.xml - name: auxtel-acq-take-seq-nominal depends: auxtel-acq-take-seq-test templateRef: @@ -239,6 +290,8 @@ spec: value: "-A Test_Report_AuxTel_Acq_Take_Seq_NOMINAL.list" - name: jobname value: auxtel-acq-take-seq-nominal + - name: reportname + value: at_acq_take_seq_nominal.xml - name: auxtel-stop depends: auxtel-acq-take-seq-nominal templateRef: @@ -250,6 +303,8 @@ spec: value: "-A Test_Report_AuxTel_Stop.list" - name: jobname value: auxtel-stop + - name: reportname + value: at_stop.xml - name: auxtel-shutdown depends: auxtel-stop templateRef: @@ -261,6 +316,8 @@ spec: value: "-A Test_Report_AuxTel_Shutdown.list" - name: jobname value: auxtel-shutdown + - name: reportname + value: at_shutdown.xml - name: enable-atcs depends: auxtel-shutdown templateRef: @@ -272,6 +329,8 @@ spec: value: "-A Test_Report_Enable_ATCS.list" - name: jobname value: enable-atcs + - name: reportname + value: enable_atcs.xml - name: bigcam-image-verification depends: maintel-housekeeping templateRef: @@ -283,6 +342,8 @@ spec: value: "-A Test_Report_BigCamera_Image_Verification.list" - name: jobname value: bigcam-image-verification + - name: reportname + value: bigcam_image_verify.xml - name: bigcam-calibrations depends: bigcam-image-verification templateRef: @@ -294,8 +355,10 @@ spec: value: "-A Test_Report_BigCamera_Calibrations.list" - name: jobname value: bigcam-calibrations + - name: reportname + value: bigcam_calib.xml - name: love-stress-test - depends: bigcam-calibrations && enable-atcs + depends: bigcam-calibrations && enable-atcs && sensor-cscs templateRef: name: integration-test-job-template template: inttest-template @@ -305,6 +368,8 @@ spec: value: "-A 
Test_Report_LOVE_Stress_Test.list" - name: jobname value: love-stress-test + - name: reportname + value: love_stress_test.xml - name: shutdown depends: love-stress-test templateRef: @@ -316,8 +381,5 @@ spec: value: "-A Test_Report_Shutdown.list" - name: jobname value: shutdown - - name: call-save-reports - depends: shutdown - templateRef: - name: save-reports-workflow - template: save-reports + - name: reportname + value: shutdown.xml diff --git a/applications/exposurelog/README.md b/applications/exposurelog/README.md index 927c35f2f7..d9037d6912 100644 --- a/applications/exposurelog/README.md +++ b/applications/exposurelog/README.md @@ -40,7 +40,7 @@ Log messages related to an exposure | image.pullPolicy | string | `"Always"` | Pull policy for the exposurelog image | | image.repository | string | `"lsstsqre/exposurelog"` | exposurelog image to use | | image.tag | string | The appVersion of the chart | Tag of exposure image to use | -| ingress.gafaelfawrAuthQuery | string | `""` | Gafaelfawr auth query string | +| ingress.auth.enabled | bool | `false` | Whether to require Gafaelfawr authentication for access | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the exposurelog pod | | podAnnotations | object | `{}` | Annotations for the exposurelog pod | diff --git a/applications/exposurelog/secrets-usdfprod.yaml b/applications/exposurelog/secrets-usdfprod.yaml new file mode 100644 index 0000000000..317e9c5aab --- /dev/null +++ b/applications/exposurelog/secrets-usdfprod.yaml @@ -0,0 +1,12 @@ +"aws-credentials.ini": + description: >- + S3 Butler credentials in AWS format. + copy: + application: nublado + key: "aws-credentials.ini" +"postgres-credentials.txt": + description: >- + PostgreSQL credentials in its pgpass format for the Butler database. 
+ copy: + application: nublado + key: "postgres-credentials.txt" diff --git a/applications/exposurelog/templates/ingress.yaml b/applications/exposurelog/templates/ingress.yaml index aa26a054db..c5eba0a88a 100644 --- a/applications/exposurelog/templates/ingress.yaml +++ b/applications/exposurelog/templates/ingress.yaml @@ -1,30 +1,32 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress metadata: name: {{ template "exposurelog.fullname" . }} labels: {{- include "exposurelog.labels" . | nindent 4 }} - annotations: - {{- if .Values.ingress.gafaelfawrAuthQuery }} - nginx.ingress.kubernetes.io/auth-method: "GET" - nginx.ingress.kubernetes.io/auth-response-headers: "X-Auth-Request-User,X-Auth-Request-Email,X-Auth-Request-Token" - nginx.ingress.kubernetes.io/auth-signin: "{{ .Values.global.baseUrl }}/login" - nginx.ingress.kubernetes.io/auth-url: "{{ .Values.global.baseUrl }}/auth?{{ .Values.ingress.gafaelfawrAuthQuery }}" - {{- end }} - {{- with .Values.ingress.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - ingressClassName: "nginx" - rules: - - host: {{ required "global.host must be set" .Values.global.host | quote }} - http: - paths: - - path: "/exposurelog" - pathType: "Prefix" - backend: - service: - name: {{ include "exposurelog.fullname" . }} - port: - number: 8080 - +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + {{- if .Values.ingress.auth.enabled }} + loginRedirect: true + scopes: + all: + - "exec:internal-tools" + {{- else }} + scopes: + anonymous: true + {{- end }} +template: + metadata: + name: {{ template "exposurelog.fullname" . }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: "/exposurelog" + pathType: "Prefix" + backend: + service: + name: {{ include "exposurelog.fullname" . 
}} + port: + number: 8080 diff --git a/applications/exposurelog/values-usdfdev.yaml b/applications/exposurelog/values-usdfdev.yaml index e914a0e17f..c2ad32a60a 100644 --- a/applications/exposurelog/values-usdfdev.yaml +++ b/applications/exposurelog/values-usdfdev.yaml @@ -10,7 +10,7 @@ env: - name: DAF_BUTLER_REPOSITORY_INDEX value: "/project/data-repos.yaml" - name: S3_ENDPOINT_URL - value: "https://s3dfrgw.slac.stanford.edu" + value: "https://sdfembs3.sdf.slac.stanford.edu" - name: PGPASSFILE value: "/var/secrets/butler/postgres-credentials.txt" - name: PGUSER diff --git a/applications/exposurelog/values-usdfprod.yaml b/applications/exposurelog/values-usdfprod.yaml index 8f4f585d48..a2edbfd410 100644 --- a/applications/exposurelog/values-usdfprod.yaml +++ b/applications/exposurelog/values-usdfprod.yaml @@ -1,6 +1,17 @@ config: site_id: usdfprod - butler_uri_1: s3://rubin-summit-users/butler.yaml + butler_uri_1: s3://embargo@rubin-summit-users/butler.yaml db: host: usdf-summitdb.slac.stanford.edu user: usdf +env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: "/var/secrets/butler/aws-credentials.ini" + - name: DAF_BUTLER_REPOSITORY_INDEX + value: "/project/data-repos.yaml" + - name: S3_ENDPOINT_URL + value: "https://sdfembs3.sdf.slac.stanford.edu" + - name: PGPASSFILE + value: "/var/secrets/butler/postgres-credentials.txt" + - name: PGUSER + value: "rubin" diff --git a/applications/exposurelog/values.yaml b/applications/exposurelog/values.yaml index ece7625737..929ddd59ca 100644 --- a/applications/exposurelog/values.yaml +++ b/applications/exposurelog/values.yaml @@ -20,6 +20,11 @@ image: # @default -- The appVersion of the chart tag: "" +ingress: + auth: + # -- Whether to require Gafaelfawr authentication for access + enabled: false + db: # -- database host host: postgres.postgres @@ -30,10 +35,6 @@ db: # -- database name database: exposurelog -ingress: - # -- Gafaelfawr auth query string - gafaelfawrAuthQuery: "" - # -- Application-specific configuration config: 
# -- NFS path to butler registry 1 diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml index f06689002c..c2b7e19a1e 100644 --- a/applications/gafaelfawr/Chart.yaml +++ b/applications/gafaelfawr/Chart.yaml @@ -5,11 +5,11 @@ description: "Authentication and identity system" home: "https://gafaelfawr.lsst.io/" sources: - "https://github.com/lsst-sqre/gafaelfawr" -appVersion: 11.1.1 +appVersion: 12.1.1 dependencies: - name: "redis" - version: 1.0.13 + version: 1.0.14 repository: "https://lsst-sqre.github.io/charts/" annotations: diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md index 2798a697c9..51df4c45c1 100644 --- a/applications/gafaelfawr/README.md +++ b/applications/gafaelfawr/README.md @@ -18,7 +18,7 @@ Authentication and identity system | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | | cloudsql.image.schemaUpdateTagSuffix | string | `"-alpine"` | Tag suffix to use for the proxy for schema updates | -| cloudsql.image.tag | string | `"1.37.0"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.37.1"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | @@ -26,7 +26,7 @@ Authentication and identity system | cloudsql.serviceAccount | string | None, must be set if Cloud SQL Auth Proxy is enabled | The Google service account that has an IAM binding to the `gafaelfawr` Kubernetes service account and has the `cloudsql.client` role | | cloudsql.tolerations | list | `[]` | Tolerations for the Cloud 
SQL Proxy pod | | config.afterLogoutUrl | string | Top-level page of this Phalanx environment | Where to send the user after they log out | -| config.cadcBaseUuid | string | Disabled | Whether to support the `/auth/cadc/userinfo` route. If set, this UUID is used as the namespace to generate UUID v5 `sub` claims returned by this route to meet the needs of CADC authentication code. | +| config.baseInternalUrl | string | FQDN under `svc.cluster.local` | URL for direct connections to the Gafaelfawr service, bypassing the Ingress. Must use a service name of `gafaelfawr` and port 8080. | | config.cilogon.clientId | string | `nil` | CILogon client ID. One and only one of this, `config.github.clientId`, or `config.oidc.clientId` must be set. | | config.cilogon.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | | config.cilogon.loginParams | object | `{"skin":"LSST"}` | Additional parameters to add | @@ -55,6 +55,11 @@ Authentication and identity system | config.ldap.userDn | string | Use anonymous binds | Bind DN for simple bind authentication. If set, `ldap-secret` must be set in the Gafaelfawr Vault secret. Set this or `kerberosConfig`, not both. | | config.ldap.userSearchAttr | string | `"uid"` | Search attribute containing the user's username | | config.logLevel | string | `"INFO"` | Choose from the text form of Python logging levels | +| config.metrics.application | string | `"gafaelfawr"` | Name under which to log metrics. Generally there is no reason to change this. | +| config.metrics.enabled | bool | `false` | Whether to enable sending metrics | +| config.metrics.events.topicPrefix | string | `"lsst.square.metrics.events"` | Topic prefix for events. It may sometimes be useful to change this in development environments. 
| +| config.metrics.schemaManager.registryUrl | string | Sasquatch in the local cluster | URL of the Confluent-compatible schema registry server | +| config.metrics.schemaManager.suffix | string | `""` | Suffix to add to all registered subjects. This is sometimes useful for experimentation during development. | | config.oidc.audience | string | Same as `clientId` | Audience (`aud` claim) to expect in ID tokens. | | config.oidc.clientId | string | `nil` | Client ID for generic OpenID Connect support. One and only one of this, `config.cilogon.clientId`, or `config.github.clientId` must be set. | | config.oidc.enrollmentUrl | string | Login fails with an error | Where to send the user if their username cannot be found in LDAP | @@ -99,7 +104,7 @@ Authentication and identity system | podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod | | redis.affinity | object | `{}` | Affinity rules for the Redis pod | | redis.config.secretKey | string | `"redis-password"` | Key inside secret from which to get the Redis password (do not change) | -| redis.config.secretName | string | `"gafaelfawr-secret"` | Name of secret containing Redis password (may require changing if fullnameOverride is set) | +| redis.config.secretName | string | `"gafaelfawr"` | Name of secret containing Redis password (do not change) | | redis.nodeSelector | object | `{}` | Node selection rules for the Redis pod | | redis.persistence.accessMode | string | `"ReadWriteOnce"` | Access mode of storage to request | | redis.persistence.enabled | bool | `true` | Whether to persist Redis storage and thus tokens. Setting this to false will use `emptyDir` and reset all tokens on every restart. Only use this for a test deployment. 
| diff --git a/applications/gafaelfawr/crds/ingress.yaml b/applications/gafaelfawr/crds/ingress.yaml index 4cedee74e0..faa50108de 100644 --- a/applications/gafaelfawr/crds/ingress.yaml +++ b/applications/gafaelfawr/crds/ingress.yaml @@ -56,8 +56,6 @@ spec: config: type: object description: "Configuration for the ingress to create." - required: - - baseUrl properties: authCacheDuration: type: string @@ -96,10 +94,10 @@ spec: scopes: type: array description: >- - Scopes to include in the delegated token if - they are available. These scopes are not - required to access the service; to make them - required, include them in spec.scopes as well. + Scopes to include in the delegated token if they + are available. These scopes are not required to + access the service; to make them required, include + them in spec.scopes as well. items: type: string service: @@ -114,9 +112,9 @@ spec: minimumLifetime: type: integer description: >- - Minimum lifetime of delegated token in seconds. If - the user's token has less than that time - remaining, force them to reauthenticate. + Minimum lifetime of delegated token in seconds. If the + user's token has less than that time remaining, force + them to reauthenticate. useAuthorization: type: boolean description: >- @@ -133,19 +131,24 @@ spec: description: >- Whether to redirect to the login flow if the user is not currently authenticated. + onlyServices: + type: array + description: >- + If set, access is restricted to tokens issued to one of + the listed services, in addition to any other access + constraints. Users will not be able to access the ingress + directly with their own tokens. + items: + type: string replace403: type: boolean description: >- - Whether to replace 403 responses with a custom 403 - response from Gafaelfawr that disables caching and - includes authorization-related errors in the - `WWW-Authenticate` header. + Obsolete setting. No longer has any effect. 
scopes: type: object description: >- - The token scope or scopes required to access this - service. May be omitted if the service allows - anonymous access. + The token scope or scopes required to access this service. + May be omitted if the service allows anonymous access. properties: any: type: array @@ -179,6 +182,13 @@ spec: - true required: - anonymous + service: + type: string + description: >- + The name of the service corresponding to this ingress, + used for metrics reporting. When delegating internal + tokens, this must match config.delegate.internal.service. + This attribute will be required in the future. username: type: string description: >- diff --git a/applications/gafaelfawr/templates/_helpers.tpl b/applications/gafaelfawr/templates/_helpers.tpl index 4b484a5aa5..eb32f96cd7 100644 --- a/applications/gafaelfawr/templates/_helpers.tpl +++ b/applications/gafaelfawr/templates/_helpers.tpl @@ -34,22 +34,28 @@ Common environment variables - name: "GAFAELFAWR_AFTER_LOGOUT_URL" value: {{ required "global.baseUrl must be set" .Values.global.baseUrl | quote }} {{- end }} +- name: "GAFAELFAWR_BASE_URL" + value: {{ .Values.global.baseUrl | quote }} +{{- if not .Values.config.baseInternalUrl }} +- name: "GAFAELFAWR_BASE_INTERNAL_URL" + value: "http://gafaelfawr.{{ .Release.Namespace }}.svc.cluster.local:8080" +{{- end }} - name: "GAFAELFAWR_BOOTSTRAP_TOKEN" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "bootstrap-token" {{- if .Values.config.cilogon.clientId }} - name: "GAFAELFAWR_CILOGON_CLIENT_SECRET" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "cilogon-client-secret" {{- end }} - name: "GAFAELFAWR_DATABASE_PASSWORD" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "database-password" {{- if (or .Values.cloudsql.enabled .Values.config.internalDatabase) }} - name: "GAFAELFAWR_DATABASE_URL" @@ -65,28 +71,28 @@ Common environment variables - name: 
"GAFAELFAWR_GITHUB_CLIENT_SECRET" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "github-client-secret" {{- end }} {{- if .Values.config.ldap.userDn }} - name: "GAFAELFAWR_LDAP_PASSWORD" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "ldap-password" {{- end }} {{- if .Values.config.oidc.clientId }} - name: "GAFAELFAWR_OIDC_CLIENT_SECRET" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "oidc-client-secret" {{- end }} {{- if .Values.config.oidcServer.enabled }} - name: "GAFAELFAWR_OIDC_SERVER_CLIENTS" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "oidc-server-secrets" {{- if (not .Values.config.oidcServer.issuer) }} - name: "GAFAELFAWR_OIDC_SERVER_ISSUER" @@ -95,7 +101,7 @@ Common environment variables - name: "GAFAELFAWR_OIDC_SERVER_KEY" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "signing-key" {{- end }} {{- if (not .Values.config.realm) }} @@ -107,20 +113,32 @@ Common environment variables - name: "GAFAELFAWR_REDIS_PASSWORD" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "redis-password" - name: "GAFAELFAWR_REDIS_URL" value: "redis://gafaelfawr-redis.{{ .Release.Namespace }}:6379/0" - name: "GAFAELFAWR_SESSION_SECRET" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "session-secret" {{- if .Values.config.slackAlerts }} - name: "GAFAELFAWR_SLACK_WEBHOOK" valueFrom: secretKeyRef: - name: {{ .secretName | quote }} + name: "gafaelfawr" key: "slack-webhook" {{- end }} +{{- if .Values.config.metrics.enabled }} +- name: "KAFKA_BOOTSTRAP_SERVERS" + valueFrom: + secretKeyRef: + name: "gafaelfawr-kafka" + key: "bootstrapServers" +- name: "KAFKA_SECURITY_PROTOCOL" + valueFrom: + secretKeyRef: + name: "gafaelfawr-kafka" + key: "securityProtocol" +{{- end }} {{- end }} diff --git 
a/applications/gafaelfawr/templates/configmap-kerberos.yaml b/applications/gafaelfawr/templates/configmap-kerberos.yaml index d21ed51b45..ab7b960106 100644 --- a/applications/gafaelfawr/templates/configmap-kerberos.yaml +++ b/applications/gafaelfawr/templates/configmap-kerberos.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: "gafaelfawr-config-kerberos" + name: "gafaelfawr-kerberos" labels: {{- include "gafaelfawr.labels" . | nindent 4 }} data: diff --git a/applications/gafaelfawr/templates/configmap.yaml b/applications/gafaelfawr/templates/configmap.yaml index 86b72672ba..b9e0efe1ba 100644 --- a/applications/gafaelfawr/templates/configmap.yaml +++ b/applications/gafaelfawr/templates/configmap.yaml @@ -1,27 +1,15 @@ apiVersion: v1 kind: ConfigMap metadata: - name: "gafaelfawr-config" + name: "gafaelfawr" labels: {{- include "gafaelfawr.labels" . | nindent 4 }} -data: - gafaelfawr.yaml: | - {{- toYaml .Values.config | nindent 4 }} -{{- if .Values.config.updateSchema }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: "gafaelfawr-config-schema-update" {{- if .Values.config.updateSchema }} annotations: helm.sh/hook: "pre-install,pre-upgrade" - helm.sh/hook-delete-policy: "hook-succeeded" + helm.sh/hook-delete-policy: "before-hook-creation" helm.sh/hook-weight: "0" {{- end }} - labels: - {{- include "gafaelfawr.labels" . 
| nindent 4 }} data: gafaelfawr.yaml: | {{- toYaml .Values.config | nindent 4 }} -{{- end }} diff --git a/applications/gafaelfawr/templates/cronjob-audit.yaml b/applications/gafaelfawr/templates/cronjob-audit.yaml index a9cebf91c2..df5bbd3453 100644 --- a/applications/gafaelfawr/templates/cronjob-audit.yaml +++ b/applications/gafaelfawr/templates/cronjob-audit.yaml @@ -37,7 +37,15 @@ spec: - "gafaelfawr" - "audit" env: - {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values "secretName" "gafaelfawr-secret") | nindent 16 }} + {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values) | nindent 16 }} + {{- if .Values.config.metrics.enabled }} + - name: "KAFKA_CLIENT_CERT_PATH" + value: "/etc/gafaelfawr-kafka/user.crt" + - name: "KAFKA_CLIENT_KEY_PATH" + value: "/etc/gafaelfawr-kafka/user.key" + - name: "KAFKA_CLUSTER_CA_PATH" + value: "/etc/gafaelfawr-kafka/ca.crt" + {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy | quote }} {{- with .Values.maintenance.resources }} @@ -54,6 +62,20 @@ spec: - name: "config" mountPath: "/etc/gafaelfawr" readOnly: true + {{- if .Values.config.metrics.enabled }} + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/ca.crt" + readOnly: true + subPath: "ssl.truststore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.crt" + readOnly: true + subPath: "ssl.keystore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.key" + readOnly: true + subPath: "ssl.keystore.key" + {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" mountPath: "/etc/krb5.keytab" @@ -73,14 +95,19 @@ spec: volumes: - name: "config" configMap: - name: "gafaelfawr-config" + name: "gafaelfawr" + {{- if .Values.config.metrics.enabled }} + - name: "kafka" + secret: + secretName: "gafaelfawr-kafka" + {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" secret: secretName: 
"gafaelfawr-keytab" - name: "kerberos-config" configMap: - name: "gafaelfawr-config-kerberos" + name: "gafaelfawr-kerberos" - name: "tmp" emptyDir: {} {{- end }} diff --git a/applications/gafaelfawr/templates/cronjob-maintenance.yaml b/applications/gafaelfawr/templates/cronjob-maintenance.yaml index 85227e1d29..7108a75266 100644 --- a/applications/gafaelfawr/templates/cronjob-maintenance.yaml +++ b/applications/gafaelfawr/templates/cronjob-maintenance.yaml @@ -36,7 +36,15 @@ spec: - "gafaelfawr" - "maintenance" env: - {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values "secretName" "gafaelfawr-secret") | nindent 16 }} + {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values) | nindent 16 }} + {{- if .Values.config.metrics.enabled }} + - name: "KAFKA_CLIENT_CERT_PATH" + value: "/etc/gafaelfawr-kafka/user.crt" + - name: "KAFKA_CLIENT_KEY_PATH" + value: "/etc/gafaelfawr-kafka/user.key" + - name: "KAFKA_CLUSTER_CA_PATH" + value: "/etc/gafaelfawr-kafka/ca.crt" + {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy | quote }} {{- with .Values.maintenance.resources }} @@ -53,6 +61,20 @@ spec: - name: "config" mountPath: "/etc/gafaelfawr" readOnly: true + {{- if .Values.config.metrics.enabled }} + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/ca.crt" + readOnly: true + subPath: "ssl.truststore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.crt" + readOnly: true + subPath: "ssl.keystore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.key" + readOnly: true + subPath: "ssl.keystore.key" + {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" mountPath: "/etc/krb5.keytab" @@ -72,14 +94,19 @@ spec: volumes: - name: "config" configMap: - name: "gafaelfawr-config" + name: "gafaelfawr" + {{- if .Values.config.metrics.enabled }} + - name: "kafka" + secret: + secretName: "gafaelfawr-kafka" 
+ {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" secret: secretName: "gafaelfawr-keytab" - name: "kerberos-config" configMap: - name: "gafaelfawr-config-kerberos" + name: "gafaelfawr-kerberos" - name: "tmp" emptyDir: {} {{- end }} diff --git a/applications/gafaelfawr/templates/deployment-operator.yaml b/applications/gafaelfawr/templates/deployment-operator.yaml index cc4786accf..821ca0fabc 100644 --- a/applications/gafaelfawr/templates/deployment-operator.yaml +++ b/applications/gafaelfawr/templates/deployment-operator.yaml @@ -42,7 +42,15 @@ spec: - "-m" - "gafaelfawr.operator" env: - {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values "secretName" "gafaelfawr-secret") | nindent 12 }} + {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values) | nindent 12 }} + {{- if .Values.config.metrics.enabled }} + - name: "KAFKA_CLIENT_CERT_PATH" + value: "/etc/gafaelfawr-kafka/user.crt" + - name: "KAFKA_CLIENT_KEY_PATH" + value: "/etc/gafaelfawr-kafka/user.key" + - name: "KAFKA_CLUSTER_CA_PATH" + value: "/etc/gafaelfawr-kafka/ca.crt" + {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy | quote }} livenessProbe: @@ -71,6 +79,20 @@ spec: - name: "config" mountPath: "/etc/gafaelfawr" readOnly: true + {{- if .Values.config.metrics.enabled }} + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/ca.crt" + readOnly: true + subPath: "ssl.truststore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.crt" + readOnly: true + subPath: "ssl.keystore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.key" + readOnly: true + subPath: "ssl.keystore.key" + {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" mountPath: "/etc/krb5.keytab" @@ -90,14 +112,19 @@ spec: volumes: - name: "config" configMap: - name: "gafaelfawr-config" + name: "gafaelfawr" + {{- if 
.Values.config.metrics.enabled }} + - name: "kafka" + secret: + secretName: "gafaelfawr-kafka" + {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" secret: secretName: "gafaelfawr-keytab" - name: "kerberos-config" configMap: - name: "gafaelfawr-config-kerberos" + name: "gafaelfawr-kerberos" - name: "tmp" emptyDir: {} {{- end }} diff --git a/applications/gafaelfawr/templates/deployment.yaml b/applications/gafaelfawr/templates/deployment.yaml index 697aedba86..22dc810318 100644 --- a/applications/gafaelfawr/templates/deployment.yaml +++ b/applications/gafaelfawr/templates/deployment.yaml @@ -54,7 +54,15 @@ spec: {{- end }} - name: "gafaelfawr" env: - {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values "secretName" "gafaelfawr-secret" "sidecar" true) | nindent 12 }} + {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values "sidecar" true) | nindent 12 }} + {{- if .Values.config.metrics.enabled }} + - name: "KAFKA_CLIENT_CERT_PATH" + value: "/etc/gafaelfawr-kafka/user.crt" + - name: "KAFKA_CLIENT_KEY_PATH" + value: "/etc/gafaelfawr-kafka/user.key" + - name: "KAFKA_CLUSTER_CA_PATH" + value: "/etc/gafaelfawr-kafka/ca.crt" + {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy | quote }} livenessProbe: @@ -94,6 +102,20 @@ spec: - name: "config" mountPath: "/etc/gafaelfawr" readOnly: true + {{- if .Values.config.metrics.enabled }} + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/ca.crt" + readOnly: true + subPath: "ssl.truststore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.crt" + readOnly: true + subPath: "ssl.keystore.crt" + - name: "kafka" + mountPath: "/etc/gafaelfawr-kafka/user.key" + readOnly: true + subPath: "ssl.keystore.key" + {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" mountPath: "/etc/krb5.keytab" @@ -113,14 +135,19 @@ spec: volumes: - name: "config" 
configMap: - name: "gafaelfawr-config" + name: "gafaelfawr" + {{- if .Values.config.metrics.enabled }} + - name: "kafka" + secret: + secretName: "gafaelfawr-kafka" + {{- end }} {{- if .Values.config.ldap.kerberosConfig }} - name: "keytab" secret: secretName: "gafaelfawr-keytab" - name: "kerberos-config" configMap: - name: "gafaelfawr-config-kerberos" + name: "gafaelfawr-kerberos" - name: "tmp" emptyDir: {} {{- end }} diff --git a/applications/gafaelfawr/templates/job-schema-update.yaml b/applications/gafaelfawr/templates/job-schema-update.yaml index 87cc71815a..65bc2b52a0 100644 --- a/applications/gafaelfawr/templates/job-schema-update.yaml +++ b/applications/gafaelfawr/templates/job-schema-update.yaml @@ -23,7 +23,7 @@ spec: gafaelfawr-redis-client: "true" spec: {{- if .Values.cloudsql.enabled }} - serviceAccountName: "gafaelfawr-schema-update" + serviceAccountName: "gafaelfawr" {{- else }} automountServiceAccountToken: false {{- end }} @@ -79,7 +79,7 @@ spec: gafaelfawr update-schema touch /lifecycle/main-terminated env: - {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values "secretName" "gafaelfawr-secret-schema-update" "sidecar" true) | nindent 12 }} + {{- include "gafaelfawr.envVars" (dict "Release" .Release "Values" .Values "sidecar" true) | nindent 12 }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy | quote }} {{- with .Values.resources }} @@ -106,7 +106,7 @@ spec: volumes: - name: "config" configMap: - name: "gafaelfawr-config-schema-update" + name: "gafaelfawr" - name: "lifecycle" emptyDir: {} {{- with .Values.nodeSelector }} diff --git a/applications/gafaelfawr/templates/kafka-access.yaml b/applications/gafaelfawr/templates/kafka-access.yaml new file mode 100644 index 0000000000..4a13c53b68 --- /dev/null +++ b/applications/gafaelfawr/templates/kafka-access.yaml @@ -0,0 +1,16 @@ +{{- if .Values.config.metrics.enabled -}} +apiVersion: 
access.strimzi.io/v1alpha1 +kind: KafkaAccess +metadata: + name: "gafaelfawr-kafka" +spec: + kafka: + name: "sasquatch" + namespace: "sasquatch" + listener: "tls" + user: + kind: "KafkaUser" + apiGroup: "kafka.strimzi.io" + name: "app-metrics-gafaelfawr" + namespace: "sasquatch" +{{- end }} diff --git a/applications/gafaelfawr/templates/serviceaccount.yaml b/applications/gafaelfawr/templates/serviceaccount.yaml index acf07b2ed2..42aea8bf2e 100644 --- a/applications/gafaelfawr/templates/serviceaccount.yaml +++ b/applications/gafaelfawr/templates/serviceaccount.yaml @@ -6,19 +6,10 @@ metadata: labels: {{- include "gafaelfawr.labels" . | nindent 4 }} annotations: - iam.gke.io/gcp-service-account: {{ required "cloudsql.serviceAccount must be set to a valid Google service account" .Values.cloudsql.serviceAccount | quote }} -{{- if .Values.config.updateSchema }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: "gafaelfawr-schema-update" - labels: - {{- include "gafaelfawr.labels" . | nindent 4 }} - annotations: + {{- if .Values.config.updateSchema }} helm.sh/hook: "pre-install,pre-upgrade" - helm.sh/hook-delete-policy: "hook-succeeded" + helm.sh/hook-delete-policy: "before-hook-creation" helm.sh/hook-weight: "0" + {{- end }} iam.gke.io/gcp-service-account: {{ required "cloudsql.serviceAccount must be set to a valid Google service account" .Values.cloudsql.serviceAccount | quote }} {{- end }} -{{- end }} diff --git a/applications/gafaelfawr/templates/vault-secrets.yaml b/applications/gafaelfawr/templates/vault-secrets.yaml index 558598febe..29563d2ebd 100644 --- a/applications/gafaelfawr/templates/vault-secrets.yaml +++ b/applications/gafaelfawr/templates/vault-secrets.yaml @@ -1,29 +1,20 @@ apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: - name: "gafaelfawr-secret" + name: "gafaelfawr" labels: {{- include "gafaelfawr.labels" . 
| nindent 4 }} -spec: - path: "{{ .Values.global.vaultSecretsPath }}/gafaelfawr" - type: Opaque -{{- if .Values.config.updateSchema }} ---- -apiVersion: ricoberger.de/v1alpha1 -kind: VaultSecret -metadata: - name: "gafaelfawr-secret-schema-update" + {{- if .Values.config.updateSchema }} annotations: helm.sh/hook: "pre-install,pre-upgrade" + helm.sh/hook-delete-policy: "before-hook-creation" helm.sh/hook-weight: "0" - labels: - {{- include "gafaelfawr.labels" . | nindent 4 }} + {{- end }} spec: path: "{{ .Values.global.vaultSecretsPath }}/gafaelfawr" type: Opaque -{{- end }} ---- {{- if .Values.config.ldap.kerberosConfig }} +--- apiVersion: ricoberger.de/v1alpha1 kind: VaultSecret metadata: diff --git a/applications/gafaelfawr/values-ccin2p3.yaml b/applications/gafaelfawr/values-ccin2p3.yaml index da12e492e5..cecfc138ac 100644 --- a/applications/gafaelfawr/values-ccin2p3.yaml +++ b/applications/gafaelfawr/values-ccin2p3.yaml @@ -6,7 +6,6 @@ redis: config: logLevel: "DEBUG" internalDatabase: true - updateSchema: False # Session length and token expiration (in minutes). #issuer: @@ -14,6 +13,9 @@ config: # github: # clientId: ae314e45a6af43ea910a + metrics: + application: gafaelfawr + enabled: false oidc: clientId: "lsst_rsp" @@ -68,8 +70,6 @@ config: oidcServer: enabled: false - cadcBaseUuid: "df534647-a1df-4608-b08e-3af8dc291e41" - # initialAdmins: # - "mainetti" diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml index 5166cdca19..fc1d59d4f9 100644 --- a/applications/gafaelfawr/values-idfdev.yaml +++ b/applications/gafaelfawr/values-idfdev.yaml @@ -32,8 +32,9 @@ config: oidcServer: enabled: true - # Support generating user metadata for CADC authentication code. - cadcBaseUuid: "db8626e0-3b93-45c0-89ab-3058b0ed39fe" + # Enable metrics reporting. + metrics: + enabled: true # User quota settings for services. 
quota: diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml index 9cb90b1377..b782a763d4 100644 --- a/applications/gafaelfawr/values-idfint.yaml +++ b/applications/gafaelfawr/values-idfint.yaml @@ -33,8 +33,9 @@ config: oidcServer: enabled: true - # Support generating user metadata for CADC authentication code. - cadcBaseUuid: "dd5cd3ee-4239-48e4-b0e3-282f2328b9d1" + # Enable metrics reporting. + metrics: + enabled: true # User quota settings for services. quota: diff --git a/applications/gafaelfawr/values-idfprod.yaml b/applications/gafaelfawr/values-idfprod.yaml index f9148ef05d..6e55cc5d89 100644 --- a/applications/gafaelfawr/values-idfprod.yaml +++ b/applications/gafaelfawr/values-idfprod.yaml @@ -36,9 +36,6 @@ config: - "dp0.2" - "dp0.3" - # Support generating user metadata for CADC authentication code. - cadcBaseUuid: "5f0eb655-0e72-4948-a6a5-a94c0be9019f" - # User quota settings for services. quota: default: diff --git a/applications/gafaelfawr/values-roe.yaml b/applications/gafaelfawr/values-roe.yaml index f3914a1d96..f53b9e0ead 100644 --- a/applications/gafaelfawr/values-roe.yaml +++ b/applications/gafaelfawr/values-roe.yaml @@ -8,9 +8,6 @@ config: github: clientId: "10172b4db1b67ee31620" - # Support generating user metadata for CADC authentication code. - cadcBaseUuid: "4cb5f948-aad9-466c-837b-5eae565b0a77" - # Allow access by GitHub team. groupMapping: "exec:admin": diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml index dd17d804d8..52e5b584bf 100644 --- a/applications/gafaelfawr/values-usdfdev.yaml +++ b/applications/gafaelfawr/values-usdfdev.yaml @@ -18,9 +18,6 @@ config: oidcServer: enabled: true - # Support generating user metadata for CADC authentication code. 
- cadcBaseUuid: "efa0a347-b648-4948-a987-055efbf6802a" - oidc: clientId: "rubin-usdf-rsp-dev" audience: "rubin-usdf-rsp-dev" diff --git a/applications/gafaelfawr/values-usdfint.yaml b/applications/gafaelfawr/values-usdfint.yaml index 91a7a20b07..c29d1dd918 100644 --- a/applications/gafaelfawr/values-usdfint.yaml +++ b/applications/gafaelfawr/values-usdfint.yaml @@ -11,9 +11,6 @@ config: oidcServer: enabled: true - # Support generating user metadata for CADC authentication code. - cadcBaseUuid: "82c6fc76-b7d3-4368-92a9-6a468dfa23dc" - oidc: clientId: vcluster--usdf-rsp-int audience: "vcluster--usdf-rsp-int" diff --git a/applications/gafaelfawr/values-usdfprod.yaml b/applications/gafaelfawr/values-usdfprod.yaml index 0d7e8d1e35..cd2dda0b9a 100644 --- a/applications/gafaelfawr/values-usdfprod.yaml +++ b/applications/gafaelfawr/values-usdfprod.yaml @@ -11,9 +11,6 @@ config: oidcServer: enabled: true - # Support generating user metadata for CADC authentication code. - cadcBaseUuid: "595f5a03-bef4-473b-8e5a-588d87f13799" - oidc: clientId: rubin-usdf-rsp audience: "rubin-usdf-rsp" diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml index 3780cee7ea..95820eabb6 100644 --- a/applications/gafaelfawr/values.yaml +++ b/applications/gafaelfawr/values.yaml @@ -41,11 +41,10 @@ config: # @default -- Top-level page of this Phalanx environment afterLogoutUrl: null - # -- Whether to support the `/auth/cadc/userinfo` route. If set, this UUID - # is used as the namespace to generate UUID v5 `sub` claims returned by this - # route to meet the needs of CADC authentication code. - # @default -- Disabled - cadcBaseUuid: null + # -- URL for direct connections to the Gafaelfawr service, bypassing the + # Ingress. Must use a service name of `gafaelfawr` and port 8080. 
+ # @default -- FQDN under `svc.cluster.local` + baseInternalUrl: null # -- URL for the PostgreSQL database # @default -- None, must be set if neither `cloudsql.enabled` nor @@ -230,6 +229,28 @@ config: # the `rubin` scope. dataRightsMapping: {} + metrics: + # -- Whether to enable sending metrics + enabled: false + + # -- Name under which to log metrics. Generally there is no reason to + # change this. + application: "gafaelfawr" + + events: + # -- Topic prefix for events. It may sometimes be useful to change this + # in development environments. + topicPrefix: "lsst.square.metrics.events" + + schemaManager: + # -- URL of the Confluent-compatible schema registry server + # @default -- Sasquatch in the local cluster + registryUrl: "http://sasquatch-schema-registry.sasquatch.svc.cluster.local:8081" + + # -- Suffix to add to all registered subjects. This is sometimes useful + # for experimentation during development. + suffix: "" + # -- Quota settings (see # [Quotas](https://gafaelfawr.lsst.io/user-guide/helm.html#quotas)). 
quota: {} @@ -290,7 +311,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.37.0" + tag: "1.37.1" # -- Tag suffix to use for the proxy for schema updates schemaUpdateTagSuffix: "-alpine" @@ -394,9 +415,8 @@ operator: redis: config: - # -- Name of secret containing Redis password (may require changing if - # fullnameOverride is set) - secretName: "gafaelfawr-secret" + # -- Name of secret containing Redis password (do not change) + secretName: "gafaelfawr" # -- Key inside secret from which to get the Redis password (do not # change) diff --git a/applications/ghostwriter/Chart.yaml b/applications/ghostwriter/Chart.yaml index 8d923876b0..c289ec9f6d 100644 --- a/applications/ghostwriter/Chart.yaml +++ b/applications/ghostwriter/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.1.1 +appVersion: 0.1.2 description: URL rewriter/personalizer name: ghostwriter sources: diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml index 33658a3bae..06e2fd3e5f 100644 --- a/applications/ingress-nginx/Chart.yaml +++ b/applications/ingress-nginx/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/kubernetes/ingress-nginx dependencies: - name: ingress-nginx - version: 4.11.2 + version: 4.11.3 repository: https://kubernetes.github.io/ingress-nginx diff --git a/applications/kubernetes-replicator/Chart.yaml b/applications/kubernetes-replicator/Chart.yaml index 335507f312..90365efd2c 100644 --- a/applications/kubernetes-replicator/Chart.yaml +++ b/applications/kubernetes-replicator/Chart.yaml @@ -7,5 +7,5 @@ sources: - https://github.com/mittwald/kubernetes-replicator dependencies: - name: kubernetes-replicator - version: 2.10.2 + version: 2.11.0 repository: https://helm.mittwald.de diff --git a/applications/love/README.md b/applications/love/README.md index fae75a25ca..d0e10912c7 100644 --- a/applications/love/README.md +++ b/applications/love/README.md @@ -126,7 +126,7 @@ Deployment 
for the LSST Operators Visualization Environment | love-manager.redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name | | love-manager.redis.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the redis image | | love-manager.redis.image.repository | string | `"redis"` | The redis image to use | -| love-manager.redis.image.tag | string | `"7.4.0"` | The tag to use for the redis image | +| love-manager.redis.image.tag | string | `"7.4.1"` | The tag to use for the redis image | | love-manager.redis.nodeSelector | object | `{}` | Node selection rules for the LOVE redis pods | | love-manager.redis.port | int | `6379` | The redis port number | | love-manager.redis.resources | object | `{}` | Resource specifications for the LOVE redis pods | @@ -146,7 +146,7 @@ Deployment for the LSST Operators Visualization Environment | love-nginx.affinity | object | `{}` | Affinity rules for the NGINX pod | | love-nginx.image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the NGINX image | | love-nginx.image.repository | string | `"nginx"` | The NGINX image to use | -| love-nginx.image.tag | string | `"1.27.1"` | The tag to use for the NGINX image | +| love-nginx.image.tag | string | `"1.27.2"` | The tag to use for the NGINX image | | love-nginx.imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. 
If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | | love-nginx.ingress.annotations | object | `{}` | Annotations for the NGINX ingress | | love-nginx.ingress.className | string | `"nginx"` | Assign the Ingress class name | diff --git a/applications/love/charts/love-manager/README.md b/applications/love/charts/love-manager/README.md index 47a93da5c5..21e76ee331 100644 --- a/applications/love/charts/love-manager/README.md +++ b/applications/love/charts/love-manager/README.md @@ -115,7 +115,7 @@ Helm chart for the LOVE manager service. | redis.envSecrets.REDIS_PASS | string | `"redis-pass"` | The redis password secret key name | | redis.image.pullPolicy | string | `"IfNotPresent"` | The pull policy for the redis image | | redis.image.repository | string | `"redis"` | The redis image to use | -| redis.image.tag | string | `"7.4.0"` | The tag to use for the redis image | +| redis.image.tag | string | `"7.4.1"` | The tag to use for the redis image | | redis.nodeSelector | object | `{}` | Node selection rules for the LOVE redis pods | | redis.port | int | `6379` | The redis port number | | redis.resources | object | `{}` | Resource specifications for the LOVE redis pods | diff --git a/applications/love/charts/love-manager/values.yaml b/applications/love/charts/love-manager/values.yaml index d5534ee77c..c361269f09 100644 --- a/applications/love/charts/love-manager/values.yaml +++ b/applications/love/charts/love-manager/values.yaml @@ -248,7 +248,7 @@ redis: # -- The redis image to use repository: redis # -- The tag to use for the redis image - tag: 7.4.0 + tag: 7.4.1 # -- The pull policy for the redis image pullPolicy: IfNotPresent envSecrets: diff --git a/applications/love/charts/love-nginx/README.md b/applications/love/charts/love-nginx/README.md index 5e34e445b9..6a1289a87e 100644 --- a/applications/love/charts/love-nginx/README.md +++ 
b/applications/love/charts/love-nginx/README.md @@ -9,7 +9,7 @@ Helm chart for the LOVE Nginx server. | affinity | object | `{}` | Affinity rules for the NGINX pod | | image.pullPolicy | string | `"IfNotPresent"` | The pull policy on the NGINX image | | image.repository | string | `"nginx"` | The NGINX image to use | -| image.tag | string | `"1.27.1"` | The tag to use for the NGINX image | +| image.tag | string | `"1.27.2"` | The tag to use for the NGINX image | | imagePullSecrets | list | `[]` | The list of pull secrets needed for the images. If this section is used, each object listed can have the following attributes defined: _name_ (The label identifying the pull-secret to use) | | ingress.annotations | object | `{}` | Annotations for the NGINX ingress | | ingress.className | string | `"nginx"` | Assign the Ingress class name | diff --git a/applications/love/charts/love-nginx/values.yaml b/applications/love/charts/love-nginx/values.yaml index 92a9d612bc..d4e96320fa 100644 --- a/applications/love/charts/love-nginx/values.yaml +++ b/applications/love/charts/love-nginx/values.yaml @@ -4,7 +4,7 @@ image: # -- The NGINX image to use repository: nginx # -- The tag to use for the NGINX image - tag: 1.27.1 + tag: 1.27.2 # -- The pull policy on the NGINX image pullPolicy: IfNotPresent # -- Service type specification diff --git a/applications/mobu/Chart.yaml b/applications/mobu/Chart.yaml index aa975f02a2..351942bf12 100644 --- a/applications/mobu/Chart.yaml +++ b/applications/mobu/Chart.yaml @@ -5,4 +5,4 @@ description: "Continuous integration testing" home: https://mobu.lsst.io/ sources: - "https://github.com/lsst-sqre/mobu" -appVersion: 11.0.0 +appVersion: 13.0.1 diff --git a/applications/mobu/README.md b/applications/mobu/README.md index bf46cb4c6a..4bbc954dc1 100644 --- a/applications/mobu/README.md +++ b/applications/mobu/README.md @@ -14,10 +14,11 @@ Continuous integration testing |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules 
for the mobu frontend pod | | config.autostart | list | `[]` | Autostart specification. Must be a list of mobu flock specifications. Each flock listed will be automatically started when mobu is started. | -| config.debug | bool | `false` | If set to true, include the output from all flocks in the main mobu log and disable structured JSON logging. | -| config.githubCiApp | object | disabled. | Configuration for the GitHub refresh app integration. See https://mobu.lsst.io/operations/github_ci_app.html#add-phalanx-configuration | -| config.githubRefreshApp | object | disabled. | Configuration for the GitHub refresh app integration. See https://mobu.lsst.io/operations/github_refresh_app.html#add-phalanx-configuration | +| config.githubCiApp | string | disabled. | Configuration for the GitHub refresh app integration. See https://mobu.lsst.io/operations/github_ci_app.html#add-phalanx-configuration | +| config.githubRefreshApp | string | disabled. | Configuration for the GitHub refresh app integration. See https://mobu.lsst.io/operations/github_refresh_app.html#add-phalanx-configuration | +| config.logLevel | string | `"INFO"` | Log level. Set to 'DEBUG' to include the output from all flocks in the main mobu log. | | config.pathPrefix | string | `"/mobu"` | Prefix for mobu's API routes. | +| config.profile | string | `"production"` | One of 'production' or 'development'. 'production' configures structured JSON logging, and 'development' configures unstructured human readable logging. | | config.slackAlerts | bool | `true` | Whether to send alerts and status to Slack. 
| | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | diff --git a/applications/mobu/templates/configmap-autostart.yaml b/applications/mobu/templates/configmap-autostart.yaml deleted file mode 100644 index 93537ae0b6..0000000000 --- a/applications/mobu/templates/configmap-autostart.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.config.autostart -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "mobu.fullname" . }}-autostart - labels: - {{- include "mobu.labels" . | nindent 4 }} -data: - autostart.yaml: | - {{- toYaml .Values.config.autostart | nindent 4 }} -{{- end }} diff --git a/applications/mobu/templates/configmap-github-ci-app.yaml b/applications/mobu/templates/configmap-github-ci-app.yaml deleted file mode 100644 index a90491cd72..0000000000 --- a/applications/mobu/templates/configmap-github-ci-app.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.config.githubCiApp -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "mobu.fullname" . }}-github-ci-app - labels: - {{- include "mobu.labels" . | nindent 4 }} -data: - github-ci-app.yaml: | - {{- toYaml .Values.config.githubCiApp | nindent 4 }} -{{- end }} diff --git a/applications/mobu/templates/configmap-github-refresh-app.yaml b/applications/mobu/templates/configmap-github-refresh-app.yaml deleted file mode 100644 index 04910d22ef..0000000000 --- a/applications/mobu/templates/configmap-github-refresh-app.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.config.githubRefreshApp -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "mobu.fullname" . }}-github-refresh-app - labels: - {{- include "mobu.labels" . 
| nindent 4 }} -data: - github-refresh-app.yaml: | - {{- toYaml .Values.config.githubRefreshApp | nindent 4 }} -{{- end }} diff --git a/applications/mobu/templates/configmap.yaml b/applications/mobu/templates/configmap.yaml new file mode 100644 index 0000000000..d4e1f8f851 --- /dev/null +++ b/applications/mobu/templates/configmap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mobu.fullname" . }} + labels: + {{- include "mobu.labels" . | nindent 4 }} +data: + config.yaml: | + {{- toYaml .Values.config | nindent 4 }} diff --git a/applications/mobu/templates/deployment.yaml b/applications/mobu/templates/deployment.yaml index 07ab0f54c6..bbb05a3396 100644 --- a/applications/mobu/templates/deployment.yaml +++ b/applications/mobu/templates/deployment.yaml @@ -13,8 +13,10 @@ spec: type: "Recreate" template: metadata: - {{- with .Values.podAnnotations }} annotations: + # Force the pod to restart when the config map is updated. + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} labels: @@ -31,10 +33,6 @@ spec: name: {{ template "mobu.fullname" . }}-secret key: "ALERT_HOOK" {{- end }} - {{- if .Values.config.autostart }} - - name: "MOBU_AUTOSTART_PATH" - value: "/etc/mobu/autostart.yaml" - {{- end }} - name: "MOBU_ENVIRONMENT_URL" value: {{ .Values.global.baseUrl }} - name: "MOBU_GAFAELFAWR_TOKEN" @@ -43,8 +41,6 @@ spec: name: {{ template "mobu.fullname" . 
}}-gafaelfawr-token key: "token" {{- if .Values.config.githubRefreshApp }} - - name: "MOBU_GITHUB_REFRESH_APP_CONFIG_PATH" - value: "/etc/mobu/github-refresh-app.yaml" - name: "MOBU_GITHUB_REFRESH_APP_WEBHOOK_SECRET" valueFrom: secretKeyRef: @@ -52,8 +48,6 @@ spec: key: "github-refresh-app-webhook-secret" {{- end}} {{- if .Values.config.githubCiApp }} - - name: "MOBU_GITHUB_CI_APP_CONFIG_PATH" - value: "/etc/mobu/github-ci-app.yaml" - name: "MOBU_GITHUB_CI_APP_ID" valueFrom: secretKeyRef: @@ -70,17 +64,6 @@ spec: name: {{ template "mobu.fullname" . }}-secret key: "github-ci-app-webhook-secret" {{- end}} - - name: "MOBU_PATH_PREFIX" - value: {{ .Values.config.pathPrefix | quote }} - {{- if .Values.config.debug }} - - name: "MOBU_LOGGING_PROFILE" - value: "development" - - name: "MOBU_LOG_LEVEL" - value: "DEBUG" - {{- else }} - - name: "MOBU_LOGGING_PROFILE" - value: "production" - {{- end }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy | quote }} ports: @@ -114,29 +97,8 @@ spec: runAsGroup: 1000 volumes: - name: "config" - projected: - sources: - {{- if .Values.config.autostart }} - - configMap: - name: {{ include "mobu.fullname" . }}-autostart - items: - - key: autostart.yaml - path: autostart.yaml - {{- end }} - {{- if .Values.config.githubCiApp }} - - configMap: - name: {{ include "mobu.fullname" . }}-github-ci-app - items: - - key: github-ci-app.yaml - path: github-ci-app.yaml - {{- end }} - {{- if .Values.config.githubRefreshApp }} - - configMap: - name: {{ include "mobu.fullname" . 
}}-github-refresh-app - items: - - key: github-refresh-app.yaml - path: github-refresh-app.yaml - {{- end }} + configMap: + name: "mobu" - name: "tmp" emptyDir: {} {{- with .Values.nodeSelector }} diff --git a/applications/mobu/tests/github_ci_app_enabled_test.yaml b/applications/mobu/tests/github_ci_app_enabled_test.yaml index fcc0995c1e..edb5723900 100644 --- a/applications/mobu/tests/github_ci_app_enabled_test.yaml +++ b/applications/mobu/tests/github_ci_app_enabled_test.yaml @@ -2,7 +2,7 @@ suite: Github CI App Integration Enabled set: config: githubCiApp: - accepted_github_orgs: + acceptedGithubOrgs: - "org1" - "org2" users: @@ -27,11 +27,6 @@ tests: - it: "Should inject secrets into the Deployment env" template: "deployment.yaml" asserts: - - contains: - path: "spec.template.spec.containers[0].env" - any: true - content: - name: "MOBU_GITHUB_CI_APP_CONFIG_PATH" - contains: path: "spec.template.spec.containers[0].env" any: true @@ -48,20 +43,27 @@ tests: content: name: "MOBU_GITHUB_CI_APP_WEBHOOK_SECRET" - it: "Should create a ConfigMap" - template: "configmap-github-ci-app.yaml" + template: "configmap.yaml" asserts: - containsDocument: kind: "ConfigMap" apiVersion: v1 - equal: - path: "data['github-ci-app.yaml']" + path: "data['config.yaml']" value: | - accepted_github_orgs: - - org1 - - org2 - scopes: - - exec:notebook - - read:tap - users: - - bot-mobu-ci-user-1 - - bot-mobu-ci-user-2 + autostart: [] + githubCiApp: + acceptedGithubOrgs: + - org1 + - org2 + scopes: + - exec:notebook + - read:tap + users: + - bot-mobu-ci-user-1 + - bot-mobu-ci-user-2 + githubRefreshApp: null + logLevel: INFO + pathPrefix: /mobu + profile: production + slackAlerts: true diff --git a/applications/mobu/tests/github_disabled_test.yaml b/applications/mobu/tests/github_disabled_test.yaml index ee33c78981..223cf551d3 100644 --- a/applications/mobu/tests/github_disabled_test.yaml +++ b/applications/mobu/tests/github_disabled_test.yaml @@ -4,34 +4,25 @@ set: global: host: 
"example.com" tests: - - it: "Should not create a GitHub CI app ingress" - template: "ingress-github-ci-app.yaml" + - it: "Should not include github things in the config file ConfigMap" + template: "configmap.yaml" asserts: - - hasDocuments: - count: 0 - - it: "Should not create a GitHub refresh app ingress" - template: "ingress-github-refresh-app.yaml" - asserts: - - hasDocuments: - count: 0 - - it: "Should not create a GitHub CI app ConfigMap" - template: "configmap-github-ci-app.yaml" - asserts: - - hasDocuments: - count: 0 - - it: "Should not create a GitHub refresh app ConfigMap" - template: "configmap-github-refresh-app.yaml" - asserts: - - hasDocuments: - count: 0 + - containsDocument: + kind: "ConfigMap" + apiVersion: v1 + - equal: + path: "data['config.yaml']" + value: | + autostart: [] + githubCiApp: null + githubRefreshApp: null + logLevel: INFO + pathPrefix: /mobu + profile: production + slackAlerts: true - it: "Should not inject GitHub CI app secrets into the Deployment env" template: "deployment.yaml" asserts: - - notContains: - path: "spec.template.spec.containers[0].env" - any: true - content: - name: "MOBU_GITHUB_CI_APP_CONFIG_PATH" - notContains: path: "spec.template.spec.containers[0].env" any: true @@ -50,11 +41,6 @@ tests: - it: "Should not inject GitHub refresh app secrets into the Deployment env" template: "deployment.yaml" asserts: - - notContains: - path: "spec.template.spec.containers[0].env" - any: true - content: - name: "MOBU_GITHUB_REFRESH_APP_CONFIG_PATH" - notContains: path: "spec.template.spec.containers[0].env" any: true diff --git a/applications/mobu/tests/github_refresh_app_enabled_test.yaml b/applications/mobu/tests/github_refresh_app_enabled_test.yaml index 13e84ec0e5..c038f8e334 100644 --- a/applications/mobu/tests/github_refresh_app_enabled_test.yaml +++ b/applications/mobu/tests/github_refresh_app_enabled_test.yaml @@ -2,7 +2,7 @@ suite: Github Refresh App Integration Enabled set: config: githubRefreshApp: - 
accepted_github_orgs: + acceptedGithubOrgs: - "org1" - "org2" global: @@ -21,25 +21,27 @@ tests: - it: "Should inject secrets into the Deployment env" template: "deployment.yaml" asserts: - - contains: - path: "spec.template.spec.containers[0].env" - any: true - content: - name: "MOBU_GITHUB_REFRESH_APP_CONFIG_PATH" - contains: path: "spec.template.spec.containers[0].env" any: true content: name: "MOBU_GITHUB_REFRESH_APP_WEBHOOK_SECRET" - it: "Should create a ConfigMap" - template: "configmap-github-refresh-app.yaml" + template: "configmap.yaml" asserts: - containsDocument: kind: "ConfigMap" apiVersion: v1 - equal: - path: "data['github-refresh-app.yaml']" + path: "data['config.yaml']" value: | - accepted_github_orgs: - - org1 - - org2 + autostart: [] + githubCiApp: null + githubRefreshApp: + acceptedGithubOrgs: + - org1 + - org2 + logLevel: INFO + pathPrefix: /mobu + profile: production + slackAlerts: true diff --git a/applications/mobu/values-idfdemo.yaml b/applications/mobu/values-idfdemo.yaml index ee9e81f4cc..74cb36b14c 100644 --- a/applications/mobu/values-idfdemo.yaml +++ b/applications/mobu/values-idfdemo.yaml @@ -1,3 +1,4 @@ config: - debug: true + logLevel: DEBUG + profile: development autostart: [] diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml index 7bc506a8bc..5ec676ec7e 100644 --- a/applications/mobu/values-idfdev.yaml +++ b/applications/mobu/values-idfdev.yaml @@ -1,5 +1,6 @@ config: - debug: true + logLevel: DEBUG + profile: development githubRefreshApp: acceptedGithubOrgs: - lsst-sqre diff --git a/applications/mobu/values-roundtable-dev.yaml b/applications/mobu/values-roundtable-dev.yaml index f69f4ff4dd..ba29bd722d 100644 --- a/applications/mobu/values-roundtable-dev.yaml +++ b/applications/mobu/values-roundtable-dev.yaml @@ -1,5 +1,4 @@ config: - debug: true autostart: - name: "gitlfs" count: 1 diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml index 
87c824fe18..873086520e 100644 --- a/applications/mobu/values-usdfdev.yaml +++ b/applications/mobu/values-usdfdev.yaml @@ -1,5 +1,4 @@ config: - debug: true githubRefreshApp: acceptedGithubOrgs: - lsst-sqre diff --git a/applications/mobu/values-usdfint.yaml b/applications/mobu/values-usdfint.yaml index 438dee94a6..cb733ed173 100644 --- a/applications/mobu/values-usdfint.yaml +++ b/applications/mobu/values-usdfint.yaml @@ -1,5 +1,4 @@ config: - debug: true githubRefreshApp: acceptedGithubOrgs: - lsst-sqre diff --git a/applications/mobu/values-usdfprod.yaml b/applications/mobu/values-usdfprod.yaml index b10d42b593..d8097ed9f6 100644 --- a/applications/mobu/values-usdfprod.yaml +++ b/applications/mobu/values-usdfprod.yaml @@ -1,5 +1,4 @@ config: - debug: true autostart: - name: "firefighter" count: 1 diff --git a/applications/mobu/values.yaml b/applications/mobu/values.yaml index 6c1dd4851f..8b88740624 100644 --- a/applications/mobu/values.yaml +++ b/applications/mobu/values.yaml @@ -29,17 +29,22 @@ config: # -- Configuration for the GitHub refresh app integration. # See https://mobu.lsst.io/operations/github_refresh_app.html#add-phalanx-configuration # @default -- disabled. - githubRefreshApp: {} + githubRefreshApp: null # -- Configuration for the GitHub CI app integration. # -- Configuration for the GitHub refresh app integration. # See https://mobu.lsst.io/operations/github_ci_app.html#add-phalanx-configuration # @default -- disabled. - githubCiApp: {} + githubCiApp: null - # -- If set to true, include the output from all flocks in the main mobu log - # and disable structured JSON logging. - debug: false + # -- Log level. Set to 'DEBUG' to include the output from all flocks in the + # main mobu log. + logLevel: INFO + + # -- One of 'production' or 'development'. 'production' configures structured + # JSON logging, and 'development' configures unstructured human readable + # logging. + profile: production # -- Whether to send alerts and status to Slack. 
slackAlerts: true diff --git a/applications/narrativelog/README.md b/applications/narrativelog/README.md index 7fe1f08bf4..f7bd7464d6 100644 --- a/applications/narrativelog/README.md +++ b/applications/narrativelog/README.md @@ -30,7 +30,7 @@ Narrative log service | image.pullPolicy | string | `"Always"` | Pull policy for the narrativelog image | | image.repository | string | `"lsstsqre/narrativelog"` | narrativelog image to use | | image.tag | string | The appVersion of the chart | Tag of exposure image to use | -| ingress.gafaelfawrAuthQuery | string | `""` | Gafaelfawr auth query string | +| ingress.auth.enabled | bool | `false` | Whether to require Gafaelfawr authentication for access | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the narrativelog pod | | podAnnotations | object | `{}` | Annotations for the narrativelog pod | diff --git a/applications/narrativelog/templates/ingress.yaml b/applications/narrativelog/templates/ingress.yaml index cdf8f56d85..796e78fd1d 100644 --- a/applications/narrativelog/templates/ingress.yaml +++ b/applications/narrativelog/templates/ingress.yaml @@ -1,29 +1,32 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress metadata: name: {{ template "narrativelog.fullname" . }} labels: {{- include "narrativelog.labels" . | nindent 4 }} - annotations: - {{- if .Values.ingress.gafaelfawrAuthQuery }} - nginx.ingress.kubernetes.io/auth-method: "GET" - nginx.ingress.kubernetes.io/auth-response-headers: "X-Auth-Request-User,X-Auth-Request-Email,X-Auth-Request-Token" - nginx.ingress.kubernetes.io/auth-signin: "{{ .Values.global.baseUrl }}/login" - nginx.ingress.kubernetes.io/auth-url: "https://{{ .Values.global.baseUrl }}/auth?{{ .Values.ingress.gafaelfawrAuthQuery }}" - {{- end }} - {{- with .Values.ingress.annotations }} - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - ingressClassName: "nginx" - rules: - - host: {{ required "global.host must be set" .Values.global.host | quote }} - http: - paths: - - path: /narrativelog - pathType: Prefix - backend: - service: - name: {{ include "narrativelog.fullname" . }} - port: - number: 8080 +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + {{- if .Values.ingress.auth.enabled }} + loginRedirect: true + scopes: + all: + - "exec:internal-tools" + {{- else }} + scopes: + anonymous: true + {{- end }} +template: + metadata: + name: {{ template "narrativelog.fullname" . }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: /narrativelog + pathType: Prefix + backend: + service: + name: {{ include "narrativelog.fullname" . }} + port: + number: 8080 diff --git a/applications/narrativelog/values.yaml b/applications/narrativelog/values.yaml index 87e629f34d..c85c032573 100644 --- a/applications/narrativelog/values.yaml +++ b/applications/narrativelog/values.yaml @@ -20,6 +20,11 @@ image: # @default -- The appVersion of the chart tag: "" +ingress: + auth: + # -- Whether to require Gafaelfawr authentication for access + enabled: false + db: # -- database host host: postgres.postgres @@ -30,10 +35,6 @@ db: # -- database name database: narrativelog -ingress: - # -- Gafaelfawr auth query string - gafaelfawrAuthQuery: "" - # -- Application-specific configuration config: # -- Site ID; a non-empty string of up to 16 characters. 
diff --git a/applications/next-visit-fan-out/README.md b/applications/next-visit-fan-out/README.md index e9c699a48e..5f48aa4d07 100644 --- a/applications/next-visit-fan-out/README.md +++ b/applications/next-visit-fan-out/README.md @@ -7,7 +7,8 @@ Poll next visit events from Kafka, duplicate them, and send them to all applicat | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the next-visit-fan-out deployment pod | -| detectorConfigFile | string | `"detector.yaml"` | | +| debug | bool | `false` | If set, enable debug logging. | +| detectorConfig | object | See `values.yaml`. | A mapping, for each instrument, of detector number to whether that detector is "active" (i.e., producing images). | | fullnameOverride | string | `""` | | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | @@ -15,15 +16,14 @@ Poll next visit events from Kafka, duplicate them, and send them to all applicat | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"us-central1-docker.pkg.dev/prompt-proto/prompt/nextvisit-fanout"` | | | image.tag | string | `""` | | +| instruments | string | None, must be set. | The instruments that are initialized when the fan-out service starts up as a space-delimited string. This list is a subset of the keys of `detectorConfig` because the latter handles some special cases. | | kafka.expiration | float | `3600` | Maximum message age to consider, in seconds. 
| | kafka.offset | string | `"latest"` | | | kafka.saslMechamism | string | `"SCRAM-SHA-512"` | | | kafka.securityProtocol | string | `"SASL_PLAINTEXT"` | | -| knative.hscUrl | string | `"http://prompt-proto-service-hsc.prompt-proto-service-hsc/next-visit"` | | -| knative.latissUrl | string | `"http://prompt-proto-service-latiss.prompt-proto-service-latiss/next-visit"` | | -| knative.lsstcamUrl | string | `"http://prompt-proto-service-lsstcam.prompt-proto-service-lsstcam/next-visit"` | | -| knative.lsstcomcamUrl | string | `"http://prompt-proto-service-lsstcomcam.prompt-proto-service-lsstcomcam/next-visit"` | | -| knative.lsstcomcamsimUrl | string | `"http://prompt-proto-service-lsstcomcamsim.prompt-proto-service-lsstcomcamsim/next-visit"` | | +| knative.maxMessages | string | None, must be set. | The maximum number of messages that can be forwarded to all Knative instances combined. | +| knative.retryRequests | bool | `true` | Whether or not to retry requests that returned a suitable response. | +| knative.urls | object | See `values.yaml`. | A mapping of instrument to that instrument's Knative service. 
| | nameOverride | string | `""` | | | nodeSelector | object | `{}` | Node selection rules for the next-visit-fan-out deployment pod | | podAnnotations."prometheus.io/port" | string | `"8000"` | | diff --git a/applications/next-visit-fan-out/templates/deployment.yaml b/applications/next-visit-fan-out/templates/deployment.yaml index 115f8c38e8..499ffd23b5 100644 --- a/applications/next-visit-fan-out/templates/deployment.yaml +++ b/applications/next-visit-fan-out/templates/deployment.yaml @@ -23,18 +23,6 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} env: - - name: LATISS_KNATIVE_SERVING_URL - value: {{ .Values.knative.latissUrl }} - - name: HSC_KNATIVE_SERVING_URL - value: {{ .Values.knative.hscUrl }} - - name: LSSTCOMCAM_KNATIVE_SERVING_URL - value: {{ .Values.knative.lsstcomcamUrl }} - - name: LSSTCOMCAMSIM_KNATIVE_SERVING_URL - value: {{ .Values.knative.lsstcomcamsimUrl }} - - name: LSSTCAM_KNATIVE_SERVING_URL - value: {{ .Values.knative.lsstcamUrl }} - - name: DETECTOR_CONFIG_FILE - value: {{ .Values.detectorConfigFile }} - name: KAFKA_SCHEMA_REGISTRY_URL value: {{ .Values.kafka.schemaRegistryUrl }} - name: KAFKA_CLUSTER @@ -51,6 +39,10 @@ spec: value: {{ .Values.kafka.saslMechamism }} - name: SECURITY_PROTOCOL value: {{ .Values.kafka.securityProtocol }} + - name: MAX_FAN_OUT_MESSAGES + value: {{ .Values.knative.maxMessages | toString | quote }} + - name: RETRY_KNATIVE_REQUESTS + value: {{ ternary "true" "false" .Values.knative.retryRequests | quote }} - name: SASL_USERNAME valueFrom: secretKeyRef: @@ -61,10 +53,26 @@ spec: secretKeyRef: key: kafka_pp_sasl_password name: {{ template "next-visit-fan-out.fullname" . 
}}-secret + - name: DEBUG_LOGS + value: {{ ternary "true" "false" .Values.debug | quote }} + - name: SUPPORTED_INSTRUMENTS + value: {{ .Values.instruments }} + - name: INSTRUMENT_CONFIG_FILE + value: /etc/config/instrument.yaml resources: {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: instrument-config + mountPath: /etc/config + readOnly: true volumes: - name: kafka-sasl-prompt-prompt-processing secret: secretName: {{ template "next-visit-fan-out.fullname" . }}-secret + - name: instrument-config + configMap: + name: instrument-map + items: + - key: "instruments" + path: instrument.yaml diff --git a/applications/next-visit-fan-out/templates/instruments.yaml b/applications/next-visit-fan-out/templates/instruments.yaml new file mode 100644 index 0000000000..69c7e738b0 --- /dev/null +++ b/applications/next-visit-fan-out/templates/instruments.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: instrument-map +data: + instruments: | + knative-urls: + {{- .Values.knative.urls | toYaml | nindent 6 }} + detectors: + {{- .Values.detectorConfig | toYaml | nindent 6 }} diff --git a/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml b/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml index 9718900be4..fc97581a64 100644 --- a/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml +++ b/applications/next-visit-fan-out/values-usdfdev-prompt-processing.yaml @@ -1,3 +1,7 @@ +knative: + maxMessages: 150 + retryRequests: false + kafka: schemaRegistryUrl: http://10.96.181.159:8081 sasquatchAddress: 10.100.226.209:9094 @@ -10,4 +14,6 @@ image: repository: ghcr.io/lsst-dm/next_visit_fan_out pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. 
- tag: 2.3.0 + tag: 2.5.0 + +instruments: "LATISS LSSTComCam LSSTComCamSim HSC" diff --git a/applications/next-visit-fan-out/values-usdfprod-prompt-processing.yaml b/applications/next-visit-fan-out/values-usdfprod-prompt-processing.yaml index 6706fa1ff6..75a9eccb55 100644 --- a/applications/next-visit-fan-out/values-usdfprod-prompt-processing.yaml +++ b/applications/next-visit-fan-out/values-usdfprod-prompt-processing.yaml @@ -1,3 +1,6 @@ +knative: + maxMessages: 1000 # Kubernetes can't support more pods yet + kafka: schemaRegistryUrl: http://10.110.90.252:8081 sasquatchAddress: 10.96.121.181:9094 @@ -8,4 +11,6 @@ image: repository: ghcr.io/lsst-dm/next_visit_fan_out pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: 2.3.0 + tag: 2.5.0 + +instruments: "LATISS LSSTComCam" diff --git a/applications/next-visit-fan-out/values.yaml b/applications/next-visit-fan-out/values.yaml index 4adeaa8669..dc95396e8c 100644 --- a/applications/next-visit-fan-out/values.yaml +++ b/applications/next-visit-fan-out/values.yaml @@ -1,11 +1,17 @@ knative: - hscUrl: http://prompt-proto-service-hsc.prompt-proto-service-hsc/next-visit - latissUrl: http://prompt-proto-service-latiss.prompt-proto-service-latiss/next-visit - lsstcomcamUrl: http://prompt-proto-service-lsstcomcam.prompt-proto-service-lsstcomcam/next-visit - lsstcomcamsimUrl: http://prompt-proto-service-lsstcomcamsim.prompt-proto-service-lsstcomcamsim/next-visit - lsstcamUrl: http://prompt-proto-service-lsstcam.prompt-proto-service-lsstcam/next-visit - -detectorConfigFile: detector.yaml + # -- A mapping of instrument to that instrument's Knative service. + # @default -- See `values.yaml`. 
+ urls: + HSC: http://prompt-proto-service-hsc.prompt-proto-service-hsc/next-visit + LATISS: http://prompt-proto-service-latiss.prompt-proto-service-latiss/next-visit + LSSTComCam: http://prompt-proto-service-lsstcomcam.prompt-proto-service-lsstcomcam/next-visit + LSSTComCamSim: http://prompt-proto-service-lsstcomcamsim.prompt-proto-service-lsstcomcamsim/next-visit + LSSTCam: http://prompt-proto-service-lsstcam.prompt-proto-service-lsstcam/next-visit + # -- The maximum number of messages that can be forwarded to all Knative instances combined. + # @default -- None, must be set. + maxMessages: "" + # -- Whether or not to retry requests that returned a suitable response. + retryRequests: true kafka: offset: latest @@ -56,3 +62,338 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + +# -- If set, enable debug logging. +debug: false + +# -- The instruments that are initialized when the fan-out service starts up as a space-delimited string. +# This list is a subset of the keys of `detectorConfig` because the latter handles some special cases. +# @default -- None, must be set. +instruments: "" + +# -- A mapping, for each instrument, of detector number to whether that detector is "active" (i.e., producing images). +# @default -- See `values.yaml`. 
+detectorConfig: + LATISS: + 0: True + LSSTComCam: &ComCam + 0: True + 1: True + 2: True + 3: True + 4: True + 5: True + 6: True + 7: True + 8: True + LSSTComCamSim: + <<: *ComCam + LSSTCam: + 0: False + 1: False + 2: False + 3: False + 4: False + 5: False + 6: False + 7: False + 8: False + 9: False + 10: False + 11: False + 12: False + 13: False + 14: False + 15: False + 16: False + 17: False + 18: False + 19: False + 20: False + 21: False + 22: False + 23: False + 24: False + 25: False + 26: False + 27: False + 28: False + 29: False + 30: False + 31: False + 32: False + 33: False + 34: False + 35: False + 36: False + 37: False + 38: False + 39: False + 40: False + 41: False + 42: False + 43: False + 44: False + 45: False + 46: False + 47: False + 48: False + 49: False + 50: False + 51: False + 52: False + 53: False + 54: False + 55: False + 56: False + 57: False + 58: False + 59: False + 60: False + 61: False + 62: False + 63: False + 64: False + 65: False + 66: False + 67: False + 68: False + 69: False + 70: False + 71: False + 72: False + 73: False + 74: False + 75: False + 76: False + 77: False + 78: False + 79: False + 80: False + 81: False + 82: False + 83: False + 84: False + 85: False + 86: False + 87: False + 88: False + 89: False + 90: False + 91: False + 92: False + 93: False + 94: False + 95: False + 96: False + 97: False + 98: False + 99: False + 100: False + 101: False + 102: False + 103: False + 104: False + 105: False + 106: False + 107: False + 108: False + 109: False + 110: False + 111: False + 112: False + 113: False + 114: False + 115: False + 116: False + 117: False + 118: False + 119: False + 120: False + 121: False + 122: False + 123: False + 124: False + 125: False + 126: False + 127: False + 128: False + 129: False + 130: False + 131: False + 132: False + 133: False + 134: False + 135: False + 136: False + 137: False + 138: False + 139: False + 140: False + 141: False + 142: False + 143: False + 144: False + 145: False + 146: False + 147: 
False + 148: False + 149: False + 150: False + 151: False + 152: False + 153: False + 154: False + 155: False + 156: False + 157: False + 158: False + 159: False + 160: False + 161: False + 162: False + 163: False + 164: False + 165: False + 166: False + 167: False + 168: False + 169: False + 170: False + 171: False + 172: False + 173: False + 174: False + 175: False + 176: False + 177: False + 178: False + 179: False + 180: False + 181: False + 182: False + 183: False + 184: False + 185: False + 186: False + 187: False + 188: False + HSC: + 0: True + 1: True + 2: True + 3: True + 4: True + 5: True + 6: True + 7: True + 8: True + 9: False + 10: True + 11: True + 12: True + 13: True + 14: True + 15: True + 16: True + 17: True + 18: True + 19: True + 20: True + 21: True + 22: True + 23: True + 24: True + 25: True + 26: True + 27: True + 28: True + 29: True + 30: True + 31: True + 32: True + 33: True + 34: True + 35: True + 36: True + 37: True + 38: True + 39: True + 40: True + 41: True + 42: True + 43: True + 44: True + 45: True + 46: True + 47: True + 48: True + 49: True + 50: True + 51: True + 52: True + 53: True + 54: True + 55: True + 56: True + 57: True + 58: True + 59: True + 60: True + 61: True + 62: True + 63: True + 64: True + 65: True + 66: True + 67: True + 68: True + 69: True + 70: True + 71: True + 72: True + 73: True + 74: True + 75: True + 76: True + 77: True + 78: True + 79: True + 80: True + 81: True + 82: True + 83: True + 84: True + 85: True + 86: True + 87: True + 88: True + 89: True + 90: True + 91: True + 92: True + 93: True + 94: True + 95: True + 96: True + 97: True + 98: True + 99: True + 100: True + 101: True + 102: True + 103: True + HSC-TEST-59134: + 0: True + 4: True + 5: True + HSC-TEST-59142: + 0: True + 5: True + 11: True + HSC-TEST-59150: + 50: True + 58: True + HSC-TEST-59160: + 43: True + 51: True diff --git a/applications/nightreport/README.md b/applications/nightreport/README.md index 4a8df51c45..6adbda45d4 100644 --- 
a/applications/nightreport/README.md +++ b/applications/nightreport/README.md @@ -31,7 +31,7 @@ Night report log service | image.pullPolicy | string | `"Always"` | Pull policy for the nightreport image | | image.repository | string | `"lsstts/nightreport"` | nightreport image to use | | image.tag | string | The appVersion of the chart | Tag of exposure image to use | -| ingress.gafaelfawrAuthQuery | string | `""` | Gafaelfawr auth query string | +| ingress.auth.enabled | bool | `false` | Whether to require Gafaelfawr authentication for access | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the nightreport pod | | podAnnotations | object | `{}` | Annotations for the nightreport pod | diff --git a/applications/nightreport/templates/ingress.yaml b/applications/nightreport/templates/ingress.yaml index 99768a13f2..930d61eab8 100644 --- a/applications/nightreport/templates/ingress.yaml +++ b/applications/nightreport/templates/ingress.yaml @@ -1,29 +1,32 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress metadata: name: {{ template "nightreport.fullname" . }} labels: {{- include "nightreport.labels" . | nindent 4 }} - annotations: - {{- if .Values.ingress.gafaelfawrAuthQuery }} - nginx.ingress.kubernetes.io/auth-method: "GET" - nginx.ingress.kubernetes.io/auth-response-headers: "X-Auth-Request-User,X-Auth-Request-Email,X-Auth-Request-Token" - nginx.ingress.kubernetes.io/auth-signin: "{{ .Values.global.baseUrl }}/login" - nginx.ingress.kubernetes.io/auth-url: "https://{{ .Values.global.baseUrl }}/auth?{{ .Values.ingress.gafaelfawrAuthQuery }}" - {{- end }} - {{- with .Values.ingress.annotations }} - {{- toYaml . 
| nindent 4 }} - {{- end }} -spec: - ingressClassName: "nginx" - rules: - - host: {{ required "global.host must be set" .Values.global.host | quote }} - http: - paths: - - path: /nightreport - pathType: Prefix - backend: - service: - name: {{ include "nightreport.fullname" . }} - port: - number: 8080 +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + {{- if .Values.ingress.auth.enabled }} + loginRedirect: true + scopes: + all: + - "exec:internal-tools" + {{- else }} + scopes: + anonymous: true + {{- end }} +template: + metadata: + name: {{ template "nightreport.fullname" . }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: /nightreport + pathType: Prefix + backend: + service: + name: {{ include "nightreport.fullname" . }} + port: + number: 8080 diff --git a/applications/nightreport/values.yaml b/applications/nightreport/values.yaml index 9471348e9f..87d725b6e0 100644 --- a/applications/nightreport/values.yaml +++ b/applications/nightreport/values.yaml @@ -20,6 +20,11 @@ image: # @default -- The appVersion of the chart tag: "" +ingress: + auth: + # -- Whether to require Gafaelfawr authentication for access + enabled: false + db: # -- database host host: postgres.postgres @@ -30,10 +35,6 @@ db: # -- database name database: nightreport -ingress: - # -- Gafaelfawr auth query string - gafaelfawrAuthQuery: "" - # -- Application-specific configuration config: # -- Site ID; a non-empty string of up to 16 characters. diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml index bcac46aef7..93965bf255 100644 --- a/applications/noteburst/Chart.yaml +++ b/applications/noteburst/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: noteburst version: 1.0.0 -appVersion: "0.13.0" +appVersion: "0.14.0" description: Noteburst is a notebook execution service for the Rubin Science Platform. 
type: application home: https://noteburst.lsst.io/ @@ -13,7 +13,7 @@ maintainers: dependencies: - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/noteburst/values-idfdev.yaml b/applications/noteburst/values-idfdev.yaml index b1f15683d5..3fd9be7aae 100644 --- a/applications/noteburst/values-idfdev.yaml +++ b/applications/noteburst/values-idfdev.yaml @@ -1,6 +1,3 @@ -image: - pullPolicy: Always - config: logLevel: "DEBUG" worker: diff --git a/applications/noteburst/values-usdfdev.yaml b/applications/noteburst/values-usdfdev.yaml index 75c085515f..612ce2000e 100644 --- a/applications/noteburst/values-usdfdev.yaml +++ b/applications/noteburst/values-usdfdev.yaml @@ -1,5 +1,5 @@ image: - pullPolicy: Always + tag: "0.13.0" replicaCount: 3 diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml index 430008b4fc..ffffd4b4fe 100644 --- a/applications/nublado/Chart.yaml +++ b/applications/nublado/Chart.yaml @@ -5,7 +5,7 @@ description: JupyterHub and custom spawner for the Rubin Science Platform sources: - https://github.com/lsst-sqre/nublado home: https://nublado.lsst.io/ -appVersion: 7.2.0 +appVersion: 8.0.3 dependencies: - name: jupyterhub diff --git a/applications/nublado/README.md b/applications/nublado/README.md index 57ae6213c6..1ecc7508f2 100644 --- a/applications/nublado/README.md +++ b/applications/nublado/README.md @@ -17,7 +17,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | | cloudsql.image.resources | object | See `values.yaml` | Resource requests and limits for Cloud SQL pod | -| cloudsql.image.tag | string | `"1.37.0"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.37.1"` | Cloud SQL Auth Proxy 
tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Auth Proxy pod | | cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Auth Proxy pod | @@ -90,7 +90,7 @@ JupyterHub and custom spawner for the Rubin Science Platform | jupyterhub.cull.maxAge | int | 2160000 (25 days) | Maximum age of a lab regardless of activity | | jupyterhub.cull.removeNamedServers | bool | `true` | Whether to remove named servers when culling their lab | | jupyterhub.cull.timeout | int | 432000 (5 days) | Default idle timeout before the lab is automatically deleted in seconds | -| jupyterhub.cull.users | bool | `true` | Whether to log out the server when culling their lab | +| jupyterhub.cull.users | bool | `false` | Whether to log out the user (from JupyterHub) when culling their lab | | jupyterhub.hub.authenticatePrometheus | bool | `false` | Whether to require metrics requests to be authenticated | | jupyterhub.hub.baseUrl | string | `"/nb"` | Base URL on which JupyterHub listens | | jupyterhub.hub.containerSecurityContext | object | `{"allowPrivilegeEscalation":false,"runAsGroup":768,"runAsUser":768}` | Security context for JupyterHub container | diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml index 83928e10cb..0e680ff040 100644 --- a/applications/nublado/values-usdfdev.yaml +++ b/applications/nublado/values-usdfdev.yaml @@ -7,7 +7,7 @@ controller: registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" pin: - - "w_2024_32" + - "w_2024_42" lab: env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" diff --git a/applications/nublado/values-usdfint.yaml b/applications/nublado/values-usdfint.yaml index 83928e10cb..0e680ff040 100644 --- 
a/applications/nublado/values-usdfint.yaml +++ b/applications/nublado/values-usdfint.yaml @@ -7,7 +7,7 @@ controller: registry: "docker-registry.slac.stanford.edu" repository: "lsstsqre/sciplat-lab" pin: - - "w_2024_32" + - "w_2024_42" lab: env: AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini" diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml index eae9fb5461..79049d8170 100644 --- a/applications/nublado/values.yaml +++ b/applications/nublado/values.yaml @@ -539,8 +539,8 @@ jupyterhub: # @default -- 300 (5 minutes) every: 300 - # -- Whether to log out the server when culling their lab - users: true + # -- Whether to log out the user (from JupyterHub) when culling their lab + users: false # -- Whether to remove named servers when culling their lab removeNamedServers: true @@ -585,7 +585,7 @@ cloudsql: pullPolicy: "IfNotPresent" # -- Cloud SQL Auth Proxy tag to use - tag: "1.37.0" + tag: "1.37.1" # -- Instance connection name for a Cloud SQL PostgreSQL instance # @default -- None, must be set if Cloud SQL Auth Proxy is enabled diff --git a/applications/obsenv-management/README.md b/applications/obsenv-management/README.md index 8ef6b00f51..9055d587ca 100644 --- a/applications/obsenv-management/README.md +++ b/applications/obsenv-management/README.md @@ -23,12 +23,18 @@ Rubin Observatory Environment Management System | obsenv-api.image.repository | string | `"rubincr.lsst.org/obsenv-api"` | Image to use in the obsenv-api deployment | | obsenv-api.image.tag | string | The appVersion of the chart | Tag of image to use | | obsenv-api.ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| obsenv-api.nfsMount.containerPath | string | `"/net/obs-env"` | Path to mount obs-env directory into container | +| obsenv-api.nfsMount.server | string | `""` | Server where the data lives | +| obsenv-api.nfsMount.serverPath | string | `"/obs-env"` | Path on the server where the data 
lives | | obsenv-api.nodeSelector | object | `{}` | Node selection rules for the obsenv-api deployment pod | | obsenv-api.podAnnotations | object | `{}` | Annotations for the obsenv-api deployment pod | | obsenv-api.replicaCount | int | `1` | Number of web deployment pods to start | | obsenv-api.resources | object | See `values.yaml` | Resource limits and requests for the obsenv-api deployment pod | +| obsenv-api.securityContext.group | int | `72089` | Group ID | +| obsenv-api.securityContext.user | int | `72091` | User ID | | obsenv-api.tolerations | list | `[]` | Tolerations for the obsenv-api deployment pod | | obsenv-ui.affinity | object | `{}` | Affinity rules for the obsenv-ui deployment pod | +| obsenv-ui.config.authGroup | string | `"test-group"` | The group used to authorize users to change the package versions | | obsenv-ui.config.logLevel | string | `"INFO"` | Logging level | | obsenv-ui.config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | | obsenv-ui.config.pathPrefix | string | `"/obsenv-ui"` | URL path prefix | diff --git a/applications/obsenv-management/charts/obsenv-api/Chart.yaml b/applications/obsenv-management/charts/obsenv-api/Chart.yaml index a4779452b3..304e694acb 100644 --- a/applications/obsenv-management/charts/obsenv-api/Chart.yaml +++ b/applications/obsenv-management/charts/obsenv-api/Chart.yaml @@ -2,4 +2,4 @@ name: obsenv-api apiVersion: v2 version: 1.0.0 description: Helm chart for the Observatory Environment Management API. -appVersion: "0.1.0" +appVersion: "0.2.0" diff --git a/applications/obsenv-management/charts/obsenv-api/README.md b/applications/obsenv-management/charts/obsenv-api/README.md index 47304addfe..3600062904 100644 --- a/applications/obsenv-management/charts/obsenv-api/README.md +++ b/applications/obsenv-management/charts/obsenv-api/README.md @@ -15,8 +15,13 @@ Helm chart for the Observatory Environment Management API. 
| image.repository | string | `"rubincr.lsst.org/obsenv-api"` | Image to use in the obsenv-api deployment | | image.tag | string | The appVersion of the chart | Tag of image to use | | ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| nfsMount.containerPath | string | `"/net/obs-env"` | Path to mount obs-env directory into container | +| nfsMount.server | string | `""` | Server where the data lives | +| nfsMount.serverPath | string | `"/obs-env"` | Path on the server where the data lives | | nodeSelector | object | `{}` | Node selection rules for the obsenv-api deployment pod | | podAnnotations | object | `{}` | Annotations for the obsenv-api deployment pod | | replicaCount | int | `1` | Number of web deployment pods to start | | resources | object | See `values.yaml` | Resource limits and requests for the obsenv-api deployment pod | +| securityContext.group | int | `72089` | Group ID | +| securityContext.user | int | `72091` | User ID | | tolerations | list | `[]` | Tolerations for the obsenv-api deployment pod | diff --git a/applications/obsenv-management/charts/obsenv-api/templates/deployment.yaml b/applications/obsenv-management/charts/obsenv-api/templates/deployment.yaml index 5a93a21f4c..188a2647b2 100644 --- a/applications/obsenv-management/charts/obsenv-api/templates/deployment.yaml +++ b/applications/obsenv-management/charts/obsenv-api/templates/deployment.yaml @@ -47,6 +47,10 @@ spec: drop: - "all" readOnlyRootFilesystem: true + volumeMounts: + - name: obsenv + mountPath: {{ .Values.nfsMount.containerPath }} + readOnly: false {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} @@ -57,5 +61,11 @@ spec: {{- end }} securityContext: runAsNonRoot: true - runAsUser: 72091 - runAsGroup: 72089 + runAsUser: {{ .Values.securityContext.user }} + runAsGroup: {{ .Values.securityContext.group }} + volumes: + - name: obsenv + nfs: + path: {{ .Values.nfsMount.serverPath }} + readOnly: false + server: {{ .Values.nfsMount.server }} diff --git a/applications/obsenv-management/charts/obsenv-api/templates/networkpolicy.yaml b/applications/obsenv-management/charts/obsenv-api/templates/networkpolicy.yaml index 33c6be6999..927209fd9a 100644 --- a/applications/obsenv-management/charts/obsenv-api/templates/networkpolicy.yaml +++ b/applications/obsenv-management/charts/obsenv-api/templates/networkpolicy.yaml @@ -15,7 +15,4 @@ spec: matchLabels: app.kubernetes.io/name: obsenv-ui egress: - - to: - - podSelector: - matchLabels: - app.kubernetes.io/name: obsenv-ui + - {} diff --git a/applications/obsenv-management/charts/obsenv-api/values.yaml b/applications/obsenv-management/charts/obsenv-api/values.yaml index 5a5143e32b..45001095d3 100644 --- a/applications/obsenv-management/charts/obsenv-api/values.yaml +++ b/applications/obsenv-management/charts/obsenv-api/values.yaml @@ -26,6 +26,23 @@ config: # -- Use fake obsenv management system useFakeObsenvManager: false +securityContext: + # -- User ID + user: 72091 + + # -- Group ID + group: 72089 + +nfsMount: + # -- Path to mount obs-env directory into container + containerPath: /net/obs-env + + # -- Path on the server where the data lives + serverPath: /obs-env + + # -- Server where the data lives + server: "" + ingress: # -- Additional annotations for the ingress rule annotations: {} diff --git a/applications/obsenv-management/charts/obsenv-ui/Chart.yaml b/applications/obsenv-management/charts/obsenv-ui/Chart.yaml index b1ec63afb3..ee0e1d1a4b 100644 --- a/applications/obsenv-management/charts/obsenv-ui/Chart.yaml +++ b/applications/obsenv-management/charts/obsenv-ui/Chart.yaml @@ -2,4 +2,4 @@ name: 
obsenv-ui apiVersion: v2 version: 1.0.0 description: Helm chart for the Observatory Environment Management UI. -appVersion: "0.1.0" +appVersion: "0.3.0" diff --git a/applications/obsenv-management/charts/obsenv-ui/README.md b/applications/obsenv-management/charts/obsenv-ui/README.md index 1127616b1b..3fcb2ce527 100644 --- a/applications/obsenv-management/charts/obsenv-ui/README.md +++ b/applications/obsenv-management/charts/obsenv-ui/README.md @@ -7,6 +7,7 @@ Helm chart for the Observatory Environment Management UI. | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity rules for the obsenv-ui deployment pod | +| config.authGroup | string | `"test-group"` | The group used to authorize users to change the package versions | | config.logLevel | string | `"INFO"` | Logging level | | config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | | config.pathPrefix | string | `"/obsenv-ui"` | URL path prefix | diff --git a/applications/obsenv-management/charts/obsenv-ui/templates/configmap.yaml b/applications/obsenv-management/charts/obsenv-ui/templates/configmap.yaml index 8eab91a720..80eabfd0cc 100644 --- a/applications/obsenv-management/charts/obsenv-ui/templates/configmap.yaml +++ b/applications/obsenv-management/charts/obsenv-ui/templates/configmap.yaml @@ -5,5 +5,6 @@ metadata: labels: {{- include "obsenv-ui.labels" . 
| nindent 4 }} data: - BASE_URL: {{ .Values.global.basePath | quote }} - OBSENV_API: "obsenv-api:8080" \ No newline at end of file + BASE_URL: {{ .Values.global.baseUrl | quote }} + OBSENV_API: "http://obsenv-api:8080/obsenv-api" + AUTH_GROUP: {{ .Values.config.authGroup | quote }} diff --git a/applications/obsenv-management/charts/obsenv-ui/templates/deployment.yaml b/applications/obsenv-management/charts/obsenv-ui/templates/deployment.yaml index 01c1bcc4df..61f100a8b2 100644 --- a/applications/obsenv-management/charts/obsenv-ui/templates/deployment.yaml +++ b/applications/obsenv-management/charts/obsenv-ui/templates/deployment.yaml @@ -33,11 +33,11 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: "http" - containerPort: 8080 + containerPort: 3000 protocol: "TCP" readinessProbe: httpGet: - path: "/" + path: "/obsenv-management/status" port: "http" resources: {{- toYaml .Values.resources | nindent 12 }} diff --git a/applications/obsenv-management/charts/obsenv-ui/templates/ingress.yaml b/applications/obsenv-management/charts/obsenv-ui/templates/ingress.yaml index a0823292d5..de30d87046 100644 --- a/applications/obsenv-management/charts/obsenv-ui/templates/ingress.yaml +++ b/applications/obsenv-management/charts/obsenv-ui/templates/ingress.yaml @@ -6,9 +6,14 @@ metadata: {{- include "obsenv-ui.labels" . 
| nindent 4 }} config: baseUrl: {{ .Values.global.baseUrl | quote }} + loginRedirect: true scopes: all: - "exec:internal-tools" + delegate: + internal: + service: "obsenv-api" + scopes: [] template: metadata: name: "obsenv-ui" diff --git a/applications/obsenv-management/charts/obsenv-ui/templates/networkpolicy.yaml b/applications/obsenv-management/charts/obsenv-ui/templates/networkpolicy.yaml index 71cfa11d91..549f78902c 100644 --- a/applications/obsenv-management/charts/obsenv-ui/templates/networkpolicy.yaml +++ b/applications/obsenv-management/charts/obsenv-ui/templates/networkpolicy.yaml @@ -8,14 +8,14 @@ spec: {{- include "obsenv-ui.selectorLabels" . | nindent 6 }} policyTypes: - "Ingress" - - "Egress" ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. - from: - - podSelector: - matchLabels: - app.kubernetes.io/name: obsenv-api - egress: - - to: - - podSelector: - matchLabels: - app.kubernetes.io/name: obsenv-api + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 3000 diff --git a/applications/obsenv-management/charts/obsenv-ui/values.yaml b/applications/obsenv-management/charts/obsenv-ui/values.yaml index d4c92e1857..b8a325a3a6 100644 --- a/applications/obsenv-management/charts/obsenv-ui/values.yaml +++ b/applications/obsenv-management/charts/obsenv-ui/values.yaml @@ -23,6 +23,9 @@ config: # -- URL path prefix pathPrefix: "/obsenv-ui" + # -- The group used to authorize users to change the package versions + authGroup: "test-group" + ingress: # -- Additional annotations for the ingress rule annotations: {} diff --git a/applications/obsenv-management/values-base.yaml b/applications/obsenv-management/values-base.yaml new file mode 100644 index 0000000000..c0534703db --- /dev/null +++ b/applications/obsenv-management/values-base.yaml @@ -0,0 +1,18 @@ +obsenv-api: + image: + repository: rubin-cr.lsst.org/obsenv-api + tag: 0.2.0 + 
pullPolicy: Always + config: + logLevel: "DEBUG" + nfsMount: + server: nfs-obsenv.ls.lsst.org + +obsenv-ui: + image: + repository: rubin-cr.lsst.org/obsenv-ui + tag: 0.3.0 + pullPolicy: Always + config: + pathPrefix: /obsenv-management + authGroup: obsenv-admins diff --git a/applications/obsenv-management/values-summit.yaml b/applications/obsenv-management/values-summit.yaml new file mode 100644 index 0000000000..4ba6a8c0bc --- /dev/null +++ b/applications/obsenv-management/values-summit.yaml @@ -0,0 +1,18 @@ +obsenv-api: + image: + repository: rubin-cr.lsst.org/obsenv-api + tag: 0.2.0 + pullPolicy: Always + config: + logLevel: "DEBUG" + nfsMount: + server: nfs-obsenv.cp.lsst.org + +obsenv-ui: + image: + repository: rubin-cr.lsst.org/obsenv-ui + tag: 0.3.0 + pullPolicy: Always + config: + pathPrefix: /obsenv-management + authGroup: obsenv-admins diff --git a/applications/obsenv-management/values-tucson-teststand.yaml b/applications/obsenv-management/values-tucson-teststand.yaml index c6fd6cd501..5ac85bb7ac 100644 --- a/applications/obsenv-management/values-tucson-teststand.yaml +++ b/applications/obsenv-management/values-tucson-teststand.yaml @@ -1,3 +1,18 @@ obsenv-api: + image: + repository: rubin-cr.lsst.org/obsenv-api + tag: 0.2.0 + pullPolicy: Always config: - useFakeObsenvManager: true + logLevel: "DEBUG" + nfsMount: + server: nfs-obsenv.tu.lsst.org + +obsenv-ui: + image: + repository: rubin-cr.lsst.org/obsenv-ui + tag: 0.3.0 + pullPolicy: Always + config: + pathPrefix: /obsenv-management + authGroup: lsst-ts-obsenv-admin-group diff --git a/applications/plot-navigator/Chart.yaml b/applications/plot-navigator/Chart.yaml index 2d5ac17db9..2c8ad3c2f8 100644 --- a/applications/plot-navigator/Chart.yaml +++ b/applications/plot-navigator/Chart.yaml @@ -1,7 +1,12 @@ apiVersion: v2 name: plot-navigator -description: Panel-based plot viewer +description: Plot-navigator version: 1.0.0 sources: - - https://github.com/lsst-dm/pipetask-plot-navigator -appVersion: "0.11.2" 
+ - https://github.com/lsst-dm/plot-navigator +appVersion: "0.1.1" + +dependencies: + - name: redis + version: 1.0.14 + repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/plot-navigator/README.md b/applications/plot-navigator/README.md index 87a645d585..7bb6496183 100644 --- a/applications/plot-navigator/README.md +++ b/applications/plot-navigator/README.md @@ -1,10 +1,10 @@ # plot-navigator -Panel-based plot viewer +Plot-navigator ## Source Code -* +* ## Values @@ -14,10 +14,13 @@ Panel-based plot viewer | config.separateSecrets | bool | `false` | Whether to use the new secrets management scheme | | config.volume_mounts | list | `[]` | Mount points for additional volumes | | config.volumes | list | `[]` | Additional volumes to attach | -| environment | object | `{}` | Environment variables (e.g. butler configuration/auth parms) for panel | +| environment | object | `{}` | Environment variables (e.g. butler configuration/auth parms) for the nextjs server | | global.baseUrl | string | Set by Argo CD | Base URL for the environment | | global.host | string | Set by Argo CD | Host name for ingress | | global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | -| image.repository | string | `"ghcr.io/lsst-dm/pipetask-plot-navigator"` | plot-navigator image to use | +| image.repository | string | `"ghcr.io/lsst-dm/plot-navigator"` | plot-navigator image to use | | image.tag | string | The appVersion of the chart | Tag of plot-navigator image to use | | ingress.annotations | object | `{}` | Additional annotations to add to the ingress | +| redis.config.secretKey | string | `"password"` | Key inside secret from which to get the Redis password (do not change) | +| redis.config.secretName | string | `"redis-secret"` | Name of secret containing Redis password | +| resources | object | see `values.yaml` | Resource limits and requests for the nodejs pod | diff --git a/applications/plot-navigator/secrets.yaml 
b/applications/plot-navigator/secrets.yaml index 3f830741d4..cb657b2df6 100644 --- a/applications/plot-navigator/secrets.yaml +++ b/applications/plot-navigator/secrets.yaml @@ -2,19 +2,11 @@ description: >- Google Cloud Storage credentials to the Butler data store, formatted using AWS syntax for use with boto. - copy: - application: nublado - key: "aws-credentials.ini" -"butler-gcs-idf-creds.json": - description: >- - Google Cloud Storage credentials to the Butler data store in the native - Google syntax, containing the private asymmetric key. - copy: - application: nublado - key: "butler-gcs-idf-creds.json" "postgres-credentials.txt": description: >- PostgreSQL credentials in its pgpass format for the Butler database. - copy: - application: nublado - key: "postgres-credentials.txt" +redis-password: + description: >- + Password used to authenticate production-tools to the arq redis server. + generate: + type: password diff --git a/applications/plot-navigator/templates/deployment.yaml b/applications/plot-navigator/templates/deployment.yaml index 9f293a465b..4bd69e1103 100644 --- a/applications/plot-navigator/templates/deployment.yaml +++ b/applications/plot-navigator/templates/deployment.yaml @@ -9,45 +9,17 @@ spec: selector: matchLabels: {{- include "plot-navigator.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: nodejs template: metadata: labels: {{- include "plot-navigator.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: nodejs spec: volumes: - # butler-secrets-raw is the secrets we get from vault - - name: "butler-secrets-raw" - secret: - secretName: {{ include "plot-navigator.fullname" . }} - # butler-secrets are the copied and chmoded versions - - name: "butler-secrets" - emptyDir: {} {{- with .Values.config.volumes }} {{- . | toYaml | nindent 8 }} {{- end }} - # Have to fix permissions on the pgpass file. - # init container pattern borrowed from vo-cutouts. 
- initContainers: - - name: fix-secret-permissions - image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} - imagePullPolicy: Always - command: - - "/bin/bash" - - "-c" - - | - cp -RL /home/worker/secrets-raw/* /home/worker/.lsst/ - chown worker:worker /home/worker/.lsst/* - chmod 0400 /home/worker/.lsst/* - securityContext: - runAsNonRoot: false - runAsUser: 0 - runAsGroup: 0 - volumeMounts: - - name: "butler-secrets" - mountPath: "/home/worker/.lsst/" - - name: "butler-secrets-raw" - mountPath: "/home/worker/secrets-raw/" - readOnly: true containers: - name: plot-navigator image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion}} @@ -57,22 +29,29 @@ spec: - name: {{ $key | quote }} value: {{ $value | quote }} {{- end }} + - name: S3_KEY + valueFrom: + secretKeyRef: + name: {{ include "plot-navigator.fullname" . }} + key: S3_KEY + - name: S3_SECRET + valueFrom: + secretKeyRef: + name: {{ include "plot-navigator.fullname" . }} + key: S3_SECRET + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- if .Values.config.envFromSecretPath }} + envFrom: + - secretRef: {{ include "plot-navigator.fullname" . }} + {{- end }} + {{- if .Values.config.volume_mounts }} volumeMounts: - - name: butler-secrets - mountPath: "/home/worker/.lsst/" {{- with .Values.config.volume_mounts }} {{- . 
| toYaml | nindent 10 }} {{- end }} - command: - - /bin/bash - - -c - - panel serve dashboard_gen3.py --port 8080 --prefix /plot-navigator --allow-websocket-origin {{ .Values.global.host }} --static-dirs assets=./assets - resources: - limits: - cpu: "2" - memory: "3Gi" - ephemeral-storage: "100Mi" - requests: - cpu: "1" - memory: "2Gi" - ephemeral-storage: "50Mi" + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 4085 diff --git a/applications/plot-navigator/templates/ingress.yaml b/applications/plot-navigator/templates/ingress.yaml index 081d11c983..a0c1d19102 100644 --- a/applications/plot-navigator/templates/ingress.yaml +++ b/applications/plot-navigator/templates/ingress.yaml @@ -33,3 +33,24 @@ template: name: "plot-navigator" port: number: 80 + - path: "/plot-navigator/metrics" + pathType: "Prefix" + backend: + service: + name: "production-tools" + port: + number: 8080 + - path: "/plot-navigator/bokeh" + pathType: "Prefix" + backend: + service: + name: "production-tools" + port: + number: 8080 + - path: "/plot-navigator/images" + pathType: "Prefix" + backend: + service: + name: "production-tools" + port: + number: 8080 diff --git a/applications/plot-navigator/templates/production-tools-service.yaml b/applications/plot-navigator/templates/production-tools-service.yaml new file mode 100644 index 0000000000..78b9531c53 --- /dev/null +++ b/applications/plot-navigator/templates/production-tools-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: production-tools + labels: + {{- include "plot-navigator.labels" . | nindent 4 }} +spec: + selector: + {{- include "plot-navigator.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: production-tools + ports: + - port: 80 + protocol: TCP + targetPort: 8080 diff --git a/applications/plot-navigator/templates/production-tools-worker.yaml b/applications/plot-navigator/templates/production-tools-worker.yaml new file mode 100644 index 0000000000..6c670bf996 --- /dev/null +++ b/applications/plot-navigator/templates/production-tools-worker.yaml @@ -0,0 +1,97 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: production-tools-worker + labels: + {{- include "plot-navigator.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "plot-navigator.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: production-tools-worker + template: + metadata: + labels: + {{- include "plot-navigator.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: production-tools-worker + spec: + volumes: + - name: "butler-secrets-raw" + secret: + secretName: {{ include "plot-navigator.fullname" . }} + - name: "butler-secrets" + emptyDir: + sizeLimit: 50Mi + {{- with .Values.productionTools.volumes }} + {{- . | toYaml | nindent 8 }} + {{- end }} + containers: + - name: plot-navigator + image: {{ .Values.productionTools.image.repository }}:{{ .Values.productionTools.image.tag | default .Chart.AppVersion}} + imagePullPolicy: Always + env: +{{- range $key, $value := .Values.productionTools.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} +{{- end }} + - name: S3_KEY + valueFrom: + secretKeyRef: + name: {{ include "plot-navigator.fullname" . }} + key: S3_KEY + - name: S3_SECRET + valueFrom: + secretKeyRef: + name: {{ include "plot-navigator.fullname" . }} + key: S3_SECRET + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secret + key: password + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- if .Values.productionTools.envFromSecretPath }} + envFrom: + - secretRef: {{ include "plot-navigator.fullname" . 
}} + {{- end }} + {{- if .Values.productionTools.volume_mounts }} + volumeMounts: + - name: "butler-secrets" + mountPath: "/opt/lsst/butler/secrets" + {{- with .Values.productionTools.volume_mounts }} + {{- . | toYaml | nindent 10 }} + {{- end }} + {{- end }} + command: + - arq + - lsst.production.tools.cache.Worker + initContainers: + # To deal with the Postgres file permission issues, + # copy the secrets from butler-secrets-raw to butler-secrets. + - name: fix-secret-permissions + image: "alpine:latest" + imagePullPolicy: IfNotPresent + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /opt/lsst/butler/secrets/ + chown 1000:4085 /opt/lsst/butler/secrets/* + chmod 0400 /opt/lsst/butler/secrets/* + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + volumeMounts: + - name: "butler-secrets" + mountPath: "/opt/lsst/butler/secrets" + - name: "butler-secrets-raw" + mountPath: "/secrets-raw" + readOnly: true + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 4085 diff --git a/applications/plot-navigator/templates/production-tools.yaml b/applications/plot-navigator/templates/production-tools.yaml new file mode 100644 index 0000000000..f5e8298e9e --- /dev/null +++ b/applications/plot-navigator/templates/production-tools.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: production-tools + labels: + {{- include "plot-navigator.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + {{- include "plot-navigator.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: production-tools + template: + metadata: + labels: + {{- include "plot-navigator.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: production-tools + spec: + volumes: + - name: "butler-secrets-raw" + secret: + secretName: {{ include "plot-navigator.fullname" . }} + - name: "butler-secrets" + emptyDir: + sizeLimit: 50Mi + {{- with .Values.productionTools.volumes }} + {{- . 
| toYaml | nindent 8 }} + {{- end }} + containers: + - name: plot-navigator + image: {{ .Values.productionTools.image.repository }}:{{ .Values.productionTools.image.tag | default .Chart.AppVersion}} + imagePullPolicy: Always + env: +{{- range $key, $value := .Values.productionTools.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} +{{- end }} + - name: S3_KEY + valueFrom: + secretKeyRef: + name: {{ include "plot-navigator.fullname" . }} + key: S3_KEY + - name: S3_SECRET + valueFrom: + secretKeyRef: + name: {{ include "plot-navigator.fullname" . }} + key: S3_SECRET + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secret + key: password + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- if .Values.productionTools.envFromSecretPath }} + envFrom: + - secretRef: {{ include "plot-navigator.fullname" . }} + {{- end }} + {{- if .Values.productionTools.volume_mounts }} + volumeMounts: + - name: "butler-secrets" + mountPath: "/opt/lsst/butler/secrets" + {{- with .Values.productionTools.volume_mounts }} + {{- . | toYaml | nindent 10 }} + {{- end }} + {{- end }} + initContainers: + # To deal with the Postgres file permission issues, + # copy the secrets from butler-secrets-raw to butler-secrets. 
+ - name: fix-secret-permissions + image: "alpine:latest" + imagePullPolicy: IfNotPresent + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /opt/lsst/butler/secrets/ + chown 1000:4085 /opt/lsst/butler/secrets/* + chmod 0400 /opt/lsst/butler/secrets/* + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + volumeMounts: + - name: "butler-secrets" + mountPath: "/opt/lsst/butler/secrets" + - name: "butler-secrets-raw" + mountPath: "/secrets-raw" + readOnly: true + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 4085 diff --git a/applications/plot-navigator/templates/service.yaml b/applications/plot-navigator/templates/service.yaml index ba648bdc01..97f81e5418 100644 --- a/applications/plot-navigator/templates/service.yaml +++ b/applications/plot-navigator/templates/service.yaml @@ -7,7 +7,8 @@ metadata: spec: selector: {{- include "plot-navigator.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: nodejs ports: - port: 80 protocol: TCP - targetPort: 8080 + targetPort: 3000 diff --git a/applications/plot-navigator/templates/vault-secrets.yaml b/applications/plot-navigator/templates/vault-secrets.yaml index 3a22648e85..5c1659da52 100644 --- a/applications/plot-navigator/templates/vault-secrets.yaml +++ b/applications/plot-navigator/templates/vault-secrets.yaml @@ -5,9 +5,18 @@ metadata: labels: {{- include "plot-navigator.labels" . | nindent 4 }} spec: -{{- if .Values.config.separateSecrets }} path: "{{ .Values.global.vaultSecretsPath }}/plot-navigator" -{{- else }} - path: "{{ .Values.global.vaultSecretsPath }}/nublado-lab-secret" -{{- end }} + type: Opaque +--- +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: redis-secret + labels: + {{- include "plot-navigator.labels" . 
| nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/plot-navigator" + templates: + password: >- + {% index .Secrets "redis-password" %} type: Opaque diff --git a/applications/plot-navigator/values-idfint.yaml b/applications/plot-navigator/values-idfint.yaml deleted file mode 100644 index 2a8515e988..0000000000 --- a/applications/plot-navigator/values-idfint.yaml +++ /dev/null @@ -1,7 +0,0 @@ -environment: - BUTLER_URI: "s3://butler-us-central1-panda-dev/dc2/butler-external.yaml" - PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" - AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" - S3_ENDPOINT_URL: "https://storage.googleapis.com" -config: - separateSecrets: true diff --git a/applications/plot-navigator/values-usdfdev.yaml b/applications/plot-navigator/values-usdfdev.yaml index 505748ac2c..5af84b8757 100644 --- a/applications/plot-navigator/values-usdfdev.yaml +++ b/applications/plot-navigator/values-usdfdev.yaml @@ -1,14 +1,15 @@ environment: - DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" - PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" - PGUSER: "rubin" - AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" - S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" - BUTLER_DEFAULT_REPO: "/repo/main" - BUTLER_DEFAULT_COLLECTION: "HSC/runs/RC2/w_2024_02/DM-42454" - LSST_DISABLE_BUCKET_VALIDATION: "1" + BASE_URL: "/plot-navigator" + REPO_URLS: '{"embargo": "http://internal-butler/api/butler/repo/embargo", "/repo/main": "http://internal-butler/api/butler/repo/main", "/repo/dc2": "http://internal-butler/api/butler/repo/dc2"}' + BUCKET_NAME: "rubin-plot-navigator" + BUCKET_URL: "https://s3dfrgw.slac.stanford.edu/" config: + persistentVolumeClaims: + - name: sdf-group-rubin + storageClassName: sdf-group-rubin + - name: sdf-data-rubin + storageClassName: sdf-data-rubin volumes: - name: sdf-group-rubin persistentVolumeClaim: @@ -19,10 +20,45 @@ config: volume_mounts: - name: 
sdf-group-rubin mountPath: /sdf/group/rubin + readOnly: true - name: sdf-data-rubin mountPath: /sdf/data/rubin + readOnly: true + +productionTools: + image: + repository: ghcr.io/lsst-dm/production_tools + tag: 0.1.2 + env: + DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" + PGPASSFILE: "/opt/lsst/butler/secrets/postgres-credentials.txt" + PGUSER: "rubin" + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/butler/secrets/aws-credentials.ini" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + LSST_RESOURCES_S3_PROFILE_embargo: "https://sdfembs3.sdf.slac.stanford.edu" + LSST_DISABLE_BUCKET_VALIDATION: "1" + REDIS_HOST: "plot-navigator-redis" + REDIS_PORT: "6379" + BUTLER_REPO_NAMES: "embargo,/repo/embargo,/repo/main,/repo/dc2" persistentVolumeClaims: - name: sdf-group-rubin storageClassName: sdf-group-rubin - name: sdf-data-rubin storageClassName: sdf-data-rubin + volumes: + - name: sdf-group-rubin + persistentVolumeClaim: + claimName: sdf-group-rubin + - name: sdf-data-rubin + persistentVolumeClaim: + claimName: sdf-data-rubin + volume_mounts: + - name: sdf-group-rubin + mountPath: /sdf/group/rubin + readOnly: true + - name: sdf-data-rubin + mountPath: /sdf/data/rubin + readOnly: true + +image: + tag: v0.2.2 diff --git a/applications/plot-navigator/values-usdfint.yaml b/applications/plot-navigator/values-usdfint.yaml index c8cffe48b9..5af84b8757 100644 --- a/applications/plot-navigator/values-usdfint.yaml +++ b/applications/plot-navigator/values-usdfint.yaml @@ -1,13 +1,15 @@ environment: - DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" - PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" - PGUSER: "rubin" - AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" - S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" - BUTLER_DEFAULT_REPO: "/repo/main" - BUTLER_DEFAULT_COLLECTION: "HSC/runs/RC2/w_2024_02/DM-42454" + BASE_URL: "/plot-navigator" + REPO_URLS: '{"embargo": 
"http://internal-butler/api/butler/repo/embargo", "/repo/main": "http://internal-butler/api/butler/repo/main", "/repo/dc2": "http://internal-butler/api/butler/repo/dc2"}' + BUCKET_NAME: "rubin-plot-navigator" + BUCKET_URL: "https://s3dfrgw.slac.stanford.edu/" config: + persistentVolumeClaims: + - name: sdf-group-rubin + storageClassName: sdf-group-rubin + - name: sdf-data-rubin + storageClassName: sdf-data-rubin volumes: - name: sdf-group-rubin persistentVolumeClaim: @@ -18,11 +20,45 @@ config: volume_mounts: - name: sdf-group-rubin mountPath: /sdf/group/rubin + readOnly: true - name: sdf-data-rubin mountPath: /sdf/data/rubin + readOnly: true + +productionTools: + image: + repository: ghcr.io/lsst-dm/production_tools + tag: 0.1.2 + env: + DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" + PGPASSFILE: "/opt/lsst/butler/secrets/postgres-credentials.txt" + PGUSER: "rubin" + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/butler/secrets/aws-credentials.ini" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + LSST_RESOURCES_S3_PROFILE_embargo: "https://sdfembs3.sdf.slac.stanford.edu" + LSST_DISABLE_BUCKET_VALIDATION: "1" + REDIS_HOST: "plot-navigator-redis" + REDIS_PORT: "6379" + BUTLER_REPO_NAMES: "embargo,/repo/embargo,/repo/main,/repo/dc2" persistentVolumeClaims: - name: sdf-group-rubin storageClassName: sdf-group-rubin - name: sdf-data-rubin storageClassName: sdf-data-rubin - separateSecrets: true + volumes: + - name: sdf-group-rubin + persistentVolumeClaim: + claimName: sdf-group-rubin + - name: sdf-data-rubin + persistentVolumeClaim: + claimName: sdf-data-rubin + volume_mounts: + - name: sdf-group-rubin + mountPath: /sdf/group/rubin + readOnly: true + - name: sdf-data-rubin + mountPath: /sdf/data/rubin + readOnly: true + +image: + tag: v0.2.2 diff --git a/applications/plot-navigator/values-usdfprod.yaml b/applications/plot-navigator/values-usdfprod.yaml index 60ac287325..5af84b8757 100644 --- a/applications/plot-navigator/values-usdfprod.yaml 
+++ b/applications/plot-navigator/values-usdfprod.yaml @@ -1,15 +1,15 @@ environment: - DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" - PGPASSFILE: "/home/worker/.lsst/postgres-credentials.txt" - PGUSER: "rubin" - AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini" - S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" - BUTLER_DEFAULT_REPO: "/repo/main" - BUTLER_DEFAULT_COLLECTION: "HSC/runs/RC2/w_2024_02/DM-42454" - LSST_DISABLE_BUCKET_VALIDATION: "1" - LSST_RESOURCES_S3_PROFILE_embargo: "https://sdfembs3.sdf.slac.stanford.edu" + BASE_URL: "/plot-navigator" + REPO_URLS: '{"embargo": "http://internal-butler/api/butler/repo/embargo", "/repo/main": "http://internal-butler/api/butler/repo/main", "/repo/dc2": "http://internal-butler/api/butler/repo/dc2"}' + BUCKET_NAME: "rubin-plot-navigator" + BUCKET_URL: "https://s3dfrgw.slac.stanford.edu/" config: + persistentVolumeClaims: + - name: sdf-group-rubin + storageClassName: sdf-group-rubin + - name: sdf-data-rubin + storageClassName: sdf-data-rubin volumes: - name: sdf-group-rubin persistentVolumeClaim: @@ -20,10 +20,45 @@ config: volume_mounts: - name: sdf-group-rubin mountPath: /sdf/group/rubin + readOnly: true - name: sdf-data-rubin mountPath: /sdf/data/rubin + readOnly: true + +productionTools: + image: + repository: ghcr.io/lsst-dm/production_tools + tag: 0.1.2 + env: + DAF_BUTLER_REPOSITORY_INDEX: "/sdf/group/rubin/shared/data-repos.yaml" + PGPASSFILE: "/opt/lsst/butler/secrets/postgres-credentials.txt" + PGUSER: "rubin" + AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/butler/secrets/aws-credentials.ini" + S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu" + LSST_RESOURCES_S3_PROFILE_embargo: "https://sdfembs3.sdf.slac.stanford.edu" + LSST_DISABLE_BUCKET_VALIDATION: "1" + REDIS_HOST: "plot-navigator-redis" + REDIS_PORT: "6379" + BUTLER_REPO_NAMES: "embargo,/repo/embargo,/repo/main,/repo/dc2" persistentVolumeClaims: - name: sdf-group-rubin storageClassName: sdf-group-rubin - 
name: sdf-data-rubin storageClassName: sdf-data-rubin + volumes: + - name: sdf-group-rubin + persistentVolumeClaim: + claimName: sdf-group-rubin + - name: sdf-data-rubin + persistentVolumeClaim: + claimName: sdf-data-rubin + volume_mounts: + - name: sdf-group-rubin + mountPath: /sdf/group/rubin + readOnly: true + - name: sdf-data-rubin + mountPath: /sdf/data/rubin + readOnly: true + +image: + tag: v0.2.2 diff --git a/applications/plot-navigator/values.yaml b/applications/plot-navigator/values.yaml index 3a808b27c6..9a69783083 100644 --- a/applications/plot-navigator/values.yaml +++ b/applications/plot-navigator/values.yaml @@ -1,12 +1,12 @@ image: # -- plot-navigator image to use - repository: ghcr.io/lsst-dm/pipetask-plot-navigator + repository: ghcr.io/lsst-dm/plot-navigator # -- Tag of plot-navigator image to use # @default -- The appVersion of the chart tag: "" -# -- Environment variables (e.g. butler configuration/auth parms) for panel +# -- Environment variables (e.g. butler configuration/auth parms) for the nextjs server environment: {} ingress: @@ -40,3 +40,21 @@ global: # -- Base path for Vault secrets # @default -- Set by Argo CD vaultSecretsPath: "" + +# -- Resource limits and requests for the nodejs pod +# @default -- see `values.yaml` +resources: + limits: + cpu: "1" + memory: "512Mi" + requests: + cpu: "50m" + memory: "256Mi" + +redis: + config: + # -- Name of secret containing Redis password + secretName: "redis-secret" + # -- Key inside secret from which to get the Redis password (do not + # change) + secretKey: "password" diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml index 2c73bbe380..a21819c4ab 100644 --- a/applications/portal/Chart.yaml +++ b/applications/portal/Chart.yaml @@ -5,11 +5,11 @@ description: Rubin Science Platform Portal Aspect sources: - https://github.com/lsst/suit - https://github.com/Caltech-IPAC/firefly -appVersion: "portal-2024.2.3" +appVersion: "portal-2024.3.1-final" dependencies: - name: redis - 
version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/prompt-proto-service-hsc-gpu/README.md b/applications/prompt-proto-service-hsc-gpu/README.md index 2415159676..ac15e45b47 100644 --- a/applications/prompt-proto-service-hsc-gpu/README.md +++ b/applications/prompt-proto-service-hsc-gpu/README.md @@ -19,6 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | +| prompt-proto-service.cache.maxFilters | int | `20` | The maximum number of datasets of a given type the service might load if the filter is unknown. Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | | prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | @@ -32,6 +33,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.instrument.name | string | `"HSC"` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | +| prompt-proto-service.instrument.preloadPadding | int | `30` | Number of arcseconds to pad the spatial region in preloading. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). 
| diff --git a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml index 7e9e4e559b..33666a8bf6 100644 --- a/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values-usdfdev-prompt-processing.yaml @@ -14,14 +14,14 @@ prompt-proto-service: pipelines: main: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/Preprocessing.yaml] - calibRepo: s3://rubin-pp-dev-users/central_repo/ + calibRepo: s3://rubin-pp-dev-users/central_repo_2 s3: imageBucket: rubin-pp-dev endpointUrl: https://s3dfrgw.slac.stanford.edu imageNotifications: - kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 + kafkaClusterAddress: prompt-processing2-kafka-bootstrap.kafka:9092 topic: prompt-processing-dev apdb: diff --git a/applications/prompt-proto-service-hsc-gpu/values.yaml b/applications/prompt-proto-service-hsc-gpu/values.yaml index 7efc93a3bb..9ccc7af9dd 100644 --- a/applications/prompt-proto-service-hsc-gpu/values.yaml +++ b/applications/prompt-proto-service-hsc-gpu/values.yaml @@ -47,6 +47,8 @@ prompt-proto-service: preprocessing: "" # -- Skymap to use with the instrument skymap: hsc_rings_v1 + # -- Number of arcseconds to pad the spatial region in preloading. + preloadPadding: 30 # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set @@ -60,6 +62,9 @@ prompt-proto-service: refcatsPerImage: 4 # -- A factor by which to multiply `baseSize` for templates and other patch-based datasets. patchesPerImage: 4 + # -- The maximum number of datasets of a given type the service might load if the filter is unknown. 
+ # Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. + maxFilters: 20 s3: # -- Bucket containing the incoming raw images diff --git a/applications/prompt-proto-service-hsc/README.md b/applications/prompt-proto-service-hsc/README.md index fbb60fceae..ac633519c8 100644 --- a/applications/prompt-proto-service-hsc/README.md +++ b/applications/prompt-proto-service-hsc/README.md @@ -19,6 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | +| prompt-proto-service.cache.maxFilters | int | `20` | The maximum number of datasets of a given type the service might load if the filter is unknown. Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | | prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | @@ -32,6 +33,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.instrument.name | string | `"HSC"` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | +| prompt-proto-service.instrument.preloadPadding | int | `30` | Number of arcseconds to pad the spatial region in preloading. | | prompt-proto-service.instrument.skymap | string | `"hsc_rings_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). 
| diff --git a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml index aba3ca2b2c..6ef44857a0 100644 --- a/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-hsc/values-usdfdev-prompt-processing.yaml @@ -15,14 +15,14 @@ prompt-proto-service: pipelines: main: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/ApPipe.yaml] preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/HSC/Preprocessing.yaml] - calibRepo: s3://rubin-pp-dev-users/central_repo/ + calibRepo: s3://rubin-pp-dev-users/central_repo_2 s3: imageBucket: rubin-pp-dev endpointUrl: https://s3dfrgw.slac.stanford.edu imageNotifications: - kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 + kafkaClusterAddress: prompt-processing2-kafka-bootstrap.kafka:9092 topic: prompt-processing-dev apdb: diff --git a/applications/prompt-proto-service-hsc/values.yaml b/applications/prompt-proto-service-hsc/values.yaml index c3921fcb42..46ac0611d7 100644 --- a/applications/prompt-proto-service-hsc/values.yaml +++ b/applications/prompt-proto-service-hsc/values.yaml @@ -47,6 +47,8 @@ prompt-proto-service: preprocessing: "" # -- Skymap to use with the instrument skymap: hsc_rings_v1 + # -- Number of arcseconds to pad the spatial region in preloading. + preloadPadding: 30 # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set @@ -60,6 +62,9 @@ prompt-proto-service: refcatsPerImage: 4 # -- A factor by which to multiply `baseSize` for templates and other patch-based datasets. patchesPerImage: 4 + # -- The maximum number of datasets of a given type the service might load if the filter is unknown. 
+ # Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. + maxFilters: 20 s3: # -- Bucket containing the incoming raw images diff --git a/applications/prompt-proto-service-latiss/README.md b/applications/prompt-proto-service-latiss/README.md index 941c350a20..b5028a2171 100644 --- a/applications/prompt-proto-service-latiss/README.md +++ b/applications/prompt-proto-service-latiss/README.md @@ -19,6 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | +| prompt-proto-service.cache.maxFilters | int | `10` | The maximum number of datasets of a given type the service might load if the filter is unknown. Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. | | prompt-proto-service.cache.patchesPerImage | int | `6` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | | prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | @@ -32,6 +33,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.instrument.name | string | `"LATISS"` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | +| prompt-proto-service.instrument.preloadPadding | int | `30` | Number of arcseconds to pad the spatial region in preloading. | | prompt-proto-service.instrument.skymap | string | `"latiss_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). 
| diff --git a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml index 9e0c60bf5d..4bd5b8032d 100644 --- a/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfdev-prompt-processing.yaml @@ -15,14 +15,14 @@ prompt-proto-service: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/ApPipe.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Isr.yaml] preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LATISS/Preprocessing.yaml] - calibRepo: s3://rubin-pp-dev-users/central_repo/ + calibRepo: s3://rubin-pp-dev-users/central_repo_2 s3: imageBucket: rubin-pp-dev endpointUrl: https://s3dfrgw.slac.stanford.edu imageNotifications: - kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 + kafkaClusterAddress: prompt-processing2-kafka-bootstrap.kafka:9092 topic: prompt-processing-dev apdb: diff --git a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml index ec4b9cc3f7..8325a34a2a 100644 --- a/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml +++ b/applications/prompt-proto-service-latiss/values-usdfprod-prompt-processing.yaml @@ -14,7 +14,7 @@ prompt-proto-service: image: pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. 
- tag: 4.5.1 + tag: 4.8.2 instrument: pipelines: diff --git a/applications/prompt-proto-service-latiss/values.yaml b/applications/prompt-proto-service-latiss/values.yaml index 38fddacd35..9229ff3319 100644 --- a/applications/prompt-proto-service-latiss/values.yaml +++ b/applications/prompt-proto-service-latiss/values.yaml @@ -48,6 +48,8 @@ prompt-proto-service: preprocessing: "" # -- Skymap to use with the instrument skymap: latiss_v1 + # -- Number of arcseconds to pad the spatial region in preloading. + preloadPadding: 30 # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set @@ -61,6 +63,9 @@ prompt-proto-service: refcatsPerImage: 4 # -- A factor by which to multiply `baseSize` for templates and other patch-based datasets. patchesPerImage: 6 + # -- The maximum number of datasets of a given type the service might load if the filter is unknown. + # Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. + maxFilters: 10 s3: # -- Bucket containing the incoming raw images diff --git a/applications/prompt-proto-service-lsstcam/README.md b/applications/prompt-proto-service-lsstcam/README.md index b2d000f026..e1071dcac0 100644 --- a/applications/prompt-proto-service-lsstcam/README.md +++ b/applications/prompt-proto-service-lsstcam/README.md @@ -19,6 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. 
The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | +| prompt-proto-service.cache.maxFilters | int | `20` | The maximum number of datasets of a given type the service might load if the filter is unknown. Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. | | prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | | prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | @@ -32,6 +33,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | +| prompt-proto-service.instrument.preloadPadding | int | `30` | Number of arcseconds to pad the spatial region in preloading. | | prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | | prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). 
| diff --git a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml index 818307f6ca..eb1c6de38f 100644 --- a/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcam/values-usdfdev-prompt-processing.yaml @@ -19,7 +19,7 @@ prompt-proto-service: disableBucketValidation: 1 imageNotifications: - kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 + kafkaClusterAddress: prompt-processing2-kafka-bootstrap.kafka:9092 topic: rubin-prompt-processing alerts: diff --git a/applications/prompt-proto-service-lsstcam/values.yaml b/applications/prompt-proto-service-lsstcam/values.yaml index a590661413..4ff089ed5e 100644 --- a/applications/prompt-proto-service-lsstcam/values.yaml +++ b/applications/prompt-proto-service-lsstcam/values.yaml @@ -47,6 +47,8 @@ prompt-proto-service: preprocessing: "" # -- Skymap to use with the instrument skymap: "" + # -- Number of arcseconds to pad the spatial region in preloading. + preloadPadding: 30 # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set @@ -60,6 +62,9 @@ prompt-proto-service: refcatsPerImage: 4 # -- A factor by which to multiply `baseSize` for templates and other patch-based datasets. patchesPerImage: 4 + # -- The maximum number of datasets of a given type the service might load if the filter is unknown. + # Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. 
+ maxFilters: 20 s3: # -- Bucket containing the incoming raw images diff --git a/applications/prompt-proto-service-lsstcomcam/README.md b/applications/prompt-proto-service-lsstcomcam/README.md index 9e9b55654b..f4334af2ea 100644 --- a/applications/prompt-proto-service-lsstcomcam/README.md +++ b/applications/prompt-proto-service-lsstcomcam/README.md @@ -15,12 +15,13 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.additionalVolumeMounts | list | `[]` | Kubernetes YAML configs for extra container volume(s). Any volumes required by other config options are automatically handled by the Helm chart. | | prompt-proto-service.affinity | object | `{}` | Affinity rules for the prompt processing pods | | prompt-proto-service.alerts.server | string | `"usdf-alert-stream-dev-broker-0.lsst.cloud:9094"` | Server address for the alert stream | -| prompt-proto-service.alerts.topic | string | None, must be set | Topic name where alerts will be sent | +| prompt-proto-service.alerts.topic | string | `""` | Topic name where alerts will be sent | | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | -| prompt-proto-service.cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | -| prompt-proto-service.cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. 
| +| prompt-proto-service.cache.maxFilters | int | `20` | The maximum number of datasets of a given type the service might load if the filter is unknown. Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. | +| prompt-proto-service.cache.patchesPerImage | int | `16` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | +| prompt-proto-service.cache.refcatsPerImage | int | `6` | A factor by which to multiply `baseSize` for refcat datasets. | | prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | | prompt-proto-service.image.pullPolicy | string | `IfNotPresent` in prod, `Always` in dev | Pull policy for the PP image | | prompt-proto-service.image.repository | string | `"ghcr.io/lsst-dm/prompt-service"` | Image to use in the PP deployment | @@ -29,24 +30,23 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.imageNotifications.kafkaClusterAddress | string | None, must be set | Hostname and port of the Kafka provider | | prompt-proto-service.imageNotifications.topic | string | None, must be set | Topic where raw image arrival notifications appear | | prompt-proto-service.instrument.calibRepo | string | None, must be set | URI to the shared repo used for calibrations, templates, and pipeline outputs. If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. | -| prompt-proto-service.instrument.name | string | `""` | The "short" name of the instrument | +| prompt-proto-service.instrument.name | string | `"LSSTComCam"` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. 
Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | -| prompt-proto-service.instrument.skymap | string | `""` | Skymap to use with the instrument | +| prompt-proto-service.instrument.preloadPadding | int | `30` | Number of arcseconds to pad the spatial region in preloading. | +| prompt-proto-service.instrument.skymap | string | `"lsst_cells_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.ephemeralStorageLimit | string | `"5Gi"` | The maximum storage space allowed for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | | prompt-proto-service.knative.ephemeralStorageRequest | string | `"5Gi"` | The storage space reserved for each container (mostly local Butler). This allocation is for the full pod (see `containerConcurrency`) | -| prompt-proto-service.knative.extraTimeout | int | `10` | To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. This parameter adds extra time to that minimum (seconds). | | prompt-proto-service.knative.gpu | bool | `false` | GPUs enabled. | | prompt-proto-service.knative.gpuRequest | int | `0` | The number of GPUs to request for the full pod (see `containerConcurrency`). | -| prompt-proto-service.knative.idleTimeout | int | `0` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. 
| +| prompt-proto-service.knative.idleTimeout | int | `900` | Maximum time that a container can send nothing to Knative (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.knative.memoryLimit | string | `"8Gi"` | The maximum memory limit for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.memoryRequest | string | `"2Gi"` | The minimum memory to request for the full pod (see `containerConcurrency`). | -| prompt-proto-service.knative.responseStartTimeout | int | `0` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | +| prompt-proto-service.knative.responseStartTimeout | int | `900` | Maximum time that a container can send nothing to Knative after initial submission (seconds). This is only useful if the container runs async workers. If 0, idle timeout is ignored. | | prompt-proto-service.logLevel | string | log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. | Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). | | prompt-proto-service.podAnnotations | object | See the `values.yaml` file. | Annotations for the prompt-proto-service pod | -| prompt-proto-service.raw_microservice | string | `""` | The URI to a microservice that maps image metadata to a file location. If empty, Prompt Processing does not use a microservice. | | prompt-proto-service.registry.centralRepoFile | bool | `false` | If set, this application's Vault secret must contain a `central_repo_file` key containing a remote Butler configuration, and `instrument.calibRepo` is the local path where this file is mounted. 
| | prompt-proto-service.s3.auth_env | bool | `true` | If set, get S3 credentials from this application's Vault secret. | | prompt-proto-service.s3.disableBucketValidation | int | `0` | Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. | diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml index 45667dadc3..e8fbd39cd6 100644 --- a/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfdev-prompt-processing.yaml @@ -5,24 +5,37 @@ prompt-proto-service: revision: "1" image: - repository: ghcr.io/lsst-dm/prompt-service pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. tag: latest instrument: - calibRepo: s3://rubin-summit-users/ + pipelines: + main: >- + (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/ApPipe.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/Isr.yaml] + preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/Preprocessing.yaml] + calibRepo: s3://rubin-pp-dev-users/central_repo_2 s3: - imageBucket: rubin:rubin-pp + imageBucket: rubin-pp-dev endpointUrl: https://s3dfrgw.slac.stanford.edu - disableBucketValidation: 1 imageNotifications: - kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 - topic: rubin-prompt-processing + kafkaClusterAddress: prompt-processing2-kafka-bootstrap.kafka:9092 + topic: prompt-processing-dev + + apdb: + config: s3://rubin-pp-dev-users/apdb_config/cassandra/pp_apdb_lsstcomcam-dev.py alerts: topic: "alert-stream-test" + sasquatch: + endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy + auth_env: false + + # A cache efficiency workaround breaks when mixing observing dates; see DM-43205, DM-43913. 
+ cacheCalibs: false + fullnameOverride: "prompt-proto-service-lsstcomcam" diff --git a/applications/prompt-proto-service-lsstcomcam/values-usdfprod-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcam/values-usdfprod-prompt-processing.yaml new file mode 100644 index 0000000000..6e3885b223 --- /dev/null +++ b/applications/prompt-proto-service-lsstcomcam/values-usdfprod-prompt-processing.yaml @@ -0,0 +1,86 @@ +prompt-proto-service: + + podAnnotations: + # HACK: disable autoscaling as workaround for DM-41829 + autoscaling.knative.dev/min-scale: "200" + # see values.yaml for calculation of max-scale + autoscaling.knative.dev/max-scale: "200" + # Update this field if using latest or static image tag in dev + revision: "1" + + # HACK: disable autoscaling as workaround for DM-41829 + worker: + restart: 15 + + image: + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: 4.8.2 + + instrument: + pipelines: + # BLOCK-T60 is optics alignment + # BLOCK-T75 is giant donuts + # BLOCK-T88 is optics alignment + # BLOCK-T215 is evening twilight flats + # BLOCK-T216 is morning twilight flats + # BLOCK-T219 is pretty picture + # BLOCK-T246 is instrument checkout + # BLOCK-T249 is AOS alignment + # BLOCK-T250 is TMA daytime checkout + main: >- + (survey="BLOCK-320")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/ApPipe.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/SingleFrame.yaml, + ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/Isr.yaml] + (survey="BLOCK-T60")=[] + (survey="BLOCK-T75")=[] + (survey="BLOCK-T88")=[] + (survey="BLOCK-T215")=[] + (survey="BLOCK-T216")=[] + (survey="BLOCK-T219")=[] + (survey="BLOCK-T246")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/Isr-cal.yaml] + (survey="BLOCK-T249")=[] + (survey="BLOCK-T250")=[] + (survey="")=[] + preprocessing: >- + (survey="BLOCK-320")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCam/Preprocessing.yaml] + (survey="BLOCK-T60")=[] + (survey="BLOCK-T75")=[] + 
(survey="BLOCK-T88")=[] + (survey="BLOCK-T215")=[] + (survey="BLOCK-T216")=[] + (survey="BLOCK-T219")=[] + (survey="BLOCK-T246")=[] + (survey="BLOCK-T249")=[] + (survey="BLOCK-T250")=[] + (survey="")=[] + calibRepo: s3://rubin-summit-users + + s3: + imageBucket: rubin-summit + endpointUrl: https://sdfembs3.sdf.slac.stanford.edu + + raw_microservice: http://172.24.5.158:8080/presence + + imageNotifications: + kafkaClusterAddress: prompt-processing-2-kafka-bootstrap.kafka:9092 + topic: rubin-summit-notification + # Scheduler adds an extra 60-80-second delay for first visit in a sequence, + # and files can take up to 20 seconds to arrive. Scheduler delay associated + # with CWFS engineering data, should not apply to other cameras. + imageTimeout: 110 + + apdb: + config: s3://rubin-summit-users/apdb_config/cassandra/pp_apdb_lsstcomcam.py + + alerts: + topic: "lsstcomcam-alerts" + + sasquatch: + endpointUrl: https://usdf-rsp-dev.slac.stanford.edu/sasquatch-rest-proxy + namespace: lsst.prompt.prod + auth_env: false + + logLevel: timer.lsst.activator=DEBUG lsst.diaPipe=VERBOSE lsst.rbClassify=VERBOSE lsst.resources=DEBUG + + fullnameOverride: "prompt-proto-service-lsstcomcam" diff --git a/applications/prompt-proto-service-lsstcomcam/values.yaml b/applications/prompt-proto-service-lsstcomcam/values.yaml index 7682298e07..85b421ac96 100644 --- a/applications/prompt-proto-service-lsstcomcam/values.yaml +++ b/applications/prompt-proto-service-lsstcomcam/values.yaml @@ -4,7 +4,10 @@ prompt-proto-service: # @default -- See the `values.yaml` file. 
podAnnotations: autoscaling.knative.dev/min-scale: "3" - autoscaling.knative.dev/max-scale: "30" + # Expect to need roughly n_detector × request_latency / survey_cadence pods + # For a 30 s ComCam survey with 500 s latency, this is 150 + # Add some buffer for fast twilight survey + autoscaling.knative.dev/max-scale: "200" autoscaling.knative.dev/target-utilization-percentage: "60" autoscaling.knative.dev/target-burst-capacity: "-1" queue.sidecar.serving.knative.dev/cpu-resource-request: "1" @@ -36,7 +39,7 @@ prompt-proto-service: instrument: # -- The "short" name of the instrument - name: "" + name: LSSTComCam pipelines: # -- Machine-readable string describing which pipeline(s) should be run for which visits. # Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. @@ -46,7 +49,9 @@ prompt-proto-service: # @default -- None, must be set preprocessing: "" # -- Skymap to use with the instrument - skymap: "" + skymap: "lsst_cells_v1" + # -- Number of arcseconds to pad the spatial region in preloading. + preloadPadding: 30 # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set @@ -57,9 +62,12 @@ prompt-proto-service: # The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. baseSize: 3 # -- A factor by which to multiply `baseSize` for refcat datasets. - refcatsPerImage: 4 + refcatsPerImage: 6 # -- A factor by which to multiply `baseSize` for templates and other patch-based datasets. - patchesPerImage: 4 + patchesPerImage: 16 + # -- The maximum number of datasets of a given type the service might load if the filter is unknown. + # Should be greater than or equal to the number of filters that have e.g. 
flats or transmission curves. + maxFilters: 20 s3: # -- Bucket containing the incoming raw images @@ -73,10 +81,6 @@ prompt-proto-service: # -- Set this to disable validation of S3 bucket names, allowing Ceph multi-tenant colon-separated names to be used. disableBucketValidation: 0 - # -- The URI to a microservice that maps image metadata to a file location. - # If empty, Prompt Processing does not use a microservice. - raw_microservice: "" - imageNotifications: # -- Hostname and port of the Kafka provider # @default -- None, must be set @@ -99,7 +103,6 @@ prompt-proto-service: # -- Server address for the alert stream server: "usdf-alert-stream-dev-broker-0.lsst.cloud:9094" # -- Topic name where alerts will be sent - # @default -- None, must be set topic: "" registry: @@ -108,7 +111,7 @@ prompt-proto-service: # -- Requested logging levels in the format of [Middleware's \-\-log-level argument](https://pipelines.lsst.io/v/daily/modules/lsst.daf.butler/scripts/butler.html#cmdoption-butler-log-level). # @default -- log prompt_processing at DEBUG, other LSST code at INFO, and third-party code at WARNING. - logLevel: "" + logLevel: "timer.lsst.activator=DEBUG" sasquatch: # -- Url of the Sasquatch proxy server to upload metrics to. Leave blank to disable upload. @@ -140,17 +143,14 @@ prompt-proto-service: memoryRequest: "2Gi" # -- The maximum memory limit for the full pod (see `containerConcurrency`). memoryLimit: "8Gi" - # -- To acommodate scheduling problems, Knative waits for a request for twice `worker.timeout`. - # This parameter adds extra time to that minimum (seconds). - extraTimeout: 10 # -- Maximum time that a container can send nothing to Knative (seconds). # This is only useful if the container runs async workers. # If 0, idle timeout is ignored. - idleTimeout: 0 + idleTimeout: 900 # -- Maximum time that a container can send nothing to Knative after initial submission (seconds). # This is only useful if the container runs async workers. 
# If 0, idle timeout is ignored. - responseStartTimeout: 0 + responseStartTimeout: 900 # -- The number of Knative requests that can be handled simultaneously by one container containerConcurrency: 1 diff --git a/applications/prompt-proto-service-lsstcomcamsim/README.md b/applications/prompt-proto-service-lsstcomcamsim/README.md index 6854bea8e2..f2874d9eae 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/README.md +++ b/applications/prompt-proto-service-lsstcomcamsim/README.md @@ -19,6 +19,7 @@ Prompt Proto Service is an event driven service for processing camera images. Th | prompt-proto-service.alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | prompt-proto-service.apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. | | prompt-proto-service.cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | +| prompt-proto-service.cache.maxFilters | int | `3` | The maximum number of datasets of a given type the service might load if the filter is unknown. Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. | | prompt-proto-service.cache.patchesPerImage | int | `16` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | prompt-proto-service.cache.refcatsPerImage | int | `6` | A factor by which to multiply `baseSize` for refcat datasets. | | prompt-proto-service.containerConcurrency | int | `1` | The number of Knative requests that can be handled simultaneously by one container | @@ -32,6 +33,7 @@ Prompt Proto Service is an event driven service for processing camera images. 
Th | prompt-proto-service.instrument.name | string | `"LSSTComCamSim"` | The "short" name of the instrument | | prompt-proto-service.instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | prompt-proto-service.instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | +| prompt-proto-service.instrument.preloadPadding | int | `30` | Number of arcseconds to pad the spatial region in preloading. | | prompt-proto-service.instrument.skymap | string | `"ops_rehersal_prep_2k_v1"` | Skymap to use with the instrument | | prompt-proto-service.knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | | prompt-proto-service.knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). 
| diff --git a/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml b/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml index 86f51c8ce7..17d39cf975 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values-usdfdev-prompt-processing.yaml @@ -16,14 +16,14 @@ prompt-proto-service: ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/SingleFrame.yaml, ${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Isr.yaml] preprocessing: (survey="SURVEY")=[${PROMPT_PROCESSING_DIR}/pipelines/LSSTComCamSim/Preprocessing.yaml] - calibRepo: s3://rubin-pp-dev-users/central_repo/ + calibRepo: s3://rubin-pp-dev-users/central_repo_2 s3: imageBucket: rubin-pp-dev endpointUrl: https://s3dfrgw.slac.stanford.edu imageNotifications: - kafkaClusterAddress: prompt-processing-kafka-bootstrap.kafka:9092 + kafkaClusterAddress: prompt-processing2-kafka-bootstrap.kafka:9092 topic: prompt-processing-dev apdb: diff --git a/applications/prompt-proto-service-lsstcomcamsim/values.yaml b/applications/prompt-proto-service-lsstcomcamsim/values.yaml index 99f8eea75b..1f977a62bb 100644 --- a/applications/prompt-proto-service-lsstcomcamsim/values.yaml +++ b/applications/prompt-proto-service-lsstcomcamsim/values.yaml @@ -4,7 +4,9 @@ prompt-proto-service: # @default -- See the `values.yaml` file. 
podAnnotations: autoscaling.knative.dev/min-scale: "3" - autoscaling.knative.dev/max-scale: "100" + # Expect to need roughly n_detector × request_latency / survey_cadence pods + # For a 30 s ComCam survey with 500 s latency, this is 150 + autoscaling.knative.dev/max-scale: "150" autoscaling.knative.dev/target-utilization-percentage: "60" autoscaling.knative.dev/target-burst-capacity: "-1" queue.sidecar.serving.knative.dev/cpu-resource-request: "1" @@ -48,6 +50,8 @@ prompt-proto-service: preprocessing: "" # -- Skymap to use with the instrument skymap: ops_rehersal_prep_2k_v1 + # -- Number of arcseconds to pad the spatial region in preloading. + preloadPadding: 30 # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set @@ -61,6 +65,9 @@ prompt-proto-service: refcatsPerImage: 6 # -- A factor by which to multiply `baseSize` for templates and other patch-based datasets. patchesPerImage: 16 + # -- The maximum number of datasets of a given type the service might load if the filter is unknown. + # Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. 
+ maxFilters: 3 s3: # -- Bucket containing the incoming raw images diff --git a/applications/rapid-analysis/templates/gather-rollup-set.yaml b/applications/rapid-analysis/templates/comcam-gather-rollup-set.yaml similarity index 93% rename from applications/rapid-analysis/templates/gather-rollup-set.yaml rename to applications/rapid-analysis/templates/comcam-gather-rollup-set.yaml index ac8958cddf..b2b30498c5 100644 --- a/applications/rapid-analysis/templates/gather-rollup-set.yaml +++ b/applications/rapid-analysis/templates/comcam-gather-rollup-set.yaml @@ -1,5 +1,5 @@ -{{ $_ := set $.Values "script" $.Values.gatherRollupSet }} -{{ $script := $.Values.gatherRollupSet }} +{{ $_ := set $.Values "script" $.Values.comcamGatherRollupSet }} +{{ $script := $.Values.comcamGatherRollupSet }} {{- if $script.name }} --- apiVersion: apps/v1 @@ -20,7 +20,7 @@ spec: metadata: {{- with $.Values.podAnnotations }} annotations: - {{- toYaml $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} @@ -97,13 +97,16 @@ spec: readOnly: true {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} readOnly: {{ $values.readOnly }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -111,8 +114,10 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -120,6 +125,7 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: 
{{ $.Release.Name }}-butler-secret mountPath: {{ $.Values.butlerSecret.containerPath }} @@ -169,6 +175,7 @@ spec: secretName: rubintv-secrets {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} nfs: path: {{ $values.serverPath }} @@ -176,20 +183,25 @@ spec: server: {{ $values.server }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: {{ $.Release.Name }}-butler-secret emptyDir: {} diff --git a/applications/rapid-analysis/templates/gather2a-set.yaml b/applications/rapid-analysis/templates/comcam-gather2a-set.yaml similarity index 93% rename from applications/rapid-analysis/templates/gather2a-set.yaml rename to applications/rapid-analysis/templates/comcam-gather2a-set.yaml index 2c1fdbee4f..46078cb74d 100644 --- a/applications/rapid-analysis/templates/gather2a-set.yaml +++ b/applications/rapid-analysis/templates/comcam-gather2a-set.yaml @@ -1,5 +1,5 @@ -{{ $_ := set $.Values "script" $.Values.gather2aSet }} -{{ $script := $.Values.gather2aSet }} +{{ $_ := set $.Values "script" $.Values.comcamGather2aSet }} +{{ $script := $.Values.comcamGather2aSet }} {{- if $script.name }} --- apiVersion: apps/v1 @@ -20,7 +20,7 @@ spec: metadata: {{- with $.Values.podAnnotations }} annotations: - {{- toYaml $ | nindent 8 }} + {{- toYaml . 
| nindent 8 }} {{- end }} labels: {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} @@ -97,13 +97,16 @@ spec: readOnly: true {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} readOnly: {{ $values.readOnly }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -111,8 +114,10 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -120,6 +125,7 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: {{ $.Release.Name }}-butler-secret mountPath: {{ $.Values.butlerSecret.containerPath }} @@ -169,6 +175,7 @@ spec: secretName: rubintv-secrets {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} nfs: path: {{ $values.serverPath }} @@ -176,20 +183,25 @@ spec: server: {{ $values.server }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: {{ 
$.Release.Name }}-butler-secret emptyDir: {} diff --git a/applications/rapid-analysis/templates/worker-set.yaml b/applications/rapid-analysis/templates/comcam-worker-set.yaml similarity index 93% rename from applications/rapid-analysis/templates/worker-set.yaml rename to applications/rapid-analysis/templates/comcam-worker-set.yaml index ad87fbc2b8..496b40ff33 100644 --- a/applications/rapid-analysis/templates/worker-set.yaml +++ b/applications/rapid-analysis/templates/comcam-worker-set.yaml @@ -1,5 +1,5 @@ -{{ $_ := set $.Values "script" $.Values.workerSet }} -{{ $script := $.Values.workerSet }} +{{ $_ := set $.Values "script" $.Values.comcamWorkerSet }} +{{ $script := $.Values.comcamWorkerSet }} {{- if $script.name }} --- apiVersion: apps/v1 @@ -20,7 +20,7 @@ spec: metadata: {{- with $.Values.podAnnotations }} annotations: - {{- toYaml $ | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} @@ -97,13 +97,16 @@ spec: readOnly: true {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} readOnly: {{ $values.readOnly }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -111,8 +114,10 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -120,6 +125,7 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: {{ $.Release.Name }}-butler-secret mountPath: {{ $.Values.butlerSecret.containerPath }} 
@@ -169,6 +175,7 @@ spec: secretName: rubintv-secrets {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} nfs: path: {{ $values.serverPath }} @@ -176,20 +183,25 @@ spec: server: {{ $values.server }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: {{ $.Release.Name }}-butler-secret emptyDir: {} diff --git a/applications/rapid-analysis/templates/comcamsim-gather-rollup-set.yaml b/applications/rapid-analysis/templates/comcamsim-gather-rollup-set.yaml new file mode 100644 index 0000000000..47fa606e84 --- /dev/null +++ b/applications/rapid-analysis/templates/comcamsim-gather-rollup-set.yaml @@ -0,0 +1,243 @@ +{{ $_ := set $.Values "script" $.Values.comcamsimGatherRollupSet }} +{{ $script := $.Values.comcamsimGatherRollupSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gatherrollupset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ 
$env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini 
/secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- 
$affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/comcamsim-gather2a-set.yaml b/applications/rapid-analysis/templates/comcamsim-gather2a-set.yaml new file mode 100644 index 0000000000..7588341778 --- /dev/null +++ b/applications/rapid-analysis/templates/comcamsim-gather2a-set.yaml @@ -0,0 +1,243 @@ +{{ $_ := set $.Values "script" $.Values.comcamsimGather2aSet }} +{{ $script := $.Values.comcamsimGather2aSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gather2aset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ 
$env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini 
/secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- 
$affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/comcamsim-worker-set.yaml b/applications/rapid-analysis/templates/comcamsim-worker-set.yaml new file mode 100644 index 0000000000..da9bf53328 --- /dev/null +++ b/applications/rapid-analysis/templates/comcamsim-worker-set.yaml @@ -0,0 +1,243 @@ +{{ $_ := set $.Values "script" $.Values.comcamsimWorkerSet }} +{{ $script := $.Values.comcamsimWorkerSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-workerset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ 
$env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini 
/secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- 
$affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/deployment.yaml b/applications/rapid-analysis/templates/deployment.yaml index d6a44033ca..65ae5966b5 100644 --- a/applications/rapid-analysis/templates/deployment.yaml +++ b/applications/rapid-analysis/templates/deployment.yaml @@ -15,9 +15,15 @@ spec: {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} template: metadata: - {{- with $.Values.podAnnotations }} + {{- if or $.Values.podAnnotations $script.podAnnotations }} + {{- $podAnnotations := "" }} + {{- if $script.podAnnotations }} + {{- $podAnnotations = $script.podAnnotations }} + {{- else }} + {{- $podAnnotations = $.Values.podAnnotations }} + {{- end }} annotations: - {{- toYaml $ | nindent 8 }} + {{- toYaml $podAnnotations | nindent 8 }} {{- end }} labels: {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} @@ -90,13 +96,16 @@ spec: readOnly: true {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} readOnly: {{ $values.readOnly }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -104,8 +113,10 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - 
name: {{ $values.name }} mountPath: {{ $values.containerPath }} {{- if ($values.subPath) }} @@ -113,6 +124,7 @@ spec: {{- end }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: {{ $.Release.Name }}-butler-secret mountPath: {{ $.Values.butlerSecret.containerPath }} @@ -162,6 +174,7 @@ spec: secretName: rubintv-secrets {{- if $.Values.nfsMountpoint }} {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} nfs: path: {{ $values.serverPath }} @@ -169,20 +182,25 @@ spec: server: {{ $values.server }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpoint }} {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.pvcMountpointClaim }} {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} - name: {{ $values.name }} persistentVolumeClaim: claimName: {{ $values.name }} {{- end }} {{- end }} + {{- end }} {{- if $.Values.butlerSecret }} - name: {{ $.Release.Name }}-butler-secret emptyDir: {} diff --git a/applications/rapid-analysis/templates/lsstcam-gather-rollup-set.yaml b/applications/rapid-analysis/templates/lsstcam-gather-rollup-set.yaml new file mode 100644 index 0000000000..97b37a8929 --- /dev/null +++ b/applications/rapid-analysis/templates/lsstcam-gather-rollup-set.yaml @@ -0,0 +1,243 @@ +{{ $_ := set $.Values "script" $.Values.lsstcamGatherRollupSet }} +{{ $script := $.Values.lsstcamGatherRollupSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gatherrollupset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | 
nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if 
$.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> 
new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini /secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} 
+ {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- $affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/lsstcam-gather2a-set.yaml b/applications/rapid-analysis/templates/lsstcam-gather2a-set.yaml new file mode 100644 index 0000000000..5ec06f24a3 --- /dev/null +++ b/applications/rapid-analysis/templates/lsstcam-gather2a-set.yaml @@ -0,0 +1,243 @@ +{{ $_ := set $.Values "script" $.Values.lsstcamGather2aSet }} +{{ $script := $.Values.lsstcamGather2aSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-gather2aset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ 
$env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini 
/secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- 
$affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/templates/lsstcam-worker-set.yaml b/applications/rapid-analysis/templates/lsstcam-worker-set.yaml new file mode 100644 index 0000000000..82d8f3e911 --- /dev/null +++ b/applications/rapid-analysis/templates/lsstcam-worker-set.yaml @@ -0,0 +1,243 @@ +{{ $_ := set $.Values "script" $.Values.lsstcamWorkerSet }} +{{ $script := $.Values.lsstcamWorkerSet }} +{{- if $script.name }} +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "rapid-analysis.deploymentName" $ }}-workerset + namespace: {{ $.Values.namespace }} + labels: + {{- include "rapid-analysis.labels" $ | nindent 4 }} +spec: + revisionHistoryLimit: 0 + selector: + matchLabels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 6 }} + replicas: {{ ($script.replicas | int) }} + podManagementPolicy: Parallel + template: + metadata: + {{- with $.Values.podAnnotations }} + annotations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + labels: + {{- include "rapid-analysis.selectorLabels" $ | nindent 8 }} + spec: + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.securityContext }} + securityContext: + {{- if $.Values.securityContext.uid }} + runAsUser: {{ $.Values.securityContext.uid }} + {{- end }} + {{- if $.Values.securityContext.gid }} + runAsGroup: {{ $.Values.securityContext.gid }} + {{- end }} + {{- if $.Values.securityContext.fid }} + fsGroup: {{ $.Values.securityContext.fid }} + {{- end }} + {{- end }} + containers: + - name: {{ include "rapid-analysis.scriptName" $ }} + image: "{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + - name: RUN_ARG + value: {{ $script.name }} + - name: WORKER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['statefulset.kubernetes.io/pod-name'] + - name: RAPID_ANALYSIS_LOCATION + value: {{ $.Values.location | upper | quote }} + {{- if or $.Values.env $.Values.envSecrets }} + {{- range $env_var, $env_value := $.Values.env }} + - name: {{ $env_var }} + value: {{ $env_value | quote }} + {{- end }} + {{- range $env := $.Values.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ $env.secretKey }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: PGPASSFILE + value: "{{ $.Values.butlerSecret.containerPath }}/postgres-credentials.txt" + - name: PGUSER + value: {{ $.Values.butlerSecret.dbUser | quote }} + - name: AWS_SHARED_CREDENTIALS_FILE + value: "{{ $.Values.butlerSecret.containerPath }}/aws-credentials.ini" + {{- end }} + {{- if $.Values.redis.enabled }} + - name: REDIS_HOST + value: "redis-service" + {{- if $.Values.redis.envSecrets }} + {{- range $env := $.Values.redis.envSecrets }} + - name: {{ $env.name }} + valueFrom: + secretKeyRef: + name: {{ $env.secretName }} + key: {{ 
$env.secretKey }} + {{- end }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: env-configmap + volumeMounts: + - name: rubintv-creds + mountPath: "/etc/rubintv/creds" + readOnly: true + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + readOnly: {{ $values.readOnly }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + mountPath: {{ $values.containerPath }} + {{- if ($values.subPath) }} + subPath: {{ $values.subPath }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + mountPath: {{ $.Values.butlerSecret.containerPath }} + {{- end }} + {{- if or $.Values.resources $script.resources }} + {{- $resources := "" }} + {{- if $script.resources }} + {{- $resources = $script.resources }} + {{- else }} + {{- $resources = $.Values.resources }} + {{- end }} + resources: + {{- toYaml $resources | nindent 12 }} + {{- end }} + {{- if $.Values.butlerSecret }} + initContainers: + - name: {{ $.Release.Name }}-butler-secret-perm-fixer + image: "alpine:latest" + command: + - "/bin/ash" + - "-c" + - | + cp -RL /secrets-raw/* /secrets + cat /secrets/aws-credentials.ini > new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + cat /secrets-rubintv/aws-credentials.ini >> new-aws-credentials.ini + printf "\n" >> new-aws-credentials.ini + mv new-aws-credentials.ini 
/secrets/aws-credentials.ini + chown 73006:73006 /secrets/* + chmod 0600 /secrets/* + volumeMounts: + - name: {{ $.Release.Name }}-raw-butler-secret + mountPath: /secrets-raw + readOnly: true + - name: {{ $.Release.Name }}-butler-secret + mountPath: /secrets + - name: rubintv-aws-creds + mountPath: /secrets-rubintv + readOnly: true + {{- end }} + volumes: + - name: rubintv-creds + secret: + secretName: google-creds + - name: rubintv-aws-creds + secret: + secretName: rubintv-secrets + {{- if $.Values.nfsMountpoint }} + {{- range $values := $.Values.nfsMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + nfs: + path: {{ $values.serverPath }} + readOnly: {{ $values.readOnly }} + server: {{ $values.server }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpoint }} + {{- range $values := $.Values.pvcMountpoint }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.pvcMountpointClaim }} + {{- range $values := $.Values.pvcMountpointClaim }} + {{- if (has $values.name $script.mounts) }} + - name: {{ $values.name }} + persistentVolumeClaim: + claimName: {{ $values.name }} + {{- end }} + {{- end }} + {{- end }} + {{- if $.Values.butlerSecret }} + - name: {{ $.Release.Name }}-butler-secret + emptyDir: {} + - name: {{ $.Release.Name }}-raw-butler-secret + secret: + secretName: butler-secret + defaultMode: 0600 + {{- end }} + {{- if or $.Values.nodeSelector $script.nodeSelector }} + {{- $nodeSelector := "" }} + {{- if $script.nodeSelector }} + {{- $nodeSelector = $script.nodeSelector }} + {{- else }} + {{- $nodeSelector = $.Values.nodeSelector }} + {{- end }} + nodeSelector: + {{- toYaml $nodeSelector | nindent 8 }} + {{- end }} + {{- if or $.Values.affinity $script.affinity }} + {{- $affinity := "" }} + {{- if $script.affinity }} + {{- $affinity = $script.affinity }} + {{- else }} + {{- 
$affinity = $.Values.affinity }} + {{- end }} + affinity: + {{- toYaml $affinity | nindent 8 }} + {{- end }} + {{- if or $.Values.tolerations $script.tolerations }} + {{- $tolerations := "" }} + {{- if $script.tolerations }} + {{- $tolerations = $script.tolerations }} + {{- else }} + {{- $tolerations = $.Values.tolerations }} + {{- end }} + tolerations: + {{- toYaml $tolerations | nindent 8 }} + {{- end }} +{{- end }} diff --git a/applications/rapid-analysis/values-summit.yaml b/applications/rapid-analysis/values-summit.yaml index 185b063e84..f28b339294 100644 --- a/applications/rapid-analysis/values-summit.yaml +++ b/applications/rapid-analysis/values-summit.yaml @@ -6,7 +6,17 @@ location: SUMMIT env: DAF_BUTLER_REPOSITORY_INDEX: /project/data-repos.yaml scripts: +### +### AuxTel pods +### - name: summit/auxTel/runBackgroundService.py + mounts: + - auxtel-data + - auxtel-gen3-data + - allsky-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" resources: requests: cpu: 0.5 @@ -15,12 +25,48 @@ scripts: cpu: 1.0 memory: 10G - name: summit/auxTel/runButlerWatcher.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runCalibrateCcdRunner.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/auxTel/runImExaminer.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/auxTel/runIsrRunner.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runMetadataCreator.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runMetadataServer.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/auxTel/runMonitor.py + mounts: + - auxtel-data + - 
auxtel-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" resources: requests: cpu: 0.5 @@ -29,8 +75,26 @@ scripts: cpu: 1.0 memory: 10G - name: summit/auxTel/runMountTorquePlotter.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/auxTel/runNightReporter.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/auxTel/runSpecExaminer.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" resources: requests: cpu: 0.5 @@ -38,7 +102,105 @@ scripts: limits: cpu: 1.0 memory: 4G +### +### ComCamSim pods +### +- name: summit/LSSTComCamSim/runButlerWatcher.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared +- name: summit/LSSTComCamSim/runHeadNode.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared +- name: summit/LSSTComCamSim/runMetadataServer.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" +- name: summit/LSSTComCamSim/runPlotter.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" + resources: + requests: + cpu: 0.5 + memory: 4G + limits: + cpu: 1.0 + memory: 6G +- name: summit/LSSTComCamSim/runAosDonutPipeline.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" + resources: + requests: + cpu: 32 + memory: 96G + limits: + cpu: 32 + memory: 96G +- name: summit/LSSTComCamSim/runFocusSweepAnalysis.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" + resources: + requests: + cpu: 0.5 + 
memory: 1G + limits: + cpu: 1 + memory: 2G +- name: summit/LSSTComCamSim/runMetadataServerAos.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" + resources: + requests: + cpu: 0.25 + memory: 500M + limits: + cpu: .5 + memory: 1G +- name: summit/LSSTComCamSim/runPsfPlotting.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" + resources: + requests: + cpu: .5 + memory: 4G + limits: + cpu: 1 + memory: 6G +### +### Misc pods +### - name: summit/misc/runAllSky.py + mounts: + - allsky-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" resources: requests: cpu: 1.0 @@ -47,19 +209,145 @@ scripts: cpu: 2 memory: 6G - name: summit/misc/runStarTracker.py + mounts: + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/misc/runStarTrackerCatchup.py + mounts: + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/misc/runStarTrackerFast.py + mounts: + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/misc/runStarTrackerMetadata.py + mounts: + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/misc/runStarTrackerNightReport.py + mounts: + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/misc/runStarTrackerWide.py + mounts: + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/misc/runTmaTelemetry.py + mounts: + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" +### +### ComCam pods +### - name: summit/LSSTComCam/runButlerWatcher.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared - name: summit/LSSTComCam/runHeadNode.py + mounts: + - 
comcam-data + - comcam-gen3-data + - project-shared - name: summit/LSSTComCam/runMetadataServer.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" - name: summit/LSSTComCam/runPlotter.py -workerSet: + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" +### +### LSSTCam pods +### +- name: summit/LSSTCam/runButlerWatcher.py + mounts: + - lsstcam-data + - project-shared +- name: summit/LSSTCam/runHeadNode.py + mounts: + - lsstcam-data + - project-shared +- name: summit/LSSTCam/runMetadataServer.py + mounts: + - lsstcam-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" +- name: summit/LSSTCam/runPlotter.py + mounts: + - lsstcam-data + - project-shared + podAnnotations: + k8s.v1.cni.cncf.io/networks: "kube-system/lhn" +### +### ComCamSim StatefulSets +### +comcamsimWorkerSet: + name: summit/LSSTComCamSim/runSfmRunner.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 9 + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 1.0 + memory: 8G +comcamsimGather2aSet: + name: summit/LSSTComCamSim/runStep2aWorker.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 1 + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 1.0 + memory: 8G +comcamsimGatherRollupSet: + name: summit/LSSTComCamSim/runNightlyWorker.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 1 + resources: + requests: + cpu: 1.0 + memory: 12G + limits: + cpu: 1.0 + memory: 24G +### +### ComCam StatefulSets +### +comcamWorkerSet: name: summit/LSSTComCam/runSfmRunner.py - replicas: 36 + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 9 resources: requests: cpu: 1.0 @@ -67,6 +355,50 @@ workerSet: limits: cpu: 1.0 memory: 8G +comcamGather2aSet: + name: 
summit/LSSTComCam/runStep2aWorker.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 1 + resources: + requests: + cpu: 1.0 + memory: "4G" + limits: + cpu: 1.0 + memory: "8G" +comcamGatherRollupSet: + name: summit/LSSTComCam/runNightlyWorker.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 1 + resources: + requests: + cpu: 1.0 + memory: "12G" + limits: + cpu: 1.0 + memory: "24G" +### +### LSSTCam StatefulSets +### +lsstcamWorkerSet: + name: summit/LSSTCam/runSfmRunner.py + mounts: + - lsstcam-data + - project-shared + replicas: 1 + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 1.0 + memory: 8G # we should check this value credentialFile: google_write_creds pullSecretsPath: pull-secret rubinTvSecretsPath: rubintv @@ -78,51 +410,46 @@ butlerSecret: imagePullSecrets: - name: pull-secret nfsMountpoint: -- name: auxtel-gen3-data +- name: auxtel-data containerPath: /repo/LATISS readOnly: false server: nfs-auxtel.cp.lsst.org serverPath: /auxtel/repo/LATISS -- name: comcam-gen3-data +- name: auxtel-gen3-data + containerPath: /data/lsstdata/base/auxtel + readOnly: true + server: nfs-auxtel.cp.lsst.org + serverPath: /auxtel/lsstdata/base/auxtel +- name: comcam-data containerPath: /repo/LSSTComCam readOnly: false server: nfs3.cp.lsst.org serverPath: /comcam/repo/LSSTComCam -- name: auxtel-data - containerPath: /readonly/lsstdata/auxtel - readOnly: true - server: nfs-auxtel.cp.lsst.org - serverPath: /auxtel/lsstdata -- name: comcam-data - containerPath: /readonly/lsstdata/comcam +- name: comcam-gen3-data + containerPath: /data/lsstdata/base/comcam readOnly: true server: nfs3.cp.lsst.org - serverPath: /comcam/lsstdata -- name: project-shared - containerPath: /project + serverPath: /comcam/lsstdata/base/comcam +- name: lsstcam-data + containerPath: /repo/LSSTCam readOnly: false - server: nfs1.cp.lsst.org - serverPath: /project -- name: auxtel-gen3-data-temp - containerPath: /data/lsstdata/base/auxtel 
- readOnly: true - server: nfs-auxtel.cp.lsst.org - serverPath: /auxtel/lsstdata/base/auxtel -- name: comcam-gen3-data-temp - containerPath: /data/lsstdata/base/comcam + server: nfs3.cp.lsst.org + serverPath: /lsstcam/repo/LSSTCam +- name: lsstcam-gen3-data + containerPath: /data/lsstdata/base/maintel readOnly: true server: nfs3.cp.lsst.org - serverPath: /comcam/lsstdata/base/comcam + serverPath: /lsstcam/lsstdata/base/maintel - name: allsky-data containerPath: /data/allsky readOnly: true server: nfs-auxtel.cp.lsst.org serverPath: /auxtel/allsky -- name: scratch-shared - containerPath: /scratch +- name: project-shared + containerPath: /project readOnly: false server: nfs1.cp.lsst.org - serverPath: /scratch/rubintv + serverPath: /project resources: requests: cpu: 0.5 diff --git a/applications/rapid-analysis/values-tucson-teststand.yaml b/applications/rapid-analysis/values-tucson-teststand.yaml index 8604e12165..f5ff7a7adb 100644 --- a/applications/rapid-analysis/values-tucson-teststand.yaml +++ b/applications/rapid-analysis/values-tucson-teststand.yaml @@ -8,37 +8,153 @@ env: siteTag: tts location: TTS scripts: +### +### AuxTel pods +### - name: summit/auxTel/runBackgroundService.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + resources: + requests: + cpu: 0.5 + memory: 4G + limits: + cpu: 1.0 + memory: 10G - name: summit/auxTel/runButlerWatcher.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runCalibrateCcdRunner.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runImExaminer.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runIsrRunner.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runMetadataCreator.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runMetadataServer.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared 
- name: summit/auxTel/runMonitor.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared + resources: + requests: + cpu: 0.5 + memory: 1G + limits: + cpu: 1.0 + memory: 10G - name: summit/auxTel/runMountTorquePlotter.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runNightReporter.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared - name: summit/auxTel/runSpecExaminer.py -- name: summit/comCam/runButlerWatcher.py -- name: summit/comCam/runIsrRunner_000.py -- name: summit/comCam/runIsrRunner_001.py -- name: summit/comCam/runIsrRunner_002.py -- name: summit/comCam/runIsrRunner_003.py -- name: summit/comCam/runIsrRunner_004.py -- name: summit/comCam/runIsrRunner_005.py -- name: summit/comCam/runIsrRunner_006.py -- name: summit/comCam/runIsrRunner_007.py -- name: summit/comCam/runIsrRunner_008.py -- name: summit/comCam/runMetadataServer.py -- name: summit/comCam/runPlotter.py + mounts: + - auxtel-data + - auxtel-gen3-data + - project-shared resources: requests: cpu: 0.5 + memory: 2G + limits: + cpu: 1.0 memory: 4G +### +### ComCam pods +### +- name: summit/LSSTComCam/runButlerWatcher.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared +- name: summit/LSSTComCam/runHeadNode.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared +- name: summit/LSSTComCam/runMetadataServer.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared +- name: summit/LSSTComCam/runPlotter.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared +### +### ComCam StatefulSets +### +comcamWorkerSet: + name: summit/LSSTComCam/runSfmRunner.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 1 + resources: + requests: + cpu: 1.0 + memory: 4G + limits: + cpu: 1.0 + memory: 8G +comcamGather2aSet: + name: summit/LSSTComCam/runStep2aWorker.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 1 + resources: + requests: + 
cpu: 1.0 + memory: "4G" + limits: + cpu: 1.0 + memory: "8G" +comcamGatherRollupSet: + name: summit/LSSTComCam/runNightlyWorker.py + mounts: + - comcam-data + - comcam-gen3-data + - project-shared + replicas: 1 + resources: + requests: + cpu: 1.0 + memory: "12G" limits: cpu: 1.0 - memory: 6G -- name: summit/misc/runTmaTelemetry.py + memory: "24G" # TODO: remove google credentials credentialFile: google_write_creds vaultPrefixPath: secret/k8s_operator/tucson-teststand.lsst.codes @@ -53,46 +169,31 @@ butlerSecret: imagePullSecrets: - name: pull-secret nfsMountpoint: -- name: auxtel-gen3-data +- name: auxtel-data containerPath: /repo/LATISS readOnly: false server: nfs-auxtel.tu.lsst.org serverPath: /auxtel/repo/LATISS -- name: comcam-gen3-data +- name: auxtel-gen3-data + containerPath: /data/lsstdata/TTS/auxtel + readOnly: true + server: nfs-auxtel.tu.lsst.org + serverPath: /auxtel/lsstdata/TTS/auxtel +- name: comcam-data containerPath: /repo/LSSTComCam readOnly: false server: comcam-archiver.tu.lsst.org serverPath: /repo/LSSTComCam -- name: auxtel-data - containerPath: /readonly/lsstdata/auxtel - readOnly: true - server: nfs-auxtel.tu.lsst.org - serverPath: /auxtel/lsstdata -- name: comcam-data - containerPath: /readonly/lsstdata/comcam +- name: comcam-gen3-data + containerPath: /data/lsstdata/TTS/comcam readOnly: true server: comcam-archiver.tu.lsst.org - serverPath: /lsstdata + serverPath: /lsstdata/TTS/comcam - name: project-shared containerPath: /project readOnly: false server: nfs-project.tu.lsst.org serverPath: /project -- name: auxtel-gen3-data-temp - containerPath: /data/lsstdata/TTS/auxtel - readOnly: true - server: nfs-auxtel.tu.lsst.org - serverPath: /auxtel/lsstdata/TTS/auxtel -- name: comcam-gen3-data-temp - containerPath: /data/lsstdata/TTS/comcam - readOnly: true - server: comcam-archiver.tu.lsst.org - serverPath: /lsstdata/TTS/comcam -- name: scratch-shared - containerPath: /scratch - readOnly: false - server: nfs-scratch.tu.lsst.org - serverPath: 
/scratch/rubintv resources: requests: cpu: 0.5 diff --git a/applications/rubintv-dev/Chart.yaml b/applications/rubintv-dev/Chart.yaml index 3d91759fa2..bdd3d78b9c 100644 --- a/applications/rubintv-dev/Chart.yaml +++ b/applications/rubintv-dev/Chart.yaml @@ -10,5 +10,5 @@ dependencies: version: 1.0.0 repository: "file://../../charts/rubintv" - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/rubintv-dev/values-summit.yaml b/applications/rubintv-dev/values-summit.yaml index d9ae6f096b..95fa42519f 100644 --- a/applications/rubintv-dev/values-summit.yaml +++ b/applications/rubintv-dev/values-summit.yaml @@ -28,7 +28,7 @@ rubintv: replicas: 1 image: repository: ts-dockerhub.lsst.org/rapid-analysis - tag: c0037 + tag: c0039 pullPolicy: Always uid: 73006 gid: 73006 @@ -43,6 +43,12 @@ rubintv: value: "/sdf/group/rubin/repo/ir2/butler.yaml" - name: DEPLOY_BRANCH value: *s-dbE + nfsMountpoint: + - name: project-rubintv-ddv-config + containerPath: /var/ddv-config + readOnly: false + server: nfs1.cp.lsst.org + serverPath: /project/rubintv/ddv-config resources: requests: cpu: 0.5 diff --git a/applications/rubintv/Chart.yaml b/applications/rubintv/Chart.yaml index 3b40aecd66..79e0cf2fe9 100644 --- a/applications/rubintv/Chart.yaml +++ b/applications/rubintv/Chart.yaml @@ -10,5 +10,5 @@ dependencies: version: 1.0.0 repository: "file://../../charts/rubintv" - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/rubintv/README.md b/applications/rubintv/README.md index 45e56b7ba2..9caeed997e 100644 --- a/applications/rubintv/README.md +++ b/applications/rubintv/README.md @@ -53,12 +53,13 @@ Real-time display front end | rubintv.workers.image.repository | string | `"ts-dockerhub.lsst.org/rubintv-broadcaster"` | The Docker registry name for the container image. 
| | rubintv.workers.image.tag | string | `"develop"` | The tag of the container image to use. | | rubintv.workers.imagePullSecrets | list | See `values.yaml` | Image pull secrets. | +| rubintv.workers.nfsMountpoint | list | See `values.yaml` | NFS mountpoints for the rubintv worker pods | | rubintv.workers.nodeSelector | object | `{}` | Node selector rules for the rubintv worker pods | | rubintv.workers.pathPrefix | string | `"/"` | Prefix for the (internal) worker API routes | | rubintv.workers.podAnnotations | object | `{}` | Annotations for the rubintv worker pods | +| rubintv.workers.pvcMountpoint | list | See `values.yaml` | PVC claims for the rubintv worker pods | | rubintv.workers.replicas | int | `0` | how many replicas to use | | rubintv.workers.resources | object | `{}` | Resource limits and requests for the rubintv worker pods | | rubintv.workers.script | string | `"slac/rubintv/workerPod1.py"` | Script that runs in RUN_ARG. This dynamic mechanism needs to be replaced with something less scary, but there is resistance to that, at least while iterating. 
| | rubintv.workers.tolerations | list | `[]` | Tolerations for the rubintv worker pods | | rubintv.workers.uid | string | `nil` | UID to run as (site-dependent because of filesystem access; must be specified) | -| rubintv.workers.volumes | list | See `values.yaml` | Volumes for the rubintv worker pods | diff --git a/applications/rubintv/values-summit.yaml b/applications/rubintv/values-summit.yaml index 07a3594fb2..eef9d8dec8 100644 --- a/applications/rubintv/values-summit.yaml +++ b/applications/rubintv/values-summit.yaml @@ -20,14 +20,14 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.3.1 + tag: v2.5.3 pullPolicy: Always workers: replicas: 1 image: repository: ts-dockerhub.lsst.org/rapid-analysis - tag: c0037 + tag: c0039 pullPolicy: Always uid: 73006 gid: 73006 diff --git a/applications/rubintv/values-usdfdev.yaml b/applications/rubintv/values-usdfdev.yaml index e165673019..1bc20932f8 100644 --- a/applications/rubintv/values-usdfdev.yaml +++ b/applications/rubintv/values-usdfdev.yaml @@ -24,11 +24,11 @@ rubintv: workers: replicas: 1 image: - repository: ts-dockerhub.lsst.org/rapid-analysis - tag: c0037 + repository: lsstts/rapid-analysis + tag: c0039_usdf pullPolicy: Always - uid: 73006 - gid: 73006 + uid: 17951 + gid: 4085 scriptsLocation: /repos/rubintv_analysis_service/scripts script: rubintv_worker.py -a rubintv -p 8080 -l usdf env: @@ -40,7 +40,7 @@ rubintv: value: "/sdf/group/rubin/repo/ir2/butler.yaml" - name: DEPLOY_BRANCH value: *dbE - volumes: + pvcMountpoint: - name: sdf-group-rubin persistentVolumeClaim: name: sdf-group-rubin @@ -55,6 +55,14 @@ rubintv: capacity: 1Gi accessMode: ReadOnlyMany mountPath: /sdf/data/rubin + - name: sdf-data-rubin-rubintv-ddv-config + persistentVolumeClaim: + name: sdf-data-rubin-rubintv-ddv-config + storageClassName: sdf-data-rubin + capacity: 1Gi + accessMode: ReadWriteMany + mountPath: /var/ddv-config + subPath: shared/rubintv-ddv-config resources: limits: cpu: 2.0 diff --git 
a/applications/rubintv/values-usdfprod.yaml b/applications/rubintv/values-usdfprod.yaml index 9818e96584..68c74079d4 100644 --- a/applications/rubintv/values-usdfprod.yaml +++ b/applications/rubintv/values-usdfprod.yaml @@ -16,7 +16,7 @@ rubintv: - name: DDV_CLIENT_WS_ADDRESS value: "rubintv/ws/ddv" image: - tag: v2.3.1 + tag: v2.5.3 pullPolicy: Always workers: diff --git a/applications/rubintv/values.yaml b/applications/rubintv/values.yaml index 79342a1339..6d754d0153 100644 --- a/applications/rubintv/values.yaml +++ b/applications/rubintv/values.yaml @@ -104,11 +104,11 @@ rubintv: imagePullSecrets: [] # Each entry is of the form: { name: pull-secret-name } - # -- Volumes for the rubintv worker pods + # -- PVC claims for the rubintv worker pods # @default -- See `values.yaml` - volumes: [] + pvcMountpoint: [] # Each list item must have the following form: - # { name: volume-name, + # { name: pvc-name, # accessMode: one of "ReadOnly", "ReadWriteOnce", "ReadWriteMany", # mountPath: path-mounted-in-container, # persistentVolumeClaim: { @@ -117,8 +117,17 @@ rubintv: # capacity: size-as-string-of-pvc (e.g. "1Gi") # } # } - # It is planned to implement "nfs" as an alternative to - # "PersistentVolumeClaim" but that has not yet been done. 
+ + # -- NFS mountpoints for the rubintv worker pods + # @default -- See `values.yaml` + nfsMountpoint: [] + # Each list item must have the following form: + # { name: nfs-name, + # containerPath: path-mounted-in-container, + # readOnly: boolean, + # server: nfs-server, + # serverPath: nfs-server-path + # } # -- Resource limits and requests for the rubintv worker pods resources: {} diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md index cde6e253fc..a3a058d559 100644 --- a/applications/sasquatch/README.md +++ b/applications/sasquatch/README.md @@ -40,6 +40,7 @@ Rubin Observatory's telemetry service | influxdb.config.coordinator.query-timeout | string | `"30s"` | Maximum duration a query is allowed to run before it is killed | | influxdb.config.coordinator.write-timeout | string | `"1h"` | Duration a write request waits before timeout is returned to the caller | | influxdb.config.data.cache-max-memory-size | int | `0` | Maximum size a shared cache can reach before it starts rejecting writes | +| influxdb.config.data.max-series-per-database | int | `0` | Maximum number of series allowed per database before writes are dropped. Change the setting to 0 to allow an unlimited number of series per database. | | influxdb.config.data.trace-logging-enabled | bool | `true` | Whether to enable verbose logging of additional debug information within the TSM engine and WAL | | influxdb.config.data.wal-fsync-delay | string | `"100ms"` | Duration a write will wait before fsyncing. This is useful for slower disks or when WAL write contention is present. 
| | influxdb.config.http.auth-enabled | bool | `true` | Whether authentication is required | @@ -68,7 +69,7 @@ Rubin Observatory's telemetry service | kapacitor.envVars | object | See `values.yaml` | Additional environment variables to set | | kapacitor.existingSecret | string | `"sasquatch"` | Use `influxdb-user` and `influxdb-password` keys from this secret | | kapacitor.image.repository | string | `"kapacitor"` | Docker image to use for Kapacitor | -| kapacitor.image.tag | string | `"1.7.5"` | Tag to use for Kapacitor | +| kapacitor.image.tag | string | `"1.7.6"` | Tag to use for Kapacitor | | kapacitor.influxURL | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB connection URL | | kapacitor.persistence.enabled | bool | `true` | Whether to enable Kapacitor data persistence | | kapacitor.persistence.size | string | `"100Gi"` | Size of storage to request if enabled | @@ -84,14 +85,14 @@ Rubin Observatory's telemetry service | strimzi-registry-operator.operatorNamespace | string | `"sasquatch"` | Namespace where the strimzi-registry-operator is deployed | | telegraf-kafka-consumer | object | `{}` | Overrides for telegraf-kafka-consumer configuration | | app-metrics.affinity | object | `{}` | Affinity for pod assignment | -| app-metrics.apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. | +| app-metrics.apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. | | app-metrics.args | list | `[]` | Arguments passed to the Telegraf agent containers | -| app-metrics.cluster.name | string | `"sasquatch"` | | +| app-metrics.cluster.name | string | `"sasquatch"` | Name of the Strimzi cluster. Synchronize this with the cluster name in the parent Sasquatch chart. 
| | app-metrics.debug | bool | false | Run Telegraf in debug mode. | | app-metrics.env | list | See `values.yaml` | Telegraf agent enviroment variables | -| app-metrics.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | -| app-metrics.globalAppConfig | object | `{}` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | -| app-metrics.globalInfluxTags | list | `["service"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from `globalAppConfig` | +| app-metrics.envFromSecret | string | `""` | Name of the secret with values to be added to the environment | +| app-metrics.globalAppConfig | object | See `values.yaml` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | +| app-metrics.globalInfluxTags | list | `["application"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). 
These will be concatenated with the `influxTags` from `globalAppConfig` | | app-metrics.image.pullPolicy | string | `"Always"` | Image pull policy | | app-metrics.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | | app-metrics.image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | @@ -100,14 +101,14 @@ Rubin Observatory's telemetry service | app-metrics.nodeSelector | object | `{}` | Node labels for pod assignment | | app-metrics.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | | app-metrics.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | -| app-metrics.replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. | +| app-metrics.replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. | | app-metrics.resources | object | See `values.yaml` | Kubernetes resources requests and limits | | app-metrics.tolerations | list | `[]` | Tolerations for pod assignment | | influxdb-enterprise.bootstrap.auth.secretName | string | `"sasquatch"` | Enable authentication of the data nodes using this secret, by creating a username and password for an admin account. The secret must contain keys `username` and `password`. | | influxdb-enterprise.bootstrap.ddldml.configMap | string | Do not run DDL or DML | A config map containing DDL and DML that define databases, retention policies, and inject some data. The keys `ddl` and `dml` must exist, even if one of them is empty. DDL is executed before DML to ensure databases and retention policies exist. 
| | influxdb-enterprise.bootstrap.ddldml.resources | object | `{}` | Kubernetes resources and limits for the bootstrap job | | influxdb-enterprise.data.affinity | object | See `values.yaml` | Affinity rules for data pods | -| influxdb-enterprise.data.config.antiEntropy.enabled | bool | `true` | Enable the anti-entropy service, which copies and repairs shards | +| influxdb-enterprise.data.config.antiEntropy.enabled | bool | `false` | Enable the anti-entropy service, which copies and repairs shards | | influxdb-enterprise.data.config.cluster.log-queries-after | string | `"15s"` | Maximum duration a query can run before InfluxDB logs it as a slow query | | influxdb-enterprise.data.config.cluster.max-concurrent-queries | int | `1000` | Maximum number of running queries allowed on the instance (0 is unlimited) | | influxdb-enterprise.data.config.cluster.query-timeout | string | `"300s"` | Maximum duration a query is allowed to run before it is killed | @@ -366,10 +367,9 @@ Rubin Observatory's telemetry service | strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run | | strimzi-kafka.cruiseControl | object | `{"enabled":false}` | Configuration for the Kafka Cruise Control | | strimzi-kafka.kafka.affinity | object | See `values.yaml` | Affinity for Kafka pod assignment | -| strimzi-kafka.kafka.config."log.retention.bytes" | string | `"350000000000"` | How much disk space Kafka will ensure is available, set to 70% of the data partition size | -| strimzi-kafka.kafka.config."log.retention.hours" | int | `48` | Number of days for a topic's data to be retained | +| strimzi-kafka.kafka.config."log.retention.minutes" | int | 4320 minutes (3 days) | Number of days for a topic's data to be retained | | strimzi-kafka.kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka | -| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be 
retained | +| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | 4320 minutes (3 days) | Number of minutes for a consumer group's offsets to be retained | | strimzi-kafka.kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition | | strimzi-kafka.kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource | | strimzi-kafka.kafka.externalListener.bootstrap.host | string | Do not configure TLS | Name used for TLS hostname verification | @@ -425,8 +425,8 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer.env | list | See `values.yaml` | Telegraf agent enviroment variables | | telegraf-kafka-consumer.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | | telegraf-kafka-consumer.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| telegraf-kafka-consumer.image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | -| telegraf-kafka-consumer.image.tag | string | `"avro-mutex"` | Telegraf image tag | +| telegraf-kafka-consumer.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| telegraf-kafka-consumer.image.tag | string | `"1.32.1-alpine"` | Telegraf image tag | | telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | telegraf-kafka-consumer.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | telegraf-kafka-consumer.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | @@ -462,8 +462,8 @@ Rubin Observatory's telemetry service | telegraf-kafka-consumer-oss.env | list | See `values.yaml` | Telegraf agent enviroment variables | | telegraf-kafka-consumer-oss.envFromSecret | string | `""` | Name of the secret with values to 
be added to the environment. | | telegraf-kafka-consumer-oss.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| telegraf-kafka-consumer-oss.image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | -| telegraf-kafka-consumer-oss.image.tag | string | `"avro-mutex"` | Telegraf image tag | +| telegraf-kafka-consumer-oss.image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| telegraf-kafka-consumer-oss.image.tag | string | `"1.32.1-alpine"` | Telegraf image tag | | telegraf-kafka-consumer-oss.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | telegraf-kafka-consumer-oss.influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | telegraf-kafka-consumer-oss.influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | diff --git a/applications/sasquatch/charts/app-metrics/README.md b/applications/sasquatch/charts/app-metrics/README.md index 1cb6c56b6d..3f75c1aa74 100644 --- a/applications/sasquatch/charts/app-metrics/README.md +++ b/applications/sasquatch/charts/app-metrics/README.md @@ -7,14 +7,14 @@ Kafka topics, users, and a telegraf connector for metrics events. | Key | Type | Default | Description | |-----|------|---------|-------------| | affinity | object | `{}` | Affinity for pod assignment | -| apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. | +| apps | list | `[]` | A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. The names should be the same as the app names in Phalanx. 
| | args | list | `[]` | Arguments passed to the Telegraf agent containers | -| cluster.name | string | `"sasquatch"` | | +| cluster.name | string | `"sasquatch"` | Name of the Strimzi cluster. Synchronize this with the cluster name in the parent Sasquatch chart. | | debug | bool | false | Run Telegraf in debug mode. | | env | list | See `values.yaml` | Telegraf agent enviroment variables | -| envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | -| globalAppConfig | object | `{}` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | -| globalInfluxTags | list | `["service"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from `globalAppConfig` | +| envFromSecret | string | `""` | Name of the secret with values to be added to the environment | +| globalAppConfig | object | See `values.yaml` | app-metrics configuration in any environment in which the subchart is enabled. This should stay globally specified here, and it shouldn't be overridden. See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) for the structure of this value. | +| globalInfluxTags | list | `["application"]` | Keys in an every event sent by any app that should be recorded in InfluxDB as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from `globalAppConfig` | | image.pullPolicy | string | `"Always"` | Image pull policy | | image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | | image.tag | string | `"1.30.2-alpine"` | Telegraf image tag | @@ -23,6 +23,6 @@ Kafka topics, users, and a telegraf connector for metrics events. 
| nodeSelector | object | `{}` | Node labels for pod assignment | | podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods | | podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods | -| replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. | +| replicaCount | int | `3` | Number of Telegraf replicas. Multiple replicas increase availability. | | resources | object | See `values.yaml` | Kubernetes resources requests and limits | | tolerations | list | `[]` | Tolerations for pod assignment | diff --git a/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml b/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml index 70db2590de..67a7cbe687 100644 --- a/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml +++ b/applications/sasquatch/charts/app-metrics/templates/kafka-topics.yaml @@ -3,7 +3,7 @@ apiVersion: kafka.strimzi.io/v1beta2 kind: KafkaTopic metadata: - name: "lsst.square.app-metrics.events.{{ . }}" + name: "lsst.square.metrics.events.{{ . }}" labels: strimzi.io/cluster: {{ $.Values.cluster.name }} spec: diff --git a/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml b/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml index 9ddab60b5e..2cde67ecef 100644 --- a/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml +++ b/applications/sasquatch/charts/app-metrics/templates/kafka-users.yaml @@ -21,7 +21,7 @@ spec: host: "*" - resource: type: topic - name: "lsst.square.app-metrics.events.{{ . }}" + name: "lsst.square.metrics.events.{{ . 
}}" patternType: literal operations: - "Describe" diff --git a/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml b/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml index e8a60a4ae3..84f6e47fd8 100644 --- a/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml +++ b/applications/sasquatch/charts/app-metrics/templates/telegraf-configmap.yaml @@ -19,10 +19,11 @@ data: omit_hostname = true [[outputs.influxdb]] + namedrop = ["telegraf_*"] urls = [ {{ .Values.influxdb.url | quote }} ] - database = "telegraf-kafka-app-metrics-consumer" + database = "lsst.square.metrics" username = "${INFLUXDB_USER}" password = "${INFLUXDB_PASSWORD}" @@ -54,7 +55,7 @@ data: avro_union_mode = "nullable" avro_tags = {{ include "helpers.toTomlArray" $influxTags }} topics = [ - "lsst.square.app-metrics.events.{{ $app }}", + "lsst.square.metrics.events.{{ $app }}", ] max_processing_time = "5s" consumer_fetch_default = "5MB" diff --git a/applications/sasquatch/charts/app-metrics/values.yaml b/applications/sasquatch/charts/app-metrics/values.yaml index d5bc17418f..9ddfbc4bb4 100644 --- a/applications/sasquatch/charts/app-metrics/values.yaml +++ b/applications/sasquatch/charts/app-metrics/values.yaml @@ -2,27 +2,35 @@ # -- app-metrics configuration in any environment in which the subchart is # enabled. This should stay globally specified here, and it shouldn't be -# overridden. -# See [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) +# overridden. See +# [here](https://sasquatch.lsst.io/user-guide/app-metrics.html#configuration) # for the structure of this value. -globalAppConfig: {} - -# -- A list of applications that will publish metrics events, and the keys that should be ingested into InfluxDB as tags. -# The names should be the same as the app names in Phalanx. 
+# @default -- See `values.yaml` +globalAppConfig: + gafaelfawr: + influxTags: + - "service" + - "username" + mobu: + influxTags: + - "type" + +# -- A list of applications that will publish metrics events, and the keys +# that should be ingested into InfluxDB as tags. The names should be the same +# as the app names in Phalanx. apps: [] -# -- Keys in an every event sent by any app that should be recorded in InfluxDB -# as "tags" (vs. "fields"). These will be concatenated with the `influxTags` from -# `globalAppConfig` -globalInfluxTags: ["service"] +# -- Keys in an every event sent by any app that should be recorded in +# InfluxDB as "tags" (vs. "fields"). These will be concatenated with the +# `influxTags` from `globalAppConfig` +globalInfluxTags: ["application"] cluster: - # The name of the Strimzi cluster. Synchronize this with the cluster name in + # -- Name of the Strimzi cluster. Synchronize this with the cluster name in # the parent Sasquatch chart. name: sasquatch -# These values refer to the telegraf deployment and config - +# These values refer to the Telegraf deployment and config image: # -- Telegraf image repository repo: "docker.io/library/telegraf" @@ -67,7 +75,7 @@ env: # InfluxDB v1 password key: influxdb-password -# -- Name of the secret with values to be added to the environment. +# -- Name of the secret with values to be added to the environment envFromSecret: "" # -- Run Telegraf in debug mode. @@ -78,10 +86,9 @@ influxdb: # -- URL of the InfluxDB v1 instance to write to url: "http://sasquatch-influxdb.sasquatch:8086" -# -- Number of Telegraf replicas. Multiple replicas increase availability. +# -- Number of Telegraf replicas. Multiple replicas increase availability. 
replicaCount: 3 - # -- Kubernetes resources requests and limits # @default -- See `values.yaml` resources: diff --git a/applications/sasquatch/charts/influxdb-enterprise/README.md b/applications/sasquatch/charts/influxdb-enterprise/README.md index 12233edf75..1f95a590c9 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/README.md +++ b/applications/sasquatch/charts/influxdb-enterprise/README.md @@ -14,7 +14,7 @@ Run InfluxDB Enterprise on Kubernetes | bootstrap.ddldml.configMap | string | Do not run DDL or DML | A config map containing DDL and DML that define databases, retention policies, and inject some data. The keys `ddl` and `dml` must exist, even if one of them is empty. DDL is executed before DML to ensure databases and retention policies exist. | | bootstrap.ddldml.resources | object | `{}` | Kubernetes resources and limits for the bootstrap job | | data.affinity | object | See `values.yaml` | Affinity rules for data pods | -| data.config.antiEntropy.enabled | bool | `true` | Enable the anti-entropy service, which copies and repairs shards | +| data.config.antiEntropy.enabled | bool | `false` | Enable the anti-entropy service, which copies and repairs shards | | data.config.cluster.log-queries-after | string | `"15s"` | Maximum duration a query can run before InfluxDB logs it as a slow query | | data.config.cluster.max-concurrent-queries | int | `1000` | Maximum number of running queries allowed on the instance (0 is unlimited) | | data.config.cluster.query-timeout | string | `"300s"` | Maximum duration a query is allowed to run before it is killed | diff --git a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml index 1cc01f575a..87a82cc693 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/templates/data-statefulset.yaml @@ -86,6 
+86,7 @@ spec: containerPort: 25826 protocol: UDP livenessProbe: + failureThreshold: 6 httpGet: path: /ping port: http diff --git a/applications/sasquatch/charts/influxdb-enterprise/values.yaml b/applications/sasquatch/charts/influxdb-enterprise/values.yaml index 0709b449c6..5df8482d84 100644 --- a/applications/sasquatch/charts/influxdb-enterprise/values.yaml +++ b/applications/sasquatch/charts/influxdb-enterprise/values.yaml @@ -364,7 +364,7 @@ data: antiEntropy: # -- Enable the anti-entropy service, which copies and repairs shards - enabled: true + enabled: false http: # -- Whether to enable the Flux query endpoint diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md index 556761d75d..10f3965922 100644 --- a/applications/sasquatch/charts/strimzi-kafka/README.md +++ b/applications/sasquatch/charts/strimzi-kafka/README.md @@ -20,10 +20,9 @@ A subchart to deploy Strimzi Kafka components for Sasquatch. | connect.replicas | int | `3` | Number of Kafka Connect replicas to run | | cruiseControl | object | `{"enabled":false}` | Configuration for the Kafka Cruise Control | | kafka.affinity | object | See `values.yaml` | Affinity for Kafka pod assignment | -| kafka.config."log.retention.bytes" | string | `"350000000000"` | How much disk space Kafka will ensure is available, set to 70% of the data partition size | -| kafka.config."log.retention.hours" | int | `48` | Number of days for a topic's data to be retained | +| kafka.config."log.retention.minutes" | int | 4320 minutes (3 days) | Number of days for a topic's data to be retained | | kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka | -| kafka.config."offsets.retention.minutes" | int | `2880` | Number of minutes for a consumer group's offsets to be retained | +| kafka.config."offsets.retention.minutes" | int | 4320 minutes (3 days) | Number of minutes for a consumer group's offsets to be retained | | 
kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition | | kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource | | kafka.externalListener.bootstrap.host | string | Do not configure TLS | Name used for TLS hostname verification | diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml index 0fac119249..ac8e91832a 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/kafka.yaml @@ -83,6 +83,10 @@ spec: affinity: {{- toYaml . | nindent 10 }} {{- end }} + {{- with .Values.brokerStorage.tolerations }} + tolerations: + {{- toYaml . | nindent 10 }} + {{- end}} {{- with .Values.kafka.resources }} resources: {{- toYaml . | nindent 6 }} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/schema-registry.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/schema-registry.yaml index 8f15e429a5..601864b6f1 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/schema-registry.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/schema-registry.yaml @@ -5,6 +5,7 @@ metadata: spec: listener: tls compatibilityLevel: none + registryImageTag: "7.7.1" cpuLimit: {{ .Values.registry.resources.limits.cpu | quote }} cpuRequest: {{ .Values.registry.resources.requests.cpu | quote }} memoryLimit: {{ .Values.registry.resources.limits.memory | quote }} diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml index 5b30f2a6a3..22174b52e7 100644 --- a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml @@ -29,6 +29,13 @@ spec: type: allow host: "*" 
operation: All + - resource: + type: topic + name: "lsst.s3" + patternType: prefix + type: allow + host: "*" + operation: All - resource: type: cluster operations: diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml index 6d587fd746..8f0eab97bf 100644 --- a/applications/sasquatch/charts/strimzi-kafka/values.yaml +++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml @@ -29,14 +29,12 @@ kafka: config: # -- Number of minutes for a consumer group's offsets to be retained - offsets.retention.minutes: 2880 + # @default -- 4320 minutes (3 days) + offsets.retention.minutes: 4320 # -- Number of days for a topic's data to be retained - log.retention.hours: 48 - - # -- How much disk space Kafka will ensure is available, set to 70% of the - # data partition size - log.retention.bytes: "350000000000" + # @default -- 4320 minutes (3 days) + log.retention.minutes: 4320 # -- The largest record batch size allowed by Kafka message.max.bytes: 10485760 diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/Chart.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/Chart.yaml index 92210beefa..9a3b69690e 100755 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/Chart.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/Chart.yaml @@ -4,4 +4,4 @@ version: 1.0.0 description: > Telegraf is an agent written in Go for collecting, processing, aggregating, and writing metrics. This chart deploys multiple instances of the telegraf agent to connect Kafka and InfluxDB in Sasquatch. 
-appVersion: 1.23.3 +appVersion: 1.32.1 diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md index 0be7c27bdb..532358d835 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md @@ -12,8 +12,8 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and | env | list | See `values.yaml` | Telegraf agent enviroment variables | | envFromSecret | string | `""` | Name of the secret with values to be added to the environment. | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repo | string | `"docker.io/lsstsqre/telegraf"` | Telegraf image repository | -| image.tag | string | `"avro-mutex"` | Telegraf image tag | +| image.repo | string | `"docker.io/library/telegraf"` | Telegraf image repository | +| image.tag | string | `"1.32.1-alpine"` | Telegraf image tag | | imagePullSecrets | list | `[]` | Secret names to use for Docker pulls | | influxdb.database | string | `"telegraf-kafka-consumer-v1"` | Name of the InfluxDB v1 database to write to | | influxdb.url | string | `"http://sasquatch-influxdb.sasquatch:8086"` | URL of the InfluxDB v1 instance to write to | diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl index 11dae28e5a..bb08f3add7 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/_helpers.tpl @@ -21,6 +21,7 @@ data: omit_hostname = true [[outputs.influxdb]] + namedrop = ["telegraf_*"] urls = [ {{ .influxdbUrl | quote }} ] @@ -65,6 +66,36 @@ data: max_undelivered_messages = {{ default 10000 .value.max_undelivered_messages }} compression_codec = {{ default 3 .value.compression_codec }} + {{- if .value.repair }} + 
[[inputs.kafka_consumer]] + brokers = [ + "sasquatch-kafka-brokers.sasquatch:9092" + ] + consumer_group = "telegraf-kafka-consumer-{{ .key }}-repairer" + sasl_mechanism = "SCRAM-SHA-512" + sasl_password = "$TELEGRAF_PASSWORD" + sasl_username = "telegraf" + data_format = "avro" + avro_schema_registry = "http://sasquatch-schema-registry.sasquatch:8081" + avro_timestamp = {{ default "private_efdStamp" .value.timestamp_field | quote }} + avro_timestamp_format = {{ default "unix" .value.timestamp_format | quote }} + avro_union_mode = {{ default "nullable" .value.union_mode | quote }} + avro_field_separator = {{ default "" .value.union_field_separator | quote }} + {{- if .value.fields }} + avro_fields = {{ .value.fields }} + {{- end }} + {{- if .value.tags }} + avro_tags = {{ .value.tags }} + {{- end }} + topic_regexps = {{ .value.topicRegexps }} + offset = "oldest" + precision = {{ default "1us" .value.precision | quote }} + max_processing_time = {{ default "5s" .value.max_processing_time | quote }} + consumer_fetch_default = {{ default "20MB" .value.consumer_fetch_default | quote }} + max_undelivered_messages = {{ default 10000 .value.max_undelivered_messages }} + compression_codec = {{ default 3 .value.compression_codec }} + {{- end }} + [[inputs.internal]] name_prefix = "telegraf_" collect_memstats = true diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml index dd0fc7cb4f..41309868b7 100644 --- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml +++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml @@ -5,10 +5,10 @@ enabled: false image: # -- Telegraf image repository - repo: "docker.io/lsstsqre/telegraf" + repo: "docker.io/library/telegraf" # -- Telegraf image tag - tag: "avro-mutex" + tag: "1.32.1-alpine" # -- Image pull policy pullPolicy: "IfNotPresent" diff --git a/applications/sasquatch/values-base.yaml 
b/applications/sasquatch/values-base.yaml index 4440c387ea..d34b7659b3 100644 --- a/applications/sasquatch/values-base.yaml +++ b/applications/sasquatch/values-base.yaml @@ -35,7 +35,7 @@ strimzi-kafka: tls: enabled: true bootstrap: - loadBalancerIP: "139.229.153.65" + loadBalancerIP: "139.229.151.176" host: sasquatch-base-kafka-bootstrap.lsst.codes brokers: - broker: 6 @@ -119,10 +119,15 @@ strimzi-kafka: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - - key: local-storage + - key: kafka-broker operator: In values: - "true" + tolerations: + - key: "kafka-broker" + operator: "Equal" + value: "true" + effect: "NoSchedule" influxdb: persistence: diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml index 6519b85afb..76740fa521 100644 --- a/applications/sasquatch/values-idfdev.yaml +++ b/applications/sasquatch/values-idfdev.yaml @@ -64,6 +64,51 @@ influxdb: memory: 16Gi cpu: 2 +influxdb-enterprise: + enabled: true + license: + secret: + name: sasquatch + key: influxdb-enterprise-license + meta: + ingress: + enabled: true + hostname: data-dev.lsst.cloud + persistence: + enabled: true + accessMode: ReadWriteOnce + size: 16Gi + sharedSecret: + secret: + name: sasquatch + key: influxdb-enterprise-shared-secret + resources: + requests: + memory: 2Gi + cpu: 1 + limits: + memory: 4Gi + cpu: 2 + data: + replicas: 2 + config: + antiEntropy: + enabled: true + ingress: + enabled: true + hostname: data-dev.lsst.cloud + persistence: + enabled: true + accessMode: ReadWriteOnce + size: 1Ti + resources: + requests: + memory: 8Gi + cpu: 2 + limits: + memory: 16Gi + cpu: 4 + telegraf-kafka-consumer: enabled: true kafkaConsumers: @@ -108,3 +153,9 @@ chronograf: GENERIC_API_KEY: sub PUBLIC_URL: https://data-dev.lsst.cloud/ STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/idfdev.json + +app-metrics: + enabled: true + apps: + - gafaelfawr + - mobu diff --git 
a/applications/sasquatch/values-idfint.yaml b/applications/sasquatch/values-idfint.yaml index 1eb64499c1..139a8cfd14 100644 --- a/applications/sasquatch/values-idfint.yaml +++ b/applications/sasquatch/values-idfint.yaml @@ -159,3 +159,8 @@ chronograf: GENERIC_API_KEY: sub PUBLIC_URL: https://data-int.lsst.cloud/ STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/idfint.json + +app-metrics: + enabled: true + apps: + - gafaelfawr diff --git a/applications/sasquatch/values-idfprod.yaml b/applications/sasquatch/values-idfprod.yaml new file mode 100644 index 0000000000..fce7457c18 --- /dev/null +++ b/applications/sasquatch/values-idfprod.yaml @@ -0,0 +1,97 @@ +strimzi-kafka: + kafka: + externalListener: + tls: + enabled: true + bootstrap: + loadBalancerIP: "34.55.132.0" + host: sasquatch-kafka-bootstrap.lsst.cloud + + brokers: + - broker: 3 + loadBalancerIP: "34.122.37.250" + host: sasquatch-kafka-3.lsst.cloud + - broker: 4 + loadBalancerIP: "34.72.131.177" + host: sasquatch-kafka-4.lsst.cloud + - broker: 5 + loadBalancerIP: "34.72.103.157" + host: sasquatch-kafka-5.lsst.cloud + users: + kafdrop: + enabled: true + telegraf: + enabled: true + kraft: + enabled: true + kafkaController: + enabled: true + resources: + requests: + memory: 8Gi + cpu: "1" + limits: + memory: 8Gi + cpu: "1" + registry: + ingress: + enabled: true + annotations: + nginx.ingress.kubernetes.io/rewrite-target: /$2 + hostname: data.lsst.cloud + path: /schema-registry(/|$)(.*) + connect: + enabled: false + +influxdb: + ingress: + enabled: true + hostname: data.lsst.cloud + resources: + requests: + memory: 8Gi + cpu: 1 + limits: + memory: 8Gi + cpu: 1 + +telegraf-kafka-consumer: + enabled: true + kafkaConsumers: + example: + enabled: true + replicaCount: 1 + database: "lsst.example" + tags: | + [ "band", "instrument" ] + timestamp_format: "unix_ms" + timestamp_field: "timestamp" + topicRegexps: | + [ "lsst.example" ] +kafdrop: + ingress: + enabled: true + hostname: 
data.lsst.cloud + +chronograf: + ingress: + enabled: true + hostname: data.lsst.cloud + + env: + GENERIC_NAME: "OIDC" + GENERIC_AUTH_URL: https://data.lsst.cloud/auth/openid/login + GENERIC_TOKEN_URL: https://data.lsst.cloud/auth/openid/token + USE_ID_TOKEN: 1 + JWKS_URL: https://data.lsst.cloud/.well-known/jwks.json + GENERIC_API_URL: https://data.lsst.cloud/auth/openid/userinfo + GENERIC_SCOPES: openid + GENERIC_API_KEY: sub + PUBLIC_URL: https://data.lsst.cloud/ + STATUS_FEED_URL: https://raw.githubusercontent.com/lsst-sqre/rsp_broadcast/main/jsonfeeds/idfprod.json + +app-metrics: + enabled: true + apps: + - gafaelfawr + - mobu diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml index 7a6158cfef..24afd30be1 100644 --- a/applications/sasquatch/values-summit.yaml +++ b/applications/sasquatch/values-summit.yaml @@ -1,5 +1,8 @@ strimzi-kafka: kafka: + config: + log.retention.minutes: 10080 + offsets.retention.minutes: 10080 storage: storageClassName: rook-ceph-block externalListener: @@ -160,7 +163,7 @@ kafka-connect-manager: auxtel: enabled: true repairerConnector: false - topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" + topicsRegex: ".*ATAOS|.*ATBuilding|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS" maintel: enabled: true repairerConnector: false @@ -177,7 +180,7 @@ kafka-connect-manager: eas: enabled: true repairerConnector: false - topicsRegex: ".*DIMM|.*DSM|.*EPM|.*ESS|.*HVAC|.*WeatherForecast" + topicsRegex: ".*DIMM|.*DREAM|.*DSM|.*EPM|.*ESS|.*HVAC|.*WeatherForecast" latiss: enabled: true repairerConnector: false @@ -209,7 +212,7 @@ kafka-connect-manager: calsys: enabled: true repairerConnector: false - topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser" + topicsRegex: 
".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LEDProjector|.*LinearStage|.*TunableLaser" mtaircompressor: enabled: true repairerConnector: false @@ -336,10 +339,11 @@ telegraf-kafka-consumer: database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + [ "lsst.sal.DIMM", "lsst.sal.DREAM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] debug: true m1m3: enabled: true + metric_batch_size: 2500 database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -378,7 +382,7 @@ telegraf-kafka-consumer: database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] + [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LEDProjector", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] debug: true mtaircompressor: enabled: true @@ -401,6 +405,13 @@ telegraf-kafka-consumer: topicRegexps: | [ "lsst.sal.GIS" ] debug: true + mtvms: + enabled: true + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTVMS" ] + debug: true lsstcam: enabled: true database: "efd" @@ -413,7 +424,7 @@ telegraf-kafka-consumer: database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + [ "lsst.sal.ATAOS", "lsst.sal.ATBuilding", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] debug: true latiss: enabled: true diff --git 
a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml index 03b2703aa2..3cfe4b3025 100644 --- a/applications/sasquatch/values-tucson-teststand.yaml +++ b/applications/sasquatch/values-tucson-teststand.yaml @@ -23,11 +23,11 @@ strimzi-kafka: host: sasquatch-tts-kafka-2.lsst.codes resources: requests: - memory: 80Gi - cpu: 4 + memory: 8Gi + cpu: 1 limits: - memory: 80Gi - cpu: 4 + memory: 8Gi + cpu: 1 metricsConfig: enabled: true kafkaExporter: @@ -49,6 +49,8 @@ strimzi-kafka: enabled: true kafkaConnectManager: enabled: true + consdb: + enabled: true registry: ingress: enabled: true @@ -82,7 +84,7 @@ telegraf-kafka-consumer: enabled: true database: "efd" topicRegexps: | - [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + [ "lsst.sal.ATAOS", "lsst.sal.ATBuilding", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] debug: true maintel: enabled: true @@ -102,7 +104,7 @@ telegraf-kafka-consumer: metric_batch_size: 100 flush_interval: 20s topicRegexps: | - [ "lsst.sal.DIMM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + [ "lsst.sal.DIMM", "lsst.sal.DREAM", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.ESS", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] debug: true latiss: enabled: true diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml index 9e02f4ea5e..035ec26702 100644 --- a/applications/sasquatch/values-usdfprod.yaml +++ b/applications/sasquatch/values-usdfprod.yaml @@ -1,16 +1,10 @@ strimzi-kafka: kafka: - minInsyncReplicas: 1 - listeners: - tls: - enabled: true - plain: - enabled: true - external: - enabled: true config: # -- Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. 
replica.lag.time.max.ms: 120000 + log.retention.minutes: 10080 + offsets.retention.minutes: 10080 connect: enabled: true @@ -143,6 +137,7 @@ telegraf-kafka-consumer: # CSC connectors maintel: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -150,6 +145,7 @@ telegraf-kafka-consumer: debug: true mtmount: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -157,6 +153,7 @@ telegraf-kafka-consumer: debug: true comcam: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -164,13 +161,16 @@ telegraf-kafka-consumer: debug: true eas: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.DIMM", "lsst.sal.ESS", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] + [ "lsst.sal.DIMM", "lsst.sal.DREAM", "lsst.sal.ESS", "lsst.sal.DSM", "lsst.sal.EPM", "lsst.sal.HVAC", "lsst.sal.WeatherForecast" ] debug: true m1m3: enabled: true + repair: false + metric_batch_size: 2500 database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -178,6 +178,7 @@ telegraf-kafka-consumer: debug: true m2: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -185,6 +186,7 @@ telegraf-kafka-consumer: debug: true obssys: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -192,6 +194,7 @@ telegraf-kafka-consumer: debug: true ocps: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -199,6 +202,7 @@ telegraf-kafka-consumer: debug: true pmd: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -206,13 +210,15 @@ telegraf-kafka-consumer: debug: true calsys: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ 
"lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] + [ "lsst.sal.ATMonochromator", "lsst.sal.ATWhiteLight", "lsst.sal.CBP", "lsst.sal.Electrometer", "lsst.sal.FiberSpectrograph", "lsst.sal.LEDProjector", "lsst.sal.LinearStage", "lsst.sal.TunableLaser" ] debug: true mtaircompressor: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -220,6 +226,7 @@ telegraf-kafka-consumer: debug: true genericcamera: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -227,13 +234,23 @@ telegraf-kafka-consumer: debug: true gis: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | [ "lsst.sal.GIS" ] debug: true + mtvms: + enabled: true + repair: false + database: "efd" + timestamp_field: "private_efdStamp" + topicRegexps: | + [ "lsst.sal.MTVMS" ] + debug: true lsstcam: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -241,13 +258,15 @@ telegraf-kafka-consumer: debug: true auxtel: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | - [ "lsst.sal.ATAOS", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] + [ "lsst.sal.ATAOS", "lsst.sal.ATBuilding", "lsst.sal.ATDome", "lsst.sal.ATDomeTrajectory", "lsst.sal.ATHexapod", "lsst.sal.ATPneumatics", "lsst.sal.ATPtg", "lsst.sal.ATMCS" ] debug: true latiss: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -255,6 +274,7 @@ telegraf-kafka-consumer: debug: true test: enabled: true + repair: false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | @@ -262,6 +282,7 @@ telegraf-kafka-consumer: debug: true lasertracker: enabled: true + repair: 
false database: "efd" timestamp_field: "private_efdStamp" topicRegexps: | diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml index cc9fff35e6..589d55b00e 100644 --- a/applications/sasquatch/values.yaml +++ b/applications/sasquatch/values.yaml @@ -93,6 +93,11 @@ influxdb: # within the TSM engine and WAL trace-logging-enabled: true + # -- Maximum number of series allowed per database before writes are + # dropped. Change the setting to 0 to allow an unlimited number of series per + # database. + max-series-per-database: 0 + http: # -- Whether to enable the HTTP endpoints enabled: true @@ -232,7 +237,7 @@ kapacitor: repository: kapacitor # -- Tag to use for Kapacitor - tag: 1.7.5 + tag: 1.7.6 persistence: # -- Whether to enable Kapacitor data persistence diff --git a/applications/schedview-snapshot/Chart.yaml b/applications/schedview-snapshot/Chart.yaml index f16fd1a7bd..1f34b86432 100644 --- a/applications/schedview-snapshot/Chart.yaml +++ b/applications/schedview-snapshot/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: v0.12.0 +appVersion: v0.15.0 description: Dashboard for examination of scheduler snapshots name: schedview-snapshot sources: diff --git a/applications/schedview-snapshot/values-usdfdev.yaml b/applications/schedview-snapshot/values-usdfdev.yaml index 564723a628..a2a71bce62 100644 --- a/applications/schedview-snapshot/values-usdfdev.yaml +++ b/applications/schedview-snapshot/values-usdfdev.yaml @@ -1,3 +1,3 @@ image: # -- Overrides the image tag whose default is the chart appVersion. - tag: "v0.12.0" + tag: "v0.15.0" diff --git a/applications/schedview-snapshot/values-usdfint.yaml b/applications/schedview-snapshot/values-usdfint.yaml index 70bdc55d00..ad1ab415d9 100644 --- a/applications/schedview-snapshot/values-usdfint.yaml +++ b/applications/schedview-snapshot/values-usdfint.yaml @@ -1,6 +1,6 @@ image: # -- Overrides the image tag whose default is the chart appVersion. 
- tag: "v0.12.0" + tag: "v0.15.0" resources: limits: diff --git a/applications/sia/Chart.yaml b/applications/sia/Chart.yaml new file mode 100644 index 0000000000..0a5388c001 --- /dev/null +++ b/applications/sia/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +appVersion: 0.1.3 +description: Simple Image Access (SIA) IVOA Service using Butler +name: sia +sources: +- https://github.com/lsst-sqre/sia +type: application +version: 1.0.0 +annotations: + phalanx.lsst.io/docs: | + - id: "SQR-095" + title: "SIAv2 over Butler FastAPI service" + url: "https://sqr-095.lsst.io" diff --git a/applications/sia/README.md b/applications/sia/README.md new file mode 100644 index 0000000000..b105a3f178 --- /dev/null +++ b/applications/sia/README.md @@ -0,0 +1,35 @@ +# sia + +Simple Image Access (SIA) IVOA Service using Butler + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity rules for the sia deployment pod | +| config.butlerDataCollections | list | `[]` | List of data (Butler) Collections Expected attributes: `config`, `label`, `name`, `butler_type`, `repository` & `datalink_url` | +| config.directButlerEnabled | bool | `false` | Whether direct butler access is enabled | +| config.logLevel | string | `"INFO"` | Logging level | +| config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | +| config.pathPrefix | string | `"/api/sia"` | URL path prefix | +| config.pgUser | string | `"rubin"` | User to use from the PGPASSFILE if sia is using a direct Butler connection | +| config.slackAlerts | bool | `false` | Whether to send alerts and status to Slack. 
| +| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | +| global.baseUrl | string | Set by Argo CD | Base URL for the environment | +| global.host | string | Set by Argo CD | Host name for ingress | +| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets | +| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the sia image | +| image.repository | string | `"ghcr.io/lsst-sqre/sia"` | Image to use in the sia deployment | +| image.tag | string | The appVersion of the chart | Tag of image to use | +| ingress.annotations | object | `{}` | Additional annotations for the ingress rule | +| ingress.path | string | `"/api/sia"` | Path prefix where app is hosted | +| nameOverride | string | `""` | Override the base name for resources | +| nodeSelector | object | `{}` | Node selection rules for the sia deployment pod | +| podAnnotations | object | `{}` | Annotations for the sia deployment pod | +| replicaCount | int | `1` | Number of web deployment pods to start | +| resources | object | See `values.yaml` | Resource limits and requests for the sia deployment pod | +| tolerations | list | `[]` | Tolerations for the sia deployment pod | diff --git a/applications/sia/secrets.yaml b/applications/sia/secrets.yaml new file mode 100644 index 0000000000..05d15deeea --- /dev/null +++ b/applications/sia/secrets.yaml @@ -0,0 +1,23 @@ +"aws-credentials.ini": + if: config.directButlerEnabled + copy: + application: nublado + key: "aws-credentials.ini" +"butler-gcs-idf-creds.json": + if: config.directButlerEnabled + copy: + application: nublado + key: "butler-gcs-idf-creds.json" +"postgres-credentials.txt": + if: config.directButlerEnabled + copy: + application: nublado + key: "postgres-credentials.txt" +slack-webhook: + description: >- + Slack web hook used to report internal errors to Slack. This secret may be + changed at any time. 
+ if: config.slackAlerts + copy: + application: mobu + key: app-alert-webhook diff --git a/applications/sia/templates/_helpers.tpl b/applications/sia/templates/_helpers.tpl new file mode 100644 index 0000000000..92bdc6ea01 --- /dev/null +++ b/applications/sia/templates/_helpers.tpl @@ -0,0 +1,52 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "sia.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "sia.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "sia.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "sia.labels" -}} +helm.sh/chart: {{ include "sia.chart" . }} +{{ include "sia.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "sia.selectorLabels" -}} +app.kubernetes.io/name: "sia" +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + diff --git a/applications/sia/templates/configmap.yaml b/applications/sia/templates/configmap.yaml new file mode 100644 index 0000000000..f594419d9f --- /dev/null +++ b/applications/sia/templates/configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "sia" + labels: + {{- include "sia.labels" . | nindent 4 }} +data: + SIA_LOG_LEVEL: {{ .Values.config.logLevel | quote }} + SIA_PATH_PREFIX: {{ .Values.config.pathPrefix | quote }} + SIA_PROFILE: {{ .Values.config.logProfile | quote }} diff --git a/applications/sia/templates/deployment.yaml b/applications/sia/templates/deployment.yaml new file mode 100644 index 0000000000..e7da6b12f4 --- /dev/null +++ b/applications/sia/templates/deployment.yaml @@ -0,0 +1,117 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "sia" + labels: + {{- include "sia.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "sia.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "sia.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + automountServiceAccountToken: false + {{- if .Values.config.directButlerEnabled }} + initContainers: + - name: fix-secret-permissions + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - "/bin/sh" + - "-c" + - | + cp -RL /tmp/secrets-raw/* /etc/butler/secrets/ + chmod 0400 /etc/butler/secrets/* + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + volumeMounts: + - name: "secrets" + mountPath: "/etc/butler/secrets" + - name: "secrets-raw" + mountPath: "/tmp/secrets-raw" + {{- end }} + containers: + - name: {{ .Chart.Name }} + envFrom: + - configMapRef: + name: "sia" + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: "http" + containerPort: 8080 + protocol: "TCP" + readinessProbe: + httpGet: + path: {{ .Values.config.pathPrefix }} + port: "http" + resources: + {{- toYaml .Values.resources | nindent 12 }} + env: + - name: "SIA_BUTLER_DATA_COLLECTIONS" + value: {{ .Values.config.butlerDataCollections | toJson | quote }} + {{- if .Values.config.slackAlerts }} + - name: "SIA_SLACK_WEBHOOK" + valueFrom: + secretKeyRef: + name: "sia" + key: "slack-webhook" + {{- end }} + {{- if .Values.config.directButlerEnabled }} + - name: "AWS_SHARED_CREDENTIALS_FILE" + value: "/tmp/secrets/aws-credentials.ini" + - name: "PGUSER" + value: {{ .Values.config.pgUser }} + - name: "PGPASSFILE" + value: "/etc/butler/secrets/postgres-credentials.txt" + - name: "GOOGLE_APPLICATION_CREDENTIALS" + value: "/tmp/secrets/butler-gcs-idf-creds.json" + {{- end }} + {{- if .Values.config.directButlerEnabled }} + volumeMounts: + - name: "secrets" + mountPath: "/etc/butler/secrets" + readOnly: true + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "all" + 
readOnlyRootFilesystem: false + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.config.directButlerEnabled }} + volumes: + - name: "secrets-raw" + secret: + secretName: "sia" + - name: "secrets" + emptyDir: {} + {{- end }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 diff --git a/applications/sia/templates/ingress-anonymous.yaml b/applications/sia/templates/ingress-anonymous.yaml new file mode 100644 index 0000000000..c683c4e2d1 --- /dev/null +++ b/applications/sia/templates/ingress-anonymous.yaml @@ -0,0 +1,44 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: {{ template "sia.fullname" . }}-anonymous + labels: + {{- include "sia.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + anonymous: true +template: + metadata: + name: {{ template "sia.fullname" . }}-anonymous + {{- with .Values.ingress.annotations }} + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ .Values.global.host | quote }} + http: + paths: + - path: "{{ .Values.ingress.path }}/openapi.json" + pathType: "Exact" + backend: + service: + name: {{ template "sia.fullname" . }} + port: + number: 8080 + - path: "{{ .Values.ingress.path }}/.+/capabilities" + pathType: "ImplementationSpecific" + backend: + service: + name: {{ template "sia.fullname" . }} + port: + number: 8080 + - path: "{{ .Values.ingress.path }}/.+/availability" + pathType: "ImplementationSpecific" + backend: + service: + name: {{ template "sia.fullname" . 
}} + port: + number: 8080 diff --git a/applications/sia/templates/ingress.yaml b/applications/sia/templates/ingress.yaml new file mode 100644 index 0000000000..bb9638b596 --- /dev/null +++ b/applications/sia/templates/ingress.yaml @@ -0,0 +1,35 @@ +apiVersion: gafaelfawr.lsst.io/v1alpha1 +kind: GafaelfawrIngress +metadata: + name: {{ template "sia.fullname" . }} + labels: + {{- include "sia.labels" . | nindent 4 }} +config: + baseUrl: {{ .Values.global.baseUrl | quote }} + scopes: + all: + - "read:image" + delegate: + internal: + service: "sia" + scopes: + - "read:image" +template: + metadata: + name: {{ template "sia.fullname" . }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 6 }} + {{- end }} + spec: + rules: + - host: {{ required "global.host must be set" .Values.global.host | quote }} + http: + paths: + - path: {{ .Values.config.pathPrefix | quote }} + pathType: "Prefix" + backend: + service: + name: "sia" + port: + number: 8080 diff --git a/applications/sia/templates/networkpolicy.yaml b/applications/sia/templates/networkpolicy.yaml new file mode 100644 index 0000000000..4edbb84b29 --- /dev/null +++ b/applications/sia/templates/networkpolicy.yaml @@ -0,0 +1,21 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: "sia" +spec: + podSelector: + matchLabels: + {{- include "sia.selectorLabels" . | nindent 6 }} + policyTypes: + - "Ingress" + ingress: + # Allow inbound access from pods (in any namespace) labeled + # gafaelfawr.lsst.io/ingress: true. + - from: + - namespaceSelector: {} + podSelector: + matchLabels: + gafaelfawr.lsst.io/ingress: "true" + ports: + - protocol: "TCP" + port: 8080 diff --git a/applications/sia/templates/service.yaml b/applications/sia/templates/service.yaml new file mode 100644 index 0000000000..679e84dffa --- /dev/null +++ b/applications/sia/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: "sia" + labels: + {{- include "sia.labels" . 
| nindent 4 }} +spec: + type: "ClusterIP" + ports: + - port: 8080 + targetPort: "http" + protocol: "TCP" + name: "http" + selector: + {{- include "sia.selectorLabels" . | nindent 4 }} diff --git a/applications/sia/templates/vault-secrets.yaml b/applications/sia/templates/vault-secrets.yaml new file mode 100644 index 0000000000..3b1ebc978a --- /dev/null +++ b/applications/sia/templates/vault-secrets.yaml @@ -0,0 +1,9 @@ +apiVersion: ricoberger.de/v1alpha1 +kind: VaultSecret +metadata: + name: "sia" + labels: + {{- include "sia.labels" . | nindent 4 }} +spec: + path: "{{ .Values.global.vaultSecretsPath }}/sia" + type: Opaque diff --git a/applications/sia/values-idfdev.yaml b/applications/sia/values-idfdev.yaml new file mode 100644 index 0000000000..857cf69f15 --- /dev/null +++ b/applications/sia/values-idfdev.yaml @@ -0,0 +1,10 @@ +config: + + # Data (Butler) Collections + butlerDataCollections: + - config: "https://raw.githubusercontent.com/lsst-dm/dax_obscore/refs/heads/main/configs/dp02.yaml" + label: "LSST.DP02" + name: "dp02" + butler_type: "REMOTE" + repository: "https://data-dev.lsst.cloud/api/butler/repo/dp02/butler.yaml" + datalink_url: "https://data-dev.lsst.cloud/api/datalink/links?ID=butler%3A//dp02/{id}" diff --git a/applications/sia/values-idfint.yaml b/applications/sia/values-idfint.yaml new file mode 100644 index 0000000000..610338ae61 --- /dev/null +++ b/applications/sia/values-idfint.yaml @@ -0,0 +1,10 @@ +config: + + # Data (Butler) Collections + butlerDataCollections: + - config: "https://raw.githubusercontent.com/lsst-dm/dax_obscore/refs/heads/main/configs/dp02.yaml" + label: "LSST.DP02" + name: "dp02" + butler_type: "REMOTE" + repository: "https://data-int.lsst.cloud/api/butler/repo/dp02/butler.yaml" + datalink_url: "https://data-int.lsst.cloud/api/datalink/links?ID=butler%3A//dp02/{id}" diff --git a/applications/sia/values-idfprod.yaml b/applications/sia/values-idfprod.yaml new file mode 100644 index 0000000000..32d3d5f76d --- /dev/null +++ 
b/applications/sia/values-idfprod.yaml @@ -0,0 +1,10 @@ +config: + + # Data (Butler) Collections + butlerDataCollections: + - config: "https://raw.githubusercontent.com/lsst-dm/dax_obscore/refs/heads/main/configs/dp02.yaml" + label: "LSST.DP02" + name: "dp02" + butler_type: "REMOTE" + repository: "https://data.lsst.cloud/api/butler/repo/dp02/butler.yaml" + datalink_url: "https://data.lsst.cloud/api/datalink/links?ID=butler%3A//dp02/{id}" diff --git a/applications/sia/values.yaml b/applications/sia/values.yaml new file mode 100644 index 0000000000..241790f837 --- /dev/null +++ b/applications/sia/values.yaml @@ -0,0 +1,87 @@ +# Default values for sia. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Override the base name for resources +nameOverride: "" + +# -- Override the full name for resources (includes the release name) +fullnameOverride: "" + +# -- Number of web deployment pods to start +replicaCount: 1 + +image: + # -- Image to use in the sia deployment + repository: "ghcr.io/lsst-sqre/sia" + + # -- Pull policy for the sia image + pullPolicy: "IfNotPresent" + + # -- Tag of image to use + # @default -- The appVersion of the chart + tag: "" + +config: + # -- Whether to send alerts and status to Slack. 
+ slackAlerts: false + + # -- Logging level + logLevel: "INFO" + + # -- Logging profile (`production` for JSON, `development` for + # human-friendly) + logProfile: "production" + + # -- URL path prefix + pathPrefix: "/api/sia" + + # -- Whether direct butler access is enabled + directButlerEnabled: false + + # -- List of data (Butler) Collections + # Expected attributes: `config`, `label`, `name`, `butler_type`, `repository` & `datalink_url` + butlerDataCollections: [] + + # -- User to use from the PGPASSFILE if sia is using a direct Butler + # connection + pgUser: "rubin" + + +ingress: + # -- Additional annotations for the ingress rule + annotations: {} + + # -- Path prefix where app is hosted + path: "/api/sia" + +# -- Affinity rules for the sia deployment pod +affinity: {} + +# -- Node selection rules for the sia deployment pod +nodeSelector: {} + +# -- Annotations for the sia deployment pod +podAnnotations: {} + +# -- Resource limits and requests for the sia deployment pod +# @default -- See `values.yaml` +resources: {} + +# -- Tolerations for the sia deployment pod +tolerations: [] + +# The following will be set by parameters injected by Argo CD and should not +# be set in the individual environment values files. 
+global: + # -- Base URL for the environment + # @default -- Set by Argo CD + baseUrl: null + + # -- Host name for ingress + # @default -- Set by Argo CD + host: null + + # -- Base path for Vault secrets + # @default -- Set by Argo CD + vaultSecretsPath: null diff --git a/applications/siav2/README.md b/applications/siav2/README.md index 82aaee32ae..92993339b1 100644 --- a/applications/siav2/README.md +++ b/applications/siav2/README.md @@ -28,7 +28,7 @@ Simple Image Access v2 service | uws.affinity | object | `{}` | Affinity rules for the UWS database pod | | uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image | | uws.image.repository | string | `"library/postgres"` | UWS database image to use | -| uws.image.tag | string | `"16.4"` | Tag of UWS database image to use | +| uws.image.tag | string | `"17.0"` | Tag of UWS database image to use | | uws.nodeSelector | object | `{}` | Node selection rules for the UWS database pod | | uws.podAnnotations | object | `{}` | Annotations for the UWS databse pod | | uws.resources | object | `{"limits":{"cpu":2,"memory":"4Gi"},"requests":{"cpu":0.25,"memory":"1Gi"}}` | Resource limits and requests for the UWS database pod | diff --git a/applications/siav2/values.yaml b/applications/siav2/values.yaml index 6547f8f87a..f9ce8acc2f 100644 --- a/applications/siav2/values.yaml +++ b/applications/siav2/values.yaml @@ -79,7 +79,7 @@ uws: pullPolicy: "IfNotPresent" # -- Tag of UWS database image to use - tag: "16.4" + tag: "17.0" # -- Resource limits and requests for the UWS database pod resources: diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md index 6b4e441b3c..74b079d5d9 100644 --- a/applications/sqlproxy-cross-project/README.md +++ b/applications/sqlproxy-cross-project/README.md @@ -19,7 +19,7 @@ GCP SQL Proxy as a service | fullnameOverride | string | `""` | Override the full name for resources (includes the release name) | | 
image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image | | image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use | -| image.tag | string | `"1.37.0"` | Tag of Cloud SQL Proxy image to use | +| image.tag | string | `"1.37.1"` | Tag of Cloud SQL Proxy image to use | | nameOverride | string | `""` | Override the base name for resources | | nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod | | podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod | diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml index 637c381d81..ac677e8060 100644 --- a/applications/sqlproxy-cross-project/values.yaml +++ b/applications/sqlproxy-cross-project/values.yaml @@ -14,7 +14,7 @@ image: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Tag of Cloud SQL Proxy image to use - tag: "1.37.0" + tag: "1.37.1" # -- Pull policy for the Cloud SQL Proxy image pullPolicy: "IfNotPresent" diff --git a/applications/squarebot/secrets.yaml b/applications/squarebot/secrets.yaml index 045c074a21..f1f68a460b 100644 --- a/applications/squarebot/secrets.yaml +++ b/applications/squarebot/secrets.yaml @@ -1,6 +1,9 @@ SQUAREBOT_GITHUB_APP_ID: description: >- The ID of the GitHub App shared by all Squarebot services. +SQUAREBOT_GITHUB_APP_USERNAME: + description: >- + The username slug of the GitHub App shared by all Squarebot services. SQUAREBOT_GITHUB_APP_PRIVATE_KEY: description: >- The private key for the GitHub App shared by all Squarebot services. diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml index e4746c7090..4e12177617 100644 --- a/applications/squareone/values.yaml +++ b/applications/squareone/values.yaml @@ -222,10 +222,10 @@ config: - ### Portal + ### Firefly - The Portal enables you to explore LSST image and table data in - your browser. 
+ Help pages for Firefly, which enables exploration and visualization + of image and table data in the Portal Aspect. diff --git a/applications/strimzi-access-operator/values-idfint.yaml b/applications/strimzi-access-operator/values-idfint.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/strimzi-access-operator/values-idfprod.yaml b/applications/strimzi-access-operator/values-idfprod.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml index fc8ddc5460..d3d40d3d35 100644 --- a/applications/strimzi/Chart.yaml +++ b/applications/strimzi/Chart.yaml @@ -7,5 +7,5 @@ home: https://strimzi.io appVersion: "0.39.0" dependencies: - name: strimzi-kafka-operator - version: "0.43.0" + version: "0.44.0" repository: https://strimzi.io/charts/ diff --git a/applications/strimzi/values-idfprod.yaml b/applications/strimzi/values-idfprod.yaml new file mode 100644 index 0000000000..1abe0d7c86 --- /dev/null +++ b/applications/strimzi/values-idfprod.yaml @@ -0,0 +1,9 @@ +strimzi-kafka-operator: + resources: + limits: + memory: "1Gi" + requests: + memory: "512Mi" + watchNamespaces: + - "sasquatch" + logLevel: "INFO" diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml index 8cb53aec89..a2356af0ab 100644 --- a/applications/telegraf-ds/Chart.yaml +++ b/applications/telegraf-ds/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf-ds - version: 1.1.34 + version: 1.1.35 repository: https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml index 33c097cea8..e0cd75bc86 100644 --- a/applications/telegraf/Chart.yaml +++ b/applications/telegraf/Chart.yaml @@ -8,7 +8,7 @@ sources: - https://github.com/influxdata/helm-charts dependencies: - name: telegraf - version: 1.8.54 + version: 1.8.55 repository: 
https://helm.influxdata.com/ annotations: phalanx.lsst.io/docs: | diff --git a/applications/templatebot/Chart.yaml b/applications/templatebot/Chart.yaml index c8a3e6c9b1..f8999cdf59 100644 --- a/applications/templatebot/Chart.yaml +++ b/applications/templatebot/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: "tickets-DM-43699" +appVersion: "0.3.0" description: Create new projects name: templatebot sources: diff --git a/applications/templatebot/README.md b/applications/templatebot/README.md index fa76b28227..20f4a576f5 100644 --- a/applications/templatebot/README.md +++ b/applications/templatebot/README.md @@ -14,6 +14,7 @@ Create new projects | config.logLevel | string | `"INFO"` | Logging level | | config.logProfile | string | `"production"` | Logging profile (`production` for JSON, `development` for human-friendly) | | config.pathPrefix | string | `"/templatebot"` | URL path prefix | +| config.templateRepoUrl | string | `"https://github.com/lsst/templates"` | URL for the template repository | | config.topics.slackAppMention | string | `"lsst.square-events.squarebot.slack.app.mention"` | Kafka topic name for the Slack `app_mention` events | | config.topics.slackBlockActions | string | `"lsst.square-events.squarebot.slack.interaction.block-actions"` | Kafka topic for Slack `block_actions` interaction events | | config.topics.slackMessageIm | string | `"lsst.square-events.squarebot.slack.message.im"` | Kafka topic name for the Slack `message.im` events (direct message channels) | diff --git a/applications/templatebot/secrets.yaml b/applications/templatebot/secrets.yaml index 7e672c9ecf..a472d3d3ba 100644 --- a/applications/templatebot/secrets.yaml +++ b/applications/templatebot/secrets.yaml @@ -1,3 +1,9 @@ +TEMPLATEBOT_GITHUB_APP_USERNAME: + description: >- + The username slug for the GitHub App shared by all Squarebot services. 
+ copy: + application: squarebot + key: SQUAREBOT_GITHUB_APP_USERNAME TEMPLATEBOT_GITHUB_APP_ID: description: >- The ID of the GitHub App shared by all Squarebot services. @@ -24,3 +30,9 @@ TEMPLATEBOT_SLACK_TOKEN: copy: application: squarebot key: SQUAREBOT_SLACK_TOKEN +TEMPLATEBOT_LTD_USERNAME: + description: >- + The username for the LSST the Docs admin account. +TEMPLATEBOT_LTD_PASSWORD: + description: >- + The password for the LSST the Docs admin account. diff --git a/applications/templatebot/templates/configmap.yaml b/applications/templatebot/templates/configmap.yaml index 343c47e17b..a9ed3ebcb6 100644 --- a/applications/templatebot/templates/configmap.yaml +++ b/applications/templatebot/templates/configmap.yaml @@ -9,6 +9,7 @@ data: TEMPLATEBOT_ENVIRONMENT_URL: {{ .Values.global.baseUrl | quote }} TEMPLATEBOT_PATH_PREFIX: {{ .Values.config.pathPrefix | quote }} TEMPLATEBOT_PROFILE: {{ .Values.config.logProfile | quote }} + TEMPLATEBOT_TEMPLATE_REPO_URL: {{ .Values.config.templateRepoUrl | quote }} TEMPLATEBOT_APP_MENTION_TOPIC: {{ .Values.config.topics.slackAppMention | quote }} TEMPLATEBOT_MESSAGE_IM_TOPIC: {{ .Values.config.topics.slackMessageIm | quote }} TEMPLATEBOT_BLOCK_ACTIONS_TOPIC: {{ .Values.config.topics.slackBlockActions | quote }} diff --git a/applications/templatebot/templates/deployment.yaml b/applications/templatebot/templates/deployment.yaml index 79888b1aff..497485cd31 100644 --- a/applications/templatebot/templates/deployment.yaml +++ b/applications/templatebot/templates/deployment.yaml @@ -30,6 +30,9 @@ spec: - configMapRef: name: "templatebot" env: + # Writeable directory for caching template repo checkouts + - name: "TEMPLATEBOT_TEMPLATE_CACHE_DIR" + value: "/tmp/template_repo_cache" # Writeable directory for concatenating certs. See "tmp" volume. 
- name: "KAFKA_CERT_TEMP_DIR" value: "/tmp/kafka_certs" @@ -58,6 +61,31 @@ spec: secretKeyRef: name: "templatebot" key: "TEMPLATEBOT_SLACK_TOKEN" + - name: "TEMPLATEBOT_GITHUB_APP_ID" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_GITHUB_APP_ID" + - name: "TEMPLATEBOT_GITHUB_APP_USERNAME" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_GITHUB_APP_USERNAME" + - name: "TEMPLATEBOT_GITHUB_APP_PRIVATE_KEY" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_GITHUB_APP_PRIVATE_KEY" + - name: "TEMPLATEBOT_LTD_USERNAME" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_LTD_USERNAME" + - name: "TEMPLATEBOT_LTD_PASSWORD" + valueFrom: + secretKeyRef: + name: "templatebot" + key: "TEMPLATEBOT_LTD_PASSWORD" volumeMounts: - name: "kafka" mountPath: "/etc/kafkacluster/ca.crt" @@ -68,8 +96,14 @@ spec: - name: "kafka" mountPath: "/etc/kafkauser/user.key" subPath: "ssl.keystore.key" # private key for the consuming client - - name: "tmp" + - name: "kafka-certs-tmp" mountPath: "/tmp/kafka_certs" + - name: "tmp" + mountPath: "/tmp" + - name: "repo-cache" + mountPath: "/tmp/template_repo_cache" + - name: "cookiecutter-replay-dir" + mountPath: "/home/appuser/.cookiecutter_replay/" image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: @@ -109,3 +143,9 @@ spec: secretName: "templatebot" - name: "tmp" emptyDir: {} + - name: "cookiecutter-replay-dir" + emptyDir: {} + - name: "kafka-certs-tmp" + emptyDir: {} + - name: "repo-cache" + emptyDir: {} diff --git a/applications/templatebot/values.yaml b/applications/templatebot/values.yaml index 227aa85890..1a71e8a5e5 100644 --- a/applications/templatebot/values.yaml +++ b/applications/templatebot/values.yaml @@ -27,6 +27,9 @@ config: # -- URL path prefix pathPrefix: "/templatebot" + # -- URL for the template repository + templateRepoUrl: 
"https://github.com/lsst/templates" + topics: # -- Kafka topic name for the Slack `app_mention` events slackAppMention: "lsst.square-events.squarebot.slack.app.mention" diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml index 5a48fbb4f9..00567c3136 100644 --- a/applications/times-square/Chart.yaml +++ b/applications/times-square/Chart.yaml @@ -12,7 +12,7 @@ appVersion: "0.13.0" dependencies: - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/times-square/README.md b/applications/times-square/README.md index 7385571ed7..366950ca28 100644 --- a/applications/times-square/README.md +++ b/applications/times-square/README.md @@ -19,7 +19,7 @@ An API service for managing and rendering parameterized Jupyter notebooks. | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | | cloudsql.image.resources | object | see `values.yaml` | Resource requests and limits for Cloud SQL pod | -| cloudsql.image.tag | string | `"1.37.0"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.37.1"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role | | config.databaseUrl | string | None, must be set | URL for the PostgreSQL database | diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml index e6cdc61f51..f5509e689f 100644 --- a/applications/times-square/values.yaml +++ b/applications/times-square/values.yaml @@ -156,7 +156,7 @@ cloudsql: repository: 
"gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.37.0" + tag: "1.37.1" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/applications/unfurlbot/Chart.yaml b/applications/unfurlbot/Chart.yaml index 7924ca56af..9f739e437f 100644 --- a/applications/unfurlbot/Chart.yaml +++ b/applications/unfurlbot/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: "0.3.0" +appVersion: "0.3.1" description: Squarebot backend that unfurls Jira issues. name: unfurlbot sources: @@ -9,5 +9,5 @@ version: 1.0.0 dependencies: - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ diff --git a/applications/unfurlbot/README.md b/applications/unfurlbot/README.md index c9f6b75900..4d615367d9 100644 --- a/applications/unfurlbot/README.md +++ b/applications/unfurlbot/README.md @@ -15,7 +15,7 @@ Squarebot backend that unfurls Jira issues. | autoscaling.maxReplicas | int | `100` | Maximum number of unfurlbot deployment pods | | autoscaling.minReplicas | int | `1` | Minimum number of unfurlbot deployment pods | | autoscaling.targetCPUUtilizationPercentage | int | `80` | Target CPU utilization of unfurlbot deployment pods | -| config.jiraProjects | string | `"ADMIN, CCB, CAP, COMCAM, COMT, DM, EPO, FRACAS, IAM, IHS, IT, ITRFC, LOVE, LASD, LIT, LOPS, LVV, M1M3V, OPSIM, PHOSIM, PST, PSV, PUB, RFC, RM, SAFE, SIM, SPP, SBTT, SE, SUMMIT, TSAIV, TCT, SECMVERIF, TMDC, TPC, TSEIA, TAS, TELV, TSSAL, TSS, TSSPP, WMP, PREOPS, OBS, SITCOM, BLOCK\n"` | Names of Jira projects to unfurl (comma-separated) | +| config.jiraProjects | string | See `values.yaml` | Names of Jira projects to unfurl (comma-separated) | | config.jiraUrl | string | `"https://rubinobs.atlassian.net/"` | Jira base URL | | config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" | | config.redisUrl | string | `"redis://unfurlbot-redis:6379/0"` | URL to the local redis instance | diff 
--git a/applications/unfurlbot/values.yaml b/applications/unfurlbot/values.yaml index df1aece2f7..207f34358e 100644 --- a/applications/unfurlbot/values.yaml +++ b/applications/unfurlbot/values.yaml @@ -26,22 +26,27 @@ config: # -- Kafka topic name for the Slack `app_mention` events slackAppMention: "lsst.square-events.squarebot.slack.app.mention" - # -- Kafka topic name for the Slack `message.channels` events (public channels) + # -- Kafka topic name for the Slack `message.channels` events (public + # channels) slackMessageChannels: "lsst.square-events.squarebot.slack.message.channels" - # -- Kafka topic name for the Slack `message.groups` events (private channels) + # -- Kafka topic name for the Slack `message.groups` events (private + # channels) slackMessageGroups: "lsst.square-events.squarebot.slack.message.groups" - # -- Kafka topic name for the Slack `message.im` events (direct message channels) + # -- Kafka topic name for the Slack `message.im` events (direct message + # channels) slackMessageIm: "lsst.square-events.squarebot.slack.message.im" - # -- Kafka topic name for the Slack `message.mpim` events (multi-person direct messages) + # -- Kafka topic name for the Slack `message.mpim` events (multi-person + # direct messages) slackMessageMpim: "lsst.square-events.squarebot.slack.message.mpim" # -- Jira base URL jiraUrl: "https://rubinobs.atlassian.net/" # -- Names of Jira projects to unfurl (comma-separated) + # @default -- See `values.yaml` jiraProjects: > ADMIN, CCB, @@ -81,6 +86,7 @@ config: TPC, TSEIA, TAS, + TAXICAB, TELV, TSSAL, TSS, diff --git a/applications/uws/README.md b/applications/uws/README.md index 4f1cb161a0..4ec810c701 100644 --- a/applications/uws/README.md +++ b/applications/uws/README.md @@ -38,6 +38,7 @@ Deployment for the UWS and DM OCPS CSCs | uws-api-server.server.securityContext.runAsGroup | int | `202` | Set the GID for the UWS server container entrypoint | | uws-api-server.server.securityContext.runAsUser | int | `1000` | Set the 
UID for the UWS server container entrypoint | | uws-api-server.targetCluster | string | `""` | Target Kubernetes cluster | +| uws-api-server.ttlSecondsAfterFinished | int | `0` | Time to live (in seconds) for pod after it completes Allows logs to be inspected. | | uws-api-server.vaultPathPrefix | string | `""` | Site-specific Vault path for secrets. | | uws-api-server.volumes | list | `[]` | Central data volumes to be mounted in job containers. Each object listed can have the following attributes defined: _name_ (A label identifier for the data volume mount) _server_ (The hostname for the NFS server with the data volume mount) _claimName_ (The PVC claim name for the data volume mount) _mountPath_ (The mount path in the server container for the data volume mount) _exportPath_ (The export path on the NFS server for the data volume mount) _subPath_ (A possible sub path for the data volume mount) _readOnly_ (Flag to mark the data volume mount as read only or read/write) | | uws-api-server.workingVolume.claimName | string | `""` | The PVC claim name for the working volume | diff --git a/applications/uws/charts/uws-api-server/README.md b/applications/uws/charts/uws-api-server/README.md index d0a72c8090..92e3631a55 100644 --- a/applications/uws/charts/uws-api-server/README.md +++ b/applications/uws/charts/uws-api-server/README.md @@ -24,6 +24,7 @@ Helm chart for deploying the Universal Worker Service API Server | server.securityContext.runAsGroup | int | `202` | Set the GID for the UWS server container entrypoint | | server.securityContext.runAsUser | int | `1000` | Set the UID for the UWS server container entrypoint | | targetCluster | string | `""` | Target Kubernetes cluster | +| ttlSecondsAfterFinished | int | `0` | Time to live (in seconds) for pod after it completes Allows logs to be inspected. | | vaultPathPrefix | string | `""` | Site-specific Vault path for secrets. | | volumes | list | `[]` | Central data volumes to be mounted in job containers. 
Each object listed can have the following attributes defined: _name_ (A label identifier for the data volume mount) _server_ (The hostname for the NFS server with the data volume mount) _claimName_ (The PVC claim name for the data volume mount) _mountPath_ (The mount path in the server container for the data volume mount) _exportPath_ (The export path on the NFS server for the data volume mount) _subPath_ (A possible sub path for the data volume mount) _readOnly_ (Flag to mark the data volume mount as read only or read/write) | | workingVolume.claimName | string | `""` | The PVC claim name for the working volume | diff --git a/applications/uws/charts/uws-api-server/templates/configmap.yaml b/applications/uws/charts/uws-api-server/templates/configmap.yaml index 4956d00f2d..c37a98cfc8 100644 --- a/applications/uws/charts/uws-api-server/templates/configmap.yaml +++ b/applications/uws/charts/uws-api-server/templates/configmap.yaml @@ -4,6 +4,7 @@ metadata: name: {{ .Release.Name }}-configmap data: config: | + ttlSecondsAfterFinished: "{{ .Values.ttlSecondsAfterFinished }}" workingVolume: {{- toYaml .Values.workingVolume | nindent 6 }} volumes: diff --git a/applications/uws/charts/uws-api-server/values.yaml b/applications/uws/charts/uws-api-server/values.yaml index 77d14684a0..3e37c2c88f 100644 --- a/applications/uws/charts/uws-api-server/values.yaml +++ b/applications/uws/charts/uws-api-server/values.yaml @@ -71,3 +71,6 @@ volumes: [] # -- Temporary flag to make service deploy own namespace. # Doing this to not disrupt other sites. createNamespace: false +# -- Time to live (in seconds) for pod after it completes +# Allows logs to be inspected. 
+ttlSecondsAfterFinished: 0 diff --git a/applications/uws/secrets-tucson-teststand.yaml b/applications/uws/secrets-tucson-teststand.yaml new file mode 100644 index 0000000000..c6609ef2f1 --- /dev/null +++ b/applications/uws/secrets-tucson-teststand.yaml @@ -0,0 +1,6 @@ +redis-password: + description: >- + Password to the Rapid Analysis redis instance. + copy: + application: rubintv + key: redis-password diff --git a/applications/uws/values-base.yaml b/applications/uws/values-base.yaml index c9ef439fe2..6b5b6bd37a 100644 --- a/applications/uws/values-base.yaml +++ b/applications/uws/values-base.yaml @@ -4,6 +4,7 @@ uws-api-server: image: tag: latest logLevel: INFO + ttlSecondsAfterFinished: 3600 butlerPg: secretName: uws containerPath: /home/lsst/.lsst diff --git a/applications/uws/values-tucson-teststand.yaml b/applications/uws/values-tucson-teststand.yaml index 9af166ba6e..869325aefc 100644 --- a/applications/uws/values-tucson-teststand.yaml +++ b/applications/uws/values-tucson-teststand.yaml @@ -6,6 +6,7 @@ uws-api-server: image: tag: latest logLevel: INFO + ttlSecondsAfterFinished: 3600 butlerPg: secretName: uws containerPath: /home/lsst/.lsst diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml index 8556856914..b366e67f4e 100644 --- a/applications/vault-secrets-operator/Chart.yaml +++ b/applications/vault-secrets-operator/Chart.yaml @@ -5,7 +5,7 @@ sources: - https://github.com/ricoberger/vault-secrets-operator dependencies: - name: vault-secrets-operator - version: 2.6.0 + version: 2.6.1 repository: https://ricoberger.github.io/helm-charts/ annotations: phalanx.lsst.io/docs: | diff --git a/applications/vault/Chart.yaml b/applications/vault/Chart.yaml index 4c89b4f645..8226ae401f 100644 --- a/applications/vault/Chart.yaml +++ b/applications/vault/Chart.yaml @@ -4,5 +4,5 @@ version: 1.0.0 description: Secret Storage dependencies: - name: vault - version: 0.28.1 + version: 0.29.0 repository: 
https://helm.releases.hashicorp.com diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml index 4aed5b2fe5..68d876fb99 100644 --- a/applications/vo-cutouts/Chart.yaml +++ b/applications/vo-cutouts/Chart.yaml @@ -8,7 +8,7 @@ appVersion: 3.2.0 dependencies: - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ annotations: diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md index ad78a23274..97213c69f0 100644 --- a/applications/vo-cutouts/README.md +++ b/applications/vo-cutouts/README.md @@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | | cloudsql.image.schemaUpdateTagSuffix | string | `"-alpine"` | Tag suffix to use for the proxy for schema updates | -| cloudsql.image.tag | string | `"1.37.0"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.37.1"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL is used | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy container | | config.databaseUrl | string | None, must be set if `cloudsql.enabled` is false | URL for the PostgreSQL database if Cloud SQL is not in use | diff --git a/applications/vo-cutouts/values-idfint.yaml b/applications/vo-cutouts/values-idfint.yaml index 9239f30c7d..b7e41291fd 100644 --- a/applications/vo-cutouts/values-idfint.yaml +++ b/applications/vo-cutouts/values-idfint.yaml @@ -1,7 +1,6 @@ config: serviceAccount: "vo-cutouts@science-platform-int-dc5d.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-int-us-central1-output/" - updateSchema: true cloudsql: 
enabled: true diff --git a/applications/vo-cutouts/values-idfprod.yaml b/applications/vo-cutouts/values-idfprod.yaml index 53657a6e3c..461cb96fe5 100644 --- a/applications/vo-cutouts/values-idfprod.yaml +++ b/applications/vo-cutouts/values-idfprod.yaml @@ -1,7 +1,6 @@ config: serviceAccount: "vo-cutouts@science-platform-stable-6994.iam.gserviceaccount.com" storageBucketUrl: "gs://rubin-cutouts-stable-us-central1-output/" - updateSchema: true cloudsql: enabled: true diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml index 17e9ad9ba5..199c0ce730 100644 --- a/applications/vo-cutouts/values.yaml +++ b/applications/vo-cutouts/values.yaml @@ -94,7 +94,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.37.0" + tag: "1.37.1" # -- Tag suffix to use for the proxy for schema updates schemaUpdateTagSuffix: "-alpine" diff --git a/charts/cadc-tap/README.md b/charts/cadc-tap/README.md index 1da63ab1f8..8adaa4f4fd 100644 --- a/charts/cadc-tap/README.md +++ b/charts/cadc-tap/README.md @@ -17,12 +17,12 @@ IVOA TAP service | cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with Cloud SQL databases on Google Cloud | | cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images | | cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use | -| cloudsql.image.tag | string | `"1.37.0"` | Cloud SQL Auth Proxy tag to use | +| cloudsql.image.tag | string | `"1.37.1"` | Cloud SQL Auth Proxy tag to use | | cloudsql.instanceConnectionName | string | `""` | Instance connection name for a Cloud SQL PostgreSQL instance | | cloudsql.resources | object | See `values.yaml` | Resource limits and requests for the Cloud SQL Proxy container | | cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `cadc-tap` Kubernetes service 
accounts and has the `cloudsql.client` role, access | | config.backend | string | None, must be set to `pg` or `qserv` | What type of backend are we connecting to? | -| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/v3.2.1/datalink-snippets.zip"` | Datalink payload URL | +| config.datalinkPayloadUrl | string | `"https://github.com/lsst/sdm_schemas/releases/download/v3.3.0/datalink-snippets.zip"` | Datalink payload URL | | config.gcsBucket | string | `"async-results.lsst.codes"` | Name of GCS bucket in which to store results | | config.gcsBucketType | string | `"GCS"` | GCS bucket type (GCS or S3) | | config.gcsBucketUrl | string | `"https://tap-files.lsst.codes"` | Base URL for results stored in GCS bucket | @@ -69,7 +69,7 @@ IVOA TAP service | tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod | | tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image | | tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to ue. This must be overridden by each environment with the TAP schema for that environment. 
| -| tapSchema.image.tag | string | `"v3.2.1"` | Tag of TAP schema image | +| tapSchema.image.tag | string | `"v3.3.0"` | Tag of TAP schema image | | tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod | | tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod | | tapSchema.resources | object | See `values.yaml` | Resource limits and requests for the TAP schema database pod | diff --git a/charts/cadc-tap/values.yaml b/charts/cadc-tap/values.yaml index fd8b7e20ce..a755e17f89 100644 --- a/charts/cadc-tap/values.yaml +++ b/charts/cadc-tap/values.yaml @@ -99,7 +99,7 @@ config: tapSchemaAddress: "cadc-tap-schema-db:3306" # -- Datalink payload URL - datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/v3.2.1/datalink-snippets.zip" + datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/v3.3.0/datalink-snippets.zip" # -- Name of GCS bucket in which to store results gcsBucket: "async-results.lsst.codes" @@ -162,7 +162,7 @@ tapSchema: pullPolicy: "IfNotPresent" # -- Tag of TAP schema image - tag: "v3.2.1" + tag: "v3.3.0" # -- Resource limits and requests for the TAP schema database pod # @default -- See `values.yaml` @@ -229,7 +229,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.37.0" + tag: "1.37.1" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git a/charts/prompt-proto-service/README.md b/charts/prompt-proto-service/README.md index 03390726d6..06c8d5eff9 100644 --- a/charts/prompt-proto-service/README.md +++ b/charts/prompt-proto-service/README.md @@ -19,6 +19,7 @@ Event-driven processing of camera images | alerts.username | string | `"kafka-admin"` | Username for sending alerts to the alert stream | | apdb.config | string | None, must be set | URL to a serialized APDB configuration, or the "label:" prefix followed by the indexed name of such a config. 
| | cache.baseSize | int | `3` | The default number of datasets of each type to keep. The pipeline only needs one of most dataset types (one bias, one flat, etc.), so this is roughly the number of visits that fit in the cache. | +| cache.maxFilters | int | `20` | The maximum number of datasets of a given type the service might load if the filter is unknown. Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. | | cache.patchesPerImage | int | `4` | A factor by which to multiply `baseSize` for templates and other patch-based datasets. | | cache.refcatsPerImage | int | `4` | A factor by which to multiply `baseSize` for refcat datasets. | | cacheCalibs | bool | `true` | Whether or not calibs should be cached between runs of a pod. This is a temporary flag that should only be unset in specific circumstances, and only in the development environment. | @@ -35,6 +36,7 @@ Event-driven processing of camera images | instrument.name | string | None, must be set | The "short" name of the instrument | | instrument.pipelines.main | string | None, must be set | Machine-readable string describing which pipeline(s) should be run for which visits' raws. Notation is complex and still in flux; see [the source code](https://github.com/lsst-dm/prompt_processing/blob/main/python/activator/config.py) for examples. | | instrument.pipelines.preprocessing | string | None, must be set | Machine-readable string describing which pipeline(s) should be run before which visits' raw arrival. | +| instrument.preloadPadding | int | `30` | Number of arcseconds to pad the spatial region in preloading. | | instrument.skymap | string | `""` | Skymap to use with the instrument | | knative.cpuLimit | int | `1` | The maximum cpu cores for the full pod (see `containerConcurrency`). | | knative.cpuRequest | int | `1` | The cpu cores requested for the full pod (see `containerConcurrency`). 
| diff --git a/charts/prompt-proto-service/templates/prompt-proto-service.yaml b/charts/prompt-proto-service/templates/prompt-proto-service.yaml index f08eb4b17e..aca24d55ba 100644 --- a/charts/prompt-proto-service/templates/prompt-proto-service.yaml +++ b/charts/prompt-proto-service/templates/prompt-proto-service.yaml @@ -47,6 +47,8 @@ spec: value: {{ .Values.instrument.pipelines.main }} - name: SKYMAP value: {{ .Values.instrument.skymap }} + - name: PRELOAD_PADDING + value: {{ .Values.instrument.preloadPadding | toString | quote }} - name: IMAGE_BUCKET value: {{ .Values.s3.imageBucket }} - name: BUCKET_TOPIC @@ -115,6 +117,8 @@ spec: value: {{ .Values.cache.refcatsPerImage | toString | quote }} - name: PATCHES_PER_IMAGE value: {{ .Values.cache.patchesPerImage | toString | quote }} + - name: FILTERS_WITH_CALIBS + value: {{ .Values.cache.maxFilters | toString | quote }} - name: DEBUG_CACHE_CALIBS value: {{ if .Values.cacheCalibs }}'1'{{ else }}'0'{{ end }} volumeMounts: diff --git a/charts/prompt-proto-service/values.yaml b/charts/prompt-proto-service/values.yaml index 954c50e7d1..04c55b7472 100644 --- a/charts/prompt-proto-service/values.yaml +++ b/charts/prompt-proto-service/values.yaml @@ -49,6 +49,8 @@ instrument: preprocessing: "" # -- Skymap to use with the instrument skymap: "" + # -- Number of arcseconds to pad the spatial region in preloading. + preloadPadding: 30 # -- URI to the shared repo used for calibrations, templates, and pipeline outputs. # If `registry.centralRepoFile` is set, this URI points to a local redirect instead of the central repo itself. # @default -- None, must be set @@ -62,6 +64,9 @@ cache: refcatsPerImage: 4 # -- A factor by which to multiply `baseSize` for templates and other patch-based datasets. patchesPerImage: 4 + # -- The maximum number of datasets of a given type the service might load if the filter is unknown. + # Should be greater than or equal to the number of filters that have e.g. flats or transmission curves. 
+ maxFilters: 20 s3: # -- Bucket containing the incoming raw images diff --git a/charts/rubintv/Chart.yaml b/charts/rubintv/Chart.yaml index 7a4c6d0448..56963770ea 100644 --- a/charts/rubintv/Chart.yaml +++ b/charts/rubintv/Chart.yaml @@ -7,5 +7,5 @@ sources: appVersion: 0.1.0 dependencies: - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ diff --git a/charts/rubintv/README.md b/charts/rubintv/README.md index 330b344e75..72573843c6 100644 --- a/charts/rubintv/README.md +++ b/charts/rubintv/README.md @@ -53,13 +53,14 @@ Real-time display front end | workers.image.repository | string | `"ts-dockerhub.lsst.org/rubintv-broadcaster"` | The Docker registry name for the container image. | | workers.image.tag | string | `"develop"` | The tag of the container image to use. | | workers.imagePullSecrets | list | See `values.yaml` | Image pull secrets. | +| workers.nfsMountpoint | list | See `values.yaml` | NFS mountpoints for the rubintv worker pods | | workers.nodeSelector | object | `{}` | Node selector rules for the rubintv worker pods | | workers.pathPrefix | string | `"/"` | Prefix for the (internal) worker API routes | | workers.podAnnotations | object | `{}` | Annotations for the rubintv worker pods | +| workers.pvcMountpoint | list | See `values.yaml` | PVC claims for the rubintv worker pods | | workers.replicas | int | `0` | how many replicas to use | | workers.resources | object | `{}` | Resource limits and requests for the rubintv worker pods | | workers.script | string | `"slac/rubintv/workerPod1.py"` | Script that runs in RUN_ARG. This dynamic mechanism needs to be replaced with something less scary, but there is resistance to that, at least while iterating. | | workers.scriptsLocation | string | `"/repos/rubintv_production/scripts"` | The location of the scripts folder where the worker pod will run specific scripts, set by RUN_ARG. 
| | workers.tolerations | list | `[]` | Tolerations for the rubintv worker pods | | workers.uid | string | `nil` | UID to run as (site-dependent because of filesystem access; must be specified) | -| workers.volumes | list | See `values.yaml` | Volumes for the rubintv worker pods | diff --git a/charts/rubintv/templates/deployment-workers.yaml b/charts/rubintv/templates/deployment-workers.yaml index f4bebf3e5a..b4784e839e 100644 --- a/charts/rubintv/templates/deployment-workers.yaml +++ b/charts/rubintv/templates/deployment-workers.yaml @@ -85,9 +85,17 @@ spec: volumeMounts: - name: "user-secrets" mountPath: "/etc/secrets" - {{- range $vol := .Values.workers.volumes }} + {{- range $vol := .Values.workers.pvcMountpoint }} - name: {{ $vol.name }} mountPath: {{ $vol.mountPath }} + {{- if ($vol.subPath) }} + subPath: {{ $vol.subPath }} + {{- end }} + {{- end }} + {{- range $vol := .Values.workers.nfsMountpoint }} + - name: {{ $vol.name }} + mountPath: {{ $vol.containerPath }} + readOnly: {{ $vol.readOnly }} {{- end }} initContainers: - name: "secret-perm-fixer" @@ -123,13 +131,20 @@ spec: {{- end }} - name: "user-secrets" emptyDir: {} - {{- range $vol := .Values.workers.volumes }} + {{- range $vol := .Values.workers.pvcMountpoint }} - name: {{ $vol.name | quote }} {{ with $vol.persistentVolumeClaim }} persistentVolumeClaim: claimName: {{ .name | quote }} {{- end }} {{- end }} + {{- range $vol := .Values.workers.nfsMountpoint }} + - name: {{ $vol.name | quote }} + nfs: + path: {{ $vol.serverPath }} + readOnly: {{ $vol.readOnly }} + server: {{ $vol.server }} + {{- end }} securityContext: runAsNonRoot: true runAsUser: {{ .Values.workers.uid }} diff --git a/charts/rubintv/templates/pvc.yaml b/charts/rubintv/templates/pvc.yaml index 15be702e83..802f4830e6 100644 --- a/charts/rubintv/templates/pvc.yaml +++ b/charts/rubintv/templates/pvc.yaml @@ -1,5 +1,5 @@ -{{- if .Values.workers.volumes }} -{{- range $vol := .Values.workers.volumes }} +{{- if .Values.workers.pvcMountpoint }} 
+{{- range $vol := .Values.workers.pvcMountpoint }} {{- if $vol.persistentVolumeClaim }} --- kind: PersistentVolumeClaim diff --git a/charts/rubintv/values.yaml b/charts/rubintv/values.yaml index a1b3658c14..7d6dc9e185 100644 --- a/charts/rubintv/values.yaml +++ b/charts/rubintv/values.yaml @@ -109,11 +109,11 @@ workers: imagePullSecrets: [] # Each entry is of the form: { name: pull-secret-name } - # -- Volumes for the rubintv worker pods + # -- PVC claims for the rubintv worker pods # @default -- See `values.yaml` - volumes: [] + pvcMountpoint: [] # Each list item must have the following form: - # { name: volume-name, + # { name: pvc-name, # accessMode: one of "ReadOnly", "ReadWriteOnce", "ReadWriteMany", # mountPath: path-mounted-in-container, # persistentVolumeClaim: { @@ -122,8 +122,17 @@ workers: # capacity: size-as-string-of-pvc (e.g. "1Gi") # } # } - # It is planned to implement "nfs" as an alternative to - # "PersistentVolumeClaim" but that has not yet been done. + + # -- NFS mountpoints for the rubintv worker pods + # @default -- See `values.yaml` + nfsMountpoint: [] + # Each list item must have the following form: + # { name: nfs-name, + # containerPath: path-mounted-in-container, + # readOnly: boolean, + # server: nfs-server, + # serverPath: nfs-server-path + # } # -- Resource limits and requests for the rubintv worker pods resources: {} diff --git a/docs/admin/audit-secrets.rst b/docs/admin/audit-secrets.rst index a04193a727..c764e24f1d 100644 --- a/docs/admin/audit-secrets.rst +++ b/docs/admin/audit-secrets.rst @@ -2,7 +2,7 @@ Audit secrets for an environment ################################ -To check that all of the necessary secrets for an environment named ```` are in Vault and appear to have the proper form, run: +To check that all of the necessary secrets for an environment named ```` are in Vault and appear to have the proper form, and to see exactly what a :doc:`syncing secrets for that environment ` would do, run: .. 
prompt:: bash diff --git a/docs/admin/sync-secrets.rst b/docs/admin/sync-secrets.rst index ede6114600..4998b506d1 100644 --- a/docs/admin/sync-secrets.rst +++ b/docs/admin/sync-secrets.rst @@ -2,8 +2,21 @@ Sync secrets for an environment ############################### -Before syncing secrets for an environment, you should normally audit the secrets so that you know what will change. -See :doc:`audit-secrets`. +Phalanx uses :px-app:`vault-secrets-operator` to create Kubernetes ``Secret`` resources from ``VaultSecret`` resources and entries in Vault. +It requires every Phalanx application with secrets have its own entry in Vault whose keys and values collect all secrets used by that application. +Some secrets therefore have to be duplicated between applications, and others can be automatically generated if missing. +This process of copying and generating secrets as needed is called syncing secrets. + +Syncing secrets must be done before installing a Phalanx environment for the first time, and then every time the secrets for that environment change. +Even if the environment stores static secrets in Vault directly, secrets will still need to be synced periodically to handle the copied and generated secrets also stored in Vault. + +Syncing secrets +=============== + +.. warning:: + + Before syncing secrets for an environment, you should normally audit the secrets so that you know what will change. + See :doc:`audit-secrets`. To populate Vault with all of the necessary secrets for an environment named ````, run: @@ -17,13 +30,13 @@ For SQuaRE-managed deployments, the 1Password token for ``OP_CONNECT_TOKEN`` com If you did not store the Vault write token for your environment with the static secrets, the ``VAULT_TOKEN`` environment variable must be set to the Vault write token for this environment. For SQuaRE-managed environments, you can get the write token from the ``Phalanx Vault write tokens`` item in the SQuaRE 1Password vault. 
-This must be done before installing a Phalanx environment for the first time. -It can then be run again whenever the secrets for that environment change. +Only secrets for the named environment will be affected. +No changes will be made outside of the configured secrets path for that environment. Deleting secrets ================ -By default old secrets that are no longer required are deleted out of Vault. +By default, old secrets that are no longer required are not deleted out of Vault. To delete obsolete secrets, pass the ``--delete`` flag to :command:`phalanx secrets sync`. This will keep your Vault tidy, but you should use this flag with caution if you have applications temporarily disabled or if you store static secrets directly in Vault and nowhere else. @@ -34,11 +47,12 @@ Regenerating secrets By default, :command:`phalanx secrets sync` will leave any existing generated secrets set to their current values. This is almost always what you want. -In the rare case where you are completely reinstalling an environment and want to invalidate all existing secrets (such as after a security breach), you can add the ``--regenerate`` flag to regenerate all static secrets. + +In the rare case where you are completely reinstalling an environment and want to invalidate all existing secrets (such as after a security breach), you can add the ``--regenerate`` flag to regenerate all non-static secrets. .. warning:: Using ``--regenerate`` will invalidate all user sessions, all user tokens, and other, possibly unanticipated, interactions with the existing cluster. - It will also break most running Phalanx applications until their secrets have been recreated and they have been restarted. + It will also break most running Phalanx applications until their Kubernetes ``Secret`` resources have been recreated and they have been restarted. This should only be used when you also plan to empty the Gafaelfawr database and otherwise reset the environment to start fresh. 
diff --git a/docs/applications/_summary.rst.jinja b/docs/applications/_summary.rst.jinja index 2f836ed853..5f0012bc07 100644 --- a/docs/applications/_summary.rst.jinja +++ b/docs/applications/_summary.rst.jinja @@ -34,7 +34,7 @@ * - Namespace - {{ app.namespace }} * - Argo CD Project - - {{ app.project }} + - {{ app.project.value }} * - Environments {%- if app.active_environments %} - .. list-table:: diff --git a/docs/applications/index.rst b/docs/applications/index.rst index c0bd61dc7b..a3697b97db 100644 --- a/docs/applications/index.rst +++ b/docs/applications/index.rst @@ -20,5 +20,6 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde rubin roundtable monitoring + support prompt telescope diff --git a/docs/applications/infrastructure.rst b/docs/applications/infrastructure.rst index bb614908d1..8399df03e8 100644 --- a/docs/applications/infrastructure.rst +++ b/docs/applications/infrastructure.rst @@ -12,11 +12,7 @@ Argo CD project: ``infrastructure`` argocd/index cert-manager/index - ghostwriter/index ingress-nginx/index gafaelfawr/index mobu/index - postgres/index - strimzi/index - strimzi-access-operator/index vault-secrets-operator/index diff --git a/docs/applications/roundtable.rst b/docs/applications/roundtable.rst index 8d3ecce818..f2e7c02373 100644 --- a/docs/applications/roundtable.rst +++ b/docs/applications/roundtable.rst @@ -14,7 +14,6 @@ Argo CD project: ``roundtable`` checkerboard/index giftless/index - kubernetes-replicator/index onepassword-connect/index ook/index sqrbot-sr/index diff --git a/docs/applications/rsp.rst b/docs/applications/rsp.rst index b395276738..172d953ae3 100644 --- a/docs/applications/rsp.rst +++ b/docs/applications/rsp.rst @@ -18,10 +18,9 @@ Argo CD project: ``rsp`` noteburst/index nublado/index portal/index - ppdb-replication/index semaphore/index + sia/index siav2/index - sqlproxy-cross-project/index squareone/index ssotap/index tap/index diff --git a/docs/applications/sia/index.rst 
b/docs/applications/sia/index.rst new file mode 100644 index 0000000000..ec4a177b15 --- /dev/null +++ b/docs/applications/sia/index.rst @@ -0,0 +1,28 @@ +.. px-app:: sia + +###################################### +sia — Simple Image Access (v2) service +###################################### + +``sia`` is an image-access API complying with the IVOA SIA (v2) specification. +This application is designed to interact with Butler repositories, through the dax_obscore package https://github.com/lsst-dm/dax_obscore and allows users to find image links for objects that match one or more filter criteria, listed in the IVOA SIA specification https://www.ivoa.net/documents/SIA/. + +Results of an SIAv2 query will contain either a datalink if the images are stored behind an authenticated store, or a direct link to the images. + +The SIA service will have as client the RSP Portal Aspect but can also be accessed by other IVOA-compatible clients. + +If the SIA application does not appear under a VO Registry, use of it by IVOA-compatible clients will require users to input the SIA service URL manually. + +Both POST & GET methods are implemented for the /query API, as well as the VOSI-availability and VOSI-capabilities endpoints. + + +.. jinja:: sia + :file: applications/_summary.rst.jinja + +Guides +====== + +.. toctree:: + :maxdepth: 1 + + values diff --git a/docs/applications/sia/values.md b/docs/applications/sia/values.md new file mode 100644 index 0000000000..88ea24dbc4 --- /dev/null +++ b/docs/applications/sia/values.md @@ -0,0 +1,12 @@ +```{px-app-values} sia +``` + +# sia Helm values reference + +Helm values reference table for the {px-app}`sia` application. 
+ +```{include} ../../../applications/sia/README.md +--- +start-after: "## Values" +--- +``` \ No newline at end of file diff --git a/docs/applications/support.rst b/docs/applications/support.rst new file mode 100644 index 0000000000..14c67ff93d --- /dev/null +++ b/docs/applications/support.rst @@ -0,0 +1,18 @@ +################ +Support services +################ + +Additional Argo CD services that are not required cluster infrastructure but are also not Rubin- or Roundtable-specific. +These may be of use in a variety of different clusters, but do not need to be enabled in all clusters. + +Argo CD project: ``support`` + +.. toctree:: + :maxdepth: 1 + + ghostwriter/index + kubernetes-replicator/index + postgres/index + sqlproxy-cross-project/index + strimzi/index + strimzi-access-operator/index diff --git a/docs/conf.py b/docs/conf.py index 41e6428493..dfa6d9b86a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -33,3 +33,6 @@ linkcheck_exclude_documents = [ r"applications/.*/values", ] + +# Remove this later after we fix documenteer +mermaid_version = "11.2.0" diff --git a/docs/developers/helm-chart/values-yaml.rst b/docs/developers/helm-chart/values-yaml.rst index 7ab926f1c3..f5d55a2e52 100644 --- a/docs/developers/helm-chart/values-yaml.rst +++ b/docs/developers/helm-chart/values-yaml.rst @@ -3,7 +3,7 @@ Write the values.yaml file ########################## The :file:`values.yaml` file contains the customizable settings for your application. -Those settings can be overriden for each environment in :file:`values-{environmet}.yaml`. +Those settings can be overridden for each environment in :file:`values-{environment}.yaml`. As a general rule, only use :file:`values.yaml` settings for things that may vary between Phalanx environments. If something is the same in every Phalanx environment, it can be hard-coded into the Kubernetes resource templates. 
diff --git a/environments/README.md b/environments/README.md index d44f6c7e2d..e92de4f65a 100644 --- a/environments/README.md +++ b/environments/README.md @@ -57,6 +57,7 @@ | applications.sasquatch-backpack | bool | `false` | Enable the sasquatch-backpack application | | applications.schedview-snapshot | bool | `false` | Enable the schedview-snapshot application | | applications.semaphore | bool | `false` | Enable the semaphore application | +| applications.sia | bool | `false` | Enable the sia over butler application | | applications.siav2 | bool | `false` | Enable the siav2 application | | applications.simonyitel | bool | `false` | Enable the simonyitel control system application | | applications.sqlproxy-cross-project | bool | `false` | Enable the sqlproxy-cross-project application | diff --git a/environments/templates/applications/prompt/prompt-proto-service-lsstcomcam.yaml b/environments/templates/applications/prompt/prompt-proto-service-lsstcomcam.yaml index 96017e4d65..842abfdef9 100644 --- a/environments/templates/applications/prompt/prompt-proto-service-lsstcomcam.yaml +++ b/environments/templates/applications/prompt/prompt-proto-service-lsstcomcam.yaml @@ -1,4 +1,4 @@ -{{- if (index .Values "applications" "prompt-proto-service-lsstcam") -}} +{{- if (index .Values "applications" "prompt-proto-service-lsstcomcam") -}} apiVersion: argoproj.io/v1alpha1 kind: Application metadata: diff --git a/environments/templates/applications/rsp/sia.yaml b/environments/templates/applications/rsp/sia.yaml new file mode 100644 index 0000000000..ace746138e --- /dev/null +++ b/environments/templates/applications/rsp/sia.yaml @@ -0,0 +1,34 @@ +{{- if (index .Values "applications" "sia") -}} +apiVersion: v1 +kind: Namespace +metadata: + name: "sia" +--- +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: "sia" + namespace: "argocd" + finalizers: + - "resources-finalizer.argocd.argoproj.io" +spec: + destination: + namespace: "sia" + server: 
"https://kubernetes.default.svc" + project: "rsp" + source: + path: "applications/sia" + repoURL: {{ .Values.repoUrl | quote }} + targetRevision: {{ .Values.targetRevision | quote }} + helm: + parameters: + - name: "global.host" + value: {{ .Values.fqdn | quote }} + - name: "global.baseUrl" + value: "https://{{ .Values.fqdn }}" + - name: "global.vaultSecretsPath" + value: {{ .Values.vaultPathPrefix | quote }} + valueFiles: + - "values.yaml" + - "values-{{ .Values.name }}.yaml" +{{- end -}} diff --git a/environments/templates/applications/infrastructure/ghostwriter.yaml b/environments/templates/applications/support/ghostwriter.yaml similarity index 95% rename from environments/templates/applications/infrastructure/ghostwriter.yaml rename to environments/templates/applications/support/ghostwriter.yaml index 5d993e0b88..3ba3e01ed4 100644 --- a/environments/templates/applications/infrastructure/ghostwriter.yaml +++ b/environments/templates/applications/support/ghostwriter.yaml @@ -15,7 +15,7 @@ spec: destination: namespace: "ghostwriter" server: "https://kubernetes.default.svc" - project: "infrastructure" + project: "support" source: path: "applications/ghostwriter" repoURL: {{ .Values.repoUrl | quote }} @@ -31,4 +31,4 @@ spec: valueFiles: - "values.yaml" - "values-{{ .Values.name }}.yaml" -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/environments/templates/applications/roundtable/kubernetes-replicator.yaml b/environments/templates/applications/support/kubernetes-replicator.yaml similarity index 97% rename from environments/templates/applications/roundtable/kubernetes-replicator.yaml rename to environments/templates/applications/support/kubernetes-replicator.yaml index ea241fc3e9..8cdc7bd1a6 100644 --- a/environments/templates/applications/roundtable/kubernetes-replicator.yaml +++ b/environments/templates/applications/support/kubernetes-replicator.yaml @@ -15,7 +15,7 @@ spec: destination: namespace: "kubernetes-replicator" server: 
"https://kubernetes.default.svc" - project: "roundtable" + project: "support" source: path: "applications/kubernetes-replicator" repoURL: {{ .Values.repoUrl | quote }} diff --git a/environments/templates/applications/infrastructure/postgres.yaml b/environments/templates/applications/support/postgres.yaml similarity index 96% rename from environments/templates/applications/infrastructure/postgres.yaml rename to environments/templates/applications/support/postgres.yaml index 4517078e85..1b1cee1451 100644 --- a/environments/templates/applications/infrastructure/postgres.yaml +++ b/environments/templates/applications/support/postgres.yaml @@ -15,7 +15,7 @@ spec: destination: namespace: "postgres" server: "https://kubernetes.default.svc" - project: "infrastructure" + project: "support" source: path: "applications/postgres" repoURL: {{ .Values.repoUrl | quote }} diff --git a/environments/templates/applications/rsp/sqlproxy-cross-project.yaml b/environments/templates/applications/support/sqlproxy-cross-project.yaml similarity index 97% rename from environments/templates/applications/rsp/sqlproxy-cross-project.yaml rename to environments/templates/applications/support/sqlproxy-cross-project.yaml index d8197acebe..7483dd3792 100644 --- a/environments/templates/applications/rsp/sqlproxy-cross-project.yaml +++ b/environments/templates/applications/support/sqlproxy-cross-project.yaml @@ -15,7 +15,7 @@ spec: destination: namespace: "sqlproxy-cross-project" server: "https://kubernetes.default.svc" - project: "rsp" + project: "support" source: path: "applications/sqlproxy-cross-project" repoURL: {{ .Values.repoUrl | quote }} diff --git a/environments/templates/applications/infrastructure/strimzi-access-operator.yaml b/environments/templates/applications/support/strimzi-access-operator.yaml similarity index 96% rename from environments/templates/applications/infrastructure/strimzi-access-operator.yaml rename to 
environments/templates/applications/support/strimzi-access-operator.yaml index 4038ee1493..c5933f228d 100644 --- a/environments/templates/applications/infrastructure/strimzi-access-operator.yaml +++ b/environments/templates/applications/support/strimzi-access-operator.yaml @@ -15,7 +15,7 @@ spec: destination: namespace: "strimzi-access-operator" server: "https://kubernetes.default.svc" - project: "infrastructure" + project: "support" source: path: "applications/strimzi-access-operator" repoURL: {{ .Values.repoUrl | quote }} diff --git a/environments/templates/applications/infrastructure/strimzi.yaml b/environments/templates/applications/support/strimzi.yaml similarity index 95% rename from environments/templates/applications/infrastructure/strimzi.yaml rename to environments/templates/applications/support/strimzi.yaml index 4b48562dbf..cdce21fe43 100644 --- a/environments/templates/applications/infrastructure/strimzi.yaml +++ b/environments/templates/applications/support/strimzi.yaml @@ -15,7 +15,7 @@ spec: destination: namespace: "strimzi" server: "https://kubernetes.default.svc" - project: "infrastructure" + project: "support" source: path: "applications/strimzi" repoURL: {{ .Values.repoUrl | quote }} diff --git a/environments/templates/projects/support.yaml b/environments/templates/projects/support.yaml new file mode 100644 index 0000000000..d1c4206e1e --- /dev/null +++ b/environments/templates/projects/support.yaml @@ -0,0 +1,19 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AppProject +metadata: + name: "support" + namespace: "argocd" +spec: + clusterResourceWhitelist: + - group: "*" + kind: "*" + destinations: + - namespace: "!kube-system" + server: "*" + - namespace: "*" + server: "*" + namespaceResourceWhitelist: + - group: "*" + kind: "*" + sourceRepos: + - "*" diff --git a/environments/values-base.yaml b/environments/values-base.yaml index e0a262f932..03455ce0a7 100644 --- a/environments/values-base.yaml +++ b/environments/values-base.yaml @@ -22,6 +22,7 @@ 
applications: narrativelog: true nightreport: true nublado: true + obsenv-management: true obssys: true portal: true rubintv: true diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml index 6283dddfbd..4f56c10fba 100644 --- a/environments/values-idfdev.yaml +++ b/environments/values-idfdev.yaml @@ -25,6 +25,7 @@ applications: portal: true sasquatch: true semaphore: true + sia: true siav2: false ssotap: true squareone: true diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml index 34696fc711..804062f9b1 100644 --- a/environments/values-idfint.yaml +++ b/environments/values-idfint.yaml @@ -20,9 +20,9 @@ applications: hips: true mobu: true nublado: true - plot-navigator: true portal: true sasquatch: true + sia: true siav2: false ssotap: true production-tools: true @@ -31,6 +31,7 @@ applications: sqlproxy-cross-project: true squareone: true strimzi: true + strimzi-access-operator: true tap: true telegraf: true telegraf-ds: true diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml index 0a6a26cc37..0a9eee8a8d 100644 --- a/environments/values-idfprod.yaml +++ b/environments/values-idfprod.yaml @@ -22,9 +22,13 @@ applications: mobu: true nublado: true portal: true + sasquatch: true semaphore: true + sia: true siav2: false squareone: true + strimzi: true + strimzi-access-operator: true ssotap: true tap: true telegraf: true diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml index a11686b579..a447b4b181 100644 --- a/environments/values-roundtable-dev.yaml +++ b/environments/values-roundtable-dev.yaml @@ -11,7 +11,6 @@ onepassword: vaultPathPrefix: "secret/phalanx/roundtable-dev" applications: - checkerboard: false giftless: true jira-data-proxy: true kubernetes-replicator: true diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml index a70ac6123a..c0625aa465 100644 --- 
a/environments/values-roundtable-prod.yaml +++ b/environments/values-roundtable-prod.yaml @@ -27,5 +27,6 @@ applications: squarebot: true telegraf: true telegraf-ds: true + templatebot: true unfurlbot: true vault: true diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml index ce63e3bd3f..838471061d 100644 --- a/environments/values-summit.yaml +++ b/environments/values-summit.yaml @@ -13,6 +13,7 @@ applications: narrativelog: true nightreport: true nublado: true + obsenv-management: true portal: true rapid-analysis: true rubintv: true diff --git a/environments/values-usdfprod-prompt-processing.yaml b/environments/values-usdfprod-prompt-processing.yaml index b1c1ce92d9..79c9897f4a 100644 --- a/environments/values-usdfprod-prompt-processing.yaml +++ b/environments/values-usdfprod-prompt-processing.yaml @@ -11,6 +11,6 @@ applications: prompt-proto-service-hsc: false prompt-proto-service-latiss: true prompt-proto-service-lsstcam: false - prompt-proto-service-lsstcomcam: false + prompt-proto-service-lsstcomcam: true prompt-proto-service-lsstcomcamsim: false vault-secrets-operator: false diff --git a/environments/values.yaml b/environments/values.yaml index cd11a31959..b65965df03 100644 --- a/environments/values.yaml +++ b/environments/values.yaml @@ -171,6 +171,9 @@ applications: # -- Enable the schedview-snapshot application schedview-snapshot: false + # -- Enable the sia over butler application + sia: false + # -- Enable the siav2 application siav2: false diff --git a/requirements/dev.txt b/requirements/dev.txt index a8d900ddd8..3c3e316dab 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -114,97 +114,112 @@ cffi==1.17.1 ; implementation_name == 'pypy' \ # via # -c requirements/main.txt # pyzmq -charset-normalizer==3.3.2 \ - --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ - --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ - 
--hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ - --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ - --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ - --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ - --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ - --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ - --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ - --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ - --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ - --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ - --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ - --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ - --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ - --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ - --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ - --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ - --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ - --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ - --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ - --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ - --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ - --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ - --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ - --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ - 
--hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ - --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ - --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ - --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ - --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ - --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ - --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ - --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ - --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ - --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ - --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ - --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ - --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ - --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ - --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ - --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ - --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ - --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ - --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ - --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ - --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ - --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ - --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ - --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ - 
--hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ - --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ - --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ - --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ - --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ - --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ - --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ - --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ - --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ - --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ - --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ - --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ - --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ - --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ - --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ - --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ - --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ - --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ - --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ - --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ - --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ - --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ - --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ - --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ - 
--hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ - --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ - --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ - --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ - --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ - --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ - --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ - --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ - --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ - --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ - --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ - --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ - --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ - --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ - --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ - --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 +charset-normalizer==3.4.0 \ + --hash=sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621 \ + --hash=sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6 \ + --hash=sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8 \ + --hash=sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912 \ + --hash=sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c \ + --hash=sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b \ + --hash=sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d \ + 
--hash=sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d \ + --hash=sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95 \ + --hash=sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e \ + --hash=sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565 \ + --hash=sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64 \ + --hash=sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab \ + --hash=sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be \ + --hash=sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e \ + --hash=sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907 \ + --hash=sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0 \ + --hash=sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2 \ + --hash=sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62 \ + --hash=sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62 \ + --hash=sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23 \ + --hash=sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc \ + --hash=sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284 \ + --hash=sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca \ + --hash=sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455 \ + --hash=sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858 \ + --hash=sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b \ + --hash=sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594 \ + --hash=sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc \ + --hash=sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db \ + --hash=sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b \ + 
--hash=sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea \ + --hash=sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6 \ + --hash=sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920 \ + --hash=sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749 \ + --hash=sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7 \ + --hash=sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd \ + --hash=sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99 \ + --hash=sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242 \ + --hash=sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee \ + --hash=sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129 \ + --hash=sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2 \ + --hash=sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51 \ + --hash=sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee \ + --hash=sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8 \ + --hash=sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b \ + --hash=sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613 \ + --hash=sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742 \ + --hash=sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe \ + --hash=sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3 \ + --hash=sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5 \ + --hash=sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631 \ + --hash=sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7 \ + --hash=sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15 \ + --hash=sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c \ + 
--hash=sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea \ + --hash=sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417 \ + --hash=sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250 \ + --hash=sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88 \ + --hash=sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca \ + --hash=sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa \ + --hash=sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99 \ + --hash=sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149 \ + --hash=sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41 \ + --hash=sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574 \ + --hash=sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0 \ + --hash=sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f \ + --hash=sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d \ + --hash=sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654 \ + --hash=sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3 \ + --hash=sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19 \ + --hash=sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90 \ + --hash=sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578 \ + --hash=sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9 \ + --hash=sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1 \ + --hash=sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51 \ + --hash=sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719 \ + --hash=sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236 \ + --hash=sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a \ + 
--hash=sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c \ + --hash=sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade \ + --hash=sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944 \ + --hash=sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc \ + --hash=sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6 \ + --hash=sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6 \ + --hash=sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27 \ + --hash=sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6 \ + --hash=sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2 \ + --hash=sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12 \ + --hash=sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf \ + --hash=sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114 \ + --hash=sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7 \ + --hash=sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf \ + --hash=sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d \ + --hash=sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b \ + --hash=sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed \ + --hash=sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03 \ + --hash=sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4 \ + --hash=sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67 \ + --hash=sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365 \ + --hash=sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a \ + --hash=sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748 \ + --hash=sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b \ + 
--hash=sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079 \ + --hash=sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482 # via # -c requirements/main.txt # requests @@ -229,117 +244,111 @@ comm==0.2.2 \ --hash=sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e \ --hash=sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3 # via ipykernel -coverage==7.6.1 \ - --hash=sha256:06a737c882bd26d0d6ee7269b20b12f14a8704807a01056c80bb881a4b2ce6ca \ - --hash=sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d \ - --hash=sha256:0c0420b573964c760df9e9e86d1a9a622d0d27f417e1a949a8a66dd7bcee7bc6 \ - --hash=sha256:0dbde0f4aa9a16fa4d754356a8f2e36296ff4d83994b2c9d8398aa32f222f989 \ - --hash=sha256:1125ca0e5fd475cbbba3bb67ae20bd2c23a98fac4e32412883f9bcbaa81c314c \ - --hash=sha256:13b0a73a0896988f053e4fbb7de6d93388e6dd292b0d87ee51d106f2c11b465b \ - --hash=sha256:166811d20dfea725e2e4baa71fffd6c968a958577848d2131f39b60043400223 \ - --hash=sha256:170d444ab405852903b7d04ea9ae9b98f98ab6d7e63e1115e82620807519797f \ - --hash=sha256:1f4aa8219db826ce6be7099d559f8ec311549bfc4046f7f9fe9b5cea5c581c56 \ - --hash=sha256:225667980479a17db1048cb2bf8bfb39b8e5be8f164b8f6628b64f78a72cf9d3 \ - --hash=sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8 \ - --hash=sha256:2bdb062ea438f22d99cba0d7829c2ef0af1d768d1e4a4f528087224c90b132cb \ - --hash=sha256:2c09f4ce52cb99dd7505cd0fc8e0e37c77b87f46bc9c1eb03fe3bc9991085388 \ - --hash=sha256:3115a95daa9bdba70aea750db7b96b37259a81a709223c8448fa97727d546fe0 \ - --hash=sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a \ - --hash=sha256:3f1156e3e8f2872197af3840d8ad307a9dd18e615dc64d9ee41696f287c57ad8 \ - --hash=sha256:4421712dbfc5562150f7554f13dde997a2e932a6b5f352edcce948a815efee6f \ - --hash=sha256:44df346d5215a8c0e360307d46ffaabe0f5d3502c8a1cefd700b34baf31d411a \ - 
--hash=sha256:502753043567491d3ff6d08629270127e0c31d4184c4c8d98f92c26f65019962 \ - --hash=sha256:547f45fa1a93154bd82050a7f3cddbc1a7a4dd2a9bf5cb7d06f4ae29fe94eaf8 \ - --hash=sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391 \ - --hash=sha256:609b06f178fe8e9f89ef676532760ec0b4deea15e9969bf754b37f7c40326dbc \ - --hash=sha256:645786266c8f18a931b65bfcefdbf6952dd0dea98feee39bd188607a9d307ed2 \ - --hash=sha256:6878ef48d4227aace338d88c48738a4258213cd7b74fd9a3d4d7582bb1d8a155 \ - --hash=sha256:6a89ecca80709d4076b95f89f308544ec8f7b4727e8a547913a35f16717856cb \ - --hash=sha256:6db04803b6c7291985a761004e9060b2bca08da6d04f26a7f2294b8623a0c1a0 \ - --hash=sha256:6e2cd258d7d927d09493c8df1ce9174ad01b381d4729a9d8d4e38670ca24774c \ - --hash=sha256:6e81d7a3e58882450ec4186ca59a3f20a5d4440f25b1cff6f0902ad890e6748a \ - --hash=sha256:702855feff378050ae4f741045e19a32d57d19f3e0676d589df0575008ea5004 \ - --hash=sha256:78b260de9790fd81e69401c2dc8b17da47c8038176a79092a89cb2b7d945d060 \ - --hash=sha256:7bb65125fcbef8d989fa1dd0e8a060999497629ca5b0efbca209588a73356232 \ - --hash=sha256:7dea0889685db8550f839fa202744652e87c60015029ce3f60e006f8c4462c93 \ - --hash=sha256:8284cf8c0dd272a247bc154eb6c95548722dce90d098c17a883ed36e67cdb129 \ - --hash=sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163 \ - --hash=sha256:8929543a7192c13d177b770008bc4e8119f2e1f881d563fc6b6305d2d0ebe9de \ - --hash=sha256:8ae539519c4c040c5ffd0632784e21b2f03fc1340752af711f33e5be83a9d6c6 \ - --hash=sha256:8f59d57baca39b32db42b83b2a7ba6f47ad9c394ec2076b084c3f029b7afca23 \ - --hash=sha256:9054a0754de38d9dbd01a46621636689124d666bad1936d76c0341f7d71bf569 \ - --hash=sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d \ - --hash=sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778 \ - --hash=sha256:9bc572be474cafb617672c43fe989d6e48d3c83af02ce8de73fff1c6bb3c198d \ - --hash=sha256:9c56863d44bd1c4fe2abb8a4d6f5371d197f1ac0ebdee542f07f35895fc07f36 \ - 
--hash=sha256:9e0b2df163b8ed01d515807af24f63de04bebcecbd6c3bfeff88385789fdf75a \ - --hash=sha256:a09ece4a69cf399510c8ab25e0950d9cf2b42f7b3cb0374f95d2e2ff594478a6 \ - --hash=sha256:a1ac0ae2b8bd743b88ed0502544847c3053d7171a3cff9228af618a068ed9c34 \ - --hash=sha256:a318d68e92e80af8b00fa99609796fdbcdfef3629c77c6283566c6f02c6d6704 \ - --hash=sha256:a4acd025ecc06185ba2b801f2de85546e0b8ac787cf9d3b06e7e2a69f925b106 \ - --hash=sha256:a6d3adcf24b624a7b778533480e32434a39ad8fa30c315208f6d3e5542aeb6e9 \ - --hash=sha256:a78d169acd38300060b28d600344a803628c3fd585c912cacc9ea8790fe96862 \ - --hash=sha256:a95324a9de9650a729239daea117df21f4b9868ce32e63f8b650ebe6cef5595b \ - --hash=sha256:abd5fd0db5f4dc9289408aaf34908072f805ff7792632250dcb36dc591d24255 \ - --hash=sha256:b06079abebbc0e89e6163b8e8f0e16270124c154dc6e4a47b413dd538859af16 \ - --hash=sha256:b43c03669dc4618ec25270b06ecd3ee4fa94c7f9b3c14bae6571ca00ef98b0d3 \ - --hash=sha256:b48f312cca9621272ae49008c7f613337c53fadca647d6384cc129d2996d1133 \ - --hash=sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb \ - --hash=sha256:b9f222de8cded79c49bf184bdbc06630d4c58eec9459b939b4a690c82ed05657 \ - --hash=sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d \ - --hash=sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca \ - --hash=sha256:cf4b19715bccd7ee27b6b120e7e9dd56037b9c0681dcc1adc9ba9db3d417fa36 \ - --hash=sha256:d0c212c49b6c10e6951362f7c6df3329f04c2b1c28499563d4035d964ab8e08c \ - --hash=sha256:d3296782ca4eab572a1a4eca686d8bfb00226300dcefdf43faa25b5242ab8a3e \ - --hash=sha256:d85f5e9a5f8b73e2350097c3756ef7e785f55bd71205defa0bfdaf96c31616ff \ - --hash=sha256:da511e6ad4f7323ee5702e6633085fb76c2f893aaf8ce4c51a0ba4fc07580ea7 \ - --hash=sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5 \ - --hash=sha256:e61c0abb4c85b095a784ef23fdd4aede7a2628478e7baba7c5e3deba61070a02 \ - --hash=sha256:e6a08c0be454c3b3beb105c0596ebdc2371fab6bb90c0c0297f4e58fd7e1012c \ - 
--hash=sha256:e9a6e0eb86070e8ccaedfbd9d38fec54864f3125ab95419970575b42af7541df \ - --hash=sha256:ed37bd3c3b063412f7620464a9ac1314d33100329f39799255fb8d3027da50d3 \ - --hash=sha256:f1adfc8ac319e1a348af294106bc6a8458a0f1633cc62a1446aebc30c5fa186a \ - --hash=sha256:f5796e664fe802da4f57a168c85359a8fbf3eab5e55cd4e4569fbacecc903959 \ - --hash=sha256:fc5a77d0c516700ebad189b587de289a20a78324bc54baee03dd486f0855d234 \ - --hash=sha256:fd21f6ae3f08b41004dfb433fa895d858f3f5979e7762d052b12aef444e29afc +coverage==7.6.7 \ + --hash=sha256:0266b62cbea568bd5e93a4da364d05de422110cbed5056d69339bd5af5685433 \ + --hash=sha256:0573f5cbf39114270842d01872952d301027d2d6e2d84013f30966313cadb529 \ + --hash=sha256:0ddcb70b3a3a57581b450571b31cb774f23eb9519c2aaa6176d3a84c9fc57671 \ + --hash=sha256:108bb458827765d538abcbf8288599fee07d2743357bdd9b9dad456c287e121e \ + --hash=sha256:14045b8bfd5909196a90da145a37f9d335a5d988a83db34e80f41e965fb7cb42 \ + --hash=sha256:1a5407a75ca4abc20d6252efeb238377a71ce7bda849c26c7a9bece8680a5d99 \ + --hash=sha256:2bc3e45c16564cc72de09e37413262b9f99167803e5e48c6156bccdfb22c8327 \ + --hash=sha256:2d608a7808793e3615e54e9267519351c3ae204a6d85764d8337bd95993581a8 \ + --hash=sha256:34d23e28ccb26236718a3a78ba72744212aa383141961dd6825f6595005c8b06 \ + --hash=sha256:37a15573f988b67f7348916077c6d8ad43adb75e478d0910957394df397d2874 \ + --hash=sha256:3c0317288f032221d35fa4cbc35d9f4923ff0dfd176c79c9b356e8ef8ef2dff4 \ + --hash=sha256:3c42ec2c522e3ddd683dec5cdce8e62817afb648caedad9da725001fa530d354 \ + --hash=sha256:3c6b24007c4bcd0b19fac25763a7cac5035c735ae017e9a349b927cfc88f31c1 \ + --hash=sha256:40cca284c7c310d622a1677f105e8507441d1bb7c226f41978ba7c86979609ab \ + --hash=sha256:46f21663e358beae6b368429ffadf14ed0a329996248a847a4322fb2e35d64d3 \ + --hash=sha256:49ed5ee4109258973630c1f9d099c7e72c5c36605029f3a91fe9982c6076c82b \ + --hash=sha256:5c95e0fa3d1547cb6f021ab72f5c23402da2358beec0a8e6d19a368bd7b0fb37 \ + 
--hash=sha256:5dd4e4a49d9c72a38d18d641135d2fb0bdf7b726ca60a103836b3d00a1182acd \ + --hash=sha256:5e444b8e88339a2a67ce07d41faabb1d60d1004820cee5a2c2b54e2d8e429a0f \ + --hash=sha256:60dcf7605c50ea72a14490d0756daffef77a5be15ed1b9fea468b1c7bda1bc3b \ + --hash=sha256:623e6965dcf4e28a3debaa6fcf4b99ee06d27218f46d43befe4db1c70841551c \ + --hash=sha256:673184b3156cba06154825f25af33baa2671ddae6343f23175764e65a8c4c30b \ + --hash=sha256:6cf96ceaa275f071f1bea3067f8fd43bec184a25a962c754024c973af871e1b7 \ + --hash=sha256:70a56a2ec1869e6e9fa69ef6b76b1a8a7ef709972b9cc473f9ce9d26b5997ce3 \ + --hash=sha256:77256ad2345c29fe59ae861aa11cfc74579c88d4e8dbf121cbe46b8e32aec808 \ + --hash=sha256:796c9b107d11d2d69e1849b2dfe41730134b526a49d3acb98ca02f4985eeff7a \ + --hash=sha256:7c07de0d2a110f02af30883cd7dddbe704887617d5c27cf373362667445a4c76 \ + --hash=sha256:7e61b0e77ff4dddebb35a0e8bb5a68bf0f8b872407d8d9f0c726b65dfabe2469 \ + --hash=sha256:82c809a62e953867cf57e0548c2b8464207f5f3a6ff0e1e961683e79b89f2c55 \ + --hash=sha256:850cfd2d6fc26f8346f422920ac204e1d28814e32e3a58c19c91980fa74d8289 \ + --hash=sha256:87ea64b9fa52bf395272e54020537990a28078478167ade6c61da7ac04dc14bc \ + --hash=sha256:90746521206c88bdb305a4bf3342b1b7316ab80f804d40c536fc7d329301ee13 \ + --hash=sha256:951aade8297358f3618a6e0660dc74f6b52233c42089d28525749fc8267dccd2 \ + --hash=sha256:963e4a08cbb0af6623e61492c0ec4c0ec5c5cf74db5f6564f98248d27ee57d30 \ + --hash=sha256:987a8e3da7da4eed10a20491cf790589a8e5e07656b6dc22d3814c4d88faf163 \ + --hash=sha256:9c2eb378bebb2c8f65befcb5147877fc1c9fbc640fc0aad3add759b5df79d55d \ + --hash=sha256:a1ab9763d291a17b527ac6fd11d1a9a9c358280adb320e9c2672a97af346ac2c \ + --hash=sha256:a3b925300484a3294d1c70f6b2b810d6526f2929de954e5b6be2bf8caa1f12c1 \ + --hash=sha256:acbb8af78f8f91b3b51f58f288c0994ba63c646bc1a8a22ad072e4e7e0a49f1c \ + --hash=sha256:ad32a981bcdedb8d2ace03b05e4fd8dace8901eec64a532b00b15217d3677dd2 \ + --hash=sha256:aee9cf6b0134d6f932d219ce253ef0e624f4fa588ee64830fcba193269e4daa3 \ + 
--hash=sha256:af05bbba896c4472a29408455fe31b3797b4d8648ed0a2ccac03e074a77e2314 \ + --hash=sha256:b6cce5c76985f81da3769c52203ee94722cd5d5889731cd70d31fee939b74bf0 \ + --hash=sha256:bb684694e99d0b791a43e9fc0fa58efc15ec357ac48d25b619f207c41f2fd384 \ + --hash=sha256:c132b5a22821f9b143f87446805e13580b67c670a548b96da945a8f6b4f2efbb \ + --hash=sha256:c296263093f099da4f51b3dff1eff5d4959b527d4f2f419e16508c5da9e15e8c \ + --hash=sha256:c973b2fe4dc445cb865ab369df7521df9c27bf40715c837a113edaa2aa9faf45 \ + --hash=sha256:cdd94501d65adc5c24f8a1a0eda110452ba62b3f4aeaba01e021c1ed9cb8f34a \ + --hash=sha256:d79d4826e41441c9a118ff045e4bccb9fdbdcb1d02413e7ea6eb5c87b5439d24 \ + --hash=sha256:dbba8210f5067398b2c4d96b4e64d8fb943644d5eb70be0d989067c8ca40c0f8 \ + --hash=sha256:df002e59f2d29e889c37abd0b9ee0d0e6e38c24f5f55d71ff0e09e3412a340ec \ + --hash=sha256:dfd14bcae0c94004baba5184d1c935ae0d1231b8409eb6c103a5fd75e8ecdc56 \ + --hash=sha256:e25bacb53a8c7325e34d45dddd2f2fbae0dbc230d0e2642e264a64e17322a777 \ + --hash=sha256:e2c8e3384c12dfa19fa9a52f23eb091a8fad93b5b81a41b14c17c78e23dd1d8b \ + --hash=sha256:e5f2a0f161d126ccc7038f1f3029184dbdf8f018230af17ef6fd6a707a5b881f \ + --hash=sha256:e69ad502f1a2243f739f5bd60565d14a278be58be4c137d90799f2c263e7049a \ + --hash=sha256:ead9b9605c54d15be228687552916c89c9683c215370c4a44f1f217d2adcc34d \ + --hash=sha256:f07ff574986bc3edb80e2c36391678a271d555f91fd1d332a1e0f4b5ea4b6ea9 \ + --hash=sha256:f2c7a045eef561e9544359a0bf5784b44e55cefc7261a20e730baa9220c83413 \ + --hash=sha256:f3e8796434a8106b3ac025fd15417315d7a58ee3e600ad4dbcfddc3f4b14342c \ + --hash=sha256:f63e21ed474edd23f7501f89b53280014436e383a14b9bd77a648366c81dce7b \ + --hash=sha256:fd49c01e5057a451c30c9b892948976f5d38f2cbd04dc556a82743ba8e27ed8c # via # -r requirements/dev.in # pytest-cov -debugpy==1.8.5 \ - --hash=sha256:0a1029a2869d01cb777216af8c53cda0476875ef02a2b6ff8b2f2c9a4b04176c \ - --hash=sha256:1cd04a73eb2769eb0bfe43f5bfde1215c5923d6924b9b90f94d15f207a402226 \ - 
--hash=sha256:28ced650c974aaf179231668a293ecd5c63c0a671ae6d56b8795ecc5d2f48d3c \ - --hash=sha256:345d6a0206e81eb68b1493ce2fbffd57c3088e2ce4b46592077a943d2b968ca3 \ - --hash=sha256:3df6692351172a42af7558daa5019651f898fc67450bf091335aa8a18fbf6f3a \ - --hash=sha256:4413b7a3ede757dc33a273a17d685ea2b0c09dbd312cc03f5534a0fd4d40750a \ - --hash=sha256:4fbb3b39ae1aa3e5ad578f37a48a7a303dad9a3d018d369bc9ec629c1cfa7408 \ - --hash=sha256:55919dce65b471eff25901acf82d328bbd5b833526b6c1364bd5133754777a44 \ - --hash=sha256:5b5c770977c8ec6c40c60d6f58cacc7f7fe5a45960363d6974ddb9b62dbee156 \ - --hash=sha256:606bccba19f7188b6ea9579c8a4f5a5364ecd0bf5a0659c8a5d0e10dcee3032a \ - --hash=sha256:7b0fe36ed9d26cb6836b0a51453653f8f2e347ba7348f2bbfe76bfeb670bfb1c \ - --hash=sha256:7e4d594367d6407a120b76bdaa03886e9eb652c05ba7f87e37418426ad2079f7 \ - --hash=sha256:8f913ee8e9fcf9d38a751f56e6de12a297ae7832749d35de26d960f14280750a \ - --hash=sha256:a697beca97dad3780b89a7fb525d5e79f33821a8bc0c06faf1f1289e549743cf \ - --hash=sha256:ad84b7cde7fd96cf6eea34ff6c4a1b7887e0fe2ea46e099e53234856f9d99a34 \ - --hash=sha256:b2112cfeb34b4507399d298fe7023a16656fc553ed5246536060ca7bd0e668d0 \ - --hash=sha256:b78c1250441ce893cb5035dd6f5fc12db968cc07f91cc06996b2087f7cefdd8e \ - --hash=sha256:c0a65b00b7cdd2ee0c2cf4c7335fef31e15f1b7056c7fdbce9e90193e1a8c8cb \ - --hash=sha256:c9f7c15ea1da18d2fcc2709e9f3d6de98b69a5b0fff1807fb80bc55f906691f7 \ - --hash=sha256:db9fb642938a7a609a6c865c32ecd0d795d56c1aaa7a7a5722d77855d5e77f2b \ - --hash=sha256:dd3811bd63632bb25eda6bd73bea8e0521794cda02be41fa3160eb26fc29e7ed \ - --hash=sha256:e84c276489e141ed0b93b0af648eef891546143d6a48f610945416453a8ad406 +debugpy==1.8.8 \ + --hash=sha256:09cc7b162586ea2171eea055985da2702b0723f6f907a423c9b2da5996ad67ba \ + --hash=sha256:0cc94186340be87b9ac5a707184ec8f36547fb66636d1029ff4f1cc020e53996 \ + --hash=sha256:143ef07940aeb8e7316de48f5ed9447644da5203726fca378f3a6952a50a9eae \ + 
--hash=sha256:19ffbd84e757a6ca0113574d1bf5a2298b3947320a3e9d7d8dc3377f02d9f864 \ + --hash=sha256:26b461123a030e82602a750fb24d7801776aa81cd78404e54ab60e8b5fecdad5 \ + --hash=sha256:3a9c013077a3a0000e83d97cf9cc9328d2b0bbb31f56b0e99ea3662d29d7a6a2 \ + --hash=sha256:4b93e4832fd4a759a0c465c967214ed0c8a6e8914bced63a28ddb0dd8c5f078b \ + --hash=sha256:535f4fb1c024ddca5913bb0eb17880c8f24ba28aa2c225059db145ee557035e9 \ + --hash=sha256:53709d4ec586b525724819dc6af1a7703502f7e06f34ded7157f7b1f963bb854 \ + --hash=sha256:5c0e5a38c7f9b481bf31277d2f74d2109292179081f11108e668195ef926c0f9 \ + --hash=sha256:5c6e885dbf12015aed73770f29dec7023cb310d0dc2ba8bfbeb5c8e43f80edc9 \ + --hash=sha256:64674e95916e53c2e9540a056e5f489e0ad4872645399d778f7c598eacb7b7f9 \ + --hash=sha256:705cd123a773d184860ed8dae99becd879dfec361098edbefb5fc0d3683eb804 \ + --hash=sha256:890fd16803f50aa9cb1a9b9b25b5ec321656dd6b78157c74283de241993d086f \ + --hash=sha256:90244598214bbe704aa47556ec591d2f9869ff9e042e301a2859c57106649add \ + --hash=sha256:a6531d952b565b7cb2fbd1ef5df3d333cf160b44f37547a4e7cf73666aca5d8d \ + --hash=sha256:b01f4a5e5c5fb1d34f4ccba99a20ed01eabc45a4684f4948b5db17a319dfb23f \ + --hash=sha256:c399023146e40ae373753a58d1be0a98bf6397fadc737b97ad612886b53df318 \ + --hash=sha256:d4483836da2a533f4b1454dffc9f668096ac0433de855f0c22cdce8c9f7e10c4 \ + --hash=sha256:e59b1607c51b71545cb3496876544f7186a7a27c00b436a62f285603cc68d1c6 \ + --hash=sha256:e6355385db85cbd666be703a96ab7351bc9e6c61d694893206f8001e22aee091 \ + --hash=sha256:ec684553aba5b4066d4de510859922419febc710df7bba04fe9e7ef3de15d34f \ + --hash=sha256:eea8821d998ebeb02f0625dd0d76839ddde8cbf8152ebbe289dd7acf2cdc6b98 \ + --hash=sha256:f3cbf1833e644a3100eadb6120f25be8a532035e8245584c4f7532937edc652a \ + --hash=sha256:f95651bdcbfd3b27a408869a53fbefcc2bcae13b694daee5f1365b1b83a00113 \ + --hash=sha256:ffe94dd5e9a6739a75f0b85316dc185560db3e97afa6b215628d1b6a17561cb2 # via ipykernel decorator==5.1.1 \ 
--hash=sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330 \ --hash=sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186 # via ipython -diagrams==0.23.4 \ - --hash=sha256:1ba69d98fcf8d768dbddf07d2c77aba6cc95c2e6f90f37146c04c96bc6765450 \ - --hash=sha256:b7ada0b119b5189dd021b1dc1467fad3704737452bb18b1e06d05e4d1fa48ed7 +diagrams==0.24.1 \ + --hash=sha256:47b77a0e4dac926a095ff2ae4dd4ec1a192be781799befc660a8f5ce6ea1052f \ + --hash=sha256:c1e3267b018bdb66886a09214c7a7884796a0c28456f8aefdf38916a232c2362 # via sphinx-diagrams -documenteer==1.4.0 \ - --hash=sha256:759fdbf4554449a74df9fb10cfe91984bc1272f0a2c6c688817d1a2525c72881 \ - --hash=sha256:e456e21cb6d0be659b5297de87cb3e60d9bf0fffb63e316dbaba20b38a5f70ee +documenteer==1.4.2 \ + --hash=sha256:03a4cf3b8ffa4905c59662131f87afe77417238f10e9f01075d849f08a32e99d \ + --hash=sha256:89756cf2026c3e70a36b9d2ecb69c38d58c320554f498be5955ddc815de4b035 # via -r requirements/dev.in docutils==0.21.2 \ --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ @@ -478,15 +487,15 @@ ipykernel==6.29.5 \ --hash=sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5 \ --hash=sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215 # via myst-nb -ipython==8.27.0 \ - --hash=sha256:0b99a2dc9f15fd68692e898e5568725c6d49c527d36a9fb5960ffbdeaa82ff7e \ - --hash=sha256:f68b3cb8bde357a5d7adc9598d57e22a45dfbea19eb6b98286fa3b288c9cd55c +ipython==8.29.0 \ + --hash=sha256:0188a1bd83267192123ccea7f4a8ed0a78910535dbaa3f37671dca76ebd429c8 \ + --hash=sha256:40b60e15b22591450eef73e40a027cf77bd652e757523eebc5bd7c7c498290eb # via # ipykernel # myst-nb -jedi==0.19.1 \ - --hash=sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd \ - --hash=sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0 +jedi==0.19.2 \ + --hash=sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0 \ + 
--hash=sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9 # via ipython jinja2==3.1.4 \ --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ @@ -504,13 +513,13 @@ jsonschema==4.23.0 \ # via # nbformat # sphinxcontrib-redoc -jsonschema-specifications==2023.12.1 \ - --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \ - --hash=sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c +jsonschema-specifications==2024.10.1 \ + --hash=sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272 \ + --hash=sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf # via jsonschema -jupyter-cache==1.0.0 \ - --hash=sha256:594b1c4e29b488b36547e12477645f489dbdc62cc939b2408df5679f79245078 \ - --hash=sha256:d0fa7d7533cd5798198d8889318269a8c1382ed3b22f622c09a9356521f48687 +jupyter-cache==1.0.1 \ + --hash=sha256:16e808eb19e3fb67a223db906e131ea6e01f03aa27f49a7214ce6a5fec186fb9 \ + --hash=sha256:9c3cafd825ba7da8b5830485343091143dff903e4d8c69db9349b728b140abf6 # via myst-nb jupyter-client==8.6.3 \ --hash=sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419 \ @@ -541,67 +550,68 @@ markdown-it-py==3.0.0 \ # documenteer # mdit-py-plugins # myst-parser -markupsafe==2.1.5 \ - --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ - --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ - --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ - --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ - --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ - --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ - --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ - --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ - 
--hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ - --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ - --hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ - --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ - --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ - --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ - --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ - --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ - --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ - --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ - --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ - --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ - --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ - --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ - --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ - --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ - --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ - --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ - --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ - --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ - --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ - --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ - --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ - --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ - 
--hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ - --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ - --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ - --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ - --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ - --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ - --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ - --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ - --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ - --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ - --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ - --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ - --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ - --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ - --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ - --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ - --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ - --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ - --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ - --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ - --hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ - --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ - --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ - --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ - 
--hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ - --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ - --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ - --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 +markupsafe==3.0.2 \ + --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ + --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ + --hash=sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0 \ + --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \ + --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ + --hash=sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13 \ + --hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \ + --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \ + --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \ + --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \ + --hash=sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0 \ + --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \ + --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \ + --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \ + --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \ + --hash=sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff \ + --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \ + --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \ + --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \ + 
--hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \ + --hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \ + --hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \ + --hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \ + --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \ + --hash=sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a \ + --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \ + --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \ + --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \ + --hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \ + --hash=sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144 \ + --hash=sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f \ + --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \ + --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \ + --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \ + --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \ + --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \ + --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \ + --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \ + --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \ + --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \ + --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \ + --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \ + --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \ + 
--hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \ + --hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \ + --hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \ + --hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \ + --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \ + --hash=sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29 \ + --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \ + --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \ + --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \ + --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ + --hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \ + --hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \ + --hash=sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a \ + --hash=sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178 \ + --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \ + --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \ + --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ + --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 # via # -c requirements/main.txt # jinja2 @@ -619,42 +629,47 @@ mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -mypy==1.11.2 \ - --hash=sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36 \ - --hash=sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce \ - 
--hash=sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6 \ - --hash=sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b \ - --hash=sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca \ - --hash=sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24 \ - --hash=sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383 \ - --hash=sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7 \ - --hash=sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86 \ - --hash=sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d \ - --hash=sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4 \ - --hash=sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8 \ - --hash=sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987 \ - --hash=sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385 \ - --hash=sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79 \ - --hash=sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef \ - --hash=sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6 \ - --hash=sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70 \ - --hash=sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca \ - --hash=sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70 \ - --hash=sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12 \ - --hash=sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104 \ - --hash=sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a \ - --hash=sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318 \ - --hash=sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1 \ - --hash=sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b \ - 
--hash=sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d +mypy==1.13.0 \ + --hash=sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc \ + --hash=sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e \ + --hash=sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f \ + --hash=sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74 \ + --hash=sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a \ + --hash=sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2 \ + --hash=sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b \ + --hash=sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73 \ + --hash=sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e \ + --hash=sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d \ + --hash=sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d \ + --hash=sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6 \ + --hash=sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca \ + --hash=sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d \ + --hash=sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5 \ + --hash=sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62 \ + --hash=sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a \ + --hash=sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc \ + --hash=sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7 \ + --hash=sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb \ + --hash=sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7 \ + --hash=sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732 \ + 
--hash=sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80 \ + --hash=sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a \ + --hash=sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc \ + --hash=sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2 \ + --hash=sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0 \ + --hash=sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24 \ + --hash=sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7 \ + --hash=sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b \ + --hash=sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372 \ + --hash=sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8 # via -r requirements/dev.in mypy-extensions==1.0.0 \ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \ --hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782 # via mypy -myst-nb==1.1.1 \ - --hash=sha256:74227c11f76d03494f43b7788659b161b94f4dedef230a2912412bc8c3c9e553 \ - --hash=sha256:8b8f9085287d948eef46cb3764aafc21915e0e981882b8c742719f5b1a84c36f +myst-nb==1.1.2 \ + --hash=sha256:961b4005657029ca89892a4c75edbf0856c54ceaf6172368b46bf7676c1f7700 \ + --hash=sha256:9b7034e5d62640cb6daf03f9ca16ef45d0462fced27944c77aa3f98c7cdcd566 # via documenteer myst-parser==4.0.0 \ --hash=sha256:851c9dfb44e36e56d15d05e72f02b80da21a9e0d07cba96baf5e2d476bb91531 \ @@ -679,9 +694,9 @@ nest-asyncio==1.6.0 \ --hash=sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe \ --hash=sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c # via ipykernel -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 +packaging==24.2 \ + 
--hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ + --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f # via # ipykernel # pydata-sphinx-theme @@ -704,28 +719,28 @@ pluggy==1.5.0 \ --hash=sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1 \ --hash=sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669 # via pytest -prompt-toolkit==3.0.47 \ - --hash=sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10 \ - --hash=sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360 +prompt-toolkit==3.0.48 \ + --hash=sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90 \ + --hash=sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e # via ipython -psutil==6.0.0 \ - --hash=sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35 \ - --hash=sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0 \ - --hash=sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c \ - --hash=sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1 \ - --hash=sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3 \ - --hash=sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c \ - --hash=sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd \ - --hash=sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3 \ - --hash=sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0 \ - --hash=sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2 \ - --hash=sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6 \ - --hash=sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d \ - --hash=sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c \ - --hash=sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0 \ - 
--hash=sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132 \ - --hash=sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14 \ - --hash=sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0 +psutil==6.1.0 \ + --hash=sha256:000d1d1ebd634b4efb383f4034437384e44a6d455260aaee2eca1e9c1b55f047 \ + --hash=sha256:045f00a43c737f960d273a83973b2511430d61f283a44c96bf13a6e829ba8fdc \ + --hash=sha256:0895b8414afafc526712c498bd9de2b063deaac4021a3b3c34566283464aff8e \ + --hash=sha256:1209036fbd0421afde505a4879dee3b2fd7b1e14fee81c0069807adcbbcca747 \ + --hash=sha256:1ad45a1f5d0b608253b11508f80940985d1d0c8f6111b5cb637533a0e6ddc13e \ + --hash=sha256:353815f59a7f64cdaca1c0307ee13558a0512f6db064e92fe833784f08539c7a \ + --hash=sha256:498c6979f9c6637ebc3a73b3f87f9eb1ec24e1ce53a7c5173b8508981614a90b \ + --hash=sha256:5cd2bcdc75b452ba2e10f0e8ecc0b57b827dd5d7aaffbc6821b2a9a242823a76 \ + --hash=sha256:6d3fbbc8d23fcdcb500d2c9f94e07b1342df8ed71b948a2649b5cb060a7c94ca \ + --hash=sha256:6e2dcd475ce8b80522e51d923d10c7871e45f20918e027ab682f94f1c6351688 \ + --hash=sha256:9118f27452b70bb1d9ab3198c1f626c2499384935aaf55388211ad982611407e \ + --hash=sha256:9dcbfce5d89f1d1f2546a2090f4fcf87c7f669d1d90aacb7d7582addece9fb38 \ + --hash=sha256:a8506f6119cff7015678e2bce904a4da21025cc70ad283a53b099e7620061d85 \ + --hash=sha256:a8fb3752b491d246034fa4d279ff076501588ce8cbcdbb62c32fd7a377d996be \ + --hash=sha256:c0e0c00aa18ca2d3b2b991643b799a15fc8f0563d2ebb6040f64ce8dc027b942 \ + --hash=sha256:d905186d647b16755a800e7263d43df08b790d709d575105d419f8b6ef65423a \ + --hash=sha256:ff34df86226c0227c52f38b919213157588a678d049688eded74c76c8ba4a5d0 # via ipykernel ptyprocess==0.7.0 ; sys_platform != 'emscripten' and sys_platform != 'win32' \ --hash=sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35 \ @@ -852,9 +867,9 @@ pydantic-core==2.23.4 \ # via # -c requirements/main.txt # pydantic -pydantic-settings==2.5.2 \ - 
--hash=sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907 \ - --hash=sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0 +pydantic-settings==2.6.1 \ + --hash=sha256:7fb0637c786a558d3103436278a7c4f1cfd29ba8973238a50c5bb9a55387da87 \ + --hash=sha256:e0f92546d8a9923cb8941689abf85d6601a8c19a23e97a34b2964a2e3f813ca0 # via autodoc-pydantic pydata-sphinx-theme==0.12.0 \ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \ @@ -878,9 +893,9 @@ pytest==8.3.3 \ # -r requirements/dev.in # pytest-cov # pytest-sugar -pytest-cov==5.0.0 \ - --hash=sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652 \ - --hash=sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857 +pytest-cov==6.0.0 \ + --hash=sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35 \ + --hash=sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0 # via -r requirements/dev.in pytest-sugar==1.0.0 \ --hash=sha256:6422e83258f5b0c04ce7c632176c7732cab5fdb909cb39cca5c9139f81276c0a \ @@ -896,21 +911,25 @@ python-dotenv==1.0.1 \ --hash=sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca \ --hash=sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a # via pydantic-settings -pywin32==306 ; platform_python_implementation != 'PyPy' and sys_platform == 'win32' \ - --hash=sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d \ - --hash=sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65 \ - --hash=sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e \ - --hash=sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b \ - --hash=sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4 \ - --hash=sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040 \ - --hash=sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a \ - 
--hash=sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36 \ - --hash=sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8 \ - --hash=sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e \ - --hash=sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802 \ - --hash=sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a \ - --hash=sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407 \ - --hash=sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0 +pywin32==308 ; platform_python_implementation != 'PyPy' and sys_platform == 'win32' \ + --hash=sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47 \ + --hash=sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6 \ + --hash=sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6 \ + --hash=sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed \ + --hash=sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff \ + --hash=sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de \ + --hash=sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e \ + --hash=sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b \ + --hash=sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0 \ + --hash=sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897 \ + --hash=sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a \ + --hash=sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920 \ + --hash=sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341 \ + --hash=sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e \ + --hash=sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091 \ + --hash=sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c \ + 
--hash=sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd \ + --hash=sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4 # via jupyter-core pyyaml==6.0.2 \ --hash=sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff \ @@ -973,6 +992,7 @@ pyyaml==6.0.2 \ # myst-nb # myst-parser # pybtex + # sphinxcontrib-mermaid # sphinxcontrib-redoc pyzmq==26.2.0 \ --hash=sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6 \ @@ -1101,116 +1121,103 @@ requests==2.32.3 \ # documenteer # sphinx # sphinxcontrib-youtube -rpds-py==0.20.0 \ - --hash=sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c \ - --hash=sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585 \ - --hash=sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5 \ - --hash=sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6 \ - --hash=sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef \ - --hash=sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2 \ - --hash=sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29 \ - --hash=sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318 \ - --hash=sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b \ - --hash=sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399 \ - --hash=sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739 \ - --hash=sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee \ - --hash=sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174 \ - --hash=sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a \ - --hash=sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344 \ - --hash=sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2 \ - 
--hash=sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03 \ - --hash=sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5 \ - --hash=sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22 \ - --hash=sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e \ - --hash=sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96 \ - --hash=sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91 \ - --hash=sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752 \ - --hash=sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075 \ - --hash=sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253 \ - --hash=sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee \ - --hash=sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad \ - --hash=sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5 \ - --hash=sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce \ - --hash=sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7 \ - --hash=sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b \ - --hash=sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8 \ - --hash=sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57 \ - --hash=sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3 \ - --hash=sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec \ - --hash=sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209 \ - --hash=sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921 \ - --hash=sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045 \ - --hash=sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074 \ - --hash=sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580 \ - 
--hash=sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7 \ - --hash=sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5 \ - --hash=sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3 \ - --hash=sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0 \ - --hash=sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24 \ - --hash=sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139 \ - --hash=sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db \ - --hash=sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc \ - --hash=sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789 \ - --hash=sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f \ - --hash=sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2 \ - --hash=sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c \ - --hash=sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232 \ - --hash=sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6 \ - --hash=sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c \ - --hash=sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29 \ - --hash=sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489 \ - --hash=sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94 \ - --hash=sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751 \ - --hash=sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2 \ - --hash=sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda \ - --hash=sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9 \ - --hash=sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51 \ - --hash=sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c \ - 
--hash=sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8 \ - --hash=sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989 \ - --hash=sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511 \ - --hash=sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1 \ - --hash=sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2 \ - --hash=sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150 \ - --hash=sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c \ - --hash=sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965 \ - --hash=sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f \ - --hash=sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58 \ - --hash=sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b \ - --hash=sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f \ - --hash=sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d \ - --hash=sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821 \ - --hash=sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de \ - --hash=sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121 \ - --hash=sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855 \ - --hash=sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272 \ - --hash=sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60 \ - --hash=sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02 \ - --hash=sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1 \ - --hash=sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140 \ - --hash=sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879 \ - --hash=sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940 \ - 
--hash=sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364 \ - --hash=sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4 \ - --hash=sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e \ - --hash=sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420 \ - --hash=sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5 \ - --hash=sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24 \ - --hash=sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c \ - --hash=sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf \ - --hash=sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f \ - --hash=sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e \ - --hash=sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab \ - --hash=sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08 \ - --hash=sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92 \ - --hash=sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a \ - --hash=sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8 +rpds-py==0.21.0 \ + --hash=sha256:031819f906bb146561af051c7cef4ba2003d28cff07efacef59da973ff7969ba \ + --hash=sha256:0626238a43152918f9e72ede9a3b6ccc9e299adc8ade0d67c5e142d564c9a83d \ + --hash=sha256:085ed25baac88953d4283e5b5bd094b155075bb40d07c29c4f073e10623f9f2e \ + --hash=sha256:0a9e0759e7be10109645a9fddaaad0619d58c9bf30a3f248a2ea57a7c417173a \ + --hash=sha256:0c025820b78817db6a76413fff6866790786c38f95ea3f3d3c93dbb73b632202 \ + --hash=sha256:1ff2eba7f6c0cb523d7e9cff0903f2fe1feff8f0b2ceb6bd71c0e20a4dcee271 \ + --hash=sha256:20cc1ed0bcc86d8e1a7e968cce15be45178fd16e2ff656a243145e0b439bd250 \ + --hash=sha256:241e6c125568493f553c3d0fdbb38c74babf54b45cef86439d4cd97ff8feb34d \ + 
--hash=sha256:2c51d99c30091f72a3c5d126fad26236c3f75716b8b5e5cf8effb18889ced928 \ + --hash=sha256:2d6129137f43f7fa02d41542ffff4871d4aefa724a5fe38e2c31a4e0fd343fb0 \ + --hash=sha256:30b912c965b2aa76ba5168fd610087bad7fcde47f0a8367ee8f1876086ee6d1d \ + --hash=sha256:30bdc973f10d28e0337f71d202ff29345320f8bc49a31c90e6c257e1ccef4333 \ + --hash=sha256:320c808df533695326610a1b6a0a6e98f033e49de55d7dc36a13c8a30cfa756e \ + --hash=sha256:32eb88c30b6a4f0605508023b7141d043a79b14acb3b969aa0b4f99b25bc7d4a \ + --hash=sha256:3b766a9f57663396e4f34f5140b3595b233a7b146e94777b97a8413a1da1be18 \ + --hash=sha256:3b929c2bb6e29ab31f12a1117c39f7e6d6450419ab7464a4ea9b0b417174f044 \ + --hash=sha256:3e30a69a706e8ea20444b98a49f386c17b26f860aa9245329bab0851ed100677 \ + --hash=sha256:3e53861b29a13d5b70116ea4230b5f0f3547b2c222c5daa090eb7c9c82d7f664 \ + --hash=sha256:40c91c6e34cf016fa8e6b59d75e3dbe354830777fcfd74c58b279dceb7975b75 \ + --hash=sha256:4991ca61656e3160cdaca4851151fd3f4a92e9eba5c7a530ab030d6aee96ec89 \ + --hash=sha256:4ab2c2a26d2f69cdf833174f4d9d86118edc781ad9a8fa13970b527bf8236027 \ + --hash=sha256:4e8921a259f54bfbc755c5bbd60c82bb2339ae0324163f32868f63f0ebb873d9 \ + --hash=sha256:4eb2de8a147ffe0626bfdc275fc6563aa7bf4b6db59cf0d44f0ccd6ca625a24e \ + --hash=sha256:5145282a7cd2ac16ea0dc46b82167754d5e103a05614b724457cffe614f25bd8 \ + --hash=sha256:520ed8b99b0bf86a176271f6fe23024323862ac674b1ce5b02a72bfeff3fff44 \ + --hash=sha256:52c041802a6efa625ea18027a0723676a778869481d16803481ef6cc02ea8cb3 \ + --hash=sha256:5555db3e618a77034954b9dc547eae94166391a98eb867905ec8fcbce1308d95 \ + --hash=sha256:58a0e345be4b18e6b8501d3b0aa540dad90caeed814c515e5206bb2ec26736fd \ + --hash=sha256:590ef88db231c9c1eece44dcfefd7515d8bf0d986d64d0caf06a81998a9e8cab \ + --hash=sha256:5afb5efde74c54724e1a01118c6e5c15e54e642c42a1ba588ab1f03544ac8c7a \ + --hash=sha256:688c93b77e468d72579351a84b95f976bd7b3e84aa6686be6497045ba84be560 \ + --hash=sha256:6b4ef7725386dc0762857097f6b7266a6cdd62bfd209664da6712cb26acef035 \ + 
--hash=sha256:6bc0e697d4d79ab1aacbf20ee5f0df80359ecf55db33ff41481cf3e24f206919 \ + --hash=sha256:6dcc4949be728ede49e6244eabd04064336012b37f5c2200e8ec8eb2988b209c \ + --hash=sha256:6f54e7106f0001244a5f4cf810ba8d3f9c542e2730821b16e969d6887b664266 \ + --hash=sha256:808f1ac7cf3b44f81c9475475ceb221f982ef548e44e024ad5f9e7060649540e \ + --hash=sha256:8404b3717da03cbf773a1d275d01fec84ea007754ed380f63dfc24fb76ce4592 \ + --hash=sha256:878f6fea96621fda5303a2867887686d7a198d9e0f8a40be100a63f5d60c88c9 \ + --hash=sha256:8a7ff941004d74d55a47f916afc38494bd1cfd4b53c482b77c03147c91ac0ac3 \ + --hash=sha256:95a5bad1ac8a5c77b4e658671642e4af3707f095d2b78a1fdd08af0dfb647624 \ + --hash=sha256:97ef67d9bbc3e15584c2f3c74bcf064af36336c10d2e21a2131e123ce0f924c9 \ + --hash=sha256:98486337f7b4f3c324ab402e83453e25bb844f44418c066623db88e4c56b7c7b \ + --hash=sha256:98e4fe5db40db87ce1c65031463a760ec7906ab230ad2249b4572c2fc3ef1f9f \ + --hash=sha256:998a8080c4495e4f72132f3d66ff91f5997d799e86cec6ee05342f8f3cda7dca \ + --hash=sha256:9afe42102b40007f588666bc7de82451e10c6788f6f70984629db193849dced1 \ + --hash=sha256:9e20da3957bdf7824afdd4b6eeb29510e83e026473e04952dca565170cd1ecc8 \ + --hash=sha256:a017f813f24b9df929674d0332a374d40d7f0162b326562daae8066b502d0590 \ + --hash=sha256:a429b99337062877d7875e4ff1a51fe788424d522bd64a8c0a20ef3021fdb6ed \ + --hash=sha256:a58ce66847711c4aa2ecfcfaff04cb0327f907fead8945ffc47d9407f41ff952 \ + --hash=sha256:a78d8b634c9df7f8d175451cfeac3810a702ccb85f98ec95797fa98b942cea11 \ + --hash=sha256:a89a8ce9e4e75aeb7fa5d8ad0f3fecdee813802592f4f46a15754dcb2fd6b061 \ + --hash=sha256:a8eeec67590e94189f434c6d11c426892e396ae59e4801d17a93ac96b8c02a6c \ + --hash=sha256:aaeb25ccfb9b9014a10eaf70904ebf3f79faaa8e60e99e19eef9f478651b9b74 \ + --hash=sha256:ad116dda078d0bc4886cb7840e19811562acdc7a8e296ea6ec37e70326c1b41c \ + --hash=sha256:af04ac89c738e0f0f1b913918024c3eab6e3ace989518ea838807177d38a2e94 \ + --hash=sha256:af4a644bf890f56e41e74be7d34e9511e4954894d544ec6b8efe1e21a1a8da6c \ + 
--hash=sha256:b21747f79f360e790525e6f6438c7569ddbfb1b3197b9e65043f25c3c9b489d8 \ + --hash=sha256:b229ce052ddf1a01c67d68166c19cb004fb3612424921b81c46e7ea7ccf7c3bf \ + --hash=sha256:b4de1da871b5c0fd5537b26a6fc6814c3cc05cabe0c941db6e9044ffbb12f04a \ + --hash=sha256:b80b4690bbff51a034bfde9c9f6bf9357f0a8c61f548942b80f7b66356508bf5 \ + --hash=sha256:b876f2bc27ab5954e2fd88890c071bd0ed18b9c50f6ec3de3c50a5ece612f7a6 \ + --hash=sha256:b8f107395f2f1d151181880b69a2869c69e87ec079c49c0016ab96860b6acbe5 \ + --hash=sha256:b9b76e2afd585803c53c5b29e992ecd183f68285b62fe2668383a18e74abe7a3 \ + --hash=sha256:c2b2f71c6ad6c2e4fc9ed9401080badd1469fa9889657ec3abea42a3d6b2e1ed \ + --hash=sha256:c3761f62fcfccf0864cc4665b6e7c3f0c626f0380b41b8bd1ce322103fa3ef87 \ + --hash=sha256:c38dbf31c57032667dd5a2f0568ccde66e868e8f78d5a0d27dcc56d70f3fcd3b \ + --hash=sha256:ca9989d5d9b1b300bc18e1801c67b9f6d2c66b8fd9621b36072ed1df2c977f72 \ + --hash=sha256:cbd7504a10b0955ea287114f003b7ad62330c9e65ba012c6223dba646f6ffd05 \ + --hash=sha256:d167e4dbbdac48bd58893c7e446684ad5d425b407f9336e04ab52e8b9194e2ed \ + --hash=sha256:d2132377f9deef0c4db89e65e8bb28644ff75a18df5293e132a8d67748397b9f \ + --hash=sha256:da52d62a96e61c1c444f3998c434e8b263c384f6d68aca8274d2e08d1906325c \ + --hash=sha256:daa8efac2a1273eed2354397a51216ae1e198ecbce9036fba4e7610b308b6153 \ + --hash=sha256:dc5695c321e518d9f03b7ea6abb5ea3af4567766f9852ad1560f501b17588c7b \ + --hash=sha256:de552f4a1916e520f2703ec474d2b4d3f86d41f353e7680b597512ffe7eac5d0 \ + --hash=sha256:de609a6f1b682f70bb7163da745ee815d8f230d97276db049ab447767466a09d \ + --hash=sha256:e12bb09678f38b7597b8346983d2323a6482dcd59e423d9448108c1be37cac9d \ + --hash=sha256:e168afe6bf6ab7ab46c8c375606298784ecbe3ba31c0980b7dcbb9631dcba97e \ + --hash=sha256:e78868e98f34f34a88e23ee9ccaeeec460e4eaf6db16d51d7a9b883e5e785a5e \ + --hash=sha256:e860f065cc4ea6f256d6f411aba4b1251255366e48e972f8a347cf88077b24fd \ + --hash=sha256:ea3a6ac4d74820c98fcc9da4a57847ad2cc36475a8bd9683f32ab6d47a2bd682 \ + 
--hash=sha256:ebf64e281a06c904a7636781d2e973d1f0926a5b8b480ac658dc0f556e7779f4 \ + --hash=sha256:ed6378c9d66d0de903763e7706383d60c33829581f0adff47b6535f1802fa6db \ + --hash=sha256:ee1e4fc267b437bb89990b2f2abf6c25765b89b72dd4a11e21934df449e0c976 \ + --hash=sha256:ee4eafd77cc98d355a0d02f263efc0d3ae3ce4a7c24740010a8b4012bbb24937 \ + --hash=sha256:efec946f331349dfc4ae9d0e034c263ddde19414fe5128580f512619abed05f1 \ + --hash=sha256:f414da5c51bf350e4b7960644617c130140423882305f7574b6cf65a3081cecb \ + --hash=sha256:f71009b0d5e94c0e86533c0b27ed7cacc1239cb51c178fd239c3cfefefb0400a \ + --hash=sha256:f983e4c2f603c95dde63df633eec42955508eefd8d0f0e6d236d31a044c882d7 \ + --hash=sha256:faa5e8496c530f9c71f2b4e1c49758b06e5f4055e17144906245c99fa6d45356 \ + --hash=sha256:fed5dfefdf384d6fe975cc026886aece4f292feaf69d0eeb716cfd3c5a4dd8be # via # jsonschema # referencing -setuptools==75.1.0 \ - --hash=sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2 \ - --hash=sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538 +setuptools==75.5.0 \ + --hash=sha256:5c4ccb41111392671f02bb5f8436dfc5a9a7185e80500531b133f5775c4163ef \ + --hash=sha256:87cb777c3b96d638ca02031192d40390e0ad97737e27b6b4fa831bea86f2f829 # via # documenteer # sphinxcontrib-bibtex @@ -1237,9 +1244,9 @@ soupsieve==2.6 \ --hash=sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb \ --hash=sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9 # via beautifulsoup4 -sphinx==8.0.2 \ - --hash=sha256:0cce1ddcc4fd3532cf1dd283bc7d886758362c5c1de6598696579ce96d8ffa5b \ - --hash=sha256:56173572ae6c1b9a38911786e206a110c9749116745873feae4f9ce88e59391d +sphinx==8.1.3 \ + --hash=sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2 \ + --hash=sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927 # via # autodoc-pydantic # documenteer @@ -1255,13 +1262,14 @@ sphinx==8.0.2 \ # sphinx-prompt # sphinxcontrib-bibtex # sphinxcontrib-jquery + 
# sphinxcontrib-mermaid # sphinxcontrib-redoc # sphinxcontrib-youtube # sphinxext-opengraph # sphinxext-rediraffe -sphinx-autodoc-typehints==2.4.4 \ - --hash=sha256:940de2951fd584d147e46772579fdc904f945c5f1ee1a78c614646abfbbef18b \ - --hash=sha256:e743512da58b67a06579a1462798a6907664ab77460758a43234adeac350afbf +sphinx-autodoc-typehints==2.5.0 \ + --hash=sha256:259e1026b218d563d72743f417fcc25906a9614897fe37f91bd8d7d58f748c3b \ + --hash=sha256:53def4753239683835b19bfa8b68c021388bd48a096efcb02cdab508ece27363 # via documenteer sphinx-automodapi==0.18.0 \ --hash=sha256:022860385590768f52d4f6e19abb83b2574772d2721fb4050ecdb6e593a1a440 \ @@ -1317,9 +1325,9 @@ sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 # via sphinx -sphinxcontrib-mermaid==0.9.2 \ - --hash=sha256:252ef13dd23164b28f16d8b0205cf184b9d8e2b714a302274d9f59eb708e77af \ - --hash=sha256:6795a72037ca55e65663d2a2c1a043d636dc3d30d418e56dd6087d1459d98a5d +sphinxcontrib-mermaid==1.0.0 \ + --hash=sha256:2e8ab67d3e1e2816663f9347d026a8dee4a858acdd4ad32dd1c808893db88146 \ + --hash=sha256:60b72710ea02087f212028feb09711225fbc2e343a10d34822fe787510e1caa3 # via documenteer sphinxcontrib-qthelp==2.0.0 \ --hash=sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab \ @@ -1344,56 +1352,64 @@ sphinxext-rediraffe==0.2.7 \ --hash=sha256:651dcbfae5ffda9ffd534dfb8025f36120e5efb6ea1a33f5420023862b9f725d \ --hash=sha256:9e430a52d4403847f4ffb3a8dd6dfc34a9fe43525305131f52ed899743a5fd8c # via documenteer -sqlalchemy==2.0.35 \ - --hash=sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9 \ - --hash=sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00 \ - --hash=sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee \ - --hash=sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6 \ - 
--hash=sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1 \ - --hash=sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72 \ - --hash=sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf \ - --hash=sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8 \ - --hash=sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b \ - --hash=sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc \ - --hash=sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c \ - --hash=sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1 \ - --hash=sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3 \ - --hash=sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5 \ - --hash=sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90 \ - --hash=sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec \ - --hash=sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71 \ - --hash=sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7 \ - --hash=sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b \ - --hash=sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468 \ - --hash=sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3 \ - --hash=sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e \ - --hash=sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139 \ - --hash=sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff \ - --hash=sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11 \ - --hash=sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01 \ - --hash=sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62 \ - --hash=sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d \ - 
--hash=sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a \ - --hash=sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db \ - --hash=sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87 \ - --hash=sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e \ - --hash=sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1 \ - --hash=sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9 \ - --hash=sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f \ - --hash=sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0 \ - --hash=sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44 \ - --hash=sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936 \ - --hash=sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8 \ - --hash=sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea \ - --hash=sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f \ - --hash=sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4 \ - --hash=sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0 \ - --hash=sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c \ - --hash=sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f \ - --hash=sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60 \ - --hash=sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2 \ - --hash=sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9 \ - --hash=sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33 +sqlalchemy==2.0.36 \ + --hash=sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763 \ + --hash=sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436 \ + 
--hash=sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2 \ + --hash=sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588 \ + --hash=sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e \ + --hash=sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959 \ + --hash=sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d \ + --hash=sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575 \ + --hash=sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908 \ + --hash=sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8 \ + --hash=sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8 \ + --hash=sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545 \ + --hash=sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7 \ + --hash=sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971 \ + --hash=sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855 \ + --hash=sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c \ + --hash=sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71 \ + --hash=sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d \ + --hash=sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb \ + --hash=sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72 \ + --hash=sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f \ + --hash=sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5 \ + --hash=sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346 \ + --hash=sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24 \ + --hash=sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e \ + --hash=sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5 \ + 
--hash=sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08 \ + --hash=sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793 \ + --hash=sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88 \ + --hash=sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686 \ + --hash=sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b \ + --hash=sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2 \ + --hash=sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28 \ + --hash=sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d \ + --hash=sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5 \ + --hash=sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a \ + --hash=sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a \ + --hash=sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3 \ + --hash=sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf \ + --hash=sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5 \ + --hash=sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef \ + --hash=sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689 \ + --hash=sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c \ + --hash=sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b \ + --hash=sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07 \ + --hash=sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa \ + --hash=sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06 \ + --hash=sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1 \ + --hash=sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff \ + --hash=sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa \ + 
--hash=sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687 \ + --hash=sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4 \ + --hash=sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb \ + --hash=sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44 \ + --hash=sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c \ + --hash=sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e \ + --hash=sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53 # via jupyter-cache stack-data==0.6.3 \ --hash=sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9 \ @@ -1403,9 +1419,9 @@ tabulate==0.9.0 \ --hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \ --hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f # via jupyter-cache -termcolor==2.4.0 \ - --hash=sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63 \ - --hash=sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a +termcolor==2.5.0 \ + --hash=sha256:37b17b5fc1e604945c2642c872a3764b5d547a48009871aea3edd3afa180afb8 \ + --hash=sha256:998d8d27da6d48442e8e1f016119076b690d962507531df4890fcd2db2ef8a6f # via pytest-sugar tomlkit==0.13.2 \ --hash=sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde \ @@ -1438,49 +1454,6 @@ traitlets==5.14.3 \ # matplotlib-inline # nbclient # nbformat -typed-ast==1.5.5 \ - --hash=sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10 \ - --hash=sha256:045f9930a1550d9352464e5149710d56a2aed23a2ffe78946478f7b5416f1ede \ - --hash=sha256:0635900d16ae133cab3b26c607586131269f88266954eb04ec31535c9a12ef1e \ - --hash=sha256:118c1ce46ce58fda78503eae14b7664163aa735b620b64b5b725453696f2a35c \ - --hash=sha256:16f7313e0a08c7de57f2998c85e2a69a642e97cb32f87eb65fbfe88381a5e44d \ - 
--hash=sha256:1efebbbf4604ad1283e963e8915daa240cb4bf5067053cf2f0baadc4d4fb51b8 \ - --hash=sha256:2188bc33d85951ea4ddad55d2b35598b2709d122c11c75cffd529fbc9965508e \ - --hash=sha256:2b946ef8c04f77230489f75b4b5a4a6f24c078be4aed241cfabe9cbf4156e7e5 \ - --hash=sha256:335f22ccb244da2b5c296e6f96b06ee9bed46526db0de38d2f0e5a6597b81155 \ - --hash=sha256:381eed9c95484ceef5ced626355fdc0765ab51d8553fec08661dce654a935db4 \ - --hash=sha256:429ae404f69dc94b9361bb62291885894b7c6fb4640d561179548c849f8492ba \ - --hash=sha256:44f214394fc1af23ca6d4e9e744804d890045d1643dd7e8229951e0ef39429b5 \ - --hash=sha256:48074261a842acf825af1968cd912f6f21357316080ebaca5f19abbb11690c8a \ - --hash=sha256:4bc1efe0ce3ffb74784e06460f01a223ac1f6ab31c6bc0376a21184bf5aabe3b \ - --hash=sha256:57bfc3cf35a0f2fdf0a88a3044aafaec1d2f24d8ae8cd87c4f58d615fb5b6311 \ - --hash=sha256:597fc66b4162f959ee6a96b978c0435bd63791e31e4f410622d19f1686d5e769 \ - --hash=sha256:5f7a8c46a8b333f71abd61d7ab9255440d4a588f34a21f126bbfc95f6049e686 \ - --hash=sha256:5fe83a9a44c4ce67c796a1b466c270c1272e176603d5e06f6afbc101a572859d \ - --hash=sha256:61443214d9b4c660dcf4b5307f15c12cb30bdfe9588ce6158f4a005baeb167b2 \ - --hash=sha256:622e4a006472b05cf6ef7f9f2636edc51bda670b7bbffa18d26b255269d3d814 \ - --hash=sha256:6eb936d107e4d474940469e8ec5b380c9b329b5f08b78282d46baeebd3692dc9 \ - --hash=sha256:7f58fabdde8dcbe764cef5e1a7fcb440f2463c1bbbec1cf2a86ca7bc1f95184b \ - --hash=sha256:83509f9324011c9a39faaef0922c6f720f9623afe3fe220b6d0b15638247206b \ - --hash=sha256:8c524eb3024edcc04e288db9541fe1f438f82d281e591c548903d5b77ad1ddd4 \ - --hash=sha256:94282f7a354f36ef5dbce0ef3467ebf6a258e370ab33d5b40c249fa996e590dd \ - --hash=sha256:b445c2abfecab89a932b20bd8261488d574591173d07827c1eda32c457358b18 \ - --hash=sha256:be4919b808efa61101456e87f2d4c75b228f4e52618621c77f1ddcaae15904fa \ - --hash=sha256:bfd39a41c0ef6f31684daff53befddae608f9daf6957140228a08e51f312d7e6 \ - --hash=sha256:c631da9710271cb67b08bd3f3813b7af7f4c69c319b75475436fcab8c3d21bee \ - 
--hash=sha256:cc95ffaaab2be3b25eb938779e43f513e0e538a84dd14a5d844b8f2932593d88 \ - --hash=sha256:d09d930c2d1d621f717bb217bf1fe2584616febb5138d9b3e8cdd26506c3f6d4 \ - --hash=sha256:d40c10326893ecab8a80a53039164a224984339b2c32a6baf55ecbd5b1df6431 \ - --hash=sha256:d41b7a686ce653e06c2609075d397ebd5b969d821b9797d029fccd71fdec8e04 \ - --hash=sha256:d5c0c112a74c0e5db2c75882a0adf3133adedcdbfd8cf7c9d6ed77365ab90a1d \ - --hash=sha256:e1a976ed4cc2d71bb073e1b2a250892a6e968ff02aa14c1f40eba4f365ffec02 \ - --hash=sha256:e48bf27022897577d8479eaed64701ecaf0467182448bd95759883300ca818c8 \ - --hash=sha256:ed4a1a42df8a3dfb6b40c3d2de109e935949f2f66b19703eafade03173f8f437 \ - --hash=sha256:f0aefdd66f1784c58f65b502b6cf8b121544680456d1cebbd300c2c813899274 \ - --hash=sha256:fc2b8c4e1bc5cd96c1a823a885e6b158f8451cf6f5530e1829390b4d27d0807f \ - --hash=sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a \ - --hash=sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2 - # via diagrams types-pyyaml==6.0.12.20240917 \ --hash=sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570 \ --hash=sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587 @@ -1511,7 +1484,7 @@ wcwidth==0.2.13 \ --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \ --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5 # via prompt-toolkit -zipp==3.20.2 \ - --hash=sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350 \ - --hash=sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29 +zipp==3.21.0 \ + --hash=sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4 \ + --hash=sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931 # via importlib-metadata diff --git a/requirements/main.txt b/requirements/main.txt index 10ad6927b2..489a168c0c 100644 --- a/requirements/main.txt +++ b/requirements/main.txt @@ -4,9 +4,9 @@ annotated-types==0.7.0 \ 
--hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89 # via pydantic -anyio==4.6.0 \ - --hash=sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb \ - --hash=sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a +anyio==4.6.2.post1 \ + --hash=sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c \ + --hash=sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d # via # httpcore # starlette @@ -115,97 +115,112 @@ cffi==1.17.1 ; platform_python_implementation != 'PyPy' \ --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b # via cryptography -charset-normalizer==3.3.2 \ - --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \ - --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \ - --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \ - --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \ - --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \ - --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \ - --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \ - --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \ - --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \ - --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \ - --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \ - --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \ - --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \ - 
--hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \ - --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \ - --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \ - --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \ - --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \ - --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \ - --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \ - --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \ - --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \ - --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \ - --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \ - --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \ - --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \ - --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \ - --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \ - --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \ - --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \ - --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \ - --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \ - --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \ - --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \ - --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \ - --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \ - --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \ - 
--hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \ - --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \ - --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \ - --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \ - --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \ - --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \ - --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \ - --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \ - --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \ - --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \ - --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \ - --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \ - --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \ - --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \ - --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \ - --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \ - --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \ - --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \ - --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \ - --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \ - --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \ - --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \ - --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \ - --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \ - 
--hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \ - --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \ - --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \ - --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \ - --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \ - --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \ - --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \ - --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \ - --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \ - --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \ - --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \ - --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \ - --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \ - --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \ - --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \ - --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \ - --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \ - --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \ - --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \ - --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \ - --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \ - --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \ - --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \ - --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \ - 
--hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \ - --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \ - --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \ - --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \ - --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561 +charset-normalizer==3.4.0 \ + --hash=sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621 \ + --hash=sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6 \ + --hash=sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8 \ + --hash=sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912 \ + --hash=sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c \ + --hash=sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b \ + --hash=sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d \ + --hash=sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d \ + --hash=sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95 \ + --hash=sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e \ + --hash=sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565 \ + --hash=sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64 \ + --hash=sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab \ + --hash=sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be \ + --hash=sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e \ + --hash=sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907 \ + --hash=sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0 \ + --hash=sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2 \ + 
--hash=sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62 \ + --hash=sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62 \ + --hash=sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23 \ + --hash=sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc \ + --hash=sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284 \ + --hash=sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca \ + --hash=sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455 \ + --hash=sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858 \ + --hash=sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b \ + --hash=sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594 \ + --hash=sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc \ + --hash=sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db \ + --hash=sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b \ + --hash=sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea \ + --hash=sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6 \ + --hash=sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920 \ + --hash=sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749 \ + --hash=sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7 \ + --hash=sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd \ + --hash=sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99 \ + --hash=sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242 \ + --hash=sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee \ + --hash=sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129 \ + --hash=sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2 \ + 
--hash=sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51 \ + --hash=sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee \ + --hash=sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8 \ + --hash=sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b \ + --hash=sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613 \ + --hash=sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742 \ + --hash=sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe \ + --hash=sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3 \ + --hash=sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5 \ + --hash=sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631 \ + --hash=sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7 \ + --hash=sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15 \ + --hash=sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c \ + --hash=sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea \ + --hash=sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417 \ + --hash=sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250 \ + --hash=sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88 \ + --hash=sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca \ + --hash=sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa \ + --hash=sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99 \ + --hash=sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149 \ + --hash=sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41 \ + --hash=sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574 \ + --hash=sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0 \ + 
--hash=sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f \ + --hash=sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d \ + --hash=sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654 \ + --hash=sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3 \ + --hash=sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19 \ + --hash=sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90 \ + --hash=sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578 \ + --hash=sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9 \ + --hash=sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1 \ + --hash=sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51 \ + --hash=sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719 \ + --hash=sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236 \ + --hash=sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a \ + --hash=sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c \ + --hash=sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade \ + --hash=sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944 \ + --hash=sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc \ + --hash=sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6 \ + --hash=sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6 \ + --hash=sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27 \ + --hash=sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6 \ + --hash=sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2 \ + --hash=sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12 \ + --hash=sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf \ + 
--hash=sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114 \ + --hash=sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7 \ + --hash=sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf \ + --hash=sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d \ + --hash=sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b \ + --hash=sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed \ + --hash=sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03 \ + --hash=sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4 \ + --hash=sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67 \ + --hash=sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365 \ + --hash=sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a \ + --hash=sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748 \ + --hash=sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b \ + --hash=sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079 \ + --hash=sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482 # via requests click==8.1.7 \ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \ @@ -217,41 +232,41 @@ colorama==0.4.6 ; platform_system == 'Windows' \ --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 # via click -cryptography==43.0.1 \ - --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ - --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ - --hash=sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d \ - --hash=sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062 \ - 
--hash=sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2 \ - --hash=sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4 \ - --hash=sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1 \ - --hash=sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 \ - --hash=sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84 \ - --hash=sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042 \ - --hash=sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d \ - --hash=sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962 \ - --hash=sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2 \ - --hash=sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa \ - --hash=sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d \ - --hash=sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365 \ - --hash=sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96 \ - --hash=sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47 \ - --hash=sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d \ - --hash=sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d \ - --hash=sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c \ - --hash=sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb \ - --hash=sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277 \ - --hash=sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172 \ - --hash=sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034 \ - --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ - --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 +cryptography==43.0.3 \ + 
--hash=sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362 \ + --hash=sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4 \ + --hash=sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa \ + --hash=sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83 \ + --hash=sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff \ + --hash=sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805 \ + --hash=sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6 \ + --hash=sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664 \ + --hash=sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08 \ + --hash=sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e \ + --hash=sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18 \ + --hash=sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f \ + --hash=sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73 \ + --hash=sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5 \ + --hash=sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984 \ + --hash=sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd \ + --hash=sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3 \ + --hash=sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e \ + --hash=sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405 \ + --hash=sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2 \ + --hash=sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c \ + --hash=sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995 \ + --hash=sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73 \ + --hash=sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16 \ + 
--hash=sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7 \ + --hash=sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd \ + --hash=sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7 # via # phalanx (pyproject.toml) # pyjwt # safir -fastapi==0.115.0 \ - --hash=sha256:17ea427674467486e997206a5ab25760f6b09e069f099b96f5b55a32fb6f1631 \ - --hash=sha256:f93b4ca3529a8ebc6fc3fcf710e5efa8de3df9b41570958abf1d97d843138004 +fastapi==0.115.5 \ + --hash=sha256:0e7a4d0dc0d01c68df21887cce0945e72d3c48b9f4f79dfe7a7d53aa08fbb289 \ + --hash=sha256:596b95adbe1474da47049e802f9a65ab2ffa9c2b07e7efee70eb8a66c9f2f796 # via safir gidgethub==5.3.0 \ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \ @@ -294,67 +309,68 @@ jinja2==3.1.4 \ --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d # via phalanx (pyproject.toml) -markupsafe==2.1.5 \ - --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ - --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ - --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ - --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ - --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ - --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ - --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ - --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ - --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ - --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ - --hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ - 
--hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ - --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ - --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ - --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ - --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ - --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ - --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ - --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ - --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ - --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ - --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ - --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ - --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ - --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ - --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ - --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ - --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ - --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ - --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ - --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ - --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ - --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ - --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ - --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ - 
--hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ - --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ - --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ - --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ - --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ - --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ - --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ - --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ - --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ - --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ - --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ - --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ - --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ - --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ - --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ - --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ - --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ - --hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ - --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ - --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ - --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ - --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ - --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ - --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ - 
--hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 +markupsafe==3.0.2 \ + --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \ + --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \ + --hash=sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0 \ + --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \ + --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \ + --hash=sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13 \ + --hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \ + --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \ + --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \ + --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \ + --hash=sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0 \ + --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \ + --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \ + --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \ + --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \ + --hash=sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff \ + --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \ + --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \ + --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \ + --hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \ + --hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \ + --hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \ + 
--hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \ + --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \ + --hash=sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a \ + --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \ + --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \ + --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \ + --hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \ + --hash=sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144 \ + --hash=sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f \ + --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \ + --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \ + --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \ + --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \ + --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \ + --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \ + --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \ + --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \ + --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \ + --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \ + --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \ + --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \ + --hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \ + --hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \ + --hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \ + 
--hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \ + --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \ + --hash=sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29 \ + --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \ + --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \ + --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \ + --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \ + --hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \ + --hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \ + --hash=sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a \ + --hash=sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178 \ + --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \ + --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \ + --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \ + --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50 # via jinja2 onepasswordconnectsdk==1.5.1 \ --hash=sha256:8924c614ffed98f29faada03dba940dc0bc47851b1f5f4ef7e312e43c10ec25b \ @@ -464,9 +480,9 @@ pydantic-core==2.23.4 \ # via # pydantic # safir -pyjwt==2.9.0 \ - --hash=sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850 \ - --hash=sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c +pyjwt==2.10.0 \ + --hash=sha256:543b77207db656de204372350926bed5a86201c4cbff159f623f79c7bb487a15 \ + --hash=sha256:7628a7eb7938959ac1b26e819a1df0fd3259505627b575e4bad6d08f76db695c # via gidgethub python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ @@ -539,9 +555,9 @@ safir==6.4.0 \ 
--hash=sha256:ba7af071eab0d198e6e15a2117028566f3f4237e02e2278e8bfc2633a7c68228 \ --hash=sha256:f38c3f1d7d76d304984b572288826510e5c7a0e1f965b2eabdd7f3bace07c48a # via phalanx (pyproject.toml) -safir-logging==6.4.0 \ - --hash=sha256:4031a430d738b8fe5bfd29125dce6cbf4e4949879307ba4146648afa3d24cd0a \ - --hash=sha256:e2dbf0b5d9dabecd70c27bff9bf01629bf0724b05b0f0087a1fe4f45c702215f +safir-logging==6.5.1 \ + --hash=sha256:b056306de26627e29bd6a6d04b1144456a1319ec0e15a67ebbc12b43362a27cd \ + --hash=sha256:ff591f0247fda10842835e714a6dbf601a894432d33d6d98e20fe035a5ad952c # via safir six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -558,9 +574,9 @@ sniffio==1.3.1 \ # anyio # httpcore # httpx -starlette==0.38.6 \ - --hash=sha256:4517a1409e2e73ee4951214ba012052b9e16f60e90d73cfb06192c19203bbb05 \ - --hash=sha256:863a1588f5574e70a821dadefb41e4881ea451a47a3cd1b4df359d4ffefe5ead +starlette==0.41.2 \ + --hash=sha256:9834fd799d1a87fd346deb76158668cfa0b0d56f85caefe8268e2d97c3468b62 \ + --hash=sha256:fbc189474b4731cf30fcef52f18a8d070e3f3b46c6a04c97579e85e6ffca942d # via # fastapi # safir diff --git a/requirements/tox.txt b/requirements/tox.txt index f50f47aa68..a3206bec05 100644 --- a/requirements/tox.txt +++ b/requirements/tox.txt @@ -15,9 +15,9 @@ colorama==0.4.6 \ # -c requirements/dev.txt # -c requirements/main.txt # tox -distlib==0.3.8 \ - --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ - --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 +distlib==0.3.9 \ + --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ + --hash=sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403 # via virtualenv filelock==3.16.1 \ --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ @@ -25,9 +25,9 @@ filelock==3.16.1 \ # via # tox # virtualenv -packaging==24.1 \ - 
--hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 +packaging==24.2 \ + --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ + --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f # via # -c requirements/dev.txt # pyproject-api @@ -50,37 +50,37 @@ pyproject-api==1.8.0 \ --hash=sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228 \ --hash=sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496 # via tox -tox==4.20.0 \ - --hash=sha256:21a8005e3d3fe5658a8e36b8ca3ed13a4230429063c5cc2a2fdac6ee5aa0de34 \ - --hash=sha256:5b78a49b6eaaeab3ae4186415e7c97d524f762ae967c63562687c3e5f0ec23d5 +tox==4.23.2 \ + --hash=sha256:452bc32bb031f2282881a2118923176445bac783ab97c874b8770ab4c3b76c38 \ + --hash=sha256:86075e00e555df6e82e74cfc333917f91ecb47ffbc868dcafbd2672e332f4a2c # via # -r requirements/tox.in # tox-uv -tox-uv==1.13.0 \ - --hash=sha256:1037e4abad15a3b708b5970ed7a17a0765d7249b641a92b155bc3343b8b0145b \ - --hash=sha256:fb087b8b4ff779c72b48fc72ea1995387bb1c0dfb37910c20e46cef8b5f98c15 +tox-uv==1.16.0 \ + --hash=sha256:71b2e2fa6c35c1360b91a302df1d65b3e5a1f656b321c5ebf7b84545804c9f01 \ + --hash=sha256:e6f0b525a687e745ab878d07cbf5c7e85d582028d4a7c8935f95e84350651432 # via -r requirements/tox.in -uv==0.4.15 \ - --hash=sha256:04858bfd551fabe1635127d9a0afe5c62e1e7d56cf309a9674840c90bfc1f21e \ - --hash=sha256:0e9b78f1a800a4cfdfbdc9ff4e5d4cce34af770f8a1f2b9416b161f294eb3703 \ - --hash=sha256:1401e73f0e8df62b4cfbf394e65a75f18b73bf8a94a6c5653a55bd6fdb8e1bc3 \ - --hash=sha256:1bb79cb06be9bb25a1bf8641bf34593f64a96b3ba66ebd8712954f647d9faa24 \ - --hash=sha256:21a3cedb2276d635543a10a11c61f75c6e387110e23e90cdb6c6dd2e1f3c9453 \ - --hash=sha256:27884429b7fed371fe1fcbe829659c4a259463d0ecacb7891d800e4754b5f24c \ - --hash=sha256:4e40deb2cf2cb403dbaf65209d49c45462ebbb1bff290d4c18b902b5b385cdc9 \ - 
--hash=sha256:6eef6881abf9b858020ffd23f4e5d77423329da2d4a1bc0af6613c2f698c369a \ - --hash=sha256:7fcf7f3812dd173d39273e99fb2abb0814be6133e7a721baa424cbcfd25b483b \ - --hash=sha256:8d45295757f66d1913e5917c06f1974745adad842403d419362491939be889a6 \ - --hash=sha256:8e36b8e07595fc6216d01e729c81a0b4ff029a93cc2ef987a73d3b650d6d559c \ - --hash=sha256:9822fa4db0d8d50abf5eebe081c01666a98120455090d0b71463d01d5d4153c1 \ - --hash=sha256:9e28141883c0aa8525ad5418e519d8791b7dd75f35020d3b1457db89346c5dc8 \ - --hash=sha256:a5920ff4d114025c51d3f925130ca3b0fad277631846b1109347c24948b29159 \ - --hash=sha256:be46b37b569e3c8ffb7d78022bcc0eadeb987109f709c1cec01b00c261ed9595 \ - --hash=sha256:cf7d554656bb8c5b7710300e04d86ab5137ebdd31fe309d66860a9d474b385f8 \ - --hash=sha256:d16ae6b97eb77f478dfe51d6eb3627048d3f47bd04282d3006e6a212e541dba0 \ - --hash=sha256:e32137ba8202b1291e879e8145113bfb543fcc992b5f043852a96d803788b83c +uv==0.5.2 \ + --hash=sha256:15c7ffa08ae21abd221dbdf9ba25c8969235f587cec6df8035552434e5ca1cc5 \ + --hash=sha256:2597e91be45b3f4458d0d16a5a1cda7e93af7d6dbfddf251aae5377f9187fa88 \ + --hash=sha256:27d666da8fbb0f87d9df67abf9feea0da4ee1336730f2c4be29a11f3feaa0a29 \ + --hash=sha256:374e9498e155fcaa8728a6770b84f03781106d705332f4ec059e1cc93c8f4d8a \ + --hash=sha256:5052758d374dd769efd0c70b4789ffb08439567eb114ad8fe728536bb5cc5299 \ + --hash=sha256:675ca34829ceca3e9de395cf05e8f881334a24488f97dd923c463830270d52a7 \ + --hash=sha256:67776d34cba359c63919c5ad50331171261d2ec7a83fd07f032eb8cc22e22b8e \ + --hash=sha256:71467545d51883d1af7094c8f6da69b55e7d49b742c2dc707d644676dcb66515 \ + --hash=sha256:772b32d157ec8f27c0099ecac94cf5cd298bce72f1a1f512205591de4e9f0c5c \ + --hash=sha256:7bde66f13571e437fd45f32f5742ab53d5e011b4edb1c74cb74cb8b1cbb828b5 \ + --hash=sha256:89e60ad9601f35f187326de84f35e7517c6eb1438359da42ec85cfd9c1895957 \ + --hash=sha256:a4d4fdad03e6dc3e8216192b8a12bcf2c71c8b12046e755575c7f262cbb61924 \ + --hash=sha256:a8a9897dd7657258c53f41aecdbe787da99f4fc0775f19826ab65cc0a7136cbf 
\ + --hash=sha256:c9795b990fb0b2a18d3a8cef8822e13c6a6f438bc16d34ccf01d931c76cfd5da \ + --hash=sha256:cfba5b0070652da4174083b78852f3ab3d262ba1c8b63a4d5ae497263b02b834 \ + --hash=sha256:d0834c6b37750c045bbea80600d3ae3e95becc4db148f5c0d0bc3ec6a7924e8f \ + --hash=sha256:d1fe4e025dbb9ec5c9250bfc1231847b8487706538f94d10c769f0a54db3e0af \ + --hash=sha256:dfcd8275ff8cb59d5f26f826a44270b2fe8f38aa7188d7355c48d3e9b759d0c0 # via tox-uv -virtualenv==20.26.5 \ - --hash=sha256:4f3ac17b81fba3ce3bd6f4ead2749a72da5929c01774948e243db9ba41df4ff6 \ - --hash=sha256:ce489cac131aa58f4b25e321d6d186171f78e6cb13fafbf32a840cee67733ff4 +virtualenv==20.27.1 \ + --hash=sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba \ + --hash=sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4 # via tox diff --git a/src/phalanx/models/applications.py b/src/phalanx/models/applications.py index f490b5776b..33e6020c40 100644 --- a/src/phalanx/models/applications.py +++ b/src/phalanx/models/applications.py @@ -48,6 +48,7 @@ class Project(Enum): rubin = "rubin" roundtable = "roundtable" monitoring = "monitoring" + support = "support" prompt = "prompt" telescope = "telescope" diff --git a/starters/fastapi-safir-uws/Chart.yaml b/starters/fastapi-safir-uws/Chart.yaml index 4776b010fb..8e30354f9d 100644 --- a/starters/fastapi-safir-uws/Chart.yaml +++ b/starters/fastapi-safir-uws/Chart.yaml @@ -8,5 +8,5 @@ appVersion: 0.1.0 dependencies: - name: redis - version: 1.0.13 + version: 1.0.14 repository: https://lsst-sqre.github.io/charts/ diff --git a/starters/fastapi-safir-uws/values.yaml b/starters/fastapi-safir-uws/values.yaml index cd15e7b79e..44d1783c3b 100644 --- a/starters/fastapi-safir-uws/values.yaml +++ b/starters/fastapi-safir-uws/values.yaml @@ -86,7 +86,7 @@ cloudsql: repository: "gcr.io/cloudsql-docker/gce-proxy" # -- Cloud SQL Auth Proxy tag to use - tag: "1.37.0" + tag: "1.37.1" # -- Pull policy for Cloud SQL Auth Proxy images pullPolicy: "IfNotPresent" diff --git 
a/tests/data/input/docs/applications/rsp.rst b/tests/data/input/docs/applications/rsp.rst index 234524c178..02c38e2fec 100644 --- a/tests/data/input/docs/applications/rsp.rst +++ b/tests/data/input/docs/applications/rsp.rst @@ -18,6 +18,7 @@ Argo CD project: ``rsp`` nublado/index portal/index semaphore/index + sia/index siav2/index sqlproxy-cross-project/index squareone/index diff --git a/tests/data/output/docs/rsp.rst b/tests/data/output/docs/rsp.rst index a1a7d3ea67..23462b5e28 100644 --- a/tests/data/output/docs/rsp.rst +++ b/tests/data/output/docs/rsp.rst @@ -19,6 +19,7 @@ Argo CD project: ``rsp`` nublado/index portal/index semaphore/index + sia/index siav2/index sqlproxy-cross-project/index squareone/index