diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 63e7cad58a..0000000000
--- a/.flake8
+++ /dev/null
@@ -1,5 +0,0 @@
-[flake8]
-max-line-length = 79
-# E203: whitespace before :, flake8 disagrees with PEP-8
-# W503: line break after binary operator, flake8 disagrees with PEP-8
-ignore = E203, W503
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index e30bb68d02..ab222cac1a 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -27,6 +27,24 @@ jobs:
- name: Run pre-commit
uses: pre-commit/action@v3.0.0
+ test:
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ strategy:
+ matrix:
+ python:
+ - "3.11"
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - uses: lsst-sqre/run-tox@v1
+ with:
+ python-version: ${{ matrix.python }}
+ tox-envs: "typing,py,coverage-report"
+ cache-key-prefix: test
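+ # The same environments can be run locally with tox, e.g.
+ # "tox run -e typing,py,coverage-report" (tox 4 syntax; a sketch,
+ # not part of this workflow).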
+
helm:
runs-on: ubuntu-latest
@@ -42,7 +60,7 @@ jobs:
python-version: "3.11"
- name: Install test dependencies
- run: pip install .
+ run: make init
- name: Expand modified charts
run: expand-charts
@@ -61,6 +79,7 @@ jobs:
minikube:
name: Test deploy
runs-on: ubuntu-latest
+ timeout-minutes: 30
needs: [helm]
steps:
@@ -85,10 +104,11 @@ jobs:
- name: Setup Minikube
if: steps.filter.outputs.minikube == 'true'
- uses: manusa/actions-setup-minikube@v2.7.2
+ uses: medyagh/setup-minikube@v0.0.14
with:
- minikube version: 'v1.28.0'
- kubernetes version: 'v1.25.2'
+ kubernetes-version: "v1.27.3"
+ cpus: max
+ memory: 5500m # Linux virtual machines have 7GB of RAM
- name: Test interaction with the cluster
if: steps.filter.outputs.minikube == 'true'
@@ -97,17 +117,17 @@ jobs:
- name: Download installer dependencies
if: steps.filter.outputs.minikube == 'true'
run: |
- curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.12.1/vault_1.12.1_linux_amd64.zip
+ curl -sSL -o /tmp/vault.zip https://releases.hashicorp.com/vault/1.14.0/vault_1.14.0_linux_amd64.zip
unzip /tmp/vault.zip
sudo mv vault /usr/local/bin/vault
sudo chmod +x /usr/local/bin/vault
- sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.6.7/argocd-linux-amd64
+ sudo curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/v2.7.8/argocd-linux-amd64
sudo chmod +x /usr/local/bin/argocd
sudo apt-get install socat
sudo pip install -r installer/requirements.txt
- name: Run installer
- timeout-minutes: 30
+ timeout-minutes: 15
if: steps.filter.outputs.minikube == 'true'
run: |
cd installer
@@ -120,6 +140,7 @@ jobs:
kubectl get ingress -A
- name: Wait for all applications to be healthy
+ timeout-minutes: 15
if: steps.filter.outputs.minikube == 'true'
run: |
argocd app wait -l "argocd.argoproj.io/instance=science-platform" \
diff --git a/.github/workflows/dependencies.yaml b/.github/workflows/dependencies.yaml
new file mode 100644
index 0000000000..0fa3c498e0
--- /dev/null
+++ b/.github/workflows/dependencies.yaml
@@ -0,0 +1,31 @@
+name: Dependency Update
+
+"on":
+ workflow_dispatch: {}
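+ # As configured here, this workflow runs only when triggered manually,
+ # for example with the GitHub CLI (assumed tooling):
+ # "gh workflow run dependencies.yaml"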
+
+jobs:
+ update:
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Run neophile
+ uses: lsst-sqre/run-neophile@v1
+ with:
+ python-version: "3.11"
+ mode: pr
+ types: pre-commit
+ app-id: ${{ secrets.NEOPHILE_APP_ID }}
+ app-secret: ${{ secrets.NEOPHILE_PRIVATE_KEY }}
+
+ - name: Report status
+ if: always()
+ uses: ravsamhq/notify-slack-action@v2
+ with:
+ status: ${{ job.status }}
+ notify_when: "failure"
+ notification_title: "Periodic dependency update for {repo} failed"
+ env:
+ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_ALERT_WEBHOOK }}
diff --git a/.github/workflows/linkcheck.yaml b/.github/workflows/linkcheck.yaml
index b8e0ac1c9e..e3c4489051 100644
--- a/.github/workflows/linkcheck.yaml
+++ b/.github/workflows/linkcheck.yaml
@@ -35,7 +35,7 @@ name: Link Check
workflow_dispatch: {}
jobs:
- docs:
+ linkcheck:
runs-on: ubuntu-latest
steps:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index addc124764..275608fa85 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,44 +2,49 @@ repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- - id: trailing-whitespace
+ - id: check-merge-conflict
- id: check-toml
+ - id: trailing-whitespace
- - repo: https://github.com/adrienverge/yamllint.git
- rev: v1.30.0
+ - repo: https://github.com/adrienverge/yamllint
+ rev: v1.32.0
hooks:
- id: yamllint
args:
- - "-c=.yamllint.yml"
+ - -c=.yamllint.yml
+
+ - repo: https://github.com/python-jsonschema/check-jsonschema
+ rev: 0.23.3
+ hooks:
+ - id: check-jsonschema
+ files: "^applications/.*/secrets(-[^./-]+)?\\.yaml"
+ args: ["--schemafile", "docs/extras/schemas/secrets.json"]
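+ # The files pattern matches per-application secrets schemas such as
+ # applications/gafaelfawr/secrets.yaml, plus per-environment variants
+ # via the optional "-<environment>" suffix (e.g. a hypothetical
+ # applications/gafaelfawr/secrets-idfdev.yaml).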
+ - id: check-metaschema
+ files: "^docs/extras/schemas/.*\\.json"
- repo: https://github.com/norwoodj/helm-docs
- rev: v1.11.0
+ rev: v1.11.1
hooks:
- id: helm-docs
args:
- - "--chart-search-root=."
+ - --chart-search-root=.
# The `./` makes it relative to the chart-search-root set above
- - "--template-files=./helm-docs.md.gotmpl"
+ - --template-files=./helm-docs.md.gotmpl
+ - --document-dependency-values=true
- - repo: https://github.com/PyCQA/isort
- rev: 5.12.0
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.0.280
hooks:
- - id: isort
- additional_dependencies:
- - toml
+ - id: ruff
+ args: [--fix, --exit-non-zero-on-fix]
- repo: https://github.com/psf/black
- rev: 23.1.0
+ rev: 23.7.0
hooks:
- id: black
- - repo: https://github.com/asottile/blacken-docs
- rev: 1.13.0
+ - repo: https://github.com/adamchainz/blacken-docs
+ rev: 1.15.0
hooks:
- id: blacken-docs
- additional_dependencies: [black==23.1.0]
-
- - repo: https://github.com/PyCQA/flake8
- rev: 6.0.0
- hooks:
- - id: flake8
+ additional_dependencies: [black==23.7.0]
diff --git a/LICENSE b/LICENSE
index 6b5e25a46c..e0d1c48a56 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2019-2022 Association of Universities for Research in Astronomy, Inc. (AURA)
+Copyright (c) 2019-2023 Association of Universities for Research in Astronomy, Inc. (AURA)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/Makefile b/Makefile
index 8fd55e5a25..9d8f64ccc4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,16 +1,33 @@
-.PHONY:
+.PHONY: help
help:
@echo "Make targets for Phalanx:"
+ @echo "make clean - Remove generated files"
@echo "make init - Set up dev environment (install pre-commit hooks)"
+ @echo "make update - Update pinned dependencies and run make init"
+ @echo "make update-deps - Update pinned dependencies"
-.PHONY:
+.PHONY: clean
+clean:
+ rm -rf .mypy_cache .ruff_cache .tox docs/_build
+ make -C docs clean
+
+.PHONY: init
init:
+ pip install --editable .
+ pip install --upgrade -r requirements/main.txt -r requirements/dev.txt
+ rm -rf .tox
pip install --upgrade pre-commit tox
pre-commit install
- pip install -e ".[dev]"
- rm -rf .tox
-.PHONY:
-clean:
- rm -rf .tox
- make -C docs clean
+.PHONY: update
+update: update-deps init
+
+.PHONY: update-deps
+update-deps:
+ pip install --upgrade pip-tools pip setuptools
+ pip-compile --upgrade --resolver=backtracking --build-isolation \
+ --generate-hashes --allow-unsafe \
+ --output-file requirements/main.txt requirements/main.in
+ pip-compile --upgrade --resolver=backtracking --build-isolation \
+ --generate-hashes --allow-unsafe \
+ --output-file requirements/dev.txt requirements/dev.in
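+
+# A typical update flow (a sketch): edit requirements/main.in or
+# requirements/dev.in, run "make update" to regenerate the hash-pinned
+# requirements/*.txt files and reinstall them, then commit both the .in
+# files and the generated .txt files.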
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..8d82cd2a83
--- /dev/null
+++ b/README.md
@@ -0,0 +1,11 @@
+# Phalanx
+
+This is the Argo CD repository for the Rubin Science Platform.
+It stores the root Argo CD application, deployment configuration for the other applications, the installer, and other helper scripts.
+
+See [phalanx.lsst.io](https://phalanx.lsst.io/) for full documentation.
+
+Phalanx is developed by the [Vera C. Rubin Observatory](https://www.lsst.org/).
+
+A phalanx is a SQuaRE deployment (Science Quality and Reliability Engineering, the team responsible for the Rubin Science Platform).
+Phalanx is how we ensure that all of our services work together as a unit.
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 9cd4366892..0000000000
--- a/README.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-#######
-Phalanx
-#######
-
-This is the Argo CD repository for the Rubin Science Platform.
-It stores the root Argo CD application, deployment configuration for the other applications, the installer, and other helper scripts.
-
-See `phalanx.lsst.io <https://phalanx.lsst.io/>`__ for the documentation.
-
-Phalanx is developed by the `Vera C. Rubin Observatory <https://www.lsst.org/>`__.
-
-Environments
-============
-The environments managed by Argo CD using configuration in this repository are detailed
-on `phalanx.lsst.io <https://phalanx.lsst.io/environments/index.html>`__.
-
-There are some other environments that are used for development and testing and may not be up or reachable at any given moment.
-
-Naming
-======
-
-A phalanx is a SQuaRE deployment (Science Quality and Reliability Engineering, the team responsible for the Rubin Science Platform).
-Phalanx is how we ensure that all of our services work together as a unit.
diff --git a/applications/alert-stream-broker/README.md b/applications/alert-stream-broker/README.md
index cffc94fec9..a2a9ca8599 100644
--- a/applications/alert-stream-broker/README.md
+++ b/applications/alert-stream-broker/README.md
@@ -15,3 +15,96 @@ Alert transmission to community brokers
| strimzi-registry-operator.clusterNamespace | string | `"alert-stream-broker"` | |
| strimzi-registry-operator.operatorNamespace | string | `"alert-stream-broker"` | |
| strimzi-registry-operator.watchNamespace | string | `"alert-stream-broker"` | |
+| alert-database.fullnameOverride | string | `""` | Override the full name for resources (includes the release name) |
+| alert-database.ingester.gcp.projectID | string | `""` | Project ID which has the above GCP IAM service account |
+| alert-database.ingester.gcp.serviceAccountName | string | `""` | Name of a service account which has credentials granting access to the alert database's backing storage buckets. |
+| alert-database.ingester.image.imagePullPolicy | string | `"IfNotPresent"` | |
+| alert-database.ingester.image.repository | string | `"lsstdm/alert_database_ingester"` | |
+| alert-database.ingester.image.tag | string | `"v2.0.2"` | |
+| alert-database.ingester.kafka.cluster | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. |
+| alert-database.ingester.kafka.port | int | `9092` | Port to connect to on the Strimzi Kafka cluster. It should be an internal listener that expects SCRAM SHA-512 auth. |
+| alert-database.ingester.kafka.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions |
+| alert-database.ingester.kafka.topic | string | `"alerts-simulated"` | Name of the topic which will hold alert data. |
+| alert-database.ingester.kafka.user | string | `"alert-database-ingester"` | The username of the Kafka user identity used to connect to the broker. |
+| alert-database.ingester.logLevel | string | `"verbose"` | Set the log level of the application. Can be 'info' or 'debug'; any other value suppresses logging. |
+| alert-database.ingester.schemaRegistryURL | string | `""` | URL of a schema registry instance |
+| alert-database.ingester.serviceAccountName | string | `"alert-database-ingester"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database ingester. |
+| alert-database.ingress.annotations | object | `{}` | |
+| alert-database.ingress.enabled | bool | `true` | Whether to create an ingress |
+| alert-database.ingress.gafaelfawrAuthQuery | string | `"scope=read:alertdb"` | Query string for Gafaelfawr to authorize access |
+| alert-database.ingress.host | string | None, must be set if the ingress is enabled | Hostname for the ingress |
+| alert-database.ingress.path | string | `"/alertdb"` | Subpath to host the alert database application under the ingress |
+| alert-database.ingress.tls | list | `[]` | Configures TLS for the ingress if needed. If multiple ingresses share the same hostname, only one of them needs a TLS configuration. |
+| alert-database.nameOverride | string | `""` | Override the base name for resources |
+| alert-database.server.gcp.projectID | string | `""` | Project ID which has the above GCP IAM service account |
+| alert-database.server.gcp.serviceAccountName | string | `""` | Name of a service account which has credentials granting access to the alert database's backing storage buckets. |
+| alert-database.server.image.imagePullPolicy | string | `"IfNotPresent"` | |
+| alert-database.server.image.repository | string | `"lsstdm/alert_database_server"` | |
+| alert-database.server.image.tag | string | `"v2.1.0"` | |
+| alert-database.server.logLevel | string | `"verbose"` | Set the log level of the application. Can be 'info' or 'debug'; any other value suppresses logging. |
+| alert-database.server.service.port | int | `3000` | |
+| alert-database.server.service.type | string | `"ClusterIP"` | |
+| alert-database.server.serviceAccountName | string | `"alertdb-reader"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database server. |
+| alert-database.storage.gcp.alertBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with alert data |
+| alert-database.storage.gcp.project | string | `""` | Name of a GCP project that has a bucket for database storage |
+| alert-database.storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data |
+| alert-stream-broker.cluster.name | string | `"alert-broker"` | Name used for the Kafka broker, and used by Strimzi for many annotations. |
+| alert-stream-broker.fullnameOverride | string | `""` | Override for the full name used for Kubernetes resources; by default one will be created based on the chart name and helm release name. |
+| alert-stream-broker.kafka.config | object | `{"log.retention.bytes":"42949672960","log.retention.hours":168,"offsets.retention.minutes":1440}` | Configuration overrides for the Kafka server. |
+| alert-stream-broker.kafka.config."log.retention.bytes" | string | `"42949672960"` | Maximum retained number of bytes for a broker's data. This is a string to avoid YAML type conversion issues for large numbers. |
+| alert-stream-broker.kafka.config."log.retention.hours" | int | `168` | Number of hours for a broker's data to be retained. |
+| alert-stream-broker.kafka.config."offsets.retention.minutes" | int | `1440` | Number of minutes for a consumer group's offsets to be retained. |
+| alert-stream-broker.kafka.externalListener.bootstrap.annotations | object | `{}` | |
+| alert-stream-broker.kafka.externalListener.bootstrap.host | string | `""` | Hostname that should be used by clients who want to connect to the broker through the bootstrap address. |
+| alert-stream-broker.kafka.externalListener.bootstrap.ip | string | `""` | IP address that should be used by the broker's external bootstrap load balancer for access from the internet. The format of this is a string like "192.168.1.1". |
+| alert-stream-broker.kafka.externalListener.brokers | list | `[]` | List of hostname and IP for each broker. The format of this is a list of maps with 'ip' and 'host' keys. For example: - ip: "192.168.1.1" host: broker-0.example - ip: "192.168.1.2" host: broker-1.example Each replica should get a host and IP. If these are unset, then IP addresses will be chosen automatically by the Kubernetes cluster's LoadBalancer controller, and hostnames will be unset, which will break TLS connections. |
+| alert-stream-broker.kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of the certificate issuer. |
+| alert-stream-broker.kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. |
+| alert-stream-broker.kafka.interBrokerProtocolVersion | float | `3.2` | Version of the protocol for inter-broker communication, see https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. |
+| alert-stream-broker.kafka.logMessageFormatVersion | float | `3.2` | Encoding version for messages, see https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str. |
+| alert-stream-broker.kafka.nodePool.affinities | list | `[{"key":"kafka","value":"ok"}]` | List of node affinities to set for the broker's nodes. The key should be a label key, and the value should be a label value, and then the broker will prefer running Kafka and Zookeeper on nodes with those key-value pairs. |
+| alert-stream-broker.kafka.nodePool.tolerations | list | `[{"effect":"NoSchedule","key":"kafka","value":"ok"}]` | List of taint tolerations when scheduling the broker's pods onto nodes. The key should be a taint key, the value should be a taint value, and effect should be a taint effect that can be tolerated (ignored) when scheduling the broker's Kafka and Zookeeper pods. |
+| alert-stream-broker.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. |
+| alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. |
+| alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
+| alert-stream-broker.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. |
+| alert-stream-broker.nameOverride | string | `""` | |
+| alert-stream-broker.strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. |
+| alert-stream-broker.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. |
+| alert-stream-broker.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. |
+| alert-stream-broker.tls.subject.organization | string | `"Vera C. Rubin Observatory"` | Organization to use in the 'Subject' field of the broker's TLS certificate. |
+| alert-stream-broker.users | list | `[{"groups":["rubin-testing"],"readonlyTopics":["alert-stream","alerts-simulated"],"username":"rubin-testing"}]` | A list of users that should be created and granted access. Passwords for these users are not generated automatically; they are expected to be stored as 1Password secrets which are replicated into Vault. Each username should have a "{{ $username }}-password" secret associated with it. |
+| alert-stream-broker.users[0].groups | list | `["rubin-testing"]` | A list of string prefixes for groups that the user should get admin access to, allowing them to create, delete, describe, etc consumer groups. Note that these are prefix-matched, not just literal exact matches. |
+| alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated"]` | A list of topics that the user should get read-only access to. |
+| alert-stream-broker.users[0].username | string | `"rubin-testing"` | The username for the user that should be created. |
+| alert-stream-broker.vaultSecretsPath | string | `""` | Path to the secret resource in Vault |
+| alert-stream-broker.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. |
+| alert-stream-broker.zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. |
+| alert-stream-broker.zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
+| alert-stream-schema-registry.clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. |
+| alert-stream-schema-registry.hostname | string | `"alert-schemas-int.lsst.cloud"` | Hostname for an ingress which sends traffic to the Schema Registry. |
+| alert-stream-schema-registry.name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. |
+| alert-stream-schema-registry.port | int | `8081` | Port where the registry is listening. NOTE: This is not actually configurable in strimzi-registry-operator, so it cannot be changed. |
+| alert-stream-schema-registry.schemaSync | object | `{"image":{"repository":"lsstdm/lsst_alert_packet","tag":"tickets-DM-32743"},"subject":"alert-packet"}` | Configuration for the Job which injects the most recent alert_packet schema into the Schema Registry |
+| alert-stream-schema-registry.schemaSync.image.repository | string | `"lsstdm/lsst_alert_packet"` | Repository of a container which has the alert_packet syncLatestSchemaToRegistry.py program |
+| alert-stream-schema-registry.schemaSync.image.tag | string | `"tickets-DM-32743"` | Version of the container to use |
+| alert-stream-schema-registry.schemaSync.subject | string | `"alert-packet"` | Subject name to use when inserting data into the Schema Registry |
+| alert-stream-schema-registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry to store data. |
+| alert-stream-schema-registry.strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. |
+| alert-stream-simulator.clusterName | string | `"alert-broker"` | Name of a Strimzi Kafka cluster to connect to. |
+| alert-stream-simulator.clusterPort | int | `9092` | Port to connect to on the Strimzi Kafka cluster. It should be an internal TLS listener. |
+| alert-stream-simulator.fullnameOverride | string | `""` | Explicitly sets the full name used for the deployment and job (includes the release name). |
+| alert-stream-simulator.image.imagePullPolicy | string | `"IfNotPresent"` | Pull policy for the Deployment |
+| alert-stream-simulator.image.repository | string | `"lsstdm/alert-stream-simulator"` | Source repository for the image which holds the rubin-alert-stream program. |
+| alert-stream-simulator.image.tag | string | `"v1.2.1"` | Tag to use for the rubin-alert-stream container. |
+| alert-stream-simulator.kafkaUserName | string | `"alert-stream-simulator"` | The username of the Kafka user identity used to connect to the broker. |
+| alert-stream-simulator.maxBytesRetained | string | `"24000000000"` | Maximum number of bytes for the replay topic, per partition, per replica. The default is 100GB, but it should be set lower to avoid filling storage. |
+| alert-stream-simulator.maxMillisecondsRetained | string | `"604800000"` | Maximum amount of time to save simulated alerts in the replay topic, in milliseconds. Default is 7 days. |
+| alert-stream-simulator.nameOverride | string | `""` | Explicitly sets the name of the deployment and job. |
+| alert-stream-simulator.repeatInterval | int | `37` | How often (in seconds) to repeat the sample data into the replay topic. |
+| alert-stream-simulator.replayTopicName | string | `"alerts-simulated"` | Name of the topic which will receive the repeated alerts on an interval. |
+| alert-stream-simulator.replayTopicPartitions | int | `8` | |
+| alert-stream-simulator.replayTopicReplicas | int | `2` | |
+| alert-stream-simulator.schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. |
+| alert-stream-simulator.staticTopicName | string | `"alerts-static"` | Name of the topic which will hold a static single visit of sample data. |
+| alert-stream-simulator.strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions |
\ No newline at end of file
diff --git a/applications/alert-stream-broker/charts/alert-database/README.md b/applications/alert-stream-broker/charts/alert-database/README.md
index 30b66902e0..7343a81b47 100644
--- a/applications/alert-stream-broker/charts/alert-database/README.md
+++ b/applications/alert-stream-broker/charts/alert-database/README.md
@@ -38,4 +38,4 @@ Archival database of alerts sent through the alert stream.
| server.serviceAccountName | string | `"alertdb-reader"` | The name of the Kubernetes ServiceAccount (*not* the Google Cloud IAM service account!) which is used by the alert database server. |
| storage.gcp.alertBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with alert data |
| storage.gcp.project | string | `""` | Name of a GCP project that has a bucket for database storage |
-| storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data |
+| storage.gcp.schemaBucket | string | `""` | Name of a Google Cloud Storage bucket in GCP with schema data |
\ No newline at end of file
diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/README.md b/applications/alert-stream-broker/charts/alert-stream-broker/README.md
index 394f840d4c..8b61066caf 100644
--- a/applications/alert-stream-broker/charts/alert-stream-broker/README.md
+++ b/applications/alert-stream-broker/charts/alert-stream-broker/README.md
@@ -25,7 +25,7 @@ Kafka broker cluster for distributing alerts
| kafka.replicas | int | `3` | Number of Kafka broker replicas to run. |
| kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. |
| kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
-| kafka.version | string | `"3.2.3"` | Version of Kafka to deploy. |
+| kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. |
| nameOverride | string | `""` | |
| strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. |
| superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. |
@@ -38,4 +38,4 @@ Kafka broker cluster for distributing alerts
| vaultSecretsPath | string | `""` | Path to the secret resource in Vault |
| zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. |
| zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. |
-| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
+| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
\ No newline at end of file
diff --git a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml
index 35e107ae7a..5803b74943 100644
--- a/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml
+++ b/applications/alert-stream-broker/charts/alert-stream-broker/values.yaml
@@ -9,7 +9,7 @@ cluster:
kafka:
# -- Version of Kafka to deploy.
- version: 3.2.3
+ version: 3.4.0
# -- Encoding version for messages, see
# https://strimzi.io/docs/operators/latest/deploying.html#ref-kafka-versions-str.
logMessageFormatVersion: 3.2
diff --git a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md
index cc6ac85074..1cc74892f7 100644
--- a/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md
+++ b/applications/alert-stream-broker/charts/alert-stream-schema-registry/README.md
@@ -15,4 +15,4 @@ Confluent Schema Registry for managing schema versions for the Alert Stream
| schemaSync.image.tag | string | `"tickets-DM-32743"` | Version of the container to use |
| schemaSync.subject | string | `"alert-packet"` | Subject name to use when inserting data into the Schema Registry |
| schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry to store data. |
-| strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. |
+| strimziAPIVersion | string | `"v1beta2"` | Version of the Strimzi Custom Resource API. The correct value depends on the deployed version of Strimzi. See [this blog post](https://strimzi.io/blog/2021/04/29/api-conversion/) for more. |
\ No newline at end of file
diff --git a/applications/alert-stream-broker/charts/alert-stream-simulator/README.md b/applications/alert-stream-broker/charts/alert-stream-simulator/README.md
index e0833c4138..724924bad9 100644
--- a/applications/alert-stream-broker/charts/alert-stream-simulator/README.md
+++ b/applications/alert-stream-broker/charts/alert-stream-simulator/README.md
@@ -22,4 +22,4 @@ Producer which repeatedly publishes a static set of alerts into a Kafka topic
| replayTopicReplicas | int | `2` | |
| schemaID | int | `1` | Integer ID to use in the prefix of alert data packets. This should be a valid Confluent Schema Registry ID associated with the schema used. |
| staticTopicName | string | `"alerts-static"` | Name of the topic which will hold a static single visit of sample data. |
-| strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions |
+| strimziAPIVersion | string | `"v1beta2"` | API version of the Strimzi installation's custom resource definitions |
\ No newline at end of file
diff --git a/applications/alert-stream-broker/values-idfint.yaml b/applications/alert-stream-broker/values-idfint.yaml
index b60dc8a259..3b94677a96 100644
--- a/applications/alert-stream-broker/values-idfint.yaml
+++ b/applications/alert-stream-broker/values-idfint.yaml
@@ -18,14 +18,14 @@ alert-stream-broker:
host: alert-stream-int-broker-1.lsst.cloud
- ip: "35.238.84.221"
host: alert-stream-int-broker-2.lsst.cloud
- - ip: "35.188.93.220"
- host: alert-stream-int-broker-3.lsst.cloud
- - ip: "35.224.219.71"
- host: alert-stream-int-broker-4.lsst.cloud
- - ip: "35.232.51.105"
- host: alert-stream-int-broker-5.lsst.cloud
-
- replicas: 6
+ # - ip: "35.184.182.182"
+ # host: alert-stream-int-broker-3.lsst.cloud
+ # - ip: "35.232.191.72"
+ # host: alert-stream-int-broker-4.lsst.cloud
+ # - ip: "34.27.122.46"
+ # host: alert-stream-int-broker-5.lsst.cloud
+
+ replicas: 3
storage:
size: 1500Gi
diff --git a/applications/argo-workflows/Chart.yaml b/applications/argo-workflows/Chart.yaml
index 0c17b5eca8..72a59f0a91 100644
--- a/applications/argo-workflows/Chart.yaml
+++ b/applications/argo-workflows/Chart.yaml
@@ -8,5 +8,5 @@ sources:
- https://github.com/argoproj/argo-helm
dependencies:
- name: argo-workflows
- version: 0.28.0
+ version: 0.32.1
repository: https://argoproj.github.io/argo-helm
diff --git a/applications/argo-workflows/README.md b/applications/argo-workflows/README.md
index a8d76e16da..7daf31f859 100644
--- a/applications/argo-workflows/README.md
+++ b/applications/argo-workflows/README.md
@@ -22,4 +22,4 @@ Kubernetes workflow engine
| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
| ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | |
| ingress.annotations."nginx.ingress.kubernetes.io/use-regex" | string | `"true"` | |
-| ingress.scopes[0] | string | `"exec:admin"` | |
+| ingress.scopes[0] | string | `"exec:admin"` | |
\ No newline at end of file
diff --git a/applications/argo-workflows/values-base.yaml b/applications/argo-workflows/values-base.yaml
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/applications/argocd/Chart.yaml b/applications/argocd/Chart.yaml
index 8f231534e2..f9249a9808 100644
--- a/applications/argocd/Chart.yaml
+++ b/applications/argocd/Chart.yaml
@@ -8,5 +8,5 @@ sources:
- https://github.com/argoproj/argo-helm
dependencies:
- name: argo-cd
- version: 5.34.1
+ version: 5.41.1
repository: https://argoproj.github.io/argo-helm
diff --git a/applications/argocd/README.md b/applications/argocd/README.md
index cfe35d5e6b..42a1636626 100644
--- a/applications/argocd/README.md
+++ b/applications/argocd/README.md
@@ -13,24 +13,21 @@ Kubernetes application manager
| Key | Type | Default | Description |
|-----|------|---------|-------------|
-| argo-cd.configs.secret.createSecret | bool | `false` | |
-| argo-cd.controller.metrics.applicationLabels.enabled | bool | `true` | |
-| argo-cd.controller.metrics.applicationLabels.labels[0] | string | `"name"` | |
-| argo-cd.controller.metrics.applicationLabels.labels[1] | string | `"instance"` | |
-| argo-cd.controller.metrics.enabled | bool | `true` | |
+| argo-cd.configs.cm."resource.compareoptions" | string | `"ignoreAggregatedRoles: true\n"` | Configure resource comparison |
+| argo-cd.configs.params."server.basehref" | string | `"/argo-cd"` | Base href for `index.html` when running under a reverse proxy |
+| argo-cd.configs.params."server.insecure" | bool | `true` | Do not use TLS (this is terminated at the ingress) |
+| argo-cd.configs.secret.createSecret | bool | `false` | Create the Argo CD secret (we manage this with Vault) |
+| argo-cd.controller.metrics.applicationLabels.enabled | bool | `true` | Enable adding additional labels to `argocd_app_labels` metric |
+| argo-cd.controller.metrics.applicationLabels.labels | list | `["name","instance"]` | Labels to add to `argocd_app_labels` metric |
+| argo-cd.controller.metrics.enabled | bool | `true` | Enable controller metrics service |
| argo-cd.global.logging.format | string | `"json"` | Set the global logging format. Either: `text` or `json` |
-| argo-cd.notifications.metrics.enabled | bool | `true` | |
-| argo-cd.redis.enabled | bool | `true` | |
-| argo-cd.redis.metrics.enabled | bool | `true` | |
-| argo-cd.repoServer.metrics.enabled | bool | `true` | |
-| argo-cd.server.config."helm.repositories" | string | `"- url: https://lsst-sqre.github.io/charts/\n name: lsst-sqre\n- url: https://ricoberger.github.io/helm-charts/\n name: ricoberger\n- url: https://kubernetes.github.io/ingress-nginx/\n name: ingress-nginx\n- url: https://charts.helm.sh/stable\n name: stable\n- url: https://strimzi.io/charts/\n name: strimzi\n"` | |
-| argo-cd.server.config."resource.compareoptions" | string | `"ignoreAggregatedRoles: true\n"` | |
-| argo-cd.server.extraArgs[0] | string | `"--basehref=/argo-cd"` | |
-| argo-cd.server.extraArgs[1] | string | `"--insecure=true"` | |
-| argo-cd.server.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/$2"` | |
-| argo-cd.server.ingress.enabled | bool | `true` | |
-| argo-cd.server.ingress.ingressClassName | string | `"nginx"` | |
-| argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | |
-| argo-cd.server.ingress.paths[0] | string | `"/argo-cd(/|$)(.*)"` | |
-| argo-cd.server.metrics.enabled | bool | `true` | |
-| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| argo-cd.notifications.metrics.enabled | bool | `true` | Enable notifications metrics service |
+| argo-cd.redis.metrics.enabled | bool | `true` | Enable Redis metrics service |
+| argo-cd.repoServer.metrics.enabled | bool | `true` | Enable repo server metrics service |
+| argo-cd.server.ingress.annotations | object | Rewrite requests to remove `/argo-cd/` prefix | Additional annotations to add to the Argo CD ingress |
+| argo-cd.server.ingress.enabled | bool | `true` | Create an ingress for the Argo CD server |
+| argo-cd.server.ingress.ingressClassName | string | `"nginx"` | Ingress class to use for Argo CD ingress |
+| argo-cd.server.ingress.pathType | string | `"ImplementationSpecific"` | Type of path expression for Argo CD ingress |
+| argo-cd.server.ingress.paths | list | `["/argo-cd(/|$)(.*)"]` | Paths to route to Argo CD |
+| argo-cd.server.metrics.enabled | bool | `true` | Enable server metrics service |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
\ No newline at end of file
diff --git a/applications/argocd/secrets.yaml b/applications/argocd/secrets.yaml
new file mode 100644
index 0000000000..8c32f231ab
--- /dev/null
+++ b/applications/argocd/secrets.yaml
@@ -0,0 +1,34 @@
+"admin.plaintext_password":
+ description: >-
+ Admin password for Argo CD. This password is normally not used because
+ Argo CD is configured to use Google or GitHub authentication, but it is
+ used by the installer (which cannot use external authentication) and is
+ useful as a fallback if external authentication is not working for some
+ reason. This secret can be changed at any time.
+ generate:
+ type: password
+"admin.password":
+ description: >-
+ bcrypt hash of the admin password. This is the only version of the admin
+ password exposed to the running Argo CD pod. It will be updated
+ automatically if the admin password is changed.
+ generate:
+ type: bcrypt-password-hash
+ source: admin.plaintext_password
+"admin.passwordMtime":
+ description: "Last modification time for the admin password."
+ generate:
+ type: mtime
+ source: admin.plaintext_password
+"dex.clientSecret":
+ description: >-
+ OAuth 2 or OpenID Connect client secret, used to authenticate to GitHub or
+ Google as part of the authentication flow. This secret can be changed at
+ any time.
+"server.secretkey":
+ description: >-
+ Key used to validate user session cookies. Argo CD will generate this
+ secret if it is missing, but we provide it because the Argo CD secret is
+ managed via a VaultSecret.
+ generate:
+ type: password
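+# A sketch of using the fallback admin password from the CLI (the secret
+# and namespace names below are assumptions, not defined in this file):
+#   kubectl get secret argocd-secret -n argocd \
+#     -o jsonpath='{.data.admin\.plaintext_password}' | base64 -d
+#   argocd login <argocd-host> --username admin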
diff --git a/applications/argocd/values-usdf-tel-rsp.yaml b/applications/argocd/values-usdf-tel-rsp.yaml
new file mode 100644
index 0000000000..280ffe7033
--- /dev/null
+++ b/applications/argocd/values-usdf-tel-rsp.yaml
@@ -0,0 +1,85 @@
+argo-cd:
+ redis:
+ enabled: true
+
+ server:
+ ingress:
+ enabled: true
+ hosts:
+ - "usdf-tel-rsp.slac.stanford.edu"
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+ paths:
+ - /argo-cd(/|$)(.*)
+
+ extraArgs:
+ - "--basehref=/argo-cd"
+ - "--insecure=true"
+
+ env:
+ - name: HTTP_PROXY
+ value: http://squid.slac.stanford.edu:3128
+ - name: HTTPS_PROXY
+ value: http://squid.slac.stanford.edu:3128
+ - name: NO_PROXY
+ value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server
+
+ config:
+ url: https://usdf-tel-rsp.slac.stanford.edu/argo-cd
+ oidc.config: |
+ name: SLAC
+ issuer: https://dex.slac.stanford.edu
+ clientID: $oidc.clientId
+ clientSecret: $oidc.clientSecret
+ # Optional set of OIDC scopes to request. If omitted, defaults to: ["openid", "profile", "email", "groups"]
+ requestedScopes: ["openid", "profile", "email", "groups"]
+ # Optional set of OIDC claims to request on the ID token.
+ requestedIDTokenClaims: {"groups": {"essential": true}}
+ rbacConfig:
+ policy.csv: |
+ g, ytl@slac.stanford.edu, role:admin
+ g, ppascual@slac.stanford.edu, role:admin
+ g, pav@slac.stanford.edu, role:admin
+ g, dspeck@slac.stanford.edu, role:admin
+ g, afausti@slac.stanford.edu, role:admin
+ g, mfl@slac.stanford.edu, role:admin
+ g, cbanek@slac.stanford.edu, role:admin
+ g, frossie@slac.stanford.edu, role:admin
+ g, hchiang2@slac.stanford.edu, role:admin
+ g, athor@slac.stanford.edu, role:admin
+ g, reinking@slac.stanford.edu, role:admin
+ g, smart@slac.stanford.edu, role:admin
+ g, omullan@slac.stanford.edu, role:admin
+ g, mreuter@slac.stanford.edu, role:admin
+ scopes: "[email]"
+
+ helm.repositories: |
+ - url: https://lsst-sqre.github.io/charts/
+ name: lsst-sqre
+ - url: https://charts.helm.sh/stable
+ name: stable
+
+ repoServer:
+
+ env:
+ - name: HTTP_PROXY
+ value: http://squid.slac.stanford.edu:3128
+ - name: HTTPS_PROXY
+ value: http://squid.slac.stanford.edu:3128
+ - name: NO_PROXY
+ value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server
+
+ controller:
+
+ env:
+ - name: HTTP_PROXY
+ value: http://squid.slac.stanford.edu:3128
+ - name: HTTPS_PROXY
+ value: http://squid.slac.stanford.edu:3128
+ - name: NO_PROXY
+ value: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.cluster.local,argocd-repo-server
+
+ configs:
+ secret:
+ createSecret: false
diff --git a/applications/argocd/values-usdfdev.yaml b/applications/argocd/values-usdfdev.yaml
index ca9f930465..fa44ff14a9 100644
--- a/applications/argocd/values-usdfdev.yaml
+++ b/applications/argocd/values-usdfdev.yaml
@@ -39,14 +39,23 @@ argo-cd:
rbacConfig:
policy.csv: |
g, ytl@slac.stanford.edu, role:admin
+ g, ppascual@slac.stanford.edu, role:admin
g, pav@slac.stanford.edu, role:admin
g, dspeck@slac.stanford.edu, role:admin
g, afausti@slac.stanford.edu, role:admin
g, mfl@slac.stanford.edu, role:admin
g, cbanek@slac.stanford.edu, role:admin
+ g, frossie@slac.stanford.edu, role:admin
g, hchiang2@slac.stanford.edu, role:admin
g, athor@slac.stanford.edu, role:admin
g, jsick@slac.stanford.edu, role:admin
+ g, reinking@slac.stanford.edu, role:admin
+ g, smart@slac.stanford.edu, role:admin
+ g, omullan@slac.stanford.edu, role:admin
+ g, mreuter@slac.stanford.edu, role:admin
+ g, rra@slac.stanford.edu, role:admin
+ g, fritzm@slac.stanford.edu, role:admin
+ g, cslater@slac.stanford.edu, role:admin
scopes: "[email]"
helm.repositories: |
diff --git a/applications/argocd/values-usdfprod.yaml b/applications/argocd/values-usdfprod.yaml
index 4084e0e20f..86f3188b58 100644
--- a/applications/argocd/values-usdfprod.yaml
+++ b/applications/argocd/values-usdfprod.yaml
@@ -38,14 +38,23 @@ argo-cd:
requestedIDTokenClaims: {"groups": {"essential": true}}
rbacConfig:
policy.csv: |
- g, ytl@slac.stanford.edu, role:adming
+ g, ytl@slac.stanford.edu, role:admin
+ g, ppascual@slac.stanford.edu, role:admin
g, pav@slac.stanford.edu, role:admin
g, dspeck@slac.stanford.edu, role:admin
g, afausti@slac.stanford.edu, role:admin
g, mfl@slac.stanford.edu, role:admin
g, cbanek@slac.stanford.edu, role:admin
+ g, frossie@slac.stanford.edu, role:admin
g, hchiang2@slac.stanford.edu, role:admin
g, athor@slac.stanford.edu, role:admin
+ g, reinking@slac.stanford.edu, role:admin
+ g, smart@slac.stanford.edu, role:admin
+ g, omullan@slac.stanford.edu, role:admin
+ g, mreuter@slac.stanford.edu, role:admin
+ g, rra@slac.stanford.edu, role:admin
+ g, fritzm@slac.stanford.edu, role:admin
+ g, cslater@slac.stanford.edu, role:admin
scopes: "[email]"
helm.repositories: |
diff --git a/applications/argocd/values.yaml b/applications/argocd/values.yaml
index 05310ab224..dcb51f1c8c 100644
--- a/applications/argocd/values.yaml
+++ b/applications/argocd/values.yaml
@@ -8,58 +8,71 @@ argo-cd:
format: "json"
redis:
- enabled: true
metrics:
+ # -- Enable Redis metrics service
enabled: true
controller:
metrics:
+ # -- Enable controller metrics service
enabled: true
+
applicationLabels:
+ # -- Enable adding additional labels to `argocd_app_labels` metric
enabled: true
+
+ # -- Labels to add to `argocd_app_labels` metric
labels: ["name", "instance"]
repoServer:
metrics:
+ # -- Enable repo server metrics service
enabled: true
notifications:
metrics:
+ # -- Enable notifications metrics service
enabled: true
server:
metrics:
+ # -- Enable server metrics service
enabled: true
+
ingress:
+ # -- Create an ingress for the Argo CD server
enabled: true
+
+ # -- Additional annotations to add to the Argo CD ingress
+ # @default -- Rewrite requests to remove `/argo-cd/` prefix
annotations:
nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+
+ # -- Ingress class to use for Argo CD ingress
ingressClassName: "nginx"
+
+ # -- Paths to route to Argo CD
paths:
- "/argo-cd(/|$)(.*)"
+
+ # -- Type of path expression for Argo CD ingress
pathType: "ImplementationSpecific"
- extraArgs:
- - "--basehref=/argo-cd"
- - "--insecure=true"
-
- config:
- helm.repositories: |
- - url: https://lsst-sqre.github.io/charts/
- name: lsst-sqre
- - url: https://ricoberger.github.io/helm-charts/
- name: ricoberger
- - url: https://kubernetes.github.io/ingress-nginx/
- name: ingress-nginx
- - url: https://charts.helm.sh/stable
- name: stable
- - url: https://strimzi.io/charts/
- name: strimzi
+ configs:
+ cm:
+ # -- Configure resource comparison
resource.compareoptions: |
ignoreAggregatedRoles: true
- configs:
+ params:
+ # -- Do not use TLS (this is terminated at the ingress)
+ server.insecure: true
+
+ # -- Base href for `index.html` when running under a reverse proxy
+ server.basehref: "/argo-cd"
+
secret:
+ # -- Create the Argo CD secret (we manage this with Vault)
createSecret: false
# The following will be set by parameters injected by Argo CD and should not
diff --git a/applications/cachemachine/README.md b/applications/cachemachine/README.md
index 1ed392e993..1f2dda7790 100644
--- a/applications/cachemachine/README.md
+++ b/applications/cachemachine/README.md
@@ -28,4 +28,4 @@ JupyterLab image prepuller
| serviceAccount | object | `{"annotations":{},"name":""}` | Secret names to use for all Docker pulls |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.name | string | Name based on the fullname template | Name of the service account to use |
-| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod |
+| tolerations | list | `[]` | Tolerations for the cachemachine frontend pod |
\ No newline at end of file
diff --git a/applications/cachemachine/values-base.yaml b/applications/cachemachine/values-base.yaml
index a4e00aff39..738f0af25c 100644
--- a/applications/cachemachine/values-base.yaml
+++ b/applications/cachemachine/values-base.yaml
@@ -10,11 +10,11 @@ autostart:
"type": "RubinRepoMan",
"registry_url": "ts-dockerhub.lsst.org",
"repo": "sal-sciplat-lab",
- "recommended_tag": "recommended_c0030",
+ "recommended_tag": "recommended_c0031",
"num_releases": 0,
"num_weeklies": 3,
"num_dailies": 2,
- "cycle": 30,
+ "cycle": 31,
"alias_tags": [
"latest",
"latest_daily",
diff --git a/applications/cachemachine/values-idfdev.yaml b/applications/cachemachine/values-idfdev.yaml
deleted file mode 100644
index cc7047215f..0000000000
--- a/applications/cachemachine/values-idfdev.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-serviceAccount:
- annotations: {
- iam.gke.io/gcp-service-account: cachemachine-wi@science-platform-dev-7696.iam.gserviceaccount.com
- }
-
-autostart:
- jupyter: |
- {
- "name": "jupyter",
- "labels": {},
- "repomen": [
- {
- "type": "RubinRepoGar",
- "registry_url": "us-central1-docker.pkg.dev",
- "gar_repository": "sciplat",
- "gar_image": "sciplat-lab",
- "project_id": "rubin-shared-services-71ec",
- "location": "us-central1",
- "recommended_tag": "recommended",
- "num_releases": 1,
- "num_weeklies": 2,
- "num_dailies": 3
- },
- {
- "type": "SimpleRepoMan",
- "images": [
- {
- "image_url": "us-central1-docker.pkg.dev/rubin-shared-services-71ec/sciplat/sciplat-lab:w_2023_07",
- "name": "Weekly 2023_07"
- }
- ]
- }
- ]
- }
diff --git a/applications/cachemachine/values-summit.yaml b/applications/cachemachine/values-summit.yaml
index 92f3503a56..e40d0ee13b 100644
--- a/applications/cachemachine/values-summit.yaml
+++ b/applications/cachemachine/values-summit.yaml
@@ -8,11 +8,11 @@ autostart:
"type": "RubinRepoMan",
"registry_url": "ts-dockerhub.lsst.org",
"repo": "sal-sciplat-lab",
- "recommended_tag": "recommended_c0030",
+ "recommended_tag": "recommended_c0031",
"num_releases": 0,
"num_weeklies": 3,
"num_dailies": 2,
- "cycle": 30,
+ "cycle": 31,
"alias_tags": [
"latest",
"latest_daily",
diff --git a/applications/cachemachine/values-tucson-teststand.yaml b/applications/cachemachine/values-tucson-teststand.yaml
index 8fbd910a19..f88f37ba79 100644
--- a/applications/cachemachine/values-tucson-teststand.yaml
+++ b/applications/cachemachine/values-tucson-teststand.yaml
@@ -8,11 +8,11 @@ autostart:
"type": "RubinRepoMan",
"registry_url": "ts-dockerhub.lsst.org",
"repo": "sal-sciplat-lab",
- "recommended_tag": "recommended_c0030",
+ "recommended_tag": "recommended_c0032",
"num_releases": 1,
"num_weeklies": 3,
"num_dailies": 2,
- "cycle": 30,
+ "cycle": 32,
"alias_tags": [
"latest",
"latest_daily",
diff --git a/applications/cert-manager/Chart.yaml b/applications/cert-manager/Chart.yaml
index 8f807c7af3..38b8d24d30 100644
--- a/applications/cert-manager/Chart.yaml
+++ b/applications/cert-manager/Chart.yaml
@@ -7,5 +7,5 @@ sources:
- https://github.com/cert-manager/cert-manager
dependencies:
- name: cert-manager
- version: v1.12.0
+ version: v1.12.3
repository: https://charts.jetstack.io
diff --git a/applications/cert-manager/README.md b/applications/cert-manager/README.md
index e8155ae527..84d991ca04 100644
--- a/applications/cert-manager/README.md
+++ b/applications/cert-manager/README.md
@@ -20,4 +20,4 @@ TLS certificate manager
| config.email | string | sqre-admin | Contact email address registered with Let's Encrypt |
| config.route53.awsAccessKeyId | string | None, must be set if `createIssuer` is true | AWS access key ID for Route 53 (must match `aws-secret-access-key` in Vault secret referenced by `config.vaultSecretPath`) |
| config.route53.hostedZone | string | None, must be set if `createIssuer` is true | Route 53 hosted zone in which to create challenge records |
-| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
\ No newline at end of file
diff --git a/applications/cert-manager/secrets.yaml b/applications/cert-manager/secrets.yaml
new file mode 100644
index 0000000000..714939b88c
--- /dev/null
+++ b/applications/cert-manager/secrets.yaml
@@ -0,0 +1,5 @@
+aws-secret-access-key:
+ description: >-
+ AWS credentials with write access to the appropriate Route 53 subdomain in
+ which Let's Encrypt challenges should be created.
+ if: config.createIssuer
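+ # The "if:" condition marks this secret as required only in environments
+ # where the chart value config.createIssuer is true.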
diff --git a/applications/datalinker/README.md b/applications/datalinker/README.md
index a3cfbd378f..c3b14afad5 100644
--- a/applications/datalinker/README.md
+++ b/applications/datalinker/README.md
@@ -30,4 +30,4 @@ IVOA DataLink-based service and data discovery
| podAnnotations | object | `{}` | Annotations for the datalinker deployment pod |
| replicaCount | int | `1` | Number of web deployment pods to start |
| resources | object | `{}` | Resource limits and requests for the datalinker deployment pod |
-| tolerations | list | `[]` | Tolerations for the datalinker deployment pod |
+| tolerations | list | `[]` | Tolerations for the datalinker deployment pod |
\ No newline at end of file
diff --git a/applications/exposurelog/README.md b/applications/exposurelog/README.md
index b3e08c1aba..634f405a8d 100644
--- a/applications/exposurelog/README.md
+++ b/applications/exposurelog/README.md
@@ -47,4 +47,4 @@ Log messages related to an exposure
| replicaCount | int | `1` | How many exposurelog pods to run |
| resources | object | `{}` | Resource limits and requests for the exposurelog pod |
| securityContext | object | `{}` | Security context for the exposurelog deployment |
-| tolerations | list | `[]` | Tolerations for the exposurelog pod |
+| tolerations | list | `[]` | Tolerations for the exposurelog pod |
\ No newline at end of file
diff --git a/applications/exposurelog/secrets.yaml b/applications/exposurelog/secrets.yaml
new file mode 100644
index 0000000000..b318a54aed
--- /dev/null
+++ b/applications/exposurelog/secrets.yaml
@@ -0,0 +1,4 @@
+database-password:
+ description: "Password for the exposurelog database."
+ generate:
+ type: password
diff --git a/applications/exposurelog/values-base.yaml b/applications/exposurelog/values-base.yaml
index 1c7834eacd..c3ff786c6e 100644
--- a/applications/exposurelog/values-base.yaml
+++ b/applications/exposurelog/values-base.yaml
@@ -1,7 +1,7 @@
config:
site_id: base
- nfs_path_1: /repo/LATISS # Mounted as /volume_1
- nfs_server_1: auxtel-archiver.ls.lsst.org
+ nfs_path_1: /auxtel/repo/LATISS # Mounted as /volume_1
+ nfs_server_1: nfs-auxtel.ls.lsst.org
butler_uri_1: /volume_1
db:
diff --git a/applications/exposurelog/values-summit.yaml b/applications/exposurelog/values-summit.yaml
index 991b8e96a1..636150ebec 100644
--- a/applications/exposurelog/values-summit.yaml
+++ b/applications/exposurelog/values-summit.yaml
@@ -4,8 +4,8 @@ config:
nfs_server_1: comcam-archiver.cp.lsst.org
butler_uri_1: /volume_1
- nfs_path_2: /repo/LATISS # Mounted as /volume_2
- nfs_server_2: auxtel-archiver.cp.lsst.org
+ nfs_path_2: /auxtel/repo/LATISS # Mounted as /volume_2
+ nfs_server_2: nfs-auxtel.cp.lsst.org
butler_uri_2: /volume_2
db:
host: postgresdb01.cp.lsst.org
diff --git a/applications/exposurelog/values-tucson-teststand.yaml b/applications/exposurelog/values-tucson-teststand.yaml
index c634947b61..94a3159b2f 100644
--- a/applications/exposurelog/values-tucson-teststand.yaml
+++ b/applications/exposurelog/values-tucson-teststand.yaml
@@ -4,8 +4,8 @@ config:
nfs_server_1: comcam-archiver.tu.lsst.org
butler_uri_1: /volume_1
- nfs_path_2: /repo/LATISS # Mounted as /volume_2
- nfs_server_2: auxtel-archiver.tu.lsst.org
+ nfs_path_2: /auxtel/repo/LATISS # Mounted as /volume_2
+ nfs_server_2: nfs-auxtel.tu.lsst.org
butler_uri_2: /volume_2
db:
host: postgresdb01.tu.lsst.org
diff --git a/applications/gafaelfawr/Chart.yaml b/applications/gafaelfawr/Chart.yaml
index dd8279cca1..49637f1e3c 100644
--- a/applications/gafaelfawr/Chart.yaml
+++ b/applications/gafaelfawr/Chart.yaml
@@ -5,11 +5,11 @@ description: Authentication and identity system
home: https://gafaelfawr.lsst.io/
sources:
- https://github.com/lsst-sqre/gafaelfawr
-appVersion: 9.2.1
+appVersion: 9.3.0
dependencies:
- name: redis
- version: 1.0.5
+ version: 1.0.6
repository: https://lsst-sqre.github.io/charts/
annotations:
diff --git a/applications/gafaelfawr/README.md b/applications/gafaelfawr/README.md
index a1012431f9..2b9640fcc7 100644
--- a/applications/gafaelfawr/README.md
+++ b/applications/gafaelfawr/README.md
@@ -17,7 +17,7 @@ Authentication and identity system
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as a separate service (behind a `NetworkPolicy`) for other, lower-traffic services. |
| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images |
| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use |
-| cloudsql.image.tag | string | `"1.33.7"` | Cloud SQL Auth Proxy tag to use |
+| cloudsql.image.tag | string | `"1.33.9"` | Cloud SQL Auth Proxy tag to use |
| cloudsql.instanceConnectionName | string | None, must be set if Cloud SQL Auth Proxy is enabled | Instance connection name for a CloudSQL PostgreSQL instance |
| cloudsql.nodeSelector | object | `{}` | Node selection rules for the Cloud SQL Proxy pod |
| cloudsql.podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod |
@@ -32,7 +32,7 @@ Authentication and identity system
| config.cilogon.test | bool | `false` | Whether to use the test instance of CILogon |
| config.cilogon.uidClaim | string | `"uidNumber"` | Claim from which to get the numeric UID (only used if not retrieved from LDAP or Firestore) |
| config.cilogon.usernameClaim | string | `"uid"` | Claim from which to get the username |
-| config.databaseUrl | string | None, must be set if `cloudsql.enabled` is not true | URL for the PostgreSQL database |
+| config.databaseUrl | string | None, must be set if neither `cloudsql.enabled` nor `config.internalDatabase` are true | URL for the PostgreSQL database |
| config.errorFooter | string | `""` | HTML footer to add to any login error page (will be enclosed in a <p> tag). |
| config.firestore.project | string | Firestore support is disabled | If set, assign UIDs and GIDs using Google Firestore in the given project. Cloud SQL must be enabled and the Cloud SQL service account must have read/write access to that Firestore instance. |
| config.forgerock.url | string | ForgeRock Identity Management support is disabled | If set, obtain the GIDs for groups from this ForgeRock Identity Management server. |
@@ -40,6 +40,7 @@ Authentication and identity system
| config.github.clientId | string | `""` | GitHub client ID. One and only one of this, `config.cilogon.clientId`, or `config.oidc.clientId` must be set. |
| config.groupMapping | object | `{}` | Defines a mapping of scopes to groups that provide that scope. See [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes. |
| config.initialAdmins | list | `[]` | Usernames to add as administrators when initializing a new database. Used only if there are no administrators. |
+| config.internalDatabase | bool | `false` | Whether to use the PostgreSQL server internal to the Kubernetes cluster |
| config.knownScopes | object | See the `values.yaml` file | Names and descriptions of all scopes in use. This is used to populate the new token creation page. Only scopes listed here will be options when creating a new token. See [DMTN-235](https://dmtn-235.lsst.io/). |
| config.ldap.addUserGroup | bool | `false` | Whether to synthesize a user private group for each user with a GID equal to their UID |
| config.ldap.emailAttr | string | `"mail"` | Attribute containing the user's email address |
@@ -79,6 +80,7 @@ Authentication and identity system
| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Gafaelfawr image |
| image.repository | string | `"ghcr.io/lsst-sqre/gafaelfawr"` | Gafaelfawr image to use |
| image.tag | string | The appVersion of the chart | Tag of Gafaelfawr image to use |
+| ingress.additionalHosts | list | `[]` | Defines additional FQDNs for Gafaelfawr. These hosts cannot be used for cookie or browser authentication, but they do work for token-based services such as git-lfs or the WebDAV server. |
| maintenance.affinity | object | `{}` | Affinity rules for Gafaelfawr maintenance and audit pods |
| maintenance.auditSchedule | string | `"30 3 * * *"` | Cron schedule string for Gafaelfawr data consistency audit (in UTC) |
| maintenance.maintenanceSchedule | string | `"5 * * * *"` | Cron schedule string for Gafaelfawr periodic maintenance (in UTC) |
@@ -108,4 +110,4 @@ Authentication and identity system
| redis.tolerations | list | `[]` | Tolerations for the Redis pod |
| replicaCount | int | `1` | Number of web frontend pods to start |
| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod |
-| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod |
+| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod |
\ No newline at end of file
diff --git a/applications/gafaelfawr/secrets.yaml b/applications/gafaelfawr/secrets.yaml
new file mode 100644
index 0000000000..972698fe99
--- /dev/null
+++ b/applications/gafaelfawr/secrets.yaml
@@ -0,0 +1,81 @@
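+# Each entry below describes one Gafaelfawr secret. As used throughout this
+# file: `if` makes a secret conditional on a Helm values setting, `generate`
+# marks the secret as automatically generated (with the given type), and
+# `copy` takes the value from another application's secret.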
+bootstrap-token:
+ description: >-
+ Token with admin access, regardless of any other scopes or configuration,
+ which can be used to add new Gafaelfawr administrators and bootstrap
+ creation of other tokens with arbitrary scopes. To use this token,
+ retrieve it from the Kubernetes secret and then use it in API calls like
+ any other Gafaelfawr token. This secret can be changed at any time.
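+ # A hypothetical example of retrieving and using this token (the secret
+ # name, namespace, and API route are assumptions and may differ per
+ # environment):
+ #   token=$(kubectl get secret gafaelfawr -n gafaelfawr \
+ #     -o jsonpath='{.data.bootstrap-token}' | base64 -d)
+ #   curl -H "Authorization: bearer $token" https://<host>/auth/api/v1/token-info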
+ generate:
+ type: gafaelfawr-token
+cilogon-client-secret:
+ description: >-
+ Secret used to authenticate to CILogon as part of the OpenID Connect login
+ protocol to obtain an identity token for the user. This secret can be
+ changed at any time.
+ if: config.cilogon.clientId
+database-password:
+ description: >-
+ Password used to authenticate to the PostgreSQL database used to store
+ Gafaelfawr data. This password may be changed at any time.
+ generate:
+ if: config.internalDatabase
+ type: password
+forgerock-password:
+ description: >-
+ Password used to authenticate to a ForgeRock Identity Management server
+ using HTTP Basic authentication to retrieve GID mappings for groups.
+ if: config.forgerock.url
+github-client-secret:
+ description: >-
+ GitHub OAuth App secret used to authenticate to GitHub as part of the
+ OAuth 2 login protocol to obtain an identity token for the user. This
+ secret can be changed at any time.
+ if: config.github.clientId
+ldap-keytab:
+ description: >-
+ Kerberos keytab used to authenticate to the LDAP server via GSSAPI binds
+ to retrieve user and group information. This keytab can be changed at any
+ time.
+ if: config.ldap.kerberosConfig
+ldap-password:
+ description: >-
+ Password to authenticate to the LDAP server via simple binds to retrieve
+ user and group information. This password can be changed at any time.
+ if: config.ldap.userDn
+oidc-client-secret:
+ description: >-
+ Secret used to authenticate to a remote OpenID Connect authentication
+ server. This secret can be changed at any time.
+ if: config.oidc.clientId
+redis-password:
+ description: >-
+ Password used to authenticate Gafaelfawr to its internal Redis server,
+ deployed as part of the same Argo CD application. This secret can be
+ changed at any time, but both the Redis server and all Gafaelfawr
+ deployments will then have to be restarted to pick up the new value.
+ generate:
+ type: password
+session-secret:
+ description: >-
+ Encryption key used to encrypt the contents of Redis and the cookie data
+ stored in user web browsers that holds their session token and related
+ information. Changing this secret will invalidate all existing Redis data
+ and all user authentication cookies.
+ generate:
+ type: fernet-key
+signing-key:
+ description: >-
+ RSA private key used to sign JWTs issued by Gafaelfawr when it acts as an
+ OpenID Connect server. Changing this secret will invalidate all existing
+ issued OpenID Connect JWTs.
+ if: config.oidcServer.enabled
+ generate:
+ type: rsa-private-key
+slack-webhook:
+ description: >-
+ Slack webhook used to report internal errors. This secret may be
+ changed at any time.
+ if: config.slackAlerts
+ copy:
+ application: mobu
+ key: app-alert-webhook
diff --git a/applications/gafaelfawr/templates/configmap.yaml b/applications/gafaelfawr/templates/configmap.yaml
index 282516e7d5..e5c02c34d9 100644
--- a/applications/gafaelfawr/templates/configmap.yaml
+++ b/applications/gafaelfawr/templates/configmap.yaml
@@ -194,6 +194,8 @@ data:
gafaelfawr.yaml: |
{{- if .Values.cloudsql.enabled }}
databaseUrl: "postgresql://gafaelfawr@cloud-sql-proxy/gafaelfawr"
+ {{- else if .Values.config.internalDatabase }}
+ databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
{{- else }}
databaseUrl: {{ required "config.databaseUrl must be set" .Values.config.databaseUrl | quote }}
{{- end }}
@@ -209,6 +211,8 @@ data:
gafaelfawr.yaml: |
{{- if .Values.cloudsql.enabled }}
databaseUrl: "postgresql://gafaelfawr@localhost/gafaelfawr"
+ {{- else if .Values.config.internalDatabase }}
+ databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
{{- else }}
databaseUrl: {{ required "config.databaseUrl must be set" .Values.config.databaseUrl | quote }}
{{- end }}
diff --git a/applications/gafaelfawr/templates/ingress.yaml b/applications/gafaelfawr/templates/ingress.yaml
index 18a03df271..1284d838f2 100644
--- a/applications/gafaelfawr/templates/ingress.yaml
+++ b/applications/gafaelfawr/templates/ingress.yaml
@@ -47,3 +47,18 @@ spec:
port:
number: 8080
{{- end }}
+ {{- $context := . }}
+ {{- with $context.Values.ingress.additionalHosts }}
+ {{- range . }}
+ - host: {{ . | quote }}
+ http:
+ paths:
+ - path: "/auth"
+ pathType: Prefix
+ backend:
+ service:
+ name: {{ template "gafaelfawr.fullname" $context }}
+ port:
+ number: 8080
+ {{- end }}
+ {{- end }}
diff --git a/applications/gafaelfawr/values-ccin2p3.yaml b/applications/gafaelfawr/values-ccin2p3.yaml
index b5dee269a4..19498c3a97 100644
--- a/applications/gafaelfawr/values-ccin2p3.yaml
+++ b/applications/gafaelfawr/values-ccin2p3.yaml
@@ -6,7 +6,7 @@ redis:
config:
logLevel: "DEBUG"
- databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
+ internalDatabase: true
# Session length and token expiration (in minutes).
issuer:
diff --git a/applications/gafaelfawr/values-idfdev.yaml b/applications/gafaelfawr/values-idfdev.yaml
index 2bcb74b887..fc371da3c2 100644
--- a/applications/gafaelfawr/values-idfdev.yaml
+++ b/applications/gafaelfawr/values-idfdev.yaml
@@ -34,8 +34,8 @@ config:
quota:
default:
notebook:
- cpu: 9.0
- memory: 27
+ cpu: 4.0
+ memory: 16
groupMapping:
"admin:jupyterlab":
diff --git a/applications/gafaelfawr/values-idfint.yaml b/applications/gafaelfawr/values-idfint.yaml
index 8569c0861c..e7798bca92 100644
--- a/applications/gafaelfawr/values-idfint.yaml
+++ b/applications/gafaelfawr/values-idfint.yaml
@@ -35,8 +35,8 @@ config:
quota:
default:
notebook:
- cpu: 9.0
- memory: 27
+ cpu: 8.0
+ memory: 32
# Allow access by GitHub team.
groupMapping:
diff --git a/applications/gafaelfawr/values-idfprod.yaml b/applications/gafaelfawr/values-idfprod.yaml
index 481fb53603..309e698d29 100644
--- a/applications/gafaelfawr/values-idfprod.yaml
+++ b/applications/gafaelfawr/values-idfprod.yaml
@@ -30,8 +30,8 @@ config:
quota:
default:
notebook:
- cpu: 9.0
- memory: 27
+ cpu: 4.0
+ memory: 16
groupMapping:
"admin:provision":
diff --git a/applications/gafaelfawr/values-minikube.yaml b/applications/gafaelfawr/values-minikube.yaml
index 46266cb4d5..44c3e841e7 100644
--- a/applications/gafaelfawr/values-minikube.yaml
+++ b/applications/gafaelfawr/values-minikube.yaml
@@ -4,7 +4,7 @@ redis:
enabled: false
config:
- databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
+ internalDatabase: true
# Support OpenID Connect clients like Chronograf.
oidcServer:
diff --git a/applications/gafaelfawr/values-roe.yaml b/applications/gafaelfawr/values-roe.yaml
index 5f7c2128f4..59b349094f 100644
--- a/applications/gafaelfawr/values-roe.yaml
+++ b/applications/gafaelfawr/values-roe.yaml
@@ -3,7 +3,7 @@ redis:
enabled: false
config:
- databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
+ internalDatabase: true
github:
clientId: "10172b4db1b67ee31620"
diff --git a/applications/gafaelfawr/values-roundtable-dev.yaml b/applications/gafaelfawr/values-roundtable-dev.yaml
index 3a6770ec5b..98dc65924b 100644
--- a/applications/gafaelfawr/values-roundtable-dev.yaml
+++ b/applications/gafaelfawr/values-roundtable-dev.yaml
@@ -18,11 +18,19 @@ config:
oidcServer:
enabled: false
+ knownScopes:
+ "write:git-lfs": >-
+ Can write objects to Git LFS storage bucket
+
groupMapping:
"exec:admin":
- github:
organization: "lsst-sqre"
team: "square"
+ "write:git-lfs":
+ - github:
+ organization: "lsst-sqre"
+ team: "square"
initialAdmins:
- "afausti"
@@ -34,3 +42,9 @@ config:
errorFooter: |
To report problems or ask for help, contact #dm-square on the LSSTC Slack.
+
+ingress:
+ additionalHosts:
+ - "git-lfs-dev.lsst.cloud"
+ - "git-lfs-dev-rw.lsst.cloud"
+ - "monitoring-dev.lsst.cloud"
diff --git a/applications/gafaelfawr/values-usdf-tel-rsp.yaml b/applications/gafaelfawr/values-usdf-tel-rsp.yaml
new file mode 100644
index 0000000000..04cb16d38d
--- /dev/null
+++ b/applications/gafaelfawr/values-usdf-tel-rsp.yaml
@@ -0,0 +1,227 @@
+replicaCount: 2
+
+# Use the CSI storage class so that we can use snapshots.
+redis:
+ persistence:
+ storageClass: "wekafs--sdf-k8s01"
+
+config:
+ databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
+
+ oidcServer:
+ enabled: true
+
+ oidc:
+ clientId: rubin-usdf-tel-rsp
+ audience: "rubin-usdf-tel-rsp"
+ loginUrl: "https://dex.slac.stanford.edu/auth"
+ tokenUrl: "https://dex.slac.stanford.edu/token"
+ issuer: "https://dex.slac.stanford.edu"
+ scopes:
+ - "openid"
+ - "email"
+ - "groups"
+ - "profile"
+ usernameClaim: "name"
+
+ ldap:
+ url: ldaps://ldap-unix.slac.stanford.edu:636
+ groupBaseDn: ou=Group,dc=slac,dc=stanford,dc=edu
+ groupObjectClass: posixGroup
+ groupMemberAttr: memberUid
+ userBaseDn: ou=Accounts,dc=slac,dc=stanford,dc=edu
+ userSearchAttr: uid
+ addUserGroup: false
+ uidAttr: uidNumber
+ gidAttr: gidNumber
+ nameAttr: gecos
+
+ groupMapping:
+ "admin:token":
+ - "rubinmgr"
+ - "unix-admin"
+ "admin:users":
+ - "rubinmgr"
+ - "unix-admin"
+ "exec:admin":
+ - "rubinmgr"
+ - "unix-admin"
+ "exec:notebook":
+ - "lsst"
+ - "lsst-ccs"
+ - "rubin_users"
+ - "rubin_users-a"
+ - "rubin_users-b"
+ - "rubin_users-c"
+ - "rubin_users-d"
+ - "rubin_users-e"
+ - "rubin_users-f"
+ - "rubin_users-g"
+ - "rubin_users-h"
+ - "rubin_users-i"
+ - "rubin_users-j"
+ - "rubin_users-k"
+ - "rubin_users-l"
+ - "rubin_users-m"
+ - "rubin_users-n"
+ - "rubin_users-o"
+ - "rubin_users-p"
+ - "rubin_users-q"
+ - "rubin_users-r"
+ - "rubin_users-s"
+ - "rubin_users-t"
+ - "rubin_users-u"
+ - "rubin_users-v"
+ - "rubin_users-w"
+ - "rubin_users-x"
+ - "rubin_users-y"
+ - "rubin_users-z"
+ - "rubin_admin_datasets"
+ - "rubin_admin_repos"
+ - "unix-admin"
+ "exec:portal":
+ - "lsst"
+ - "lsst-ccs"
+ - "rubin_users"
+ - "rubin_users-a"
+ - "rubin_users-b"
+ - "rubin_users-c"
+ - "rubin_users-d"
+ - "rubin_users-e"
+ - "rubin_users-f"
+ - "rubin_users-g"
+ - "rubin_users-h"
+ - "rubin_users-i"
+ - "rubin_users-j"
+ - "rubin_users-k"
+ - "rubin_users-l"
+ - "rubin_users-m"
+ - "rubin_users-n"
+ - "rubin_users-o"
+ - "rubin_users-p"
+ - "rubin_users-q"
+ - "rubin_users-r"
+ - "rubin_users-s"
+ - "rubin_users-t"
+ - "rubin_users-u"
+ - "rubin_users-v"
+ - "rubin_users-w"
+ - "rubin_users-x"
+ - "rubin_users-y"
+ - "rubin_users-z"
+ - "rubin_admin_datasets"
+ - "rubin_admin_repos"
+ - "unix-admin"
+ "exec:user":
+ - "lsst"
+ - "lsst-ccs"
+ - "rubin_users"
+ - "rubin_users-a"
+ - "rubin_users-b"
+ - "rubin_users-c"
+ - "rubin_users-d"
+ - "rubin_users-e"
+ - "rubin_users-f"
+ - "rubin_users-g"
+ - "rubin_users-h"
+ - "rubin_users-i"
+ - "rubin_users-j"
+ - "rubin_users-k"
+ - "rubin_users-l"
+ - "rubin_users-m"
+ - "rubin_users-n"
+ - "rubin_users-o"
+ - "rubin_users-p"
+ - "rubin_users-q"
+ - "rubin_users-r"
+ - "rubin_users-s"
+ - "rubin_users-t"
+ - "rubin_users-u"
+ - "rubin_users-v"
+ - "rubin_users-w"
+ - "rubin_users-x"
+ - "rubin_users-y"
+ - "rubin_users-z"
+ - "rubin_admin_datasets"
+ - "rubin_admin_repos"
+ - "unix-admin"
+ "read:tap":
+ - "lsst"
+ - "lsst-ccs"
+ - "rubin_users"
+ - "rubin_users-a"
+ - "rubin_users-b"
+ - "rubin_users-c"
+ - "rubin_users-d"
+ - "rubin_users-e"
+ - "rubin_users-f"
+ - "rubin_users-g"
+ - "rubin_users-h"
+ - "rubin_users-i"
+ - "rubin_users-j"
+ - "rubin_users-k"
+ - "rubin_users-l"
+ - "rubin_users-m"
+ - "rubin_users-n"
+ - "rubin_users-o"
+ - "rubin_users-p"
+ - "rubin_users-q"
+ - "rubin_users-r"
+ - "rubin_users-s"
+ - "rubin_users-t"
+ - "rubin_users-u"
+ - "rubin_users-v"
+ - "rubin_users-w"
+ - "rubin_users-x"
+ - "rubin_users-y"
+ - "rubin_users-z"
+ - "rubin_admin_datasets"
+ - "rubin_admin_repos"
+ - "unix-admin"
+ "read:image":
+ - "lsst"
+ - "lsst-ccs"
+ - "rubin_users"
+ - "rubin_users-a"
+ - "rubin_users-b"
+ - "rubin_users-c"
+ - "rubin_users-d"
+ - "rubin_users-e"
+ - "rubin_users-f"
+ - "rubin_users-g"
+ - "rubin_users-h"
+ - "rubin_users-i"
+ - "rubin_users-j"
+ - "rubin_users-k"
+ - "rubin_users-l"
+ - "rubin_users-m"
+ - "rubin_users-n"
+ - "rubin_users-o"
+ - "rubin_users-p"
+ - "rubin_users-q"
+ - "rubin_users-r"
+ - "rubin_users-s"
+ - "rubin_users-t"
+ - "rubin_users-u"
+ - "rubin_users-v"
+ - "rubin_users-w"
+ - "rubin_users-x"
+ - "rubin_users-y"
+ - "rubin_users-z"
+ - "rubin_admin_datasets"
+ - "rubin_admin_repos"
+ - "unix-admin"
+ "write:sasquatch":
+ - "rubinmgr"
+ - "unix-admin"
+
+ initialAdmins:
+ - "afausti"
+ - "athor"
+ - "cbanek"
+ - "frossie"
+ - "jonathansick"
+ - "rra"
+ - "simonkrughoff"
+ - "ytl"
+ - "ppascual"
diff --git a/applications/gafaelfawr/values-usdfdev.yaml b/applications/gafaelfawr/values-usdfdev.yaml
index 664ef85758..74d3c872c6 100644
--- a/applications/gafaelfawr/values-usdfdev.yaml
+++ b/applications/gafaelfawr/values-usdfdev.yaml
@@ -6,7 +6,7 @@ redis:
storageClass: "wekafs--sdf-k8s01"
config:
- databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
+ internalDatabase: true
oidcServer:
enabled: true
@@ -219,6 +219,6 @@ config:
- "frossie"
- "jonathansick"
- "rra"
- - "simonkrughoff"
- "ytl"
- "ppascual"
+ - "pav"
diff --git a/applications/gafaelfawr/values-usdfprod.yaml b/applications/gafaelfawr/values-usdfprod.yaml
index d3a93749e9..f5513a3a14 100644
--- a/applications/gafaelfawr/values-usdfprod.yaml
+++ b/applications/gafaelfawr/values-usdfprod.yaml
@@ -6,7 +6,7 @@ redis:
storageClass: "wekafs--sdf-k8s01"
config:
- databaseUrl: "postgresql://gafaelfawr@postgres.postgres/gafaelfawr"
+ internalDatabase: true
oidcServer:
enabled: true
diff --git a/applications/gafaelfawr/values.yaml b/applications/gafaelfawr/values.yaml
index 3aa3614042..85d1353fdd 100644
--- a/applications/gafaelfawr/values.yaml
+++ b/applications/gafaelfawr/values.yaml
@@ -36,8 +36,12 @@ tolerations: []
affinity: {}
config:
+ # -- Whether to use the PostgreSQL server internal to the Kubernetes cluster
+ internalDatabase: false
+
# -- URL for the PostgreSQL database
- # @default -- None, must be set if `cloudsql.enabled` is not true
+ # @default -- None, must be set if neither `cloudsql.enabled`
+ # nor `config.internalDatabase` is true
databaseUrl: ""
# -- Choose from the text form of Python logging levels
@@ -275,6 +279,12 @@ config:
# [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes.
groupMapping: {}
+ingress:
+ # -- Defines additional FQDNs for Gafaelfawr. These hosts cannot be used
+ # for cookie or browser authentication, but they do work for token-based
+ # services such as git-lfs or the WebDAV server.
+ additionalHosts: []
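+ # For example (hypothetical hostnames):
+ #   additionalHosts:
+ #     - "git-lfs.example.org"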
+
cloudsql:
# -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google
# Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as
@@ -287,7 +297,7 @@ cloudsql:
repository: "gcr.io/cloudsql-docker/gce-proxy"
# -- Cloud SQL Auth Proxy tag to use
- tag: "1.33.7"
+ tag: "1.33.9"
# -- Pull policy for Cloud SQL Auth Proxy images
pullPolicy: "IfNotPresent"
diff --git a/applications/giftless/Chart.yaml b/applications/giftless/Chart.yaml
new file mode 100644
index 0000000000..08dbe29ad5
--- /dev/null
+++ b/applications/giftless/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v2
+name: giftless
+version: 0.0.1
+description: Git-LFS server with GCS S3 backend, with Rubin-specific auth
+sources:
+ - https://github.com/datopian/giftless
+appVersion: 0.5.0
diff --git a/applications/giftless/README.md b/applications/giftless/README.md
new file mode 100644
index 0000000000..fcec2931a7
--- /dev/null
+++ b/applications/giftless/README.md
@@ -0,0 +1,33 @@
+# giftless
+
+Git-LFS server with GCS S3 backend, with Rubin-specific auth
+
+## Source Code
+
+* <https://github.com/datopian/giftless>
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | Affinity rules for the giftless frontend pod |
+| config | object | `{"bucketName":"","projectName":""}` | Configuration for giftless server |
+| config.bucketName | string | Must be overridden in environment-specific values file | Bucket name for GCS LFS Object bucket |
+| config.projectName | string | Must be overridden in environment-specific values file | Project name for GCS LFS Object bucket |
+| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the giftless image |
+| image.repository | string | `"docker.io/datopian/giftless"` | Giftless image to use |
+| image.tag | string | The appVersion of the chart | Tag of giftless image to use |
+| ingress.annotations | object | `{}` | Additional annotations to add to the ingress |
+| ingress.hostname | object | Must be overridden in environment-specific values file | FQDNs of giftless ingresses |
+| ingress.hostname.readonly | string | Must be overridden in environment-specific values file | FQDN for the read-only giftless ingress |
+| ingress.hostname.readwrite | string | Must be overridden in environment-specific values file | FQDN for the read-write giftless ingress |
+| nameOverride | string | `""` | Override the base name for resources |
+| nodeSelector | object | `{}` | Node selector rules for the giftless frontend pod |
+| podAnnotations | object | `{}` | Annotations for the giftless frontend pod |
+| resources | object | `{}` | Resource limits and requests for the giftless frontend pod |
+| server.debug | bool | `false` | Turn on debugging mode |
+| server.processes | int | `2` | Number of processes for server |
+| server.threads | int | `2` | Number of threads per process |
+| tolerations | list | `[]` | Tolerations for the giftless frontend pod |
\ No newline at end of file
diff --git a/applications/giftless/templates/_helpers.tpl b/applications/giftless/templates/_helpers.tpl
new file mode 100644
index 0000000000..5851081721
--- /dev/null
+++ b/applications/giftless/templates/_helpers.tpl
@@ -0,0 +1,73 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "giftless.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "giftless.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "giftless.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "giftless.labels" -}}
+app.kubernetes.io/name: {{ include "giftless.name" . }}
+helm.sh/chart: {{ include "giftless.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "giftless.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "giftless.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+{{/*
+Common labels for the read-write ("-rw") variant
+*/}}
+{{- define "giftless-rw.labels" -}}
+app.kubernetes.io/name: {{ include "giftless.name" . }}-rw
+helm.sh/chart: {{ include "giftless.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels for the read-write ("-rw") variant
+*/}}
+{{- define "giftless-rw.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "giftless.name" . }}-rw
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/applications/giftless/templates/configmap.yaml b/applications/giftless/templates/configmap.yaml
new file mode 100644
index 0000000000..7cb4901730
--- /dev/null
+++ b/applications/giftless/templates/configmap.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "giftless.fullname" . }}
+ labels:
+ {{- include "giftless.labels" . | nindent 4 }}
+data:
+ giftless.conf.yaml: |-
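+ # Anonymous read-only access; write access is served by the separate
+ # "-rw" deployment, whose ingress (see ingress.yaml) requires the
+ # write:git-lfs scope.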
+ AUTH_PROVIDERS:
+ - "giftless.auth.allow_anon:read_only"
+ TRANSFER_ADAPTERS:
+ basic:
+ factory: "giftless.transfer.basic_external:factory"
+ options:
+ storage_class: "giftless.storage.google_cloud:GoogleCloudStorage"
+ storage_options:
+ account_key_file: "/etc/secret/giftless-gcp-key.json"
+ project_name: {{ .Values.config.projectName | quote }}
+ bucket_name: {{ .Values.config.bucketName | quote }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ template "giftless.fullname" . }}-rw
+ labels:
+ {{- include "giftless-rw.labels" . | nindent 4 }}
+data:
+ giftless.conf.yaml: |-
+ AUTH_PROVIDERS:
+ - "giftless.auth.allow_anon:read_write"
+ TRANSFER_ADAPTERS:
+ basic:
+ factory: "giftless.transfer.basic_external:factory"
+ options:
+ storage_class: "giftless.storage.google_cloud:GoogleCloudStorage"
+ storage_options:
+ account_key_file: "/etc/secret/giftless-gcp-key.json"
+ project_name: {{ .Values.config.projectName | quote }}
+ bucket_name: {{ .Values.config.bucketName | quote }}
diff --git a/applications/giftless/templates/deployment.yaml b/applications/giftless/templates/deployment.yaml
new file mode 100644
index 0000000000..ab17ea9b2c
--- /dev/null
+++ b/applications/giftless/templates/deployment.yaml
@@ -0,0 +1,200 @@
+# Note that this creates two nearly-identical deployments, one named
+# "giftless" and one named "giftless-rw". The only real difference
+# between them is that their configuration configmaps and secrets are
+# different: one has the configuration for read-only access to the Git
+# LFS server, and the other has the configuration for read-write access. It is
+# possible that we might in future want to further split the
+# configuration in order to allow, for instance, different numbers of
+# processes and threads for the read-write and the read-only servers, on
+# the grounds that our Git LFS usage is read-mostly.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "giftless.fullname" . }}
+ labels:
+ {{- include "giftless.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "giftless.selectorLabels" . | nindent 6 }}
+ strategy:
+ type: "Recreate"
+ template:
+ metadata:
+ annotations:
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
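+ # A change to the rendered configmap changes this checksum, forcing a
+ # redeploy of the pods so they pick up the new configuration.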
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ labels:
+ {{- include "giftless.selectorLabels" . | nindent 8 }}
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: {{ .Chart.Name }}
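+ # uwsgi flags: -M runs a master process, -T enables threads,
+ # --die-on-term exits (rather than reloads) on SIGTERM, which is the
+ # signal Kubernetes sends on pod shutdown, and -p sets the number of
+ # worker processes.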
+ command:
+ - "uwsgi"
+ - "--http"
+ - ":5000"
+ - "-M"
+ - "-T"
+ - "--die-on-term"
+ - "--threads"
+ - "{{- .Values.server.threads }}"
+ - "-p"
+ - "{{- .Values.server.processes }}"
+ - "--manage-script-name"
+ - "--callable"
+ - "app"
+ env:
+ - name: GIFTLESS_CONFIG_FILE
+ value: "/etc/giftless/giftless.conf.yaml"
+ {{- if .Values.server.debug }}
+ - name: GIFTLESS_DEBUG
+ value: "true"
+ {{- end }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ ports:
+ - name: "http"
+ containerPort: 5000
+ protocol: "TCP"
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - name: "tmp"
+ mountPath: "/tmp"
+ - name: "giftless-config"
+ mountPath: "/etc/giftless"
+ - name: "giftless-secret"
+ mountPath: "/etc/secret"
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+ runAsGroup: 1000
+ volumes:
+ - name: "tmp"
+ emptyDir: {}
+ - name: "giftless-config"
+ configMap:
+ name: {{ include "giftless.fullname" . | quote }}
+ - name: "giftless-secret"
+ secret:
+ secretName: {{ include "giftless.fullname" . | quote }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "giftless.fullname" . }}-rw
+ labels:
+ {{- include "giftless-rw.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "giftless-rw.selectorLabels" . | nindent 6 }}
+ strategy:
+ type: "Recreate"
+ template:
+ metadata:
+ annotations:
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ labels:
+ {{- include "giftless-rw.selectorLabels" . | nindent 8 }}
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: {{ .Chart.Name }}
+ command:
+ - "uwsgi"
+ - "--http"
+ - ":5000"
+ - "-M"
+ - "-T"
+ - "--die-on-term"
+ - "--threads"
+ - "{{- .Values.server.threads }}"
+ - "-p"
+ - "{{- .Values.server.processes }}"
+ - "--manage-script-name"
+ - "--callable"
+ - "app"
+ env:
+ - name: GIFTLESS_CONFIG_FILE
+ value: "/etc/giftless/giftless.conf.yaml"
+ {{- if .Values.server.debug }}
+ - name: GIFTLESS_DEBUG
+ value: "true"
+ {{- end }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ ports:
+ - name: "http"
+ containerPort: 5000
+ protocol: "TCP"
+ {{- with .Values.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - name: "tmp"
+ mountPath: "/tmp"
+ - name: "giftless-config"
+ mountPath: "/etc/giftless"
+ - name: "giftless-secret"
+ mountPath: "/etc/secret"
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+ runAsGroup: 1000
+ volumes:
+ - name: "tmp"
+ emptyDir: {}
+ - name: "giftless-config"
+ configMap:
+ name: {{ template "giftless.fullname" . }}-rw
+ - name: "giftless-secret"
+ secret:
+ secretName: {{ include "giftless.fullname" . | quote }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/applications/giftless/templates/ingress.yaml b/applications/giftless/templates/ingress.yaml
new file mode 100644
index 0000000000..b990ead3b7
--- /dev/null
+++ b/applications/giftless/templates/ingress.yaml
@@ -0,0 +1,70 @@
+apiVersion: gafaelfawr.lsst.io/v1alpha1
+kind: GafaelfawrIngress
+metadata:
+ name: {{ include "giftless.fullname" . }}
+ labels:
+ {{- include "giftless.labels" . | nindent 4 }}
+config:
+ baseUrl: "https://{{ .Values.ingress.hostname.readonly }}"
+ scopes:
+ anonymous: true
+template:
+ metadata:
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-dns"
+ {{- with .Values.ingress.annotations }}
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ name: {{ include "giftless.fullname" . }}
+ spec:
+ tls:
+ - hosts:
+ - {{ .Values.ingress.hostname.readonly | quote }}
+ secretName: tls
+ rules:
+ - host: {{ .Values.ingress.hostname.readonly | quote }}
+ http:
+ paths:
+ - path: "/"
+ pathType: "Prefix"
+ backend:
+ service:
+ name: {{ include "giftless.fullname" . }}
+ port:
+ number: 5000
+---
+apiVersion: gafaelfawr.lsst.io/v1alpha1
+kind: GafaelfawrIngress
+metadata:
+ name: {{ template "giftless.fullname" . }}-rw
+ labels:
+ {{- include "giftless.labels" . | nindent 4 }}
+config:
+ baseUrl: "https://{{ .Values.ingress.hostname.readwrite }}"
+ scopes:
+ all:
+ - "write:git-lfs"
+template:
+ metadata:
+ name: {{ template "giftless.fullname" . }}-rw
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-dns"
+ {{- with .Values.ingress.annotations }}
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ spec:
+ tls:
+ - hosts:
+ - {{ .Values.ingress.hostname.readwrite | quote }}
+ secretName: tls
+ rules:
+ - host: {{ .Values.ingress.hostname.readwrite | quote }}
+ http:
+ paths:
+ - path: "/"
+ pathType: "Prefix"
+ backend:
+ service:
+ name: {{ template "giftless.fullname" . }}-rw
+ port:
+ number: 5000
diff --git a/applications/giftless/templates/service.yaml b/applications/giftless/templates/service.yaml
new file mode 100644
index 0000000000..1ce6a9be64
--- /dev/null
+++ b/applications/giftless/templates/service.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "giftless.fullname" . }}
+ labels:
+ {{- include "giftless.labels" . | nindent 4 }}
+spec:
+ type: "ClusterIP"
+ ports:
+ - port: 5000
+ targetPort: "http"
+ protocol: "TCP"
+ selector:
+ {{- include "giftless.selectorLabels" . | nindent 4 }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "giftless.fullname" . }}-rw
+ labels:
+ {{- include "giftless-rw.labels" . | nindent 4 }}
+spec:
+ type: "ClusterIP"
+ ports:
+ - port: 5000
+ targetPort: "http"
+ protocol: "TCP"
+ selector:
+ {{- include "giftless-rw.selectorLabels" . | nindent 4 }}
diff --git a/applications/giftless/templates/vault-secrets.yaml b/applications/giftless/templates/vault-secrets.yaml
new file mode 100644
index 0000000000..0466225d3c
--- /dev/null
+++ b/applications/giftless/templates/vault-secrets.yaml
@@ -0,0 +1,9 @@
+apiVersion: ricoberger.de/v1alpha1
+kind: VaultSecret
+metadata:
+ name: {{ include "giftless.fullname" . }}
+ labels:
+ {{- include "giftless.labels" . | nindent 4 }}
+spec:
+ path: "{{ .Values.global.vaultSecretsPath }}/giftless"
+ type: "Opaque"
diff --git a/applications/giftless/values-roundtable-dev.yaml b/applications/giftless/values-roundtable-dev.yaml
new file mode 100644
index 0000000000..5bbf6b5651
--- /dev/null
+++ b/applications/giftless/values-roundtable-dev.yaml
@@ -0,0 +1,9 @@
+server:
+ debug: true
+ingress:
+ hostname:
+ readonly: "git-lfs-dev.lsst.cloud"
+ readwrite: "git-lfs-dev-rw.lsst.cloud"
+config:
+ projectName: "plasma-geode-127520"
+ bucketName: "rubin-gitlfs-experimental"
diff --git a/applications/giftless/values.yaml b/applications/giftless/values.yaml
new file mode 100644
index 0000000000..0de26a224b
--- /dev/null
+++ b/applications/giftless/values.yaml
@@ -0,0 +1,68 @@
+# Default values for giftless.
+
+# -- Override the base name for resources
+nameOverride: ""
+
+# -- Override the full name for resources (includes the release name)
+fullnameOverride: ""
+
+# -- Resource limits and requests for the giftless frontend pod
+resources: {}
+
+# -- Annotations for the giftless frontend pod
+podAnnotations: {}
+
+# -- Node selector rules for the giftless frontend pod
+nodeSelector: {}
+
+# -- Tolerations for the giftless frontend pod
+tolerations: []
+
+# -- Affinity rules for the giftless frontend pod
+affinity: {}
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+image:
+ # -- Giftless image to use
+ repository: docker.io/datopian/giftless
+ # -- Pull policy for the giftless image
+ pullPolicy: "IfNotPresent"
+ # -- Tag of giftless image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+ingress:
+ # -- FQDNs of giftless ingresses
+ # @default -- Must be overridden in environment-specific values file
+ hostname:
+ # -- FQDN for the read-only giftless ingress
+ # @default -- Must be overridden in environment-specific values file
+ readonly: ""
+ # -- FQDN for the read-write giftless ingress
+ # @default -- Must be overridden in environment-specific values file
+ readwrite: ""
+ # -- Additional annotations to add to the ingress
+ annotations: {}
+
+server:
+ # -- Turn on debugging mode
+ debug: false
+ # -- Number of processes for server
+ processes: 2
+ # -- Number of threads per process
+ threads: 2
+
+# -- Configuration for giftless server
+config:
+ # -- Project name for GCS LFS Object bucket
+ # @default -- Must be overridden in environment-specific values file
+ projectName: ""
+ # -- Bucket name for GCS LFS Object bucket
+ # @default -- Must be overridden in environment-specific values file
+ bucketName: ""
+
+global:
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
diff --git a/applications/hips/README.md b/applications/hips/README.md
index 01f98617a6..54712cc322 100644
--- a/applications/hips/README.md
+++ b/applications/hips/README.md
@@ -29,4 +29,4 @@ HiPS tile server backed by Google Cloud Storage
| podAnnotations | object | `{}` | Annotations for the hips deployment pod |
| replicaCount | int | `1` | Number of web deployment pods to start |
| resources | object | `{}` | Resource limits and requests for the hips deployment pod |
-| tolerations | list | `[]` | Tolerations for the hips deployment pod |
+| tolerations | list | `[]` | Tolerations for the hips deployment pod |
\ No newline at end of file
diff --git a/applications/ingress-nginx/Chart.yaml b/applications/ingress-nginx/Chart.yaml
index 78fc4689e5..77127b0d9f 100644
--- a/applications/ingress-nginx/Chart.yaml
+++ b/applications/ingress-nginx/Chart.yaml
@@ -7,5 +7,5 @@ sources:
- https://github.com/kubernetes/ingress-nginx
dependencies:
- name: ingress-nginx
- version: 4.6.1
+ version: 4.7.1
repository: https://kubernetes.github.io/ingress-nginx
diff --git a/applications/ingress-nginx/README.md b/applications/ingress-nginx/README.md
index 94e146d6cd..78980b1244 100644
--- a/applications/ingress-nginx/README.md
+++ b/applications/ingress-nginx/README.md
@@ -21,4 +21,4 @@ Ingress controller
| ingress-nginx.controller.metrics.enabled | bool | `true` | Enable metrics reporting via Prometheus |
| ingress-nginx.controller.podLabels | object | See `values.yaml` | Add labels used by `NetworkPolicy` objects to restrict access to the ingress and thus ensure that auth subrequest handlers run |
| ingress-nginx.controller.service.externalTrafficPolicy | string | `"Local"` | Force traffic routing policy to Local so that the external IP in `X-Forwarded-For` will be correct |
-| vaultCertificate.enabled | bool | `false` | Whether to store ingress TLS certificate via vault-secrets-operator. Typically "squareone" owns it instead in an RSP. |
+| vaultCertificate.enabled | bool | `false` | Whether to store ingress TLS certificate via vault-secrets-operator. Typically "squareone" owns it instead in an RSP. |
\ No newline at end of file
diff --git a/applications/ingress-nginx/secrets.yaml b/applications/ingress-nginx/secrets.yaml
new file mode 100644
index 0000000000..d473eeb79a
--- /dev/null
+++ b/applications/ingress-nginx/secrets.yaml
@@ -0,0 +1,13 @@
+"tls.key":
+ description: >-
+ Private key of the TLS certificate to use for all connections to the
+ Phalanx environment.
+ if: vaultCertificate.enabled
+"tls.crt":
+ description: >-
+ Signed public TLS certificate, including any required chain certificates
+ tying it back to a root CA, to use for all connections to the Phalanx
+ environment. This certificate is used regardless of hostname, so it must
+ be valid for every hostname that will be used to connect to this Phalanx
+ environment.
+ if: vaultCertificate.enabled
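+# For testing, a matching self-signed pair can be generated with, e.g.:
+#   openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
+#     -keyout tls.key -out tls.crt -subj "/CN=example.org"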
diff --git a/applications/ingress-nginx/values-idfdev.yaml b/applications/ingress-nginx/values-idfdev.yaml
index ce9e5c39ca..b7dfd83db6 100644
--- a/applications/ingress-nginx/values-idfdev.yaml
+++ b/applications/ingress-nginx/values-idfdev.yaml
@@ -1,4 +1,6 @@
ingress-nginx:
controller:
+ config:
+ error-log-level: "error"
service:
loadBalancerIP: "35.225.112.77"
diff --git a/applications/kubernetes-replicator/README.md b/applications/kubernetes-replicator/README.md
index 1ca6da402d..5ce44bec30 100644
--- a/applications/kubernetes-replicator/README.md
+++ b/applications/kubernetes-replicator/README.md
@@ -25,4 +25,4 @@ Kafka secret replicator
| kubernetes-replicator.serviceAccount.privileges[0].apiGroups[1] | string | `"apps"` | |
| kubernetes-replicator.serviceAccount.privileges[0].apiGroups[2] | string | `"extensions"` | |
| kubernetes-replicator.serviceAccount.privileges[0].resources[0] | string | `"secrets"` | |
-| kubernetes-replicator.serviceAccount.privileges[0].resources[1] | string | `"configmaps"` | |
+| kubernetes-replicator.serviceAccount.privileges[0].resources[1] | string | `"configmaps"` | |
\ No newline at end of file
diff --git a/applications/linters/README.md b/applications/linters/README.md
index facd3c1375..ff3b7459c5 100644
--- a/applications/linters/README.md
+++ b/applications/linters/README.md
@@ -24,4 +24,4 @@ Linters running for operational reasons
| podAnnotations | object | `{}` | Annotations for the linter pod |
| replicaCount | int | `1` | Number of web frontend pods to start |
| resources | object | `{}` | Resource limits and requests for the linter pod |
-| tolerations | list | `[]` | Tolerations for the linter pod |
+| tolerations | list | `[]` | Tolerations for the linter pod |
\ No newline at end of file
diff --git a/applications/obstap/Chart.yaml b/applications/livetap/Chart.yaml
similarity index 89%
rename from applications/obstap/Chart.yaml
rename to applications/livetap/Chart.yaml
index 990e2d0670..eff6e39966 100644
--- a/applications/obstap/Chart.yaml
+++ b/applications/livetap/Chart.yaml
@@ -5,4 +5,4 @@ description: IVOA TAP service
sources:
- https://github.com/lsst-sqre/tap-postgres
- https://github.com/opencadc/tap
-appVersion: "1.10"
+appVersion: "1.12"
diff --git a/applications/obstap/README.md b/applications/livetap/README.md
similarity index 81%
rename from applications/obstap/README.md
rename to applications/livetap/README.md
index ee0b20be14..7eca13edcd 100644
--- a/applications/obstap/README.md
+++ b/applications/livetap/README.md
@@ -18,7 +18,7 @@ IVOA TAP service
| config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) |
| config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket |
| config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. |
-| config.tapSchemaAddress | string | `"tap-schema-db.tap-schema.svc.cluster.local:3306"` | Address to a MySQL database containing TAP schema data |
+| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data |
| fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) |
| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
| global.host | string | Set by Argo CD | Host name for ingress |
@@ -45,6 +45,14 @@ IVOA TAP service
| podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod |
| replicaCount | int | `1` | Number of pods to start |
| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod |
+| tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod |
+| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image |
+| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to use. This must be overridden by each environment with the TAP schema for that environment. |
+| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image |
+| tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod |
+| tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod |
+| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod |
+| tapSchema.tolerations | list | `[]` | Tolerations for the TAP schema database pod |
| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod |
| uws.affinity | object | `{}` | Affinity rules for the UWS database pod |
| uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image |
@@ -54,4 +62,4 @@ IVOA TAP service
| uws.podAnnotations | object | `{}` | Annotations for the UWS database pod |
| uws.resources | object | `{}` | Resource limits and requests for the UWS database pod |
| uws.tolerations | list | `[]` | Tolerations for the UWS database pod |
-| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) |
+| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) |
\ No newline at end of file
diff --git a/applications/livetap/secrets.yaml b/applications/livetap/secrets.yaml
new file mode 100644
index 0000000000..4280c602a3
--- /dev/null
+++ b/applications/livetap/secrets.yaml
@@ -0,0 +1,4 @@
+"google_creds.json":
+ description: >-
+ Google service account credentials used to write async job output to
+ Google Cloud Storage.
diff --git a/applications/obstap/templates/_helpers.tpl b/applications/livetap/templates/_helpers.tpl
similarity index 100%
rename from applications/obstap/templates/_helpers.tpl
rename to applications/livetap/templates/_helpers.tpl
diff --git a/applications/obstap/templates/mock-pg-deployment.yaml b/applications/livetap/templates/mock-pg-deployment.yaml
similarity index 100%
rename from applications/obstap/templates/mock-pg-deployment.yaml
rename to applications/livetap/templates/mock-pg-deployment.yaml
diff --git a/applications/obstap/templates/mock-pg-networkpolicy.yaml b/applications/livetap/templates/mock-pg-networkpolicy.yaml
similarity index 100%
rename from applications/obstap/templates/mock-pg-networkpolicy.yaml
rename to applications/livetap/templates/mock-pg-networkpolicy.yaml
diff --git a/applications/obstap/templates/mock-pg-service.yaml b/applications/livetap/templates/mock-pg-service.yaml
similarity index 100%
rename from applications/obstap/templates/mock-pg-service.yaml
rename to applications/livetap/templates/mock-pg-service.yaml
diff --git a/applications/obstap/templates/tap-deployment.yaml b/applications/livetap/templates/tap-deployment.yaml
similarity index 100%
rename from applications/obstap/templates/tap-deployment.yaml
rename to applications/livetap/templates/tap-deployment.yaml
diff --git a/applications/obstap/templates/tap-ingress-anonymous.yaml b/applications/livetap/templates/tap-ingress-anonymous.yaml
similarity index 93%
rename from applications/obstap/templates/tap-ingress-anonymous.yaml
rename to applications/livetap/templates/tap-ingress-anonymous.yaml
index 575173975c..c9b5173bd7 100644
--- a/applications/obstap/templates/tap-ingress-anonymous.yaml
+++ b/applications/livetap/templates/tap-ingress-anonymous.yaml
@@ -17,7 +17,7 @@ template:
nginx.ingress.kubernetes.io/proxy-read-timeout: "900"
nginx.ingress.kubernetes.io/rewrite-target: "/tap/$1"
nginx.ingress.kubernetes.io/proxy-redirect-from: "http://$host/tap/"
- nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/obstap/"
+ nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/live/"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/use-regex: "true"
{{- with .Values.ingress.anonymousAnnotations }}
@@ -28,7 +28,7 @@ template:
- host: {{ required "global.host must be set" .Values.global.host | quote }}
http:
paths:
- - path: "/api/obstap/(availability|capabilities|swagger-ui.*)"
+ - path: "/api/live/(availability|capabilities|swagger-ui.*)"
pathType: "ImplementationSpecific"
backend:
service:
diff --git a/applications/obstap/templates/tap-ingress-authenticated.yaml b/applications/livetap/templates/tap-ingress-authenticated.yaml
similarity index 96%
rename from applications/obstap/templates/tap-ingress-authenticated.yaml
rename to applications/livetap/templates/tap-ingress-authenticated.yaml
index 2b38dfeb93..6323445829 100644
--- a/applications/obstap/templates/tap-ingress-authenticated.yaml
+++ b/applications/livetap/templates/tap-ingress-authenticated.yaml
@@ -24,7 +24,7 @@ template:
nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"
nginx.ingress.kubernetes.io/rewrite-target: "/tap/$2"
nginx.ingress.kubernetes.io/proxy-redirect-from: "http://$host/tap/"
- nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/obstap/"
+ nginx.ingress.kubernetes.io/proxy-redirect-to: "https://$host/api/live/"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/use-regex: "true"
{{- with .Values.ingress.authenticatedAnnotations }}
@@ -35,7 +35,7 @@ template:
- host: {{ required "global.host must be set" .Values.global.host | quote }}
http:
paths:
- - path: "/api/obstap(/|$)(.*)"
+ - path: "/api/live(/|$)(.*)"
pathType: "ImplementationSpecific"
backend:
service:
diff --git a/applications/obstap/templates/tap-networkpolicy.yaml b/applications/livetap/templates/tap-networkpolicy.yaml
similarity index 100%
rename from applications/obstap/templates/tap-networkpolicy.yaml
rename to applications/livetap/templates/tap-networkpolicy.yaml
diff --git a/applications/livetap/templates/tap-schema-db-deployment.yaml b/applications/livetap/templates/tap-schema-db-deployment.yaml
new file mode 100644
index 0000000000..0623cf1c9a
--- /dev/null
+++ b/applications/livetap/templates/tap-schema-db-deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "cadc-tap.fullname" . }}-tap-schema-db
+ labels:
+ {{- include "cadc-tap.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "cadc-tap.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.tapSchema.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "cadc-tap.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: "schema-db"
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: "tap-schema-db"
+ env:
+ - name: MYSQL_DATABASE
+ value: "TAP_SCHEMA"
+ - name: MYSQL_USER
+ value: "TAP_SCHEMA"
+ - name: MYSQL_PASSWORD
+ value: "TAP_SCHEMA"
+ - name: MYSQL_ROOT_HOST
+ value: "%"
+ image: "{{ .Values.tapSchema.image.repository }}:{{ .Values.tapSchema.image.tag}}"
+ imagePullPolicy: {{ .Values.tapSchema.image.pullPolicy | quote }}
+ ports:
+ - containerPort: 3306
+ protocol: "TCP"
+ {{- with .Values.tapSchema.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ imagePullSecrets:
+ - name: "pull-secret"
+ {{- with .Values.tapSchema.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tapSchema.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tapSchema.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/applications/livetap/templates/tap-schema-db-service.yaml b/applications/livetap/templates/tap-schema-db-service.yaml
new file mode 100644
index 0000000000..e5b9dd0856
--- /dev/null
+++ b/applications/livetap/templates/tap-schema-db-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "cadc-tap.fullname" . }}-schema-db
+ labels:
+ {{- include "cadc-tap.labels" . | nindent 4 }}
+spec:
+ type: "ClusterIP"
+ ports:
+ - protocol: "TCP"
+ port: 3306
+ targetPort: 3306
+ selector:
+ {{- include "cadc-tap.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: "schema-db"
diff --git a/applications/obstap/templates/tap-service.yaml b/applications/livetap/templates/tap-service.yaml
similarity index 100%
rename from applications/obstap/templates/tap-service.yaml
rename to applications/livetap/templates/tap-service.yaml
diff --git a/applications/obstap/templates/uws-db-deployment.yaml b/applications/livetap/templates/uws-db-deployment.yaml
similarity index 100%
rename from applications/obstap/templates/uws-db-deployment.yaml
rename to applications/livetap/templates/uws-db-deployment.yaml
diff --git a/applications/obstap/templates/uws-db-networkpolicy.yaml b/applications/livetap/templates/uws-db-networkpolicy.yaml
similarity index 100%
rename from applications/obstap/templates/uws-db-networkpolicy.yaml
rename to applications/livetap/templates/uws-db-networkpolicy.yaml
diff --git a/applications/obstap/templates/uws-db-service.yaml b/applications/livetap/templates/uws-db-service.yaml
similarity index 100%
rename from applications/obstap/templates/uws-db-service.yaml
rename to applications/livetap/templates/uws-db-service.yaml
diff --git a/applications/obstap/templates/vault-secrets.yaml b/applications/livetap/templates/vault-secrets.yaml
similarity index 89%
rename from applications/obstap/templates/vault-secrets.yaml
rename to applications/livetap/templates/vault-secrets.yaml
index ab07ba33d2..66a0f5d6e1 100644
--- a/applications/obstap/templates/vault-secrets.yaml
+++ b/applications/livetap/templates/vault-secrets.yaml
@@ -6,7 +6,7 @@ metadata:
app: {{ template "cadc-tap.fullname" . }}
{{ include "cadc-tap.labels" . | indent 4 }}
spec:
- path: "{{ .Values.global.vaultSecretsPath }}/obstap"
+ path: "{{ .Values.global.vaultSecretsPath }}/livetap"
type: Opaque
---
apiVersion: ricoberger.de/v1alpha1
diff --git a/applications/livetap/values-minikube.yaml b/applications/livetap/values-minikube.yaml
new file mode 100644
index 0000000000..ad1b6ab54f
--- /dev/null
+++ b/applications/livetap/values-minikube.yaml
@@ -0,0 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-usdf-prod-livetap"
+
+config:
+ gcsBucket: "async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
diff --git a/applications/obstap/values-usdfdev.yaml b/applications/livetap/values-usdfdev.yaml
similarity index 77%
rename from applications/obstap/values-usdfdev.yaml
rename to applications/livetap/values-usdfdev.yaml
index a8802d4c5e..bcaa935a27 100644
--- a/applications/obstap/values-usdfdev.yaml
+++ b/applications/livetap/values-usdfdev.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-usdf-dev-livetap"
+
resources:
requests:
cpu: 2.0
@@ -10,7 +14,7 @@ replicaCount: 2
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
pg:
diff --git a/applications/obstap/values-usdfprod.yaml b/applications/livetap/values-usdfprod.yaml
similarity index 77%
rename from applications/obstap/values-usdfprod.yaml
rename to applications/livetap/values-usdfprod.yaml
index a8802d4c5e..f716d9db32 100644
--- a/applications/obstap/values-usdfprod.yaml
+++ b/applications/livetap/values-usdfprod.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-usdf-prod-livetap"
+
resources:
requests:
cpu: 2.0
@@ -10,7 +14,7 @@ replicaCount: 2
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
pg:
diff --git a/applications/obstap/values.yaml b/applications/livetap/values.yaml
similarity index 85%
rename from applications/obstap/values.yaml
rename to applications/livetap/values.yaml
index fbf2c73154..e235b944fe 100644
--- a/applications/obstap/values.yaml
+++ b/applications/livetap/values.yaml
@@ -53,7 +53,7 @@ vaultSecretsPath: ""
config:
# -- Address to a MySQL database containing TAP schema data
- tapSchemaAddress: "tap-schema-db.tap-schema.svc.cluster.local:3306"
+ tapSchemaAddress: "cadc-tap-schema-db:3306"
# -- Datalink payload URL
datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"
@@ -120,6 +120,33 @@ pg:
# -- Affinity rules for the mock postgres pod
affinity: {}
+tapSchema:
+ image:
+ # -- TAP schema image to use. This must be overridden by each environment
+ # with the TAP schema for that environment.
+ repository: "lsstsqre/tap-schema-mock"
+
+ # -- Pull policy for the TAP schema image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of TAP schema image
+ tag: "2.0.2"
+
+ # -- Resource limits and requests for the TAP schema database pod
+ resources: {}
+
+ # -- Annotations for the TAP schema database pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the TAP schema database pod
+ nodeSelector: {}
+
+ # -- Tolerations for the TAP schema database pod
+ tolerations: []
+
+ # -- Affinity rules for the TAP schema database pod
+ affinity: {}
+
uws:
image:
# -- UWS database image to use
diff --git a/applications/mobu/Chart.yaml b/applications/mobu/Chart.yaml
index 0daea287c2..9d1709f040 100644
--- a/applications/mobu/Chart.yaml
+++ b/applications/mobu/Chart.yaml
@@ -4,4 +4,4 @@ version: 1.0.0
description: Continuous integration testing
sources:
- https://github.com/lsst-sqre/mobu
-appVersion: 6.0.0
+appVersion: 6.1.1
diff --git a/applications/mobu/README.md b/applications/mobu/README.md
index 582048e66a..51d0506ee1 100644
--- a/applications/mobu/README.md
+++ b/applications/mobu/README.md
@@ -13,8 +13,8 @@ Continuous integration testing
| affinity | object | `{}` | Affinity rules for the mobu frontend pod |
| config.autostart | list | `[]` | Autostart specification. Must be a list of mobu flock specifications. Each flock listed will be automatically started when mobu is started. |
| config.debug | bool | `false` | If set to true, include the output from all flocks in the main mobu log and disable structured JSON logging. |
-| config.disableSlackAlerts | bool | `false` | If set to true, do not configure mobu to send alerts to Slack. |
| config.pathPrefix | string | `"/mobu"` | Prefix for mobu's API routes. |
+| config.slackAlerts | bool | `true` | Whether to send alerts and status to Slack. |
| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) |
| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
| global.host | string | Set by Argo CD | Host name for ingress |
@@ -27,4 +27,4 @@ Continuous integration testing
| nodeSelector | object | `{}` | Node selector rules for the mobu frontend pod |
| podAnnotations | object | `{}` | Annotations for the mobu frontend pod |
| resources | object | `{}` | Resource limits and requests for the mobu frontend pod |
-| tolerations | list | `[]` | Tolerations for the mobu frontend pod |
+| tolerations | list | `[]` | Tolerations for the mobu frontend pod |
\ No newline at end of file
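The `config.autostart` value documented above takes a list of mobu flock specifications. As a reference point, a minimal NotebookRunner flock assembled from the patterns used in the environment values files later in this change would look like the sketch below (the flock and bot names are illustrative):

```yaml
config:
  autostart:
    - name: "example"                 # illustrative flock name
      count: 1                        # number of monkeys in the flock
      users:
        - username: "bot-mobu-example"
      scopes:
        - "exec:notebook"
      business:
        type: "NotebookRunner"
        options:
          repo_url: "https://github.com/lsst-sqre/system-test.git"
          repo_branch: "prod"
          use_cachemachine: false
      restart: true                   # restart the business when it fails
```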
diff --git a/applications/mobu/secrets.yaml b/applications/mobu/secrets.yaml
new file mode 100644
index 0000000000..696c08e670
--- /dev/null
+++ b/applications/mobu/secrets.yaml
@@ -0,0 +1,13 @@
+app-alert-webhook:
+ description: >-
+ Slack web hook to which to post internal application alerts. This secret
+ is not used directly by mobu, but is copied from here to all of the
+ applications that report internal problems to Slack. It should normally be
+ separate from mobu's own web hook, since the separate identities attached
+ to the messages help make the type of message clearer, but the same web
+ hook as mobu's own alerts can be used in a pinch.
+ if: config.slackAlerts
+ALERT_HOOK:
+ description: >-
+ Slack web hook to which mobu should report failures and daily status.
+ if: config.slackAlerts
diff --git a/applications/mobu/templates/deployment.yaml b/applications/mobu/templates/deployment.yaml
index 907c4ff097..d80bb97975 100644
--- a/applications/mobu/templates/deployment.yaml
+++ b/applications/mobu/templates/deployment.yaml
@@ -24,7 +24,7 @@ spec:
containers:
- name: {{ .Chart.Name }}
env:
- {{- if (not .Values.config.disableSlackAlerts) }}
+ {{- if .Values.config.slackAlerts }}
- name: "ALERT_HOOK"
valueFrom:
secretKeyRef:
diff --git a/applications/mobu/templates/vault-secrets.yaml b/applications/mobu/templates/vault-secrets.yaml
index 050d8fbadc..b5dfaabaee 100644
--- a/applications/mobu/templates/vault-secrets.yaml
+++ b/applications/mobu/templates/vault-secrets.yaml
@@ -1,3 +1,4 @@
+{{- if .Values.config.slackAlerts }}
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
metadata:
@@ -7,3 +8,4 @@ metadata:
spec:
path: "{{ .Values.global.vaultSecretsPath }}/mobu"
type: "Opaque"
+{{- end }}
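This makes the VaultSecret conditional on the same `config.slackAlerts` flag that gates the ALERT_HOOK environment variable in the deployment, so environments with Slack alerts disabled no longer need a mobu secret at all. When enabled, the resource renders roughly as the sketch below (the Vault path is a placeholder for whatever `global.vaultSecretsPath` is set to in that environment):

```yaml
apiVersion: ricoberger.de/v1alpha1
kind: VaultSecret
spec:
  # <vaultSecretsPath> is environment-specific; placeholder value shown
  path: "<vaultSecretsPath>/mobu"
  type: "Opaque"
```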
diff --git a/applications/mobu/values-idfdev.yaml b/applications/mobu/values-idfdev.yaml
index 37bc0563ac..645b79227f 100644
--- a/applications/mobu/values-idfdev.yaml
+++ b/applications/mobu/values-idfdev.yaml
@@ -1,10 +1,10 @@
config:
debug: true
autostart:
- - name: "nublado2"
+ - name: "weekly"
count: 1
users:
- - username: "bot-mobu-user"
+ - username: "bot-mobu-weekly"
scopes:
- "exec:notebook"
- "exec:portal"
@@ -17,11 +17,12 @@ config:
image_class: "latest-weekly"
repo_url: "https://github.com/lsst-sqre/system-test.git"
repo_branch: "prod"
+ use_cachemachine: false
restart: true
- - name: "weekly"
+ - name: "tutorial"
count: 1
users:
- - username: "bot-mobu-weekly"
+ - username: "bot-mobu-tutorial"
scopes:
- "exec:notebook"
- "exec:portal"
@@ -32,9 +33,10 @@ config:
options:
image:
image_class: "latest-weekly"
- repo_url: "https://github.com/lsst-sqre/system-test.git"
- repo_branch: "prod"
- url_prefix: "/n3"
+ repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git"
+ repo_branch: "main"
+ max_executions: 1
+ working_directory: "notebooks/tutorial-notebooks"
use_cachemachine: false
restart: true
- name: "tap"
diff --git a/applications/mobu/values-idfint.yaml b/applications/mobu/values-idfint.yaml
index e8da2c1d59..7dc005b4ca 100644
--- a/applications/mobu/values-idfint.yaml
+++ b/applications/mobu/values-idfint.yaml
@@ -15,6 +15,7 @@ config:
repo_url: "https://github.com/lsst-sqre/system-test.git"
repo_branch: "prod"
max_executions: 1
+ url_prefix: "/n2"
restart: true
- name: "recommended"
count: 1
@@ -31,7 +32,6 @@ config:
repo_url: "https://github.com/lsst-sqre/system-test.git"
repo_branch: "prod"
use_cachemachine: false
- url_prefix: "/n3"
restart: true
- name: "weekly"
count: 1
@@ -50,7 +50,26 @@ config:
repo_url: "https://github.com/lsst-sqre/system-test.git"
repo_branch: "prod"
use_cachemachine: false
- url_prefix: "/n3"
+ restart: true
+ - name: "tutorial"
+ count: 1
+ users:
+ - username: "bot-mobu-tutorial"
+ scopes:
+ - "exec:notebook"
+ - "exec:portal"
+ - "read:image"
+ - "read:tap"
+ business:
+ type: "NotebookRunner"
+ options:
+ image:
+ image_class: "latest-weekly"
+ repo_url: "https://github.com/rubin-dp0/tutorial-notebooks.git"
+ repo_branch: "main"
+ max_executions: 1
+ working_directory: "notebooks/tutorial-notebooks"
+ use_cachemachine: false
restart: true
- name: "tap"
count: 1
diff --git a/applications/mobu/values-idfprod.yaml b/applications/mobu/values-idfprod.yaml
index df45b2880b..0f6813639a 100644
--- a/applications/mobu/values-idfprod.yaml
+++ b/applications/mobu/values-idfprod.yaml
@@ -15,6 +15,7 @@ config:
repo_url: "https://github.com/lsst-sqre/system-test.git"
repo_branch: "prod"
max_executions: 1
+ use_cachemachine: false
restart: true
- name: "quickbeam"
count: 1
@@ -32,6 +33,7 @@ config:
repo_branch: "prod"
idle_time: 900
delete_lab: false
+ use_cachemachine: false
restart: true
- name: "tutorial"
count: 1
@@ -49,6 +51,7 @@ config:
repo_branch: "prod"
max_executions: 1
working_directory: "notebooks/tutorial-notebooks"
+ use_cachemachine: false
restart: true
- name: "tap"
count: 1
diff --git a/applications/mobu/values-minikube.yaml b/applications/mobu/values-minikube.yaml
index 6f6e9f20c9..357c0bd2a3 100644
--- a/applications/mobu/values-minikube.yaml
+++ b/applications/mobu/values-minikube.yaml
@@ -1,2 +1,2 @@
config:
- disableSlackAlerts: true
+ slackAlerts: false
diff --git a/applications/mobu/values-usdfdev.yaml b/applications/mobu/values-usdfdev.yaml
index 6f6e9f20c9..5fca5d0a48 100644
--- a/applications/mobu/values-usdfdev.yaml
+++ b/applications/mobu/values-usdfdev.yaml
@@ -1,2 +1,53 @@
config:
- disableSlackAlerts: true
+ debug: true
+ autostart:
+ - name: "firefighter"
+ count: 1
+ users:
+ - username: "bot-mobu01"
+ uidnumber: 45692
+ gidnumber: 1126
+ scopes:
+ - "exec:notebook"
+ - "exec:portal"
+ - "read:image"
+ - "read:tap"
+ business:
+ type: "NotebookRunner"
+ options:
+ repo_url: "https://github.com/lsst-sqre/system-test.git"
+ repo_branch: "prod"
+ use_cachemachine: false
+ restart: true
+ - name: "weekly"
+ count: 1
+ users:
+ - username: "bot-mobu02"
+ uidnumber: 45693
+ gidnumber: 1126
+ scopes:
+ - "exec:notebook"
+ - "exec:portal"
+ - "read:image"
+ - "read:tap"
+ business:
+ type: "NotebookRunner"
+ options:
+ image:
+ image_class: "latest-weekly"
+ repo_url: "https://github.com/lsst-sqre/system-test.git"
+ repo_branch: "prod"
+ use_cachemachine: false
+ restart: true
+ - name: "tap"
+ count: 1
+ users:
+ - username: "bot-mobu03"
+ uidnumber: 45694
+ gidnumber: 1126
+ scopes: ["read:tap"]
+ business:
+ type: "TAPQueryRunner"
+ options:
+ query_set: "dp0.2"
+ restart: true
diff --git a/applications/mobu/values-usdfprod.yaml b/applications/mobu/values-usdfprod.yaml
index 6f6e9f20c9..3bd79a6fe1 100644
--- a/applications/mobu/values-usdfprod.yaml
+++ b/applications/mobu/values-usdfprod.yaml
@@ -1,2 +1,33 @@
config:
- disableSlackAlerts: true
+ debug: true
+ autostart:
+ - name: "firefighter"
+ count: 1
+ users:
+ - username: "bot-mobu04"
+ uidnumber: 45695
+ gidnumber: 1126
+ scopes:
+ - "exec:notebook"
+ - "exec:portal"
+ - "read:image"
+ - "read:tap"
+ business:
+ type: "NotebookRunner"
+ options:
+ repo_url: "https://github.com/lsst-sqre/system-test.git"
+ repo_branch: "prod"
+ use_cachemachine: false
+ restart: true
+ - name: "tap"
+ count: 1
+ users:
+ - username: "bot-mobu05"
+ uidnumber: 45696
+ gidnumber: 1126
+ scopes: ["read:tap"]
+ business:
+ type: "TAPQueryRunner"
+ options:
+ query_set: "dp0.2"
+ restart: true
diff --git a/applications/mobu/values.yaml b/applications/mobu/values.yaml
index 241d73d661..729c3d979b 100644
--- a/applications/mobu/values.yaml
+++ b/applications/mobu/values.yaml
@@ -30,8 +30,8 @@ config:
# and disable structured JSON logging.
debug: false
- # -- If set to true, do not configure mobu to send alerts to Slack.
- disableSlackAlerts: false
+ # -- Whether to send alerts and status to Slack.
+ slackAlerts: true
# -- Prefix for mobu's API routes.
pathPrefix: "/mobu"
diff --git a/applications/moneypenny/README.md b/applications/moneypenny/README.md
index 1cfedae207..9bd5518680 100644
--- a/applications/moneypenny/README.md
+++ b/applications/moneypenny/README.md
@@ -31,4 +31,4 @@ User provisioning actions
| replicaCount | int | `1` | Number of pods to start |
| resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod |
| serviceAccount.name | string | Name based on the fullname template | Name of the service account to use |
-| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod |
+| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod |
\ No newline at end of file
diff --git a/applications/moneypenny/values-idfdev.yaml b/applications/moneypenny/values-idfdev.yaml
deleted file mode 100644
index 77b96cbe69..0000000000
--- a/applications/moneypenny/values-idfdev.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-orders:
- commission:
- - name: initcommission
- image: lsstsqre/inituserhome
- securityContext:
- runAsUser: 0
- runAsNonRootUser: false
- volumeMounts:
- - mountPath: /homedirs
- name: homedirs
- volumes:
- - name: homedirs
- nfs:
- server: 10.87.86.26
- path: /share1/home
diff --git a/applications/monitoring/Chart.yaml b/applications/monitoring/Chart.yaml
new file mode 100644
index 0000000000..a57d8d1979
--- /dev/null
+++ b/applications/monitoring/Chart.yaml
@@ -0,0 +1,11 @@
+apiVersion: v2
+name: monitoring
+version: 0.0.1
+description: Chronograf-based UI for monitoring (data stored in InfluxDBv2)
+sources:
+ - https://github.com/lsst-sqre/rubin-influx-tools
+appVersion: 0.2.0
+dependencies:
+ - name: chronograf
+ version: 1.2.5
+ repository: https://helm.influxdata.com/
diff --git a/applications/monitoring/README.md b/applications/monitoring/README.md
new file mode 100644
index 0000000000..dae5de8f29
--- /dev/null
+++ b/applications/monitoring/README.md
@@ -0,0 +1,44 @@
+# monitoring
+
+Chronograf-based UI for monitoring (data stored in InfluxDBv2)
+
+## Source Code
+
+* <https://github.com/lsst-sqre/rubin-influx-tools>
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| chronograf.env.CUSTOM_AUTO_REFRESH | string | `"1s=1000"` | |
+| chronograf.env.GH_CLIENT_ID | string | `""` | |
+| chronograf.env.GH_ORGS | string | `"lsst-sqre"` | |
+| chronograf.env.HOST_PAGE_DISABLED | bool | `true` | |
+| chronograf.env.INFLUXDB_ORG | string | `"square"` | |
+| chronograf.env.INFLUXDB_URL | string | `"https://monitoring.lsst.codes"` | |
+| chronograf.envFromSecret | string | `"monitoring"` | |
+| chronograf.image.pullPolicy | string | `"IfNotPresent"` | |
+| chronograf.image.tag | string | `"1.9.4"` | |
+| chronograf.ingress.enabled | bool | `false` | |
+| chronograf.oauth.enabled | bool | `false` | |
+| chronograf.resources.limits.cpu | int | `4` | |
+| chronograf.resources.limits.memory | string | `"30Gi"` | |
+| chronograf.resources.requests.cpu | int | `1` | |
+| chronograf.resources.requests.memory | string | `"1024Mi"` | |
+| chronograf.service.replicas | int | `1` | |
+| chronograf.service.type | string | `"ClusterIP"` | |
+| chronograf.updateStrategy.type | string | `"Recreate"` | |
+| cronjob.debug | bool | `false` | set to true to enable debug logging |
+| cronjob.image | object | `{"repository":"ghcr.io/lsst-sqre/rubin-influx-tools","tag":""}` | image for monitoring-related cronjobs |
+| cronjob.image.repository | string | `"ghcr.io/lsst-sqre/rubin-influx-tools"` | repository for rubin-influx-tools |
+| cronjob.image.tag | string | the appVersion of the chart | tag for rubin-influx-tools |
+| cronjob.schedule | object | `{"bucketmaker":"*/15 * * * *","bucketmapper":"3-59/15 * * * *","taskmaker":"6-59/15 * * * *"}` | schedules for jobs |
+| cronjob.schedule.bucketmaker | string | `"*/15 * * * *"` | bucketmaker schedule |
+| cronjob.schedule.bucketmapper | string | `"3-59/15 * * * *"` | bucketmapper schedule |
+| cronjob.schedule.taskmaker | string | `"6-59/15 * * * *"` | taskmaker schedule |
+| global.influxdbOrg | string | `"square"` | InfluxDBv2 organization |
+| global.influxdbUrl | string | `"https://monitoring.lsst.codes"` | URL for InfluxDBv2 instance |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| ingress.chronograf | object | `{"annotations":{},"hostname":""}` | ingress for Chronograf UI |
+| ingress.chronograf.annotations | object | `{}` | Additional annotations to add to the ingress |
+| ingress.chronograf.hostname | string | None, must be set by each individual instance | hostname for Chronograf UI |
\ No newline at end of file
diff --git a/applications/monitoring/templates/_helpers.tpl b/applications/monitoring/templates/_helpers.tpl
new file mode 100644
index 0000000000..42400e8296
--- /dev/null
+++ b/applications/monitoring/templates/_helpers.tpl
@@ -0,0 +1,53 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "monitoring.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "monitoring.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "monitoring.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "monitoring.labels" -}}
+app.kubernetes.io/name: {{ include "monitoring.name" . }}
+helm.sh/chart: {{ include "monitoring.chart" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "monitoring.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "monitoring.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/applications/monitoring/templates/cronjobs.yaml b/applications/monitoring/templates/cronjobs.yaml
new file mode 100644
index 0000000000..19f0e25490
--- /dev/null
+++ b/applications/monitoring/templates/cronjobs.yaml
@@ -0,0 +1,158 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: bucketmaker
+spec:
+ schedule: {{ .Values.cronjob.schedule.bucketmaker | quote }}
+ successfulJobsHistoryLimit: 1
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ restartPolicy: Never
+ automountServiceAccountToken: false
+ {{- with .Values.cronjob.tolerations }}
+ tolerations:
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ {{- with .Values.cronjob.affinity }}
+ affinity:
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ containers:
+ - name: bucketmaker
+ image: "{{ .Values.cronjob.image.repository }}:{{ .Values.cronjob.image.tag | default .Chart.AppVersion }}"
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 405
+ runAsGroup: 100
+ capabilities:
+ drop:
+ - all
+ readOnlyRootFilesystem: true
+ env:
+ - name: "INFLUXDB_TOKEN"
+ valueFrom:
+ secretKeyRef:
+ name: "monitoring"
+ key: "influx-alert-token"
+ - name: "INFLUXDB_ORG"
+ value: {{ .Values.global.influxdbOrg | quote }}
+ - name: "INFLUXDB_URL"
+ value: {{ .Values.global.influxdbUrl | quote }}
+ {{- with .Values.cronjob.debug }}
+ - name: "DEBUG"
+ value: "true"
+ {{- end }}
+ command: [ "bucketmaker" ]
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmpdir
+ volumes:
+ # We download the phalanx repo into this directory to determine
+ # which applications are active.
+ - name: tmpdir
+ emptyDir: {}
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: bucketmapper
+spec:
+ schedule: {{ .Values.cronjob.schedule.bucketmapper | quote }}
+ successfulJobsHistoryLimit: 1
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ restartPolicy: Never
+ automountServiceAccountToken: false
+ {{- with .Values.cronjob.tolerations }}
+ tolerations:
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ {{- with .Values.cronjob.affinity }}
+ affinity:
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ containers:
+ - name: bucketmapper
+ image: "{{ .Values.cronjob.image.repository }}:{{ .Values.cronjob.image.tag | default .Chart.AppVersion }}"
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 405
+ runAsGroup: 100
+ capabilities:
+ drop:
+ - all
+ readOnlyRootFilesystem: true
+ env:
+ - name: "INFLUXDB_TOKEN"
+ valueFrom:
+ secretKeyRef:
+ name: "monitoring"
+ # We should be able to do away with this level of
+ # privilege as recent Influx versions automatically
+ # create this mapping, but we would need to change
+ # our naming conventions to adapt.
+ key: "admin-token"
+ - name: "INFLUXDB_ORG"
+ value: {{ .Values.global.influxdbOrg | quote }}
+ - name: "INFLUXDB_URL"
+ value: {{ .Values.global.influxdbUrl | quote }}
+ {{- with .Values.cronjob.debug }}
+ - name: "DEBUG"
+ value: "true"
+ {{- end }}
+ command: [ "bucketmapper" ]
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: taskmaker
+spec:
+ successfulJobsHistoryLimit: 1
+ schedule: {{ .Values.cronjob.schedule.taskmaker | quote }}
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ restartPolicy: Never
+ automountServiceAccountToken: false
+ {{- with .Values.cronjob.tolerations }}
+ tolerations:
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ {{- with .Values.cronjob.affinity }}
+ affinity:
+{{ toYaml . | indent 12 }}
+ {{- end }}
+ containers:
+ - name: taskmaker
+ image: "{{ .Values.cronjob.image.repository }}:{{ .Values.cronjob.image.tag | default .Chart.AppVersion }}"
+ securityContext:
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 405
+ runAsGroup: 100
+ capabilities:
+ drop:
+ - all
+ readOnlyRootFilesystem: true
+ env:
+ - name: "INFLUXDB_TOKEN"
+ valueFrom:
+ secretKeyRef:
+ name: "monitoring"
+ key: "influx-alert-token"
+ - name: "INFLUXDB_ORG"
+ value: {{ .Values.global.influxdbOrg | quote }}
+ - name: "INFLUXDB_URL"
+ value: {{ .Values.global.influxdbUrl | quote }}
+ {{- with .Values.cronjob.debug }}
+ - name: "DEBUG"
+ value: "true"
+ {{- end }}
+ command: [ "taskmaker" ]
diff --git a/applications/monitoring/templates/ingress.yaml b/applications/monitoring/templates/ingress.yaml
new file mode 100644
index 0000000000..0cfac115df
--- /dev/null
+++ b/applications/monitoring/templates/ingress.yaml
@@ -0,0 +1,34 @@
+apiVersion: gafaelfawr.lsst.io/v1alpha1
+kind: GafaelfawrIngress
+metadata:
+ name: "chronograf"
+ labels:
+ {{- include "monitoring.labels" . | nindent 4 }}
+config:
+ baseUrl: "https://{{ .Values.ingress.chronograf.hostname }}"
+ scopes:
+ anonymous: true # We will use Chronograf auth for now.
+template:
+ metadata:
+ name: "chronograf"
+ annotations:
+ cert-manager.io/cluster-issuer: "letsencrypt-dns"
+ {{- with .Values.ingress.chronograf.annotations }}
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ spec:
+ tls:
+ - hosts:
+ - {{ .Values.ingress.chronograf.hostname | quote }}
+ secretName: tls
+ rules:
+ - host: {{ .Values.ingress.chronograf.hostname | quote }}
+ http:
+ paths:
+ - path: "/"
+ pathType: "Prefix"
+ backend:
+ service:
+ name: monitoring-chronograf
+ port:
+ number: 80
diff --git a/applications/monitoring/templates/vault-secret.yaml b/applications/monitoring/templates/vault-secret.yaml
new file mode 100644
index 0000000000..fb3657e6fa
--- /dev/null
+++ b/applications/monitoring/templates/vault-secret.yaml
@@ -0,0 +1,9 @@
+apiVersion: ricoberger.de/v1alpha1
+kind: VaultSecret
+metadata:
+ name: {{ include "monitoring.fullname" . }}
+ labels:
+ {{- include "monitoring.labels" . | nindent 4 }}
+spec:
+ path: "{{ .Values.global.vaultSecretsPath }}/monitoring"
+ type: Opaque
diff --git a/applications/monitoring/values-roundtable-dev.yaml b/applications/monitoring/values-roundtable-dev.yaml
new file mode 100644
index 0000000000..ec948c5cce
--- /dev/null
+++ b/applications/monitoring/values-roundtable-dev.yaml
@@ -0,0 +1,8 @@
+chronograf:
+ env:
+ GH_CLIENT_ID: "e85fe410b0021a251180"
+cronjob:
+ debug: true
+ingress:
+ chronograf:
+ hostname: "monitoring-dev.lsst.cloud"
diff --git a/applications/monitoring/values.yaml b/applications/monitoring/values.yaml
new file mode 100644
index 0000000000..110d5b8c3f
--- /dev/null
+++ b/applications/monitoring/values.yaml
@@ -0,0 +1,96 @@
+chronograf:
+ ## Image Settings
+ ##
+ image:
+ tag: 1.9.4
+ pullPolicy: IfNotPresent
+
+ ## Specify a service type
+ ## ClusterIP is default
+ ## ref: http://kubernetes.io/docs/user-guide/services/
+ ##
+ service:
+ replicas: 1
+ type: ClusterIP
+
+ ## Configure resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 1
+ limits:
+ memory: 30Gi
+ cpu: 4
+
+ ## Use our own Gafaelfawr ingress.
+ ingress:
+ ## We will use Gafaelfawr ingresses instead
+ enabled: false
+
+ ## Enable OAuth
+ oauth:
+ ## This is a lie: see below.
+ enabled: false
+ ## OAuth Settings for OAuth Providers
+ ## We do not set these here. What we do is set:
+ ## - TOKEN_SECRET
+ ## - GH_CLIENT_ID
+ ## - GH_CLIENT_SECRET
+ ## - GH_ORGS
+ ## in the environment. The secrets should come from the "monitoring"
+ ## secret, which is a VaultSecret, and the rest can just be injected
+ ## into the deployment env directly.
+
+ ## Extra environment variables that will be passed onto deployment pods
+ env:
+ CUSTOM_AUTO_REFRESH: "1s=1000"
+ GH_CLIENT_ID: "" # Must be specified for each endpoint for the callback
+ GH_ORGS: "lsst-sqre"
+ HOST_PAGE_DISABLED: true
+ INFLUXDB_URL: "https://monitoring.lsst.codes" # Expect this to change
+ INFLUXDB_ORG: "square"
+ ## INFLUXDB_TOKEN should be in the monitoring secret as well as
+ ## TOKEN_SECRET and GH_CLIENT_SECRET. Note that INFLUX_TOKEN is for
+ ## InfluxDBv1 and INFLUXDB_TOKEN is for v2.
+ envFromSecret: monitoring
+ updateStrategy:
+ type: Recreate
+
+cronjob:
+ # -- image for monitoring-related cronjobs
+ image:
+ # -- repository for rubin-influx-tools
+ repository: ghcr.io/lsst-sqre/rubin-influx-tools
+ # -- tag for rubin-influx-tools
+ # @default -- the appVersion of the chart
+ tag: ""
+ # -- set to true to enable debug logging
+ debug: false
+ # -- schedules for jobs
+ schedule:
+ # -- bucketmaker schedule
+ bucketmaker: "*/15 * * * *"
+ # -- bucketmapper schedule
+ bucketmapper: "3-59/15 * * * *"
+ # -- taskmaker schedule
+ taskmaker: "6-59/15 * * * *"
+
+ingress:
+ # -- ingress for Chronograf UI
+ chronograf:
+ # -- hostname for Chronograf UI
+ # @default -- None, must be set by each individual instance
+ hostname: ""
+ # -- Additional annotations to add to the ingress
+ annotations: {}
+
+global:
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
+ # -- URL for InfluxDBv2 instance
+ influxdbUrl: "https://monitoring.lsst.codes" # Expect this to change
+ # -- InfluxDBv2 organization
+ influxdbOrg: "square"
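Pulling together the comments above and the keys referenced by the cronjob templates, the `monitoring` secret is expected to provide at least the following keys (this is an inferred sketch; all values are placeholders):

```yaml
# Keys expected in the "monitoring" Vault secret (inferred from the chart)
TOKEN_SECRET: "<Chronograf session-signing secret>"
GH_CLIENT_SECRET: "<GitHub OAuth client secret>"
INFLUXDB_TOKEN: "<InfluxDBv2 token used by Chronograf>"
influx-alert-token: "<token used by the bucketmaker and taskmaker jobs>"
admin-token: "<admin token used by the bucketmapper job>"
```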
diff --git a/applications/narrativelog/README.md b/applications/narrativelog/README.md
index 281d4e69db..9023cf08d5 100644
--- a/applications/narrativelog/README.md
+++ b/applications/narrativelog/README.md
@@ -38,4 +38,4 @@ Narrative log service
| replicaCount | int | `1` | Number of narrativelog replicas to run |
| resources | object | `{}` | Resource limits and requests for the narrativelog pod |
| securityContext | object | `{}` | Security context for the narrativelog deployment |
-| tolerations | list | `[]` | Tolerations for the narrativelog pod |
+| tolerations | list | `[]` | Tolerations for the narrativelog pod |
\ No newline at end of file
diff --git a/applications/narrativelog/secrets.yaml b/applications/narrativelog/secrets.yaml
new file mode 100644
index 0000000000..6d65b32bd6
--- /dev/null
+++ b/applications/narrativelog/secrets.yaml
@@ -0,0 +1,4 @@
+database-password:
+ description: "Password for the narrativelog database."
+ generate:
+ type: password
diff --git a/applications/noteburst/Chart.yaml b/applications/noteburst/Chart.yaml
index c25e51b424..bbc6be0d9c 100644
--- a/applications/noteburst/Chart.yaml
+++ b/applications/noteburst/Chart.yaml
@@ -1,7 +1,7 @@
apiVersion: v2
name: noteburst
version: 1.0.0
-appVersion: "0.7.0"
+appVersion: "0.7.1"
description: Noteburst is a notebook execution service for the Rubin Science Platform.
type: application
home: https://noteburst.lsst.io/
@@ -13,7 +13,7 @@ maintainers:
dependencies:
- name: redis
- version: 1.0.5
+ version: 1.0.6
repository: https://lsst-sqre.github.io/charts/
annotations:
diff --git a/applications/noteburst/README.md b/applications/noteburst/README.md
index 2d56c582c5..58af0aee95 100644
--- a/applications/noteburst/README.md
+++ b/applications/noteburst/README.md
@@ -56,4 +56,4 @@ Noteburst is a notebook execution service for the Rubin Science Platform.
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | |
-| tolerations | list | `[]` | |
+| tolerations | list | `[]` | |
\ No newline at end of file
diff --git a/applications/noteburst/templates/ingress.yaml b/applications/noteburst/templates/ingress.yaml
index 2fef313df5..2072a48326 100644
--- a/applications/noteburst/templates/ingress.yaml
+++ b/applications/noteburst/templates/ingress.yaml
@@ -8,7 +8,7 @@ config:
baseUrl: {{ .Values.global.baseUrl | quote }}
scopes:
all:
- - "exec:admin"
+ - "exec:notebook"
loginRedirect: true
template:
metadata:
diff --git a/applications/noteburst/values-idfdev.yaml b/applications/noteburst/values-idfdev.yaml
index bf6d619d3f..b1f15683d5 100644
--- a/applications/noteburst/values-idfdev.yaml
+++ b/applications/noteburst/values-idfdev.yaml
@@ -3,7 +3,6 @@ image:
config:
logLevel: "DEBUG"
- hubPathPrefix: "/n3"
worker:
workerCount: 1
identities:
diff --git a/applications/noteburst/values-usdfdev.yaml b/applications/noteburst/values-usdfdev.yaml
new file mode 100644
index 0000000000..60e463b6e0
--- /dev/null
+++ b/applications/noteburst/values-usdfdev.yaml
@@ -0,0 +1,15 @@
+image:
+ pullPolicy: Always
+
+config:
+ logLevel: "DEBUG"
+ worker:
+ workerCount: 1
+ identities:
+ - username: "bot-noteburst01"
+ - username: "bot-noteburst02"
+
+# Use SSD for Redis storage.
+redis:
+ persistence:
+ storageClass: "wekafs--sdf-k8s01"
diff --git a/applications/nublado/Chart.yaml b/applications/nublado/Chart.yaml
index 1a76ce6a01..5f731c4266 100644
--- a/applications/nublado/Chart.yaml
+++ b/applications/nublado/Chart.yaml
@@ -6,7 +6,7 @@ sources:
- https://github.com/lsst-sqre/jupyterlab-controller
- https://github.com/lsst-sqre/rsp-restspawner
home: https://github.com/lsst-sqre/jupyterlab-controller
-appVersion: 0.5.0
+appVersion: 0.7.0
dependencies:
- name: jupyterhub
diff --git a/applications/nublado/README.md b/applications/nublado/README.md
index 9d34955f4a..386432b776 100644
--- a/applications/nublado/README.md
+++ b/applications/nublado/README.md
@@ -50,6 +50,7 @@ JupyterHub and custom spawner for the Rubin Science Platform
| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
| global.host | string | Set by Argo CD | Host name for ingress |
| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| hub.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. |
| hub.timeout.spawn | int | `600` | Timeout for the Kubernetes spawn process in seconds. (Allow long enough to pull uncached images if needed.) |
| hub.timeout.startup | int | `90` | Timeout for JupyterLab to start. Currently this sometimes takes over 60 seconds for reasons we don't understand. |
| jupyterhub.cull.enabled | bool | `true` | Enable the lab culler. |
@@ -69,7 +70,7 @@ JupyterHub and custom spawner for the Rubin Science Platform
| jupyterhub.hub.extraVolumeMounts | list | `hub-config` and the Gafaelfawr token | Additional volume mounts for JupyterHub |
| jupyterhub.hub.extraVolumes | list | The `hub-config` `ConfigMap` and the Gafaelfawr token | Additional volumes to make available to JupyterHub |
| jupyterhub.hub.image.name | string | `"ghcr.io/lsst-sqre/rsp-restspawner"` | Image to use for JupyterHub |
-| jupyterhub.hub.image.tag | string | `"0.3.0"` | Tag of image to use for JupyterHub |
+| jupyterhub.hub.image.tag | string | `"0.3.2"` | Tag of image to use for JupyterHub |
| jupyterhub.hub.loadRoles.server.scopes | list | `["self"]` | Default scopes for the user's lab, overridden to allow the lab to delete itself (which we use for our added menu items) |
| jupyterhub.hub.networkPolicy.enabled | bool | `false` | Whether to enable the default `NetworkPolicy` (currently, the upstream one does not work correctly) |
| jupyterhub.hub.resources | object | `{"limits":{"cpu":"900m","memory":"1Gi"}}` | Resource limits and requests |
@@ -83,4 +84,4 @@ JupyterHub and custom spawner for the Rubin Science Platform
| jupyterhub.singleuser.cloudMetadata.blockWithIptables | bool | `false` | Whether to configure iptables to block cloud metadata endpoints. This is unnecessary in our environments (they are blocked by cluster configuration) and thus is disabled to reduce complexity. |
| jupyterhub.singleuser.cmd | string | `"/opt/lsst/software/jupyterlab/runlab.sh"` | Start command for labs |
| jupyterhub.singleuser.defaultUrl | string | `"/lab"` | Default URL prefix for lab endpoints |
-| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) |
+| proxy.ingress.annotations | object | Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m | Additional annotations to add to the proxy ingress (also used to talk to JupyterHub and all user labs) |
\ No newline at end of file
diff --git a/applications/nublado/secrets-idfdev.yaml b/applications/nublado/secrets-idfdev.yaml
new file mode 100644
index 0000000000..97d5af3ca8
--- /dev/null
+++ b/applications/nublado/secrets-idfdev.yaml
@@ -0,0 +1,15 @@
+"aws-credentials.ini":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store, formatted using
+ AWS syntax for use with boto.
+"butler-gcs-idf-creds.json":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the native
+ Google syntax, containing the private asymmetric key.
+"butler-hmac-idf-creds.json":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the private
+ key syntax used for HMACs.
+"postgres-credentials.txt":
+ description: >-
+ PostgreSQL credentials in pgpass format for the Butler database.
diff --git a/applications/nublado/secrets.yaml b/applications/nublado/secrets.yaml
new file mode 100644
index 0000000000..f0e2c52cee
--- /dev/null
+++ b/applications/nublado/secrets.yaml
@@ -0,0 +1,23 @@
+cryptkeeper_key:
+ description: "Encryption key for internal key management."
+ generate:
+ type: password
+crypto_key:
+ description: "Encryption key for JupyterHub stored state."
+ generate:
+ type: password
+hub_db_password:
+ description: "Password to authenticate to the JupyterHub session database."
+ generate:
+ type: password
+ if: hub.internalDatabase
+proxy_token:
+ description: "Token authenticating JupyterHub to the proxy server."
+ generate:
+ type: password
+slack_webhook:
+ description: "Slack web hook to which to post alerts."
+ if: controller.slackAlerts
+ copy:
+ application: mobu
+ key: app-alert-webhook
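This file exercises all three secrets.yaml mechanisms seen in this change: `generate` for values Phalanx can create itself, `if` to make a secret conditional on a Helm values path, and `copy` to source the value from another application's secret. Schematically (the key name is illustrative, and `generate` and `copy` are alternatives rather than used together):

```yaml
example-secret:                  # illustrative key name
  description: "Human-readable explanation of the secret's purpose."
  if: some.values.path           # only required when this value is true
  generate:                      # EITHER generate the value automatically...
    type: password
  copy:                          # ...OR copy it from another application
    application: mobu
    key: app-alert-webhook
```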
diff --git a/applications/nublado/templates/controller-serviceaccount.yaml b/applications/nublado/templates/controller-serviceaccount.yaml
index d0c09d9fba..35d540edf0 100644
--- a/applications/nublado/templates/controller-serviceaccount.yaml
+++ b/applications/nublado/templates/controller-serviceaccount.yaml
@@ -31,6 +31,7 @@ rules:
- "pods"
- "resourcequotas"
- "services"
+ - "persistentvolumeclaims"
verbs:
- "create"
- "delete"
diff --git a/applications/nublado/values-idfdev.yaml b/applications/nublado/values-idfdev.yaml
index bfcc0838f9..d886967228 100644
--- a/applications/nublado/values-idfdev.yaml
+++ b/applications/nublado/values-idfdev.yaml
@@ -68,6 +68,5 @@ controller:
server: "10.87.86.26"
jupyterhub:
hub:
- baseUrl: "/n3"
db:
url: "postgresql://nublado3@postgres.postgres/nublado3"
diff --git a/applications/nublado/values-idfint.yaml b/applications/nublado/values-idfint.yaml
index 82b1eef1ef..7c794d2cd1 100644
--- a/applications/nublado/values-idfint.yaml
+++ b/applications/nublado/values-idfint.yaml
@@ -2,6 +2,8 @@ controller:
googleServiceAccount: "nublado-controller@science-platform-int-dc5d.iam.gserviceaccount.com"
slackAlerts: true
config:
+ fileserver:
+ enabled: true
images:
source:
type: "google"
@@ -35,16 +37,16 @@ controller:
sizes:
small:
cpu: 1.0
- memory: 3Gi
+ memory: 4Gi
medium:
cpu: 2.0
- memory: 6Gi
+ memory: 8Gi
large:
cpu: 4.0
- memory: 12Gi
+ memory: 16Gi
huge:
cpu: 8.0
- memory: 24Gi
+ memory: 32Gi
initContainers:
- name: "initdir"
image: "ghcr.io/lsst-sqre/initdir:0.0.4"
@@ -87,7 +89,6 @@ controller:
jupyterhub:
hub:
- baseUrl: "/n3"
config:
ServerApp:
shutdown_no_activity_timeout: 432000
diff --git a/applications/nublado/values-idfprod.yaml b/applications/nublado/values-idfprod.yaml
new file mode 100644
index 0000000000..d7b2704849
--- /dev/null
+++ b/applications/nublado/values-idfprod.yaml
@@ -0,0 +1,92 @@
+controller:
+ googleServiceAccount: "nublado-controller@science-platform-stable-6994.iam.gserviceaccount.com"
+ slackAlerts: true
+ config:
+ images:
+ source:
+ type: "google"
+ location: "us-central1"
+ projectId: "rubin-shared-services-71ec"
+ repository: "sciplat"
+ image: "sciplat-lab"
+ recommendedTag: "recommended"
+ numReleases: 1
+ numWeeklies: 2
+ numDailies: 3
+ lab:
+ env:
+ AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod"
+ AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini"
+ PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt"
+ GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json"
+ DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-repos.yaml"
+ S3_ENDPOINT_URL: "https://storage.googleapis.com"
+ NO_ACTIVITY_TIMEOUT: "432000"
+ CULL_KERNEL_IDLE_TIMEOUT: "432000"
+ CULL_TERMINAL_INACTIVE_TIMEOUT: "432000"
+
+ sizes:
+ small:
+ cpu: 1.0
+ memory: 4Gi
+ medium:
+ cpu: 2.0
+ memory: 8Gi
+ large:
+ cpu: 4.0
+ memory: 16Gi
+ initContainers:
+ - name: "initdir"
+ image: "ghcr.io/lsst-sqre/initdir:0.0.4"
+ privileged: true
+ volumes:
+ - containerPath: "/home"
+ mode: "rw"
+ source:
+ serverPath: "/share1/home"
+ server: "10.13.105.122"
+ type: "nfs"
+ secrets:
+ - secretName: "nublado-lab-secret"
+ secretKey: "aws-credentials.ini"
+ - secretName: "nublado-lab-secret"
+ secretKey: "butler-gcs-idf-creds.json"
+ - secretName: "nublado-lab-secret"
+ secretKey: "butler-hmac-idf-creds.json"
+ - secretName: "nublado-lab-secret"
+ secretKey: "postgres-credentials.txt"
+ volumes:
+ - containerPath: "/home"
+ mode: "rw"
+ source:
+ serverPath: "/share1/home"
+ server: "10.13.105.122"
+ type: "nfs"
+ - containerPath: "/project"
+ mode: "rw"
+ source:
+ serverPath: "/share1/project"
+ server: "10.13.105.122"
+ type: "nfs"
+ - containerPath: "/scratch"
+ mode: "rw"
+ source:
+ serverPath: "/share1/scratch"
+ server: "10.13.105.122"
+ type: "nfs"
+
+jupyterhub:
+ hub:
+ config:
+ ServerApp:
+ shutdown_no_activity_timeout: 432000
+ db:
+ url: "postgresql://nublado3@postgres.postgres/nublado3"
+
+ cull:
+ enabled: true
+ users: false
+ removeNamedServers: false
+ timeout: 432000
+ every: 300
+ maxAge: 2160000
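The culler settings are all in seconds, which is easy to misread at this magnitude. Annotated with the Zero to JupyterHub meanings of these options:

```yaml
cull:
  enabled: true
  users: false               # cull idle labs, but do not delete the users
  removeNamedServers: false
  timeout: 432000            # 432000 s = 5 days idle before a lab is culled
  every: 300                 # run the culler every 5 minutes
  maxAge: 2160000            # 2160000 s = 25 days; culled even if active
```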
diff --git a/applications/nublado/values-usdfdev.yaml b/applications/nublado/values-usdfdev.yaml
index 762480d2f2..bbac615954 100644
--- a/applications/nublado/values-usdfdev.yaml
+++ b/applications/nublado/values-usdfdev.yaml
@@ -2,6 +2,9 @@ controller:
config:
safir:
logLevel: "DEBUG"
+ fileserver:
+ enabled: false
+ timeout: 21600
images:
source:
@@ -16,10 +19,13 @@ controller:
lab:
pullSecret: "pull-secret"
+ homedirSchema: "initialThenUsername"
+
env:
AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini"
AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod"
DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml"
+ HUB_ROUTE: "/nb/hub"
PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt"
PGUSER: "rubin"
S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu"
@@ -125,16 +131,6 @@ controller:
resources:
requests:
storage: "1Gi"
- - containerPath: "/fs/nfs"
- mode: "rw"
- source:
- type: "persistentVolumeClaim"
- storageClassName: "fs-nfs"
- accessModes:
- - "ReadWriteMany"
- resources:
- requests:
- storage: "1Gi"
- containerPath: "/fs/ddn/sdf/group/rubin"
mode: "rw"
source:
@@ -169,6 +165,10 @@ proxy:
nginx.ingress.kubernetes.io/proxy-read-timeout: "20"
jupyterhub:
+ hub:
+ baseUrl: "/nb"
+ db:
+ url: "postgresql://nublado3@postgres.postgres/nublado3"
cull:
timeout: 432000
every: 300
diff --git a/applications/nublado/values-usdfprod.yaml b/applications/nublado/values-usdfprod.yaml
new file mode 100644
index 0000000000..9f38fe9157
--- /dev/null
+++ b/applications/nublado/values-usdfprod.yaml
@@ -0,0 +1,174 @@
+controller:
+ config:
+ safir:
+ logLevel: "DEBUG"
+ fileserver:
+ enabled: false
+ timeout: 21600
+
+ images:
+ source:
+ type: "docker"
+ registry: "docker-registry.slac.stanford.edu"
+ repository: "lsstsqre/sciplat-lab"
+ recommendedTag: "recommended"
+ numReleases: 1
+ numWeeklies: 2
+ numDailies: 3
+
+ lab:
+ pullSecret: "pull-secret"
+
+ homedirSchema: "initialThenUsername"
+
+ env:
+ AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini"
+ AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod"
+ DAF_BUTLER_REPOSITORY_INDEX: "/project/data-repos.yaml"
+ HUB_ROUTE: "/nb/hub"
+ PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt"
+ PGUSER: "rubin"
+ S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu"
+ http_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128"
+ https_proxy: "http://sdfproxy.sdf.slac.stanford.edu:3128"
+ no_proxy: "hub.nublado,.sdf.slac.stanford.edu,.slac.stanford.edu,localhost,127.0.0.1"
+
+ files:
+ # Add rubin_users group (there is not yet a simpler way to do this).
+ /etc/group:
+ contents: |
+ root:x:0:
+ bin:x:1:
+ daemon:x:2:
+ sys:x:3:
+ adm:x:4:
+ tty:x:5:
+ disk:x:6:
+ lp:x:7:
+ mem:x:8:
+ kmem:x:9:
+ wheel:x:10:
+ cdrom:x:11:
+ mail:x:12:
+ man:x:15:
+ dialout:x:18:
+ floppy:x:19:
+ games:x:20:
+ utmp:x:22:
+ tape:x:33:
+ utempter:x:35:
+ video:x:39:
+ ftp:x:50:
+ lock:x:54:
+ tss:x:59:
+ audio:x:63:
+ dbus:x:81:
+ screen:x:84:
+ nobody:x:99:
+ users:x:100:
+ systemd-journal:x:190:
+ systemd-network:x:192:
+ cgred:x:997:
+ ssh_keys:x:998:
+ input:x:999:
+ rubin_users:x:4085:
+
+ secrets:
+ - secretName: "nublado-lab-secret"
+ secretKey: "aws-credentials.ini"
+ - secretName: "nublado-lab-secret"
+ secretKey: "postgres-credentials.txt"
+
+ volumes:
+ - containerPath: "/home"
+ mode: "rw"
+ source:
+ type: "persistentVolumeClaim"
+ storageClassName: "sdf-home"
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "1Gi"
+ - containerPath: "/project"
+ subPath: "g"
+ mode: "rw"
+ source:
+ type: "persistentVolumeClaim"
+ storageClassName: "sdf-group-rubin"
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "1Gi"
+ - containerPath: "/sdf/group/rubin"
+ mode: "rw"
+ source:
+ type: "persistentVolumeClaim"
+ storageClassName: "sdf-group-rubin"
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "1Gi"
+ - containerPath: "/sdf/data/rubin"
+ mode: "rw"
+ source:
+ type: "persistentVolumeClaim"
+ storageClassName: "sdf-data-rubin"
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "1Gi"
+ - containerPath: "/scratch"
+ mode: "rw"
+ source:
+ type: "persistentVolumeClaim"
+ storageClassName: "sdf-scratch"
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "1Gi"
+ - containerPath: "/fs/ddn/sdf/group/rubin"
+ mode: "rw"
+ source:
+ type: "persistentVolumeClaim"
+ storageClassName: "fs-ddn-sdf-group-rubin"
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "1Gi"
+ - containerPath: "/fs/ddn/sdf/group/lsst"
+ mode: "rw"
+ source:
+ type: "persistentVolumeClaim"
+ storageClassName: "fs-ddn-sdf-group-lsst"
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "1Gi"
+
+proxy:
+ ingress:
+ annotations:
+ # proxy-body-size is temporary until USDF uses our normal ingress-nginx,
+ # which already configures a larger value.
+ nginx.ingress.kubernetes.io/proxy-body-size: "50m"
+
+ # These are substantially shorter than the default timeouts (it's not
+ # clear why).
+ nginx.ingress.kubernetes.io/proxy-connect-timeout: "30"
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "20"
+
+jupyterhub:
+ hub:
+ db:
+ url: "postgresql://nublado3@postgres.postgres/nublado3"
+ cull:
+ timeout: 432000
+ every: 300
+ maxAge: 2160000
diff --git a/applications/nublado/values.yaml b/applications/nublado/values.yaml
index 0cf31756f6..0f89b35a56 100644
--- a/applications/nublado/values.yaml
+++ b/applications/nublado/values.yaml
@@ -139,13 +139,13 @@ controller:
sizes:
small:
cpu: 1.0
- memory: 3Gi
+ memory: 4Gi
medium:
cpu: 2.0
- memory: 6Gi
+ memory: 8Gi
large:
cpu: 4.0
- memory: 12Gi
+ memory: 16Gi
# -- Volumes that should be mounted in lab pods. This supports NFS,
# HostPath, and PVC volume types (differentiated in source.type)
@@ -226,7 +226,7 @@ controller:
# No longer used, but preserves compatibility with runlab.sh
dask_worker.yml: |
enabled: false
- /opt/lsst/software/jupyterlab/panda:
+ /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template:
modify: false
contents: |
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -257,6 +257,11 @@ controller:
# JupyterHub configuration handled directly by this chart rather than by Zero
# to JupyterHub.
hub:
+ # -- Whether to use the cluster-internal PostgreSQL server instead of an
+ # external server. This is not used directly by the Nublado chart, but
+ # controls how the database password is managed.
+ internalDatabase: true
+
timeout:
# -- Timeout for the Kubernetes spawn process in seconds. (Allow long
# enough to pull uncached images if needed.)
@@ -288,7 +293,7 @@ jupyterhub:
name: ghcr.io/lsst-sqre/rsp-restspawner
# -- Tag of image to use for JupyterHub
- tag: 0.3.0
+ tag: 0.3.2
# -- Resource limits and requests
resources:
diff --git a/applications/nublado2/README.md b/applications/nublado2/README.md
index 9caffa00d5..80c3a68da9 100644
--- a/applications/nublado2/README.md
+++ b/applications/nublado2/README.md
@@ -15,11 +15,12 @@ JupyterHub for the Rubin Science Platform
| config.base_url | string | `""` | base_url must be set in each instantiation of this chart to the URL of the primary ingress. It's used to construct API requests to the authentication service (which should go through the ingress). |
| config.butler_secret_path | string | `""` | butler_secret_path must be set here, because it's passed through to the lab rather than being part of the Hub configuration. |
| config.cachemachine_image_policy | string | `"available"` | Cachemachine image policy: "available" or "desired". Use "desired" at instances with streaming image support. |
+| config.internalDatabase | bool | `true` | Whether to use the cluster-internal PostgreSQL server instead of an external server. This is not used directly by the Nublado chart, but controls how the database password is managed. |
| config.lab_environment | object | See `values.yaml` | Environment variables to set in spawned lab containers. Each value will be expanded using Jinja 2 templating. |
| config.pinned_images | list | `[]` | images to pin to spawner menu |
| config.pull_secret_path | string | `""` | pull_secret_path must also be set here; it specifies resources in the lab namespace |
| config.shutdown_on_logout | bool | `true` | shut down user pods on logout. Superfluous, because our LogoutHandler enforces this in any event, but nice to make explicit. |
-| config.sizes | list | `[{"cpu":1,"name":"Small","ram":"3072M"},{"cpu":2,"name":"Medium","ram":"6144M"},{"cpu":4,"name":"Large","ram":"12288M"}]` | definitions of Lab sizes available in a given instance |
+| config.sizes | list | `[{"cpu":1,"name":"Small","ram":"4096M"},{"cpu":2,"name":"Medium","ram":"8192M"},{"cpu":4,"name":"Large","ram":"16384M"}]` | definitions of Lab sizes available in a given instance |
| config.user_resources_template | string | See `values.yaml` | Templates for the user resources to create for each lab spawn. This is a string that can be templated and then loaded as YAML to generate a list of Kubernetes objects to create. |
| config.volume_mounts | list | `[]` | Where to mount volumes for a particular instance |
| config.volumes | list | `[]` | Volumes to use for a particular instance |
@@ -115,4 +116,4 @@ JupyterHub for the Rubin Science Platform
| jupyterhub.singleuser.storage.extraVolumes[6].configMap.name | string | `"group"` | |
| jupyterhub.singleuser.storage.extraVolumes[6].name | string | `"group"` | |
| jupyterhub.singleuser.storage.type | string | `"none"` | |
-| network_policy.enabled | bool | `true` | |
+| network_policy.enabled | bool | `true` | |
\ No newline at end of file
diff --git a/applications/nublado2/secrets-idfdev.yaml b/applications/nublado2/secrets-idfdev.yaml
new file mode 100644
index 0000000000..97d5af3ca8
--- /dev/null
+++ b/applications/nublado2/secrets-idfdev.yaml
@@ -0,0 +1,15 @@
+"aws-credentials.ini":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store, formatted using
+ AWS syntax for use with boto.
+"butler-gcs-idf-creds.json":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the native
+ Google syntax, containing the private asymmetric key.
+"butler-hmac-idf-creds.json":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the private
+ key syntax used for HMACs.
+"postgres-credentials.txt":
+ description: >-
+ PostgreSQL credentials in pgpass format for the Butler database.
diff --git a/applications/nublado2/secrets.yaml b/applications/nublado2/secrets.yaml
new file mode 100644
index 0000000000..15d1b5eeba
--- /dev/null
+++ b/applications/nublado2/secrets.yaml
@@ -0,0 +1,17 @@
+cryptkeeper_key:
+ description: "Encryption key for internal key management."
+ generate:
+ type: password
+crypto_key:
+ description: "Encryption key for JupyterHub stored state."
+ generate:
+ type: password
+hub_db_password:
+ description: "Password to authenticate to the JupyterHub session database."
+ generate:
+ type: password
+ if: config.internalDatabase
+proxy_token:
+ description: "Token authenticating JupyterHub to the proxy server."
+ generate:
+ type: password
diff --git a/applications/nublado2/values-base.yaml b/applications/nublado2/values-base.yaml
index 1e1b972945..6d677fe081 100644
--- a/applications/nublado2/values-base.yaml
+++ b/applications/nublado2/values-base.yaml
@@ -49,6 +49,19 @@ config:
nfs:
path: /lsstdata
server: nfs-lsstdata.ls.lsst.org
+ - name: auxtel-butler
+ nfs:
+ path: /auxtel/repo/LATISS
+ server: nfs-auxtel.ls.lsst.org
+ - name: auxtel-oods
+ nfs:
+ path: /auxtel/lsstdata/BTS/auxtel
+ server: nfs-auxtel.ls.lsst.org
+ readOnly: true
+ - name: obs-env
+ nfs:
+ path: /obs-env
+ server: nfs-obsenv.ls.lsst.org
volume_mounts:
- name: home
mountPath: /home
@@ -58,3 +71,10 @@ config:
mountPath: /project
- name: scratch
mountPath: /scratch
+ - name: auxtel-butler
+ mountPath: /repo/LATISS
+ - name: auxtel-oods
+ mountPath: /data/lsstdata/BTS/auxtel
+ readOnly: true
+ - name: obs-env
+ mountPath: /net/obs-env
diff --git a/applications/nublado2/values-idfdev.yaml b/applications/nublado2/values-idfdev.yaml
deleted file mode 100644
index cafa58321f..0000000000
--- a/applications/nublado2/values-idfdev.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-jupyterhub:
- hub:
- config:
- ServerApp:
- shutdown_no_activity_timeout: 432000
- db:
- upgrade: true
-
- cull:
- enabled: true
- users: false
- removeNamedServers: false
- timeout: 432000
- every: 300
- maxAge: 2160000
-
- ingress:
- hosts: ["data-dev.lsst.cloud"]
- annotations:
- nginx.ingress.kubernetes.io/auth-signin: "https://data-dev.lsst.cloud/login"
-
-config:
- base_url: "https://data-dev.lsst.cloud"
- butler_secret_path: "secret/k8s_operator/data-dev.lsst.cloud/butler-secret"
- pull_secret_path: "secret/k8s_operator/data-dev.lsst.cloud/pull-secret"
- cachemachine_image_policy: "desired"
- lab_environment:
- PGPASSFILE: "/opt/lsst/software/jupyterlab/butler-secret/postgres-credentials.txt"
- AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/butler-secret/aws-credentials.ini"
- S3_ENDPOINT_URL: "https://storage.googleapis.com"
- GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/butler-secret/butler-gcs-idf-creds.json"
- DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml"
- AUTO_REPO_URLS: https://github.com/lsst-sqre/system-test,https://github.com/rubin-dp0/tutorial-notebooks
- AUTO_REPO_BRANCH: prod
- AUTO_REPO_SPECS: https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod
- NO_ACTIVITY_TIMEOUT: "432000"
- CULL_KERNEL_IDLE_TIMEOUT: "432000"
- CULL_KERNEL_CONNECTED: "True"
- CULL_KERNEL_INTERVAL: "300"
- CULL_TERMINAL_INACTIVE_TIMEOUT: "432000"
- CULL_TERMINAL_INTERVAL: "300"
- volumes:
- - name: home
- nfs:
- path: /share1/home
- server: 10.87.86.26
- - name: project
- nfs:
- path: /share1/project
- server: 10.87.86.26
- - name: scratch
- nfs:
- path: /share1/scratch
- server: 10.87.86.26
- volume_mounts:
- - name: home
- mountPath: /home
- - name: project
- mountPath: /project
- - name: scratch
- mountPath: /scratch
diff --git a/applications/nublado2/values-idfint.yaml b/applications/nublado2/values-idfint.yaml
index a04cfdc465..c5812aedb8 100644
--- a/applications/nublado2/values-idfint.yaml
+++ b/applications/nublado2/values-idfint.yaml
@@ -1,5 +1,6 @@
jupyterhub:
hub:
+ baseUrl: "/n2"
config:
ServerApp:
shutdown_no_activity_timeout: 432000
@@ -43,16 +44,16 @@ config:
sizes:
- name: Small
cpu: 1
- ram: 3072M
+ ram: 4096M
- name: Medium
cpu: 2
- ram: 6144M
+ ram: 8192M
- name: Large
cpu: 4
- ram: 12288M
+ ram: 16384M
- name: Huge
cpu: 8
- ram: 24576M
+ ram: 32768M
volumes:
- name: home
nfs:
diff --git a/applications/nublado2/values-idfprod.yaml b/applications/nublado2/values-idfprod.yaml
index ff3cb92991..315d8f26dd 100644
--- a/applications/nublado2/values-idfprod.yaml
+++ b/applications/nublado2/values-idfprod.yaml
@@ -1,5 +1,6 @@
jupyterhub:
hub:
+ baseUrl: "/n2"
config:
ServerApp:
shutdown_no_activity_timeout: 432000
diff --git a/applications/nublado2/values-summit.yaml b/applications/nublado2/values-summit.yaml
index 60af40ddc7..3f8f14c394 100644
--- a/applications/nublado2/values-summit.yaml
+++ b/applications/nublado2/values-summit.yaml
@@ -40,8 +40,8 @@ config:
server: nfs1.cp.lsst.org
- name: auxtel
nfs:
- path: /lsstdata
- server: auxtel-archiver.cp.lsst.org
+ path: /auxtel/lsstdata
+ server: nfs-auxtel.cp.lsst.org
readOnly: true
- name: comcam
nfs:
@@ -55,12 +55,12 @@ config:
readOnly: true
- name: latiss
nfs:
- path: /repo/LATISS
- server: auxtel-archiver.cp.lsst.org
+ path: /auxtel/repo/LATISS
+ server: nfs-auxtel.cp.lsst.org
- name: base-auxtel
nfs:
- path: /lsstdata/base/auxtel
- server: auxtel-archiver.cp.lsst.org
+ path: /auxtel/lsstdata/base/auxtel
+ server: nfs-auxtel.cp.lsst.org
readOnly: true
- name: lsstcomcam
nfs:
diff --git a/applications/nublado2/values-tucson-teststand.yaml b/applications/nublado2/values-tucson-teststand.yaml
index 99898d00b2..4739d07de4 100644
--- a/applications/nublado2/values-tucson-teststand.yaml
+++ b/applications/nublado2/values-tucson-teststand.yaml
@@ -51,12 +51,12 @@ config:
server: nfs-lsstdata.tu.lsst.org
- name: auxtel-butler
nfs:
- path: /repo/LATISS
- server: auxtel-archiver.tu.lsst.org
+ path: /auxtel/repo/LATISS
+ server: nfs-auxtel.tu.lsst.org
- name: auxtel-oods
nfs:
- path: /lsstdata/TTS/auxtel
- server: auxtel-archiver.tu.lsst.org
+ path: /auxtel/lsstdata/TTS/auxtel
+ server: nfs-auxtel.tu.lsst.org
readOnly: true
- name: comcam-butler
nfs:
diff --git a/applications/nublado2/values-usdfdev.yaml b/applications/nublado2/values-usdfdev.yaml
index eba1cf3260..77d80bc08e 100644
--- a/applications/nublado2/values-usdfdev.yaml
+++ b/applications/nublado2/values-usdfdev.yaml
@@ -1,6 +1,7 @@
jupyterhub:
hub:
+ baseUrl: "/n2"
config:
ServerApp:
shutdown_no_activity_timeout: 432000
@@ -61,9 +62,6 @@ config:
- name: fs-ddn-sdf-group-rubin
persistentVolumeClaim:
claimName: fs-ddn-sdf-group-rubin
- - name: fs-nfs
- persistentVolumeClaim:
- claimName: fs-nfs
- name: sdf-scratch
persistentVolumeClaim:
claimName: sdf-scratch
@@ -85,8 +83,6 @@ config:
mountPath: /sdf/data/rubin
- name: sdf-scratch
mountPath: /scratch
- - name: fs-nfs
- mountPath: /fs/nfs
- name: fs-ddn-sdf-group-rubin
mountPath: /fs/ddn/sdf/group/rubin
- name: fs-ddn-sdf-group-lsst
@@ -406,18 +402,6 @@ config:
resources:
requests:
storage: 1Gi
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: fs-nfs
- namespace: "{{ user_namespace }}"
- spec:
- storageClassName: fs-nfs
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 1Gi
vault_secret_path: "secret/rubin/usdf-rsp-dev/nublado2"
diff --git a/applications/nublado2/values-usdfprod.yaml b/applications/nublado2/values-usdfprod.yaml
index 8238d4d396..690fd7c7bd 100644
--- a/applications/nublado2/values-usdfprod.yaml
+++ b/applications/nublado2/values-usdfprod.yaml
@@ -1,6 +1,7 @@
jupyterhub:
hub:
+ baseUrl: "/n2"
config:
ServerApp:
shutdown_no_activity_timeout: 432000
@@ -61,9 +62,6 @@ config:
- name: fs-ddn-sdf-group-rubin
persistentVolumeClaim:
claimName: fs-ddn-sdf-group-rubin
- - name: fs-nfs
- persistentVolumeClaim:
- claimName: fs-nfs
- name: sdf-scratch
persistentVolumeClaim:
claimName: sdf-scratch
@@ -73,9 +71,6 @@ config:
volume_mounts:
- name: home
mountPath: "/home/"
-# - name: sdf-group-rubin
-# mountPath: /datasets
-# subPath: datasets
- name: sdf-data-rubin
mountPath: /repo
subPath: repo
@@ -88,14 +83,6 @@ config:
mountPath: /sdf/data/rubin
- name: sdf-scratch
mountPath: /scratch
-# - name: fs-ddn-sdf-group-rubin
-# mountPath: /teststand
-# subPath: lsstdata/offline/teststand
-# - name: fs-ddn-sdf-group-rubin
-# mountPath: /instrument
-# subPath: lsstdata/offline/instrument
- - name: fs-nfs
- mountPath: /fs/nfs
- name: fs-ddn-sdf-group-rubin
mountPath: /fs/ddn/sdf/group/rubin
- name: fs-ddn-sdf-group-lsst
@@ -415,18 +402,6 @@ config:
resources:
requests:
storage: 1Gi
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: fs-nfs
- namespace: "{{ user_namespace }}"
- spec:
- storageClassName: fs-nfs
- accessModes:
- - ReadWriteMany
- resources:
- requests:
- storage: 1Gi
vault_secret_path: "secret/rubin/usdf-rsp/nublado2"
diff --git a/applications/nublado2/values.yaml b/applications/nublado2/values.yaml
index 7582be6e31..8585f00d47 100644
--- a/applications/nublado2/values.yaml
+++ b/applications/nublado2/values.yaml
@@ -178,6 +178,10 @@ jupyterhub:
enabled: false
config:
+ # -- Whether to use the cluster-internal PostgreSQL server instead of an
+ # external server. This is not used directly by the Nublado chart, but
+ # controls how the database password is managed.
+ internalDatabase: true
# -- base_url must be set in each instantiation of this chart to the URL of
# the primary ingress. It's used to construct API requests to the
# authentication service (which should go through the ingress).
@@ -200,13 +204,13 @@ config:
sizes:
- name: Small
cpu: 1
- ram: 3072M
+ ram: 4096M
- name: Medium
cpu: 2
- ram: 6144M
+ ram: 8192M
- name: Large
cpu: 4
- ram: 12288M
+ ram: 16384M
# -- Volumes to use for a particular instance
volumes: []
# -- Where to mount volumes for a particular instance
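The `volumes` and `volume_mounts` lists are paired by name, as the environment files earlier in this change illustrate: every `volume_mounts` entry must reference a volume defined in `volumes`. A minimal hypothetical pair:

```yaml
config:
  volumes:
    - name: example-data             # hypothetical NFS volume
      nfs:
        path: /export/example
        server: nfs-example.example.org
        readOnly: true
  volume_mounts:
    - name: example-data             # must match the volume name above
      mountPath: /data/example
      readOnly: true
```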
diff --git a/applications/obsloctap/README.md b/applications/obsloctap/README.md
index c6cc3d3a5a..b50f7ee849 100644
--- a/applications/obsloctap/README.md
+++ b/applications/obsloctap/README.md
@@ -20,4 +20,4 @@ Publish observing schedule
| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the obsloctap image |
| image.repository | string | `"ghcr.io/lsst-dm/obsloctap"` | obsloctap image to use |
| image.tag | string | The appVersion of the chart | Tag of obsloctap image to use |
-| ingress.annotations | object | `{}` | Additional annotations to add to the ingress |
+| ingress.annotations | object | `{}` | Additional annotations to add to the ingress |
\ No newline at end of file
diff --git a/applications/obsloctap/values-usdfdev.yaml b/applications/obsloctap/values-usdfdev.yaml
index 5865451aa6..130292d4c8 100644
--- a/applications/obsloctap/values-usdfdev.yaml
+++ b/applications/obsloctap/values-usdfdev.yaml
@@ -4,6 +4,7 @@ environment:
PGUSER: "rubin"
AWS_SHARED_CREDENTIALS_FILE: "/home/worker/.lsst/aws-credentials.ini"
S3_ENDPOINT_URL: "https://s3dfrgw.slac.stanford.edu"
+ EFD: "usdf-efd"
config:
volumes:
diff --git a/applications/obstap/values-idfdev.yaml b/applications/obstap/values-idfdev.yaml
deleted file mode 100644
index b0a7af3d2f..0000000000
--- a/applications/obstap/values-idfdev.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-resources:
- requests:
- cpu: 2.0
- memory: "2G"
- limits:
- cpu: 8.0
- memory: "32G"
-
-config:
- gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
- jvmMaxHeapSize: "31G"
-
-qserv:
- host: "10.136.1.211:4040"
- mock:
- enabled: false
diff --git a/applications/obstap/values-idfint.yaml b/applications/obstap/values-idfint.yaml
deleted file mode 100644
index a7a76f923a..0000000000
--- a/applications/obstap/values-idfint.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-resources:
- requests:
- cpu: 2.0
- memory: "2G"
- limits:
- cpu: 8.0
- memory: "32G"
-
-replicaCount: 2
-
-config:
- gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
- jvmMaxHeapSize: "31G"
-
-pg:
- mock:
- enabled: true
diff --git a/applications/obstap/values-idfprod.yaml b/applications/obstap/values-idfprod.yaml
deleted file mode 100644
index cc8d6515e5..0000000000
--- a/applications/obstap/values-idfprod.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-resources:
- requests:
- cpu: 2.0
- memory: "2G"
- limits:
- cpu: 8.0
- memory: "32G"
-
-replicaCount: 2
-
-config:
- gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
- jvmMaxHeapSize: "31G"
-
-pg:
- mock:
- enabled: true
-
-uws:
- resources:
- requests:
- cpu: 0.25
- memory: "1G"
- limits:
- cpu: 2.0
- memory: "4G"
diff --git a/applications/obstap/values-minikube.yaml b/applications/obstap/values-minikube.yaml
deleted file mode 100644
index 6e3f1aca1e..0000000000
--- a/applications/obstap/values-minikube.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-config:
- gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
diff --git a/applications/ook/Chart.yaml b/applications/ook/Chart.yaml
new file mode 100644
index 0000000000..e2e2b5d80c
--- /dev/null
+++ b/applications/ook/Chart.yaml
@@ -0,0 +1,17 @@
+apiVersion: v2
+name: ook
+version: 1.0.0
+appVersion: "0.6.0"
+description: Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io.
+type: application
+home: https://ook.lsst.io/
+sources:
+ - https://github.com/lsst-sqre/ook
+maintainers:
+ - name: jonathansick
+ url: https://github.com/jonathansick
+annotations:
+ phalanx.lsst.io/docs: |
+ - id: "SQR-075"
+ title: "Shared Pydantic schemas as the basis for Kafka/Avro messages in SQuaRE Roundtable"
+ url: "https://sqr-076.lsst.io/"
diff --git a/applications/ook/README.md b/applications/ook/README.md
new file mode 100644
index 0000000000..b47e1b5223
--- /dev/null
+++ b/applications/ook/README.md
@@ -0,0 +1,40 @@
+# ook
+
+Ook is the librarian service for Rubin Observatory. Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, www.lsst.io.
+
+**Homepage:** <https://ook.lsst.io/>
+
+## Source Code
+
+* <https://github.com/lsst-sqre/ook>
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | |
+| config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" |
+| config.registryUrl | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Cluster URL for the Confluent Schema Registry |
+| config.subjectCompatibility | string | `"FORWARD"` | Schema subject compatibility. |
+| config.subjectSuffix | string | `""` | Schema subject suffix. Should be empty for production but can be set to a value to create unique subjects in the Confluent Schema Registry for testing. |
+| config.topics.ingest | string | `"lsst.square-events.ook.ingest"` | Kafka topic name for ingest events |
+| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) |
+| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
+| global.host | string | Set by Argo CD | Host name for ingress |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
+| image.repository | string | `"ghcr.io/lsst-sqre/ook"` | Ook image repository |
+| image.tag | string | The appVersion of the chart | Tag of the image |
+| imagePullSecrets | list | `[]` | Secret names to use for all Docker pulls |
+| ingress.annotations | object | `{}` | Additional annotations to add to the ingress |
+| ingress.path | string | `"/ook"` | Path prefix where Ook is hosted |
+| nameOverride | string | `""` | Override the base name for resources |
+| nodeSelector | object | `{}` | |
+| podAnnotations | object | `{}` | Annotations for API and worker pods |
+| replicaCount | int | `1` | Number of API pods to run |
+| resources | object | `{}` | |
+| service.port | int | `80` | Port of the service to create and map to the ingress |
+| service.type | string | `"ClusterIP"` | Type of service to create |
+| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
+| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| serviceAccount.name | string | `""` | |
+| tolerations | list | `[]` | |
\ No newline at end of file
diff --git a/applications/ook/templates/_helpers.tpl b/applications/ook/templates/_helpers.tpl
new file mode 100644
index 0000000000..d881f724c0
--- /dev/null
+++ b/applications/ook/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "ook.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "ook.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "ook.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "ook.labels" -}}
+helm.sh/chart: {{ include "ook.chart" . }}
+{{ include "ook.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "ook.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "ook.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "ook.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "ook.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
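As a reading aid: `ook.fullname` prefers `fullnameOverride`, then the bare release name when it already contains the chart name, then `<release>-<chart>`, always truncated to 63 characters. A sketch of the resulting names (the release names here are hypothetical):

```yaml
# release "ook"                  -> fullname "ook"            (release contains chart name)
# release "roundtable"           -> fullname "roundtable-ook"
# fullnameOverride: "librarian"  -> fullname "librarian"
```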
diff --git a/applications/ook/templates/configmap.yaml b/applications/ook/templates/configmap.yaml
new file mode 100644
index 0000000000..582a9ae737
--- /dev/null
+++ b/applications/ook/templates/configmap.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "ook.fullname" . }}
+ labels:
+ {{- include "ook.labels" . | nindent 4 }}
+data:
+ SAFIR_LOG_LEVEL: {{ .Values.config.logLevel | quote }}
+ SAFIR_PATH_PREFIX: {{ .Values.ingress.path | quote }}
+ SAFIR_ENVIRONMENT_URL: {{ .Values.global.baseUrl | quote }}
+ SAFIR_PROFILE: "production"
+ OOK_REGISTRY_URL: {{ .Values.config.registryUrl | quote }}
+ OOK_SUBJECT_SUFFIX: {{ .Values.config.subjectSuffix | quote }}
+ OOK_SUBJECT_COMPATIBILITY: {{ .Values.config.subjectCompatibility | quote }}
+ OOK_INGEST_KAFKA_TOPIC: {{ .Values.config.topics.ingest | quote }}
+ ALGOLIA_DOCUMENT_INDEX: "ook_documents_test"
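With the chart defaults from `values.yaml` below, a release named `ook`, and a hypothetical `global.baseUrl`, this template renders roughly as follows (labels omitted for brevity):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: ook  # assuming the release is named "ook"
data:
  SAFIR_LOG_LEVEL: "INFO"
  SAFIR_PATH_PREFIX: "/ook"
  SAFIR_ENVIRONMENT_URL: "https://roundtable.example.org"  # hypothetical baseUrl
  SAFIR_PROFILE: "production"
  OOK_REGISTRY_URL: "http://sasquatch-schema-registry.sasquatch:8081"
  OOK_SUBJECT_SUFFIX: ""
  OOK_SUBJECT_COMPATIBILITY: "FORWARD"
  OOK_INGEST_KAFKA_TOPIC: "lsst.square-events.ook.ingest"
  ALGOLIA_DOCUMENT_INDEX: "ook_documents_test"
```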
diff --git a/applications/ook/templates/deployment.yaml b/applications/ook/templates/deployment.yaml
new file mode 100644
index 0000000000..48b61523e1
--- /dev/null
+++ b/applications/ook/templates/deployment.yaml
@@ -0,0 +1,139 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "ook.fullname" . }}
+ labels:
+ {{- include "ook.labels" . | nindent 4 }}
+ app.kubernetes.io/component: "server"
+ app.kubernetes.io/part-of: "ook"
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "ook.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "ook.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: "server"
+ app.kubernetes.io/part-of: "ook"
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "ook.serviceAccountName" . }}
+ securityContext:
+ runAsNonRoot: true
+ runAsUser: 1000
+ runAsGroup: 1000
+ containers:
+ - name: {{ .Chart.Name }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: 8080
+ protocol: TCP
+ livenessProbe:
+ httpGet:
+ path: /
+ port: http
+ readinessProbe:
+ httpGet:
+ path: /
+ port: http
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ envFrom:
+ - configMapRef:
+ name: {{ include "ook.fullname" . }}
+ env:
+ # Writeable directory for concatenating certs. See "tmp" volume.
+ - name: "KAFKA_CERT_TEMP_DIR"
+ value: "/tmp/kafka_certs"
+ # From KafkaAccess
+ - name: "KAFKA_BOOTSTRAP_SERVERS"
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "ook.fullname" . }}-kafka
+ key: "bootstrapServers"
+ - name: "KAFKA_SECURITY_PROTOCOL"
+ value: "SSL"
+ # From replicated KafkaUser secret
+ - name: "KAFKA_SSL_CLUSTER_CAFILE"
+ value: "/etc/kafkacluster/ca.crt"
+ - name: "KAFKA_SSL_CLIENT_CAFILE"
+ value: "/etc/kafkauser/ca.crt"
+ - name: "KAFKA_SSL_CLIENT_CERTFILE"
+ value: "/etc/kafkauser/user.crt"
+ - name: "KAFKA_SSL_CLIENT_KEYFILE"
+ value: "/etc/kafkauser/user.key"
+ # From Vault secrets
+ - name: "ALGOLIA_APP_ID"
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "ook.fullname" . }}
+ key: "ALGOLIA_APP_ID"
+ - name: "ALGOLIA_API_KEY"
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "ook.fullname" . }}
+ key: "ALGOLIA_API_KEY"
+ - name: "OOK_GITHUB_APP_ID"
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "ook.fullname" . }}
+ key: "OOK_GITHUB_APP_ID"
+ - name: "OOK_GITHUB_APP_PRIVATE_KEY"
+ valueFrom:
+ secretKeyRef:
+ name: {{ template "ook.fullname" . }}
+ key: "OOK_GITHUB_APP_PRIVATE_KEY"
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ volumeMounts:
+ - name: "{{ template "ook.fullname" . }}"
+ mountPath: "/etc/kafkacluster/ca.crt"
+ subPath: "ca.crt"
+ - name: "kafka-user"
+ mountPath: "/etc/kafkauser/ca.crt"
+ subPath: "ca.crt"
+ - name: "kafka-user"
+ mountPath: "/etc/kafkauser/user.crt"
+ subPath: "user.crt"
+ - name: "kafka-user"
+ mountPath: "/etc/kafkauser/user.key"
+ subPath: "user.key"
+ - name: "tmp"
+ mountPath: "/tmp/kafka_certs"
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ volumes:
+ - name: "kafka-user"
+ secret:
+ secretName: {{ template "ook.fullname" . }}-kafka-user
+ - name: "{{ template "ook.fullname" . }}"
+ secret:
+ secretName: {{ template "ook.fullname" . }}
+ - name: "tmp"
+ emptyDir: {}
diff --git a/applications/ook/templates/ingress.yaml b/applications/ook/templates/ingress.yaml
new file mode 100644
index 0000000000..41fe9578a7
--- /dev/null
+++ b/applications/ook/templates/ingress.yaml
@@ -0,0 +1,31 @@
+apiVersion: gafaelfawr.lsst.io/v1alpha1
+kind: GafaelfawrIngress
+metadata:
+ name: {{ template "ook.fullname" . }}
+ labels:
+ {{- include "ook.labels" . | nindent 4 }}
+config:
+ baseUrl: {{ .Values.global.baseUrl | quote }}
+ scopes:
+ all:
+ - "exec:admin"
+ loginRedirect: true
+template:
+ metadata:
+ name: {{ template "ook.fullname" . }}
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ spec:
+ rules:
+ - host: {{ required "global.host must be set" .Values.global.host | quote }}
+ http:
+ paths:
+ - path: {{ .Values.ingress.path | quote }}
+ pathType: "Prefix"
+ backend:
+ service:
+ name: {{ template "ook.fullname" . }}
+ port:
+ number: {{ .Values.service.port }}
diff --git a/applications/ook/templates/kafkaaccess.yaml b/applications/ook/templates/kafkaaccess.yaml
new file mode 100644
index 0000000000..2171625fa9
--- /dev/null
+++ b/applications/ook/templates/kafkaaccess.yaml
@@ -0,0 +1,14 @@
+apiVersion: access.strimzi.io/v1alpha1
+kind: KafkaAccess
+metadata:
+ name: {{ include "ook.fullname" . }}-kafka
+spec:
+ kafka:
+ name: sasquatch
+ namespace: sasquatch
+ listener: tls
+ user:
+ kind: KafkaUser
+ apiGroup: kafka.strimzi.io
+ name: ook
+ namespace: sasquatch
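This KafkaAccess resource asks the Strimzi Access Operator to bind the existing `ook` KafkaUser in the `sasquatch` namespace to this application. If I read the operator's behavior correctly (an assumption, not verified here), it materializes a secret named after the resource, which is where the deployment's `KAFKA_BOOTSTRAP_SERVERS` env var comes from:

```yaml
# Hypothetical secret produced by the access operator; the deployment reads
# its "bootstrapServers" key via secretKeyRef.
apiVersion: v1
kind: Secret
metadata:
  name: ook-kafka
type: Opaque
stringData:
  bootstrapServers: sasquatch-kafka-bootstrap.sasquatch:9093  # tls listener port is an assumption
```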
diff --git a/applications/ook/templates/kafkauser-secret.yaml b/applications/ook/templates/kafkauser-secret.yaml
new file mode 100644
index 0000000000..a332e645a7
--- /dev/null
+++ b/applications/ook/templates/kafkauser-secret.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "ook.fullname" . }}-kafka-user
+ annotations:
+ replicator.v1.mittwald.de/replicate-from: sasquatch/ook
+ replicator.v1.mittwald.de/strip-labels: "true"
+data: {}
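The empty `data` here is intentional: the mittwald kubernetes-replicator fills this secret by copying `sasquatch/ook` (the Strimzi KafkaUser secret) into the application namespace. The deployment's `kafka-user` volume mounts assume the replicated secret carries the usual Strimzi TLS keys:

```yaml
# Expected keys after replication (values elided); these match the subPath
# mounts in deployment.yaml above.
data:
  ca.crt: "..."
  user.crt: "..."
  user.key: "..."
```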
diff --git a/applications/ook/templates/networkpolicy.yaml b/applications/ook/templates/networkpolicy.yaml
new file mode 100644
index 0000000000..914b196dc6
--- /dev/null
+++ b/applications/ook/templates/networkpolicy.yaml
@@ -0,0 +1,21 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+ name: {{ include "ook.fullname" . }}
+spec:
+ podSelector:
+ matchLabels:
+ {{- include "ook.selectorLabels" . | nindent 6 }}
+ policyTypes:
+ - Ingress
+ ingress:
+ # Allow inbound access from pods (in any namespace) labeled
+ # gafaelfawr.lsst.io/ingress: true.
+ - from:
+ - namespaceSelector: {}
+ podSelector:
+ matchLabels:
+ gafaelfawr.lsst.io/ingress: "true"
+ ports:
+ - protocol: "TCP"
+ port: 8080
diff --git a/applications/ook/templates/service.yaml b/applications/ook/templates/service.yaml
new file mode 100644
index 0000000000..94e4fd5aed
--- /dev/null
+++ b/applications/ook/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "ook.fullname" . }}
+ labels:
+ {{- include "ook.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "ook.selectorLabels" . | nindent 4 }}
diff --git a/applications/ook/templates/serviceaccount.yaml b/applications/ook/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..47ed6cc775
--- /dev/null
+++ b/applications/ook/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "ook.serviceAccountName" . }}
+ labels:
+ {{- include "ook.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/applications/ook/templates/tests/test-connection.yaml b/applications/ook/templates/tests/test-connection.yaml
new file mode 100644
index 0000000000..b701b729d5
--- /dev/null
+++ b/applications/ook/templates/tests/test-connection.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: "{{ include "ook.fullname" . }}-test-connection"
+ labels:
+ {{- include "ook.labels" . | nindent 4 }}
+ annotations:
+ "helm.sh/hook": test
+spec:
+ containers:
+ - name: wget
+ image: busybox
+ command: ['wget']
+ args: ['{{ include "ook.fullname" . }}:{{ .Values.service.port }}']
+ restartPolicy: Never
diff --git a/applications/ook/templates/vaultsecret.yaml b/applications/ook/templates/vaultsecret.yaml
new file mode 100644
index 0000000000..9d6d470429
--- /dev/null
+++ b/applications/ook/templates/vaultsecret.yaml
@@ -0,0 +1,9 @@
+apiVersion: ricoberger.de/v1alpha1
+kind: VaultSecret
+metadata:
+ name: {{ include "ook.fullname" . }}
+ labels:
+ {{- include "ook.labels" . | nindent 4 }}
+spec:
+ path: "{{ .Values.global.vaultSecretsPathPrefix }}/ook"
+ type: Opaque
diff --git a/applications/ook/values-roundtable-dev.yaml b/applications/ook/values-roundtable-dev.yaml
new file mode 100644
index 0000000000..91a3f6a1c6
--- /dev/null
+++ b/applications/ook/values-roundtable-dev.yaml
@@ -0,0 +1,5 @@
+image:
+ pullPolicy: Always
+
+config:
+ logLevel: "DEBUG"
diff --git a/applications/ook/values.yaml b/applications/ook/values.yaml
new file mode 100644
index 0000000000..f172712030
--- /dev/null
+++ b/applications/ook/values.yaml
@@ -0,0 +1,103 @@
+# Default values for ook.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+#
+# Global parameters will be set by parameters injected by Argo CD and should
+# not be set in the individual environment values files.
+global:
+ # -- Base URL for the environment
+ # @default -- Set by Argo CD
+ baseUrl: ""
+
+ # -- Host name for ingress
+ # @default -- Set by Argo CD
+ host: ""
+
+# -- Number of API pods to run
+replicaCount: 1
+
+image:
+ # -- Ook image repository
+ repository: ghcr.io/lsst-sqre/ook
+
+ # -- Image pull policy
+ pullPolicy: IfNotPresent
+
+ # -- Tag of the image
+ # @default -- The appVersion of the chart
+ tag: ""
+
+# -- Secret names to use for all Docker pulls
+imagePullSecrets: []
+
+# -- Override the base name for resources
+nameOverride: ""
+
+# -- Override the full name for resources (includes the release name)
+fullnameOverride: ""
+
+serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+
+ # -- Annotations to add to the service account
+ annotations: {}
+
+ # The name of the service account to use.
+ # @default -- Generated using the fullname template
+ name: ""
+
+# -- Annotations for API and worker pods
+podAnnotations: {}
+
+service:
+ # -- Type of service to create
+ type: ClusterIP
+
+ # -- Port of the service to create and map to the ingress
+ port: 80
+
+ingress:
+ # -- Additional annotations to add to the ingress
+ annotations: {}
+
+ # -- Path prefix where Ook is hosted
+ path: "/ook"
+
+resources:
+ {}
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+config:
+ # -- Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
+ logLevel: "INFO"
+
+ # -- Cluster URL for the Confluent Schema Registry
+ registryUrl: "http://sasquatch-schema-registry.sasquatch:8081"
+
+ # -- Schema subject suffix. Should be empty for production but can be set
+ # to a value to create unique subjects in the Confluent Schema Registry
+ # for testing.
+ subjectSuffix: ""
+
+ # -- Schema subject compatibility.
+ subjectCompatibility: "FORWARD"
+
+ topics:
+ # -- Kafka topic name for ingest events
+ ingest: "lsst.square-events.ook.ingest"
diff --git a/applications/plot-navigator/README.md b/applications/plot-navigator/README.md
index 6ca85a93d7..b04d84a00a 100644
--- a/applications/plot-navigator/README.md
+++ b/applications/plot-navigator/README.md
@@ -19,4 +19,4 @@ Panel-based plot viewer
| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
| image.repository | string | `"ghcr.io/lsst-dm/pipetask-plot-navigator"` | plot-navigator image to use |
| image.tag | string | `""` | |
-| ingress.annotations | object | `{}` | Additional annotations to add to the ingress |
+| ingress.annotations | object | `{}` | Additional annotations to add to the ingress |
\ No newline at end of file
diff --git a/applications/portal/Chart.yaml b/applications/portal/Chart.yaml
index 95a12c7dea..e64be482b1 100644
--- a/applications/portal/Chart.yaml
+++ b/applications/portal/Chart.yaml
@@ -5,11 +5,11 @@ description: Rubin Science Platform Portal Aspect
sources:
- https://github.com/lsst/suit
- https://github.com/Caltech-IPAC/firefly
-appVersion: "suit-2023.1.3"
+appVersion: "suit-2023.1.5"
dependencies:
- name: redis
- version: 1.0.5
+ version: 1.0.6
repository: https://lsst-sqre.github.io/charts/
annotations:
diff --git a/applications/portal/README.md b/applications/portal/README.md
index ba46958eba..cfaf6957c0 100644
--- a/applications/portal/README.md
+++ b/applications/portal/README.md
@@ -15,6 +15,7 @@ Rubin Science Platform Portal Aspect
| config.cleanupInterval | string | `"36h"` | How long results should be retained before being deleted |
| config.debug | string | `"FALSE"` | Set to `TRUE` to enable service debugging |
| config.hipsUrl | string | `/api/hips/images/color_gri` in the local Science Platform | URL for default HiPS service |
+| config.ssotap | string | `""` | Endpoint under `/api/` for the DP0.3 SSO TAP service on the instance, if present |
| config.visualizeFitsSearchPath | string | `"/datasets"` | Search path for FITS files |
| config.volumes.configHostPath | string | Use an `emptyDir` | hostPath to mount as configuration. Set either this or `configNfs`, not both. |
| config.volumes.configNfs | object | Use an `emptyDir` | NFS information for a configuration. If set, must have keys for path and server. Set either this or `configHostPath`, not both. |
@@ -42,4 +43,4 @@ Rubin Science Platform Portal Aspect
| replicaCount | int | `1` | Number of pods to start |
| resources | object | `{"limits":{"cpu":2,"memory":"6Gi"}}` | Resource limits and requests. The Portal will use (by default) 93% of container RAM. This is a smallish Portal; tweak it as you need to in instance definitions in Phalanx. |
| securityContext | object | `{}` | Security context for the Portal pod |
-| tolerations | list | `[]` | Tolerations for the Portal pod |
+| tolerations | list | `[]` | Tolerations for the Portal pod |
\ No newline at end of file
diff --git a/applications/portal/secrets.yaml b/applications/portal/secrets.yaml
new file mode 100644
index 0000000000..24576c9bcf
--- /dev/null
+++ b/applications/portal/secrets.yaml
@@ -0,0 +1,4 @@
+"ADMIN_PASSWORD":
+ description: "Password used for authentication to internal Redis."
+ generate:
+ type: password
diff --git a/applications/portal/templates/deployment.yaml b/applications/portal/templates/deployment.yaml
index 676a0b74c8..5d48a8cab7 100644
--- a/applications/portal/templates/deployment.yaml
+++ b/applications/portal/templates/deployment.yaml
@@ -61,8 +61,9 @@ spec:
},
"tap" : {
"additional": {
- "services": [ {
- "label": "LSST RSP",
+ "services": [
+ {
+ "label": "LSST DP0.2 DC2",
"value": "{{ .Values.global.baseUrl }}/api/tap",
{{- if .Values.config.hipsUrl }}
"hipsUrl": "{{ .Values.config.hipsUrl }}",
@@ -71,7 +72,18 @@ spec:
{{- end }}
"centerWP": "62;-37;EQ_J2000",
"fovDeg": 10
- } ]
+ }
+ {{- if .Values.config.ssotap }}
+ ,
+ {
+ "label": "LSST DP0.3 SSO",
+ "value": "{{ .Values.global.baseUrl }}/api/{{ .Values.config.ssotap}}",
+ "hipsUrl": "{{ .Values.global.baseUrl }}/api/hips/images/color_gri",
+ "centerWP": "0;0;ECL",
+ "fovDeg": 10
+ }
+ {{- end }}
+ ]
}
},
"hips": {
diff --git a/applications/portal/values-idfdev.yaml b/applications/portal/values-idfdev.yaml
index b8d18401c0..2b55535762 100644
--- a/applications/portal/values-idfdev.yaml
+++ b/applications/portal/values-idfdev.yaml
@@ -5,6 +5,7 @@ config:
workareaNfs:
path: "/share1/home/firefly/shared-workarea"
server: "10.87.86.26"
+ ssotap: "ssotap"
resources:
limits:
diff --git a/applications/portal/values-idfint.yaml b/applications/portal/values-idfint.yaml
index bbff39a615..0d736a812a 100644
--- a/applications/portal/values-idfint.yaml
+++ b/applications/portal/values-idfint.yaml
@@ -5,6 +5,7 @@ config:
workareaNfs:
path: "/share1/home/firefly/shared-workarea"
server: "10.22.240.130"
+ ssotap: "ssotap"
resources:
limits:
diff --git a/applications/portal/values-idfprod.yaml b/applications/portal/values-idfprod.yaml
index d3325ec38f..2abcb44575 100644
--- a/applications/portal/values-idfprod.yaml
+++ b/applications/portal/values-idfprod.yaml
@@ -5,6 +5,7 @@ config:
workareaNfs:
path: "/share1/home/firefly/shared-workarea"
server: "10.13.105.122"
+ ssotap: "ssotap"
resources:
limits:
diff --git a/applications/portal/values.yaml b/applications/portal/values.yaml
index a369100e8c..ba77eac515 100644
--- a/applications/portal/values.yaml
+++ b/applications/portal/values.yaml
@@ -61,6 +61,9 @@ config:
# -- Search path for FITS files
visualizeFitsSearchPath: "/datasets"
+ # -- Endpoint under `/api/` for the DP0.3 SSO TAP service on the instance, if present
+ ssotap: ""
+
volumes:
# -- hostPath to mount as a shared work area. Set either this or
# `workareaNfs`, not both.
diff --git a/applications/postgres/README.md b/applications/postgres/README.md
index 25d2e60e20..cfd13e68d8 100644
--- a/applications/postgres/README.md
+++ b/applications/postgres/README.md
@@ -17,4 +17,4 @@ Postgres RDBMS for LSP
| image.tag | string | The appVersion of the chart | Tag of postgres image to use |
| postgresStorageClass | string | `"standard"` | Storage class for postgres volume. Set to appropriate value for your deployment: at GKE, "standard" (if you want SSD, "premium-rwo", but if you want a good database maybe it's better to use a cloud database?), on Rubin Observatory Rancher, "rook-ceph-block", elsewhere probably "standard" |
| postgresVolumeSize | string | `"1Gi"` | Volume size for postgres. It can generally be very small |
-| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. |
+| volumeName | string | `""` | Volume name for postgres, if you use an existing volume that isn't automatically created from the PVC by the storage driver. |
\ No newline at end of file
diff --git a/applications/postgres/secrets.yaml b/applications/postgres/secrets.yaml
new file mode 100644
index 0000000000..5e03d36d1a
--- /dev/null
+++ b/applications/postgres/secrets.yaml
@@ -0,0 +1,45 @@
+exposurelog_password:
+ description: "Password for the exposurelog database."
+ if: exposurelog_db
+ copy:
+ application: exposurelog
+ key: exposurelog_password
+gafaelfawr_password:
+ description: "Password for the Gafaelfawr database."
+ if: gafaelfawr_db
+ copy:
+ application: gafaelfawr
+ key: database-password
+jupyterhub_password:
+ description: "Password for the Nublado v2 JupyterHub session database."
+ if: jupyterhub_db
+ copy:
+ application: nublado2
+ key: hub_db_password
+lovelog_password:
+ description: "Password for the lovelog database."
+ if: lovelog_db
+ generate:
+ type: password
+narrativelog_password:
+ description: "Password for the narrativelog database."
+ if: narrativelog_db
+ copy:
+ application: narrativelog
+ key: narrativelog_password
+nublado3_password:
+ description: "Password for the Nublado v3 JupyterHub session database."
+ if: nublado3_db
+ copy:
+ application: nublado
+ key: hub_db_password
+root_password:
+ description: "Administrator password for the whole PostgreSQL installation."
+ generate:
+ type: password
+timessquare_password:
+ description: "Password for the times-square database."
+ if: timessquare_db
+ copy:
+ application: times-square
+ key: TS_DATABASE_PASSWORD
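To spell out the conventions in this file: `generate` creates a fresh value, `copy` reuses another application's secret key, and `if` makes the entry conditional on a key in the postgres values (the same keys that create the databases). So a hypothetical environment that declares `timessquare_db` both provisions the database and satisfies the `if: timessquare_db` condition above, triggering the copy of `TS_DATABASE_PASSWORD` from times-square:

```yaml
# Hypothetical environment values for the postgres application.
timessquare_db:
  user: "timessquare"
  db: "timessquare"
```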
diff --git a/applications/postgres/templates/deployment.yaml b/applications/postgres/templates/deployment.yaml
index ebde275f9f..c04846f7b9 100644
--- a/applications/postgres/templates/deployment.yaml
+++ b/applications/postgres/templates/deployment.yaml
@@ -99,6 +99,17 @@ spec:
name: "postgres"
key: "gafaelfawr_password"
{{- end }}
+ {{- with .Values.timessquare_db }}
+ - name: "VRO_DB_TIMESSQUARE_USER"
+ value: {{ .user | quote }}
+ - name: "VRO_DB_TIMESSQUARE_DB"
+ value: {{ .db | quote }}
+ - name: "VRO_DB_TIMESSQUARE_PASSWORD"
+ valueFrom:
+ secretKeyRef:
+ name: "postgres"
+ key: "timessquare_password"
+ {{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
ports:
diff --git a/applications/postgres/values-idfprod.yaml b/applications/postgres/values-idfprod.yaml
index 5a77f93b71..62d07440f7 100644
--- a/applications/postgres/values-idfprod.yaml
+++ b/applications/postgres/values-idfprod.yaml
@@ -1,3 +1,6 @@
jupyterhub_db:
user: "jovyan"
db: "jupyterhub"
+nublado3_db:
+ user: "nublado3"
+ db: "nublado3"
diff --git a/applications/postgres/values-minikube.yaml b/applications/postgres/values-minikube.yaml
index 1dc388a3ea..e048b69fac 100644
--- a/applications/postgres/values-minikube.yaml
+++ b/applications/postgres/values-minikube.yaml
@@ -1,14 +1,5 @@
debug: "true"
-jupyterhub_db:
- user: "jovyan"
- db: "jupyterhub"
-exposurelog_db:
- user: "exposurelog"
- db: "exposurelog"
gafaelfawr_db:
user: "gafaelfawr"
db: "gafaelfawr"
-narrativelog_db:
- user: "narrativelog"
- db: "narrativelog"
postgresStorageClass: "standard"
diff --git a/applications/postgres/values-usdf-tel-rsp.yaml b/applications/postgres/values-usdf-tel-rsp.yaml
new file mode 100644
index 0000000000..c7ae91cda0
--- /dev/null
+++ b/applications/postgres/values-usdf-tel-rsp.yaml
@@ -0,0 +1,8 @@
+jupyterhub_db:
+ user: 'jovyan'
+ db: 'jupyterhub'
+gafaelfawr_db:
+ user: 'gafaelfawr'
+ db: 'gafaelfawr'
+
+postgresStorageClass: 'wekafs--sdf-k8s01'
diff --git a/applications/postgres/values-usdfdev.yaml b/applications/postgres/values-usdfdev.yaml
index dbc5324ac3..05f630a0d5 100644
--- a/applications/postgres/values-usdfdev.yaml
+++ b/applications/postgres/values-usdfdev.yaml
@@ -7,5 +7,8 @@ nublado3_db:
gafaelfawr_db:
user: 'gafaelfawr'
db: 'gafaelfawr'
+timessquare_db:
+ user: "timessquare"
+ db: "timessquare"
postgresStorageClass: 'wekafs--sdf-k8s01'
diff --git a/applications/postgres/values-usdfprod.yaml b/applications/postgres/values-usdfprod.yaml
index c7ae91cda0..dbc5324ac3 100644
--- a/applications/postgres/values-usdfprod.yaml
+++ b/applications/postgres/values-usdfprod.yaml
@@ -1,6 +1,9 @@
jupyterhub_db:
user: 'jovyan'
db: 'jupyterhub'
+nublado3_db:
+ user: 'nublado3'
+ db: 'nublado3'
gafaelfawr_db:
user: 'gafaelfawr'
db: 'gafaelfawr'
diff --git a/applications/production-tools/README.md b/applications/production-tools/README.md
index cb7fa475cb..9db9624185 100644
--- a/applications/production-tools/README.md
+++ b/applications/production-tools/README.md
@@ -25,4 +25,4 @@ A collection of utility pages for monitoring data processing.
| podAnnotations | object | `{}` | Annotations for the production-tools deployment pod |
| replicaCount | int | `1` | Number of web deployment pods to start |
| resources | object | `{}` | Resource limits and requests for the production-tools deployment pod |
-| tolerations | list | `[]` | Tolerations for the production-tools deployment pod |
+| tolerations | list | `[]` | Tolerations for the production-tools deployment pod |
\ No newline at end of file
diff --git a/applications/sasquatch/Chart.yaml b/applications/sasquatch/Chart.yaml
index 2e39bd5300..de02248006 100644
--- a/applications/sasquatch/Chart.yaml
+++ b/applications/sasquatch/Chart.yaml
@@ -11,16 +11,32 @@ dependencies:
version: 2.1.0
repository: https://lsst-sqre.github.io/charts/
- name: influxdb
+ alias: influxdb
condition: influxdb.enabled
- version: 4.12.1
+ version: 4.12.4
+ repository: https://helm.influxdata.com/
+ - name: influxdb
+ alias: influxdb-staging
+ condition: influxdb-staging.enabled
+ version: 4.12.4
+ repository: https://helm.influxdata.com/
+ - name: influxdb
+ alias: source-influxdb
+ condition: source-influxdb.enabled
+ version: 4.12.4
repository: https://helm.influxdata.com/
- name: influxdb2
condition: influxdb2.enabled
version: 2.1.1
repository: https://helm.influxdata.com/
- name: kafka-connect-manager
+ alias: kafka-connect-manager
condition: kafka-connect-manager.enabled
version: 1.0.0
+ - name: kafka-connect-manager
+ alias: source-kafka-connect-manager
+ condition: source-kafka-connect-manager.enabled
+ version: 1.0.0
- name: chronograf
condition: chronograf.enabled
version: 1.2.5
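The `alias` fields let the same subchart be instantiated several times from one parent chart; each instance is gated by its own condition and configured under its own top-level values key. A minimal sketch of a hypothetical environment values file turning on the staging instance:

```yaml
# Hypothetical environment values for sasquatch.
influxdb-staging:
  enabled: true
  persistence:
    size: 1Ti
```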
diff --git a/applications/sasquatch/README.md b/applications/sasquatch/README.md
index 107b2cfdde..cf927ae99c 100644
--- a/applications/sasquatch/README.md
+++ b/applications/sasquatch/README.md
@@ -6,6 +6,9 @@ Rubin Observatory's telemetry service.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
+| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
+| global.host | string | Set by Argo CD | Host name for ingress |
+| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
| bucketmapper.image | object | `{"repository":"ghcr.io/lsst-sqre/rubin-influx-tools","tag":"0.1.23"}` | image for monitoring-related cronjobs |
| bucketmapper.image.repository | string | `"ghcr.io/lsst-sqre/rubin-influx-tools"` | repository for rubin-influx-tools |
| bucketmapper.image.tag | string | `"0.1.23"` | tag for rubin-influx-tools |
@@ -16,12 +19,21 @@ Rubin Observatory's telemetry service.
| chronograf.ingress | object | disabled | Chronograf ingress configuration. |
| chronograf.persistence | object | `{"enabled":true,"size":"100Gi"}` | Chronograf data persistence configuration. |
| chronograf.resources.limits.cpu | int | `4` | |
-| chronograf.resources.limits.memory | string | `"16Gi"` | |
+| chronograf.resources.limits.memory | string | `"64Gi"` | |
| chronograf.resources.requests.cpu | int | `1` | |
-| chronograf.resources.requests.memory | string | `"1Gi"` | |
-| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
-| global.host | string | Set by Argo CD | Host name for ingress |
-| global.vaultSecretsPath | string | Set by Argo CD | Base path for Vault secrets |
+| chronograf.resources.requests.memory | string | `"4Gi"` | |
+| influxdb-staging.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
+| influxdb-staging.enabled | bool | `false` | Enable InfluxDB staging deployment. |
+| influxdb-staging.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. |
+| influxdb-staging.ingress | object | disabled | InfluxDB ingress configuration. |
+| influxdb-staging.initScripts.enabled | bool | `false` | Enable InfluxDB custom initialization script. |
+| influxdb-staging.persistence.enabled | bool | `true` | Enable persistent volume claim. By default storageClass is undefined choosing the default provisioner (standard on GKE). |
+| influxdb-staging.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments |
+| influxdb-staging.resources.limits.cpu | int | `8` | |
+| influxdb-staging.resources.limits.memory | string | `"96Gi"` | |
+| influxdb-staging.resources.requests.cpu | int | `8` | |
+| influxdb-staging.resources.requests.memory | string | `"96Gi"` | |
+| influxdb-staging.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxdb-user and influxdb-password keys from secret. |
| influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
| influxdb.enabled | bool | `true` | Enable InfluxDB. |
| influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. |
@@ -31,8 +43,8 @@ Rubin Observatory's telemetry service.
| influxdb.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments |
| influxdb.resources.limits.cpu | int | `8` | |
| influxdb.resources.limits.memory | string | `"96Gi"` | |
-| influxdb.resources.requests.cpu | int | `1` | |
-| influxdb.resources.requests.memory | string | `"1Gi"` | |
+| influxdb.resources.requests.cpu | int | `8` | |
+| influxdb.resources.requests.memory | string | `"96Gi"` | |
| influxdb.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxb-user and influxdb-password keys from secret. |
| influxdb2.adminUser.bucket | string | `"default"` | Admin default bucket. |
| influxdb2.adminUser.existingSecret | string | `"sasquatch"` | Get admin-password/admin-token keys from secret. |
@@ -46,6 +58,7 @@ Rubin Observatory's telemetry service.
| influxdb2.env[2].value | string | `"true"` | |
| influxdb2.env[3].name | string | `"INFLUXD_LOG_LEVEL"` | |
| influxdb2.env[3].value | string | `"debug"` | |
+| influxdb2.image.tag | string | `"2.7.1-alpine"` | |
| influxdb2.ingress.annotations."nginx.ingress.kubernetes.io/rewrite-target" | string | `"/api/v2/$2"` | |
| influxdb2.ingress.className | string | `"nginx"` | |
| influxdb2.ingress.enabled | bool | `false` | InfluxDB2 ingress configuration |
@@ -57,8 +70,8 @@ Rubin Observatory's telemetry service.
| influxdb2.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments. |
| influxdb2.resources.limits.cpu | int | `8` | |
| influxdb2.resources.limits.memory | string | `"96Gi"` | |
-| influxdb2.resources.requests.cpu | int | `1` | |
-| influxdb2.resources.requests.memory | string | `"1Gi"` | |
+| influxdb2.resources.requests.cpu | int | `8` | |
+| influxdb2.resources.requests.memory | string | `"16Gi"` | |
| kafdrop.enabled | bool | `true` | Enable Kafdrop. |
| kafka-connect-manager | object | `{}` | Override kafka-connect-manager configuration. |
| kapacitor.enabled | bool | `true` | Enable Kapacitor. |
@@ -72,7 +85,266 @@ Rubin Observatory's telemetry service.
| kapacitor.resources.requests.cpu | int | `1` | |
| kapacitor.resources.requests.memory | string | `"1Gi"` | |
| rest-proxy | object | `{"enabled":false}` | Override rest-proxy configuration. |
+| source-influxdb.config | object | `{"continuous_queries":{"enabled":false},"coordinator":{"log-queries-after":"15s","max-concurrent-queries":0,"query-timeout":"0s","write-timeout":"1h"},"data":{"cache-max-memory-size":0,"trace-logging-enabled":true,"wal-fsync-delay":"100ms"},"http":{"auth-enabled":true,"enabled":true,"flux-enabled":true,"max-row-limit":0},"logging":{"level":"debug"}}` | Override InfluxDB configuration. See https://docs.influxdata.com/influxdb/v1.8/administration/config |
+| source-influxdb.enabled | bool | `false` | Enable InfluxDB source deployment. |
+| source-influxdb.image | object | `{"tag":"1.8.10"}` | InfluxDB image tag. |
+| source-influxdb.ingress | object | disabled | InfluxDB ingress configuration. |
+| source-influxdb.initScripts.enabled | bool | `false` | Enable InfluxDB custom initialization script. |
+| source-influxdb.persistence.enabled | bool | `true` | Enable persistent volume claim. By default storageClass is undefined choosing the default provisioner (standard on GKE). |
+| source-influxdb.persistence.size | string | `"1Ti"` | Persistent volume size. @default 1Ti for teststand deployments |
+| source-influxdb.resources.limits.cpu | int | `8` | |
+| source-influxdb.resources.limits.memory | string | `"96Gi"` | |
+| source-influxdb.resources.requests.cpu | int | `8` | |
+| source-influxdb.resources.requests.memory | string | `"96Gi"` | |
+| source-influxdb.setDefaultUser | object | `{"enabled":true,"user":{"existingSecret":"sasquatch"}}` | Default InfluxDB user, use influxdb-user and influxdb-password keys from secret. |
+| source-kafka-connect-manager | object | `{"enabled":false,"env":{"kafkaConnectUrl":"http://sasquatch-source-connect-api.sasquatch:8083"}}` | Override source-kafka-connect-manager configuration. |
| squareEvents.enabled | bool | `false` | Enable the Square Events subchart with topic and user configurations. |
| strimzi-kafka | object | `{}` | Override strimzi-kafka configuration. |
| strimzi-registry-operator | object | `{"clusterName":"sasquatch","clusterNamespace":"sasquatch","operatorNamespace":"sasquatch"}` | strimzi-registry-operator configuration. |
| telegraf-kafka-consumer | object | `{"enabled":false}` | Override telegraf-kafka-consumer configuration. |
+| kafdrop.affinity | object | `{}` | Affinity configuration. |
+| kafdrop.cmdArgs | string | `"--message.format=AVRO --topic.deleteEnabled=false --topic.createEnabled=false"` | Command line arguments to Kafdrop. |
+| kafdrop.existingSecret | string | `""` | Existing k8s secret used to set Kafdrop environment variables. Set SCHEMAREGISTRY_AUTH for basic auth credentials in the form username:password |
+| kafdrop.host | string | Defaults to localhost. | The hostname to report for the RMI registry (used for JMX). |
+| kafdrop.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
+| kafdrop.image.repository | string | `"obsidiandynamics/kafdrop"` | Kafdrop Docker image repository. |
+| kafdrop.image.tag | string | `"3.31.0"` | Kafdrop image version. |
+| kafdrop.ingress.annotations | object | `{}` | Ingress annotations. |
+| kafdrop.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. |
+| kafdrop.ingress.hostname | string | `""` | Ingress hostname. |
+| kafdrop.ingress.path | string | `"/kafdrop"` | Ingress path. |
+| kafdrop.jmx.port | int | Defaults to 8686 | Port to use for JMX. If unspecified, JMX will not be exposed. |
+| kafdrop.jvm.opts | string | `""` | JVM options. |
+| kafdrop.kafka.broker | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Bootstrap list of Kafka host/port pairs |
+| kafdrop.nodeSelector | object | `{}` | Node selector configuration. |
+| kafdrop.podAnnotations | object | `{}` | Pod annotations. |
+| kafdrop.replicaCount | int | `1` | Number of kafdrop pods to run in the deployment. |
+| kafdrop.resources.limits.cpu | int | `2` | |
+| kafdrop.resources.limits.memory | string | `"4Gi"` | |
+| kafdrop.resources.requests.cpu | int | `1` | |
+| kafdrop.resources.requests.memory | string | `"200Mi"` | |
+| kafdrop.schemaregistry | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | The endpoint of Schema Registry |
+| kafdrop.server.port | int | Defaults to 9000. | The web server port to listen on. |
+| kafdrop.server.servlet | object | Defaults to /. | The context path to serve requests on (must end with a /). |
+| kafdrop.service.annotations | object | `{}` | Service annotations |
+| kafdrop.service.port | int | `9000` | Service port |
+| kafdrop.tolerations | list | `[]` | Tolerations configuration. |
+| kafka-connect-manager.enabled | bool | `true` | Enable Kafka Connect Manager. |
+| kafka-connect-manager.env.kafkaBrokerUrl | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka broker URL. |
+| kafka-connect-manager.env.kafkaConnectUrl | string | `"http://sasquatch-connect-api.sasquatch:8083"` | Kafka Connect URL. |
+| kafka-connect-manager.env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. |
+| kafka-connect-manager.image.pullPolicy | string | `"IfNotPresent"` | |
+| kafka-connect-manager.image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | |
+| kafka-connect-manager.image.tag | string | `"1.3.1"` | |
+| kafka-connect-manager.influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. |
+| kafka-connect-manager.influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. |
+| kafka-connect-manager.influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. |
+| kafka-connect-manager.influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documentation for details. |
+| kafka-connect-manager.influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. |
+| kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. |
+| kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. |
+| kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. |
+| kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. |
+| kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. |
+| kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. |
+| kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. |
+| kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. |
+| kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. |
+| kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. |
+| kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maximum number of tasks to run the connector. |
+| kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. |
+| kafka-connect-manager.jdbcSink.autoCreate | string | `"true"` | Whether to automatically create the destination table. |
+| kafka-connect-manager.jdbcSink.autoEvolve | string | `"false"` | Whether to automatically add columns in the table schema. |
+| kafka-connect-manager.jdbcSink.batchSize | string | `"3000"` | Specifies how many records to attempt to batch together for insertion into the destination table. |
+| kafka-connect-manager.jdbcSink.connectionUrl | string | `"jdbc:postgresql://localhost:5432/mydb"` | Database connection URL. |
+| kafka-connect-manager.jdbcSink.dbTimezone | string | `"UTC"` | Name of the JDBC timezone that should be used in the connector when inserting time-based values. |
+| kafka-connect-manager.jdbcSink.enabled | bool | `false` | Whether the JDBC Sink connector is deployed. |
+| kafka-connect-manager.jdbcSink.insertMode | string | `"insert"` | The insertion mode to use. Supported modes are: `insert`, `upsert` and `update`. |
+| kafka-connect-manager.jdbcSink.maxRetries | string | `"10"` | The maximum number of times to retry on errors before failing the task. |
+| kafka-connect-manager.jdbcSink.name | string | `"postgres-sink"` | Name of the connector to create. |
+| kafka-connect-manager.jdbcSink.retryBackoffMs | string | `"3000"` | The time in milliseconds to wait following an error before a retry attempt is made. |
+| kafka-connect-manager.jdbcSink.tableNameFormat | string | `"${topic}"` | A format string for the destination table name. |
+| kafka-connect-manager.jdbcSink.tasksMax | string | `"10"` | Number of Kafka Connect tasks. |
+| kafka-connect-manager.jdbcSink.topicRegex | string | `".*"` | Regex for selecting topics. |
+| kafka-connect-manager.s3Sink.behaviorOnNullValues | string | `"fail"` | How to handle records with a null value (for example, Kafka tombstone records). Valid options are ignore and fail. |
+| kafka-connect-manager.s3Sink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. |
+| kafka-connect-manager.s3Sink.enabled | bool | `false` | Whether the Amazon S3 Sink connector is deployed. |
+| kafka-connect-manager.s3Sink.excludedTopicRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. |
+| kafka-connect-manager.s3Sink.flushSize | string | `"1000"` | Number of records written to store before invoking file commits. |
+| kafka-connect-manager.s3Sink.locale | string | `"en-US"` | The locale to use when partitioning with TimeBasedPartitioner. |
+| kafka-connect-manager.s3Sink.name | string | `"s3-sink"` | Name of the connector to create. |
+| kafka-connect-manager.s3Sink.partitionDurationMs | string | `"3600000"` | The duration of a partition in milliseconds, used by TimeBasedPartitioner. Default is 1h for an hourly based partitioner. |
+| kafka-connect-manager.s3Sink.pathFormat | string | `"'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH"` | Pattern used to format the path in the S3 object name. |
+| kafka-connect-manager.s3Sink.rotateIntervalMs | string | `"600000"` | The time interval in milliseconds to invoke file commits. Set to 10 minutes by default. |
+| kafka-connect-manager.s3Sink.s3BucketName | string | `""` | s3 bucket name. The bucket must already exist at the s3 provider. |
+| kafka-connect-manager.s3Sink.s3PartRetries | int | `3` | Maximum number of retry attempts for failed requests. Zero means no retries. |
+| kafka-connect-manager.s3Sink.s3PartSize | int | `5242880` | The Part Size in S3 Multi-part Uploads. Valid Values: [5242880,…,2147483647] |
+| kafka-connect-manager.s3Sink.s3Region | string | `"us-east-1"` | s3 region |
+| kafka-connect-manager.s3Sink.s3RetryBackoffMs | int | `200` | How long to wait in milliseconds before attempting the first retry of a failed S3 request. |
+| kafka-connect-manager.s3Sink.s3SchemaCompatibility | string | `"NONE"` | s3 schema compatibility |
+| kafka-connect-manager.s3Sink.schemaCacheConfig | int | `5000` | The size of the schema cache used in the Avro converter. |
+| kafka-connect-manager.s3Sink.storeUrl | string | `""` | The object storage connection URL, for non-AWS s3 providers. |
+| kafka-connect-manager.s3Sink.tasksMax | int | `1` | Number of Kafka Connect tasks. |
+| kafka-connect-manager.s3Sink.timestampExtractor | string | `"Record"` | The extractor determines how to obtain a timestamp from each record. |
+| kafka-connect-manager.s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. |
+| kafka-connect-manager.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. |
+| kafka-connect-manager.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. |
+| kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. |
+| rest-proxy.affinity | object | `{}` | Affinity configuration. |
+| rest-proxy.configurationOverrides | object | `{"client.sasl.mechanism":"SCRAM-SHA-512","client.security.protocol":"SASL_PLAINTEXT"}` | Kafka REST configuration options |
+| rest-proxy.customEnv | string | `nil` | Kafka REST additional env variables |
+| rest-proxy.heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option |
+| rest-proxy.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
+| rest-proxy.image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. |
+| rest-proxy.image.tag | string | `"7.4.0"` | Kafka REST proxy image tag. |
+| rest-proxy.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. |
+| rest-proxy.ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. |
+| rest-proxy.ingress.hostname | string | `""` | Ingress hostname. |
+| rest-proxy.ingress.path | string | `"/sasquatch-rest-proxy(/|$)(.*)"` | Ingress path. |
+| rest-proxy.kafka.bootstrapServers | string | `"SASL_PLAINTEXT://sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka bootstrap servers; use the internal listener on port 9092 with a SASL connection. |
+| rest-proxy.kafka.cluster.name | string | `"sasquatch"` | Name of the Strimzi Kafka cluster. |
+| rest-proxy.kafka.topicPrefixes | string | `nil` | List of topic prefixes to use when exposing Kafka topics to the REST Proxy v2 API. |
+| rest-proxy.kafka.topics | string | `nil` | List of Kafka topics to create via Strimzi. Alternatively topics can be created using the REST Proxy v3 API. |
+| rest-proxy.nodeSelector | object | `{}` | Node selector configuration. |
+| rest-proxy.podAnnotations | object | `{}` | Pod annotations. |
+| rest-proxy.replicaCount | int | `1` | Number of Kafka REST proxy pods to run in the deployment. |
+| rest-proxy.resources.limits.cpu | int | `2` | Kafka REST proxy cpu limits |
+| rest-proxy.resources.limits.memory | string | `"4Gi"` | Kafka REST proxy memory limits |
+| rest-proxy.resources.requests.cpu | int | `1` | Kafka REST proxy cpu requests |
+| rest-proxy.resources.requests.memory | string | `"200Mi"` | Kafka REST proxy memory requests |
+| rest-proxy.schemaregistry.url | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL |
+| rest-proxy.service.port | int | `8082` | Kafka REST proxy service port |
+| rest-proxy.tolerations | list | `[]` | Tolerations configuration. |
+| source-kafka-connect-manager.enabled | bool | `true` | Enable Kafka Connect Manager. |
+| source-kafka-connect-manager.env.kafkaBrokerUrl | string | `"sasquatch-kafka-bootstrap.sasquatch:9092"` | Kafka broker URL. |
+| source-kafka-connect-manager.env.kafkaConnectUrl | string | `"http://sasquatch-connect-api.sasquatch:8083"` | Kafka Connect URL. |
+| source-kafka-connect-manager.env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. |
+| source-kafka-connect-manager.image.pullPolicy | string | `"IfNotPresent"` | |
+| source-kafka-connect-manager.image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | |
+| source-kafka-connect-manager.image.tag | string | `"1.3.1"` | |
+| source-kafka-connect-manager.influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. |
+| source-kafka-connect-manager.influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. |
+| source-kafka-connect-manager.influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. |
+| source-kafka-connect-manager.influxdbSink.connectInfluxErrorPolicy | string | `"NOOP"` | Error policy, see connector documentation for details. |
+| source-kafka-connect-manager.influxdbSink.connectInfluxMaxRetries | string | `"10"` | The maximum number of times a message is retried. |
+| source-kafka-connect-manager.influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. |
+| source-kafka-connect-manager.influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. |
+| source-kafka-connect-manager.influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. |
+| source-kafka-connect-manager.influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. |
+| source-kafka-connect-manager.influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. |
+| source-kafka-connect-manager.influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. |
+| source-kafka-connect-manager.influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. |
+| source-kafka-connect-manager.influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. |
+| source-kafka-connect-manager.influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. |
+| source-kafka-connect-manager.influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. |
+| source-kafka-connect-manager.influxdbSink.tasksMax | int | `1` | Maximum number of tasks to run the connector. |
+| source-kafka-connect-manager.influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. |
+| source-kafka-connect-manager.jdbcSink.autoCreate | string | `"true"` | Whether to automatically create the destination table. |
+| source-kafka-connect-manager.jdbcSink.autoEvolve | string | `"false"` | Whether to automatically add columns in the table schema. |
+| source-kafka-connect-manager.jdbcSink.batchSize | string | `"3000"` | Specifies how many records to attempt to batch together for insertion into the destination table. |
+| source-kafka-connect-manager.jdbcSink.connectionUrl | string | `"jdbc:postgresql://localhost:5432/mydb"` | Database connection URL. |
+| source-kafka-connect-manager.jdbcSink.dbTimezone | string | `"UTC"` | Name of the JDBC timezone that should be used in the connector when inserting time-based values. |
+| source-kafka-connect-manager.jdbcSink.enabled | bool | `false` | Whether the JDBC Sink connector is deployed. |
+| source-kafka-connect-manager.jdbcSink.insertMode | string | `"insert"` | The insertion mode to use. Supported modes are: `insert`, `upsert` and `update`. |
+| source-kafka-connect-manager.jdbcSink.maxRetries | string | `"10"` | The maximum number of times to retry on errors before failing the task. |
+| source-kafka-connect-manager.jdbcSink.name | string | `"postgres-sink"` | Name of the connector to create. |
+| source-kafka-connect-manager.jdbcSink.retryBackoffMs | string | `"3000"` | The time in milliseconds to wait following an error before a retry attempt is made. |
+| source-kafka-connect-manager.jdbcSink.tableNameFormat | string | `"${topic}"` | A format string for the destination table name. |
+| source-kafka-connect-manager.jdbcSink.tasksMax | string | `"10"` | Number of Kafka Connect tasks. |
+| source-kafka-connect-manager.jdbcSink.topicRegex | string | `".*"` | Regex for selecting topics. |
+| source-kafka-connect-manager.s3Sink.behaviorOnNullValues | string | `"fail"` | How to handle records with a null value (for example, Kafka tombstone records). Valid options are ignore and fail. |
+| source-kafka-connect-manager.s3Sink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. |
+| source-kafka-connect-manager.s3Sink.enabled | bool | `false` | Whether the Amazon S3 Sink connector is deployed. |
+| source-kafka-connect-manager.s3Sink.excludedTopicRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. |
+| source-kafka-connect-manager.s3Sink.flushSize | string | `"1000"` | Number of records written to store before invoking file commits. |
+| source-kafka-connect-manager.s3Sink.locale | string | `"en-US"` | The locale to use when partitioning with TimeBasedPartitioner. |
+| source-kafka-connect-manager.s3Sink.name | string | `"s3-sink"` | Name of the connector to create. |
+| source-kafka-connect-manager.s3Sink.partitionDurationMs | string | `"3600000"` | The duration of a partition in milliseconds, used by TimeBasedPartitioner. Default is 1h for an hourly based partitioner. |
+| source-kafka-connect-manager.s3Sink.pathFormat | string | `"'year'=YYYY/'month'=MM/'day'=dd/'hour'=HH"` | Pattern used to format the path in the S3 object name. |
+| source-kafka-connect-manager.s3Sink.rotateIntervalMs | string | `"600000"` | The time interval in milliseconds to invoke file commits. Set to 10 minutes by default. |
+| source-kafka-connect-manager.s3Sink.s3BucketName | string | `""` | s3 bucket name. The bucket must already exist at the s3 provider. |
+| source-kafka-connect-manager.s3Sink.s3PartRetries | int | `3` | Maximum number of retry attempts for failed requests. Zero means no retries. |
+| source-kafka-connect-manager.s3Sink.s3PartSize | int | `5242880` | The Part Size in S3 Multi-part Uploads. Valid Values: [5242880,…,2147483647] |
+| source-kafka-connect-manager.s3Sink.s3Region | string | `"us-east-1"` | s3 region |
+| source-kafka-connect-manager.s3Sink.s3RetryBackoffMs | int | `200` | How long to wait in milliseconds before attempting the first retry of a failed S3 request. |
+| source-kafka-connect-manager.s3Sink.s3SchemaCompatibility | string | `"NONE"` | s3 schema compatibility |
+| source-kafka-connect-manager.s3Sink.schemaCacheConfig | int | `5000` | The size of the schema cache used in the Avro converter. |
+| source-kafka-connect-manager.s3Sink.storeUrl | string | `""` | The object storage connection URL, for non-AWS s3 providers. |
+| source-kafka-connect-manager.s3Sink.tasksMax | int | `1` | Number of Kafka Connect tasks. |
+| source-kafka-connect-manager.s3Sink.timestampExtractor | string | `"Record"` | The extractor determines how to obtain a timestamp from each record. |
+| source-kafka-connect-manager.s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. |
+| source-kafka-connect-manager.s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. |
+| source-kafka-connect-manager.s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. |
+| source-kafka-connect-manager.s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. |
+| square-events.cluster.name | string | `"sasquatch"` | |
+| strimzi-kafka.cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. |
+| strimzi-kafka.connect.enabled | bool | `true` | Enable Kafka Connect. |
+| strimzi-kafka.connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.35.1-kafka-3.4.0:1.2.0"` | Custom strimzi-kafka image with connector plugins used by sasquatch. |
+| strimzi-kafka.connect.replicas | int | `3` | Number of Kafka Connect replicas to run. |
+| strimzi-kafka.kafka.affinity | object | `{}` | Node affinity for Kafka broker pod assignment. |
+| strimzi-kafka.kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. |
+| strimzi-kafka.kafka.config."log.retention.hours" | int | `72` | Number of days for a topic's data to be retained. |
+| strimzi-kafka.kafka.config."message.max.bytes" | int | `10485760` | The largest record batch size allowed by Kafka. |
+| strimzi-kafka.kafka.config."offsets.retention.minutes" | int | `4320` | Number of minutes for a consumer group's offsets to be retained. |
+| strimzi-kafka.kafka.config."replica.fetch.max.bytes" | int | `10485760` | The number of bytes of messages to attempt to fetch for each partition. |
+| strimzi-kafka.kafka.config."replica.lag.time.max.ms" | int | `120000` | Replica lag time can't be smaller than request.timeout.ms configuration in kafka connect. |
+| strimzi-kafka.kafka.externalListener.bootstrap.annotations | object | `{}` | Annotations that will be added to the Ingress, Route, or Service resource. |
+| strimzi-kafka.kafka.externalListener.bootstrap.host | string | `""` | Name used for TLS hostname verification. |
+| strimzi-kafka.kafka.externalListener.bootstrap.loadBalancerIP | string | `""` | The load balancer is requested with the IP address specified in this field. This feature depends on whether the underlying cloud provider supports specifying the loadBalancerIP when a load balancer is created, and the field is ignored if the provider does not support it. Once the IP address is provisioned, this option makes it possible to pin it: we can request the same IP the next time it is provisioned, which lets us configure a DNS record associating a hostname with that pinned IP address. |
+| strimzi-kafka.kafka.externalListener.brokers | list | `[]` | Brokers configuration. host is used in the brokers' advertised listeners configuration and for TLS hostname verification. The format is a list of maps. |
+| strimzi-kafka.kafka.externalListener.tls.certIssuerName | string | `"letsencrypt-dns"` | Name of a ClusterIssuer capable of provisioning a TLS certificate for the broker. |
+| strimzi-kafka.kafka.externalListener.tls.enabled | bool | `false` | Whether TLS encryption is enabled. |
+| strimzi-kafka.kafka.listeners.external.enabled | bool | `true` | Whether external listener is enabled. |
+| strimzi-kafka.kafka.listeners.plain.enabled | bool | `true` | Whether internal plaintext listener is enabled. |
+| strimzi-kafka.kafka.listeners.tls.enabled | bool | `true` | Whether internal TLS listener is enabled. |
+| strimzi-kafka.kafka.replicas | int | `3` | Number of Kafka broker replicas to run. |
+| strimzi-kafka.kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. |
+| strimzi-kafka.kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. |
+| strimzi-kafka.kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. |
+| strimzi-kafka.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. |
+| strimzi-kafka.mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. |
+| strimzi-kafka.mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. |
+| strimzi-kafka.mirrormaker2.replication.policy.separator | string | "" | Separator used to rename topics when the DefaultReplicationPolicy is used. Defaults to "" when the IdentityReplicationPolicy is used. |
+| strimzi-kafka.mirrormaker2.source.bootstrapServer | string | `""` | Source (active) cluster to replicate from. |
+| strimzi-kafka.mirrormaker2.source.topicsPattern | string | `"registry-schemas, lsst.sal.*"` | Topic replication from the source cluster defined as a comma-separated list or regular expression pattern. |
+| strimzi-kafka.mirrormaker2.sourceConnect.enabled | bool | `false` | Whether to deploy another Connect cluster for topics replicated from the source cluster. Requires the sourceRegistry to be enabled. |
+| strimzi-kafka.mirrormaker2.sourceRegistry.enabled | bool | `false` | Whether to deploy another Schema Registry for the schemas replicated from the source cluster. |
+| strimzi-kafka.mirrormaker2.sourceRegistry.schemaTopic | string | `"source.registry-schemas"` | Name of the Schema Registry schemas topic replicated from the source cluster |
+| strimzi-kafka.registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry |
+| strimzi-kafka.superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. |
+| strimzi-kafka.users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). |
+| strimzi-kafka.users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager |
+| strimzi-kafka.users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing |
+| strimzi-kafka.users.replicator.enabled | bool | `false` | Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) |
+| strimzi-kafka.users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) |
+| strimzi-kafka.users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. |
+| strimzi-kafka.zookeeper.affinity | object | `{}` | Node affinity for Zookeeper pod assignment. |
+| strimzi-kafka.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. |
+| strimzi-kafka.zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. |
+| strimzi-kafka.zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. |
+| strimzi-kafka.zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. |
+| telegraf-kafka-consumer.affinity | object | `{}` | Affinity for pod assignment. |
+| telegraf-kafka-consumer.args | list | `[]` | Arguments passed to the Telegraf agent containers. |
+| telegraf-kafka-consumer.enabled | bool | `false` | Enable Telegraf Kafka Consumer. Note that the default configuration is meant to work with InfluxDB2. |
+| telegraf-kafka-consumer.envFromSecret | string | `""` | Name of the secret with values to be added to the environment. |
+| telegraf-kafka-consumer.env[0].name | string | `"TELEGRAF_PASSWORD"` | |
+| telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.key | string | `"telegraf-password"` | Telegraf KafkaUser password. |
+| telegraf-kafka-consumer.env[0].valueFrom.secretKeyRef.name | string | `"sasquatch"` | |
+| telegraf-kafka-consumer.env[1].name | string | `"INFLUXDB_TOKEN"` | |
+| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.key | string | `"admin-token"` | InfluxDB admin token. |
+| telegraf-kafka-consumer.env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | |
+| telegraf-kafka-consumer.image.pullPolicy | string | IfNotPresent | Image pull policy. |
+| telegraf-kafka-consumer.image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. |
+| telegraf-kafka-consumer.image.tag | string | `"refreshregex"` | Telegraf image tag. |
+| telegraf-kafka-consumer.imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. |
+| telegraf-kafka-consumer.influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. |
+| telegraf-kafka-consumer.kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. |
+| telegraf-kafka-consumer.kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. |
+| telegraf-kafka-consumer.kafkaConsumers.test.interval | string | `"1s"` | Data collection interval for the Kafka consumer. |
+| telegraf-kafka-consumer.kafkaConsumers.test.topicRefreshInterval | string | `"60s"` | Default interval for refreshing topics to check for new or removed regexp matches |
+| telegraf-kafka-consumer.kafkaConsumers.test.topicRegexps | string | `"[ \".*Test\" ]\n"` | List of regular expressions to specify the Kafka topics consumed by this agent. |
+| telegraf-kafka-consumer.nodeSelector | object | `{}` | Node labels for pod assignment. |
+| telegraf-kafka-consumer.podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. |
+| telegraf-kafka-consumer.podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods. |
+| telegraf-kafka-consumer.resources | object | `{}` | Kubernetes resources requests and limits. |
+| telegraf-kafka-consumer.tolerations | list | `[]` | Tolerations for pod assignment. |
\ No newline at end of file
diff --git a/applications/sasquatch/charts/kafdrop/README.md b/applications/sasquatch/charts/kafdrop/README.md
index dea43e6e00..72ffb013aa 100644
--- a/applications/sasquatch/charts/kafdrop/README.md
+++ b/applications/sasquatch/charts/kafdrop/README.md
@@ -36,4 +36,4 @@ A subchart to deploy the Kafdrop UI for Sasquatch.
| server.servlet | object | Defaults to /. | The context path to serve requests on (must end with a /). |
| service.annotations | object | `{}` | Service annotations |
| service.port | int | `9000` | Service port |
-| tolerations | list | `[]` | Tolerations configuration. |
+| tolerations | list | `[]` | Tolerations configuration. |
\ No newline at end of file
diff --git a/applications/sasquatch/charts/kafka-connect-manager/README.md b/applications/sasquatch/charts/kafka-connect-manager/README.md
index dd8bbc8909..04edecb431 100644
--- a/applications/sasquatch/charts/kafka-connect-manager/README.md
+++ b/applications/sasquatch/charts/kafka-connect-manager/README.md
@@ -12,7 +12,7 @@ A subchart to deploy the Kafka connectors used by Sasquatch.
| env.kafkaUsername | string | `"kafka-connect-manager"` | Username for SASL authentication. |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"ghcr.io/lsst-sqre/kafkaconnect"` | |
-| image.tag | string | `"1.1.0"` | |
+| image.tag | string | `"1.3.1"` | |
| influxdbSink.autoUpdate | bool | `true` | If autoUpdate is enabled, check for new kafka topics. |
| influxdbSink.checkInterval | string | `"15000"` | The interval, in milliseconds, to check for new topics and update the connector. |
| influxdbSink.connectInfluxDb | string | `"efd"` | InfluxDB database to write to. |
@@ -21,11 +21,12 @@ A subchart to deploy the Kafka connectors used by Sasquatch.
| influxdbSink.connectInfluxRetryInterval | string | `"60000"` | The interval, in milliseconds, between retries. Only valid when the connectInfluxErrorPolicy is set to `RETRY`. |
| influxdbSink.connectInfluxUrl | string | `"http://sasquatch-influxdb.sasquatch:8086"` | InfluxDB URL. |
| influxdbSink.connectProgressEnabled | bool | `false` | Enables the output for how many records have been processed. |
-| influxdbSink.connectors | object | `{"test":{"enabled":false,"repairerConnector":false,"tags":"","topicsRegex":".*Test"}}` | Connector instances to deploy. |
+| influxdbSink.connectors | object | `{"test":{"enabled":false,"removePrefix":"source.","repairerConnector":false,"tags":"","topicsRegex":"source.lsst.sal.Test"}}` | Connector instances to deploy. |
| influxdbSink.connectors.test.enabled | bool | `false` | Whether this connector instance is deployed. |
+| influxdbSink.connectors.test.removePrefix | string | `"source."` | Remove prefix from topic name. |
| influxdbSink.connectors.test.repairerConnector | bool | `false` | Whether to deploy a repairer connector in addition to the original connector instance. |
| influxdbSink.connectors.test.tags | string | `""` | Fields in the Avro payload that are treated as InfluxDB tags. |
-| influxdbSink.connectors.test.topicsRegex | string | `".*Test"` | Regex to select topics from Kafka. |
+| influxdbSink.connectors.test.topicsRegex | string | `"source.lsst.sal.Test"` | Regex to select topics from Kafka. |
| influxdbSink.excludedTopicsRegex | string | `""` | Regex to exclude topics from the list of selected topics from Kafka. |
| influxdbSink.tasksMax | int | `1` | Maximum number of tasks to run the connector. |
| influxdbSink.timestamp | string | `"private_efdStamp"` | Timestamp field to be used as the InfluxDB time, if not specified use `sys_time()`. |
@@ -65,4 +66,4 @@ A subchart to deploy the Kafka connectors used by Sasquatch.
| s3Sink.timestampField | string | `""` | The record field to be used as timestamp by the timestamp extractor. Only applies if timestampExtractor is set to RecordField. |
| s3Sink.timezone | string | `"UTC"` | The timezone to use when partitioning with TimeBasedPartitioner. |
| s3Sink.topicsDir | string | `"topics"` | Top level directory to store the data ingested from Kafka. |
-| s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. |
+| s3Sink.topicsRegex | string | `".*"` | Regex to select topics from Kafka. |
\ No newline at end of file
diff --git a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml
index e176644d26..5aa6a34583 100644
--- a/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml
+++ b/applications/sasquatch/charts/kafka-connect-manager/templates/influxdb_sink.yaml
@@ -40,7 +40,11 @@ spec:
- name: KAFKA_CONNECT_NAME
value: influxdb-sink-{{ $key }}
- name: KAFKA_CONNECT_INFLUXDB_URL
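+ # Per-connector settings (connectInfluxUrl here, connectInfluxDb and
+ # tasksMax below) override the chart-wide influxdbSink defaults when set.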
+ {{- if $value.connectInfluxUrl }}
+ value: {{ $value.connectInfluxUrl | quote }}
+ {{- else }}
value: {{ $.Values.influxdbSink.connectInfluxUrl | quote }}
+ {{- end }}
- name: KAFKA_CONNECT_DATABASE
{{- if $value.connectInfluxDb }}
value: {{ $value.connectInfluxDb | quote }}
@@ -58,7 +62,11 @@ spec:
name: sasquatch
key: influxdb-password
- name: KAFKA_CONNECT_TASKS_MAX
+ {{- if $value.tasksMax }}
+ value: {{ $value.tasksMax | quote }}
+ {{- else }}
value: {{ $.Values.influxdbSink.tasksMax | quote }}
+ {{- end }}
- name: KAFKA_CONNECT_TOPIC_REGEX
value: {{ $value.topicsRegex | quote }}
- name: KAFKA_CONNECT_CHECK_INTERVAL
@@ -75,6 +83,10 @@ spec:
- name: KAFKA_CONNECT_INFLUXDB_TAGS
value: {{ $value.tags | quote }}
{{- end }}
+ {{- if $value.removePrefix }}
+ - name: KAFKA_CONNECT_INFLUXDB_REMOVE_PREFIX
+ value: {{ $value.removePrefix | quote }}
+ {{- end }}
- name: KAFKA_CONNECT_ERROR_POLICY
value: {{ $.Values.influxdbSink.connectInfluxErrorPolicy | quote }}
- name: KAFKA_CONNECT_MAX_RETRIES
diff --git a/applications/sasquatch/charts/kafka-connect-manager/values.yaml b/applications/sasquatch/charts/kafka-connect-manager/values.yaml
index 7c329843b6..e508350f03 100644
--- a/applications/sasquatch/charts/kafka-connect-manager/values.yaml
+++ b/applications/sasquatch/charts/kafka-connect-manager/values.yaml
@@ -6,7 +6,7 @@ enabled: true
image:
repository: ghcr.io/lsst-sqre/kafkaconnect
- tag: 1.1.0
+ tag: 1.3.1
pullPolicy: IfNotPresent
influxdbSink:
@@ -40,9 +40,11 @@ influxdbSink:
# -- Whether to deploy a repairer connector in addition to the original connector instance.
repairerConnector: false
# -- Regex to select topics from Kafka.
- topicsRegex: ".*Test"
+ topicsRegex: "source.lsst.sal.Test"
# -- Fields in the Avro payload that are treated as InfluxDB tags.
tags: ""
+ # -- Remove prefix from topic name.
+ removePrefix: "source."
# The s3Sink connector assumes Parquet format with Snappy compression
# and a time based partitioner.
diff --git a/applications/sasquatch/charts/rest-proxy/README.md b/applications/sasquatch/charts/rest-proxy/README.md
index a23b106a54..b72e90981b 100644
--- a/applications/sasquatch/charts/rest-proxy/README.md
+++ b/applications/sasquatch/charts/rest-proxy/README.md
@@ -16,7 +16,7 @@ A subchart to deploy Confluent REST proxy for Sasquatch.
| heapOptions | string | `"-Xms512M -Xmx512M"` | Kafka REST proxy JVM Heap Option |
| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. |
| image.repository | string | `"confluentinc/cp-kafka-rest"` | Kafka REST proxy image repository. |
-| image.tag | string | `"7.3.3"` | Kafka REST proxy image tag. |
+| image.tag | string | `"7.4.0"` | Kafka REST proxy image tag. |
| ingress.annotations | object | `{"nginx.ingress.kubernetes.io/rewrite-target":"/$2"}` | Ingress annotations. |
| ingress.enabled | bool | `false` | Enable Ingress. This should be true to create an ingress rule for the application. |
| ingress.hostname | string | `""` | Ingress hostname. |
@@ -34,4 +34,4 @@ A subchart to deploy Confluent REST proxy for Sasquatch.
| resources.requests.memory | string | `"200Mi"` | Kafka REST proxy memory requests |
| schemaregistry.url | string | `"http://sasquatch-schema-registry.sasquatch:8081"` | Schema registry URL |
| service.port | int | `8082` | Kafka REST proxy service port |
-| tolerations | list | `[]` | Tolerations configuration. |
+| tolerations | list | `[]` | Tolerations configuration. |
\ No newline at end of file
diff --git a/applications/sasquatch/charts/rest-proxy/values.yaml b/applications/sasquatch/charts/rest-proxy/values.yaml
index f3f0b92208..2c3054b9cb 100644
--- a/applications/sasquatch/charts/rest-proxy/values.yaml
+++ b/applications/sasquatch/charts/rest-proxy/values.yaml
@@ -9,7 +9,7 @@ image:
# -- Image pull policy.
pullPolicy: IfNotPresent
# -- Kafka REST proxy image tag.
- tag: 7.3.3
+ tag: 7.4.0
service:
# -- Kafka REST proxy service port
diff --git a/applications/sasquatch/charts/square-events/README.md b/applications/sasquatch/charts/square-events/README.md
index ce28f5c979..96c9f53e2d 100644
--- a/applications/sasquatch/charts/square-events/README.md
+++ b/applications/sasquatch/charts/square-events/README.md
@@ -6,4 +6,4 @@ Kafka topics and users for SQuaRE Events.
| Key | Type | Default | Description |
|-----|------|---------|-------------|
-| cluster.name | string | `"sasquatch"` | |
+| cluster.name | string | `"sasquatch"` | |
\ No newline at end of file
diff --git a/applications/sasquatch/charts/square-events/templates/ook-topics.yaml b/applications/sasquatch/charts/square-events/templates/ook-topics.yaml
new file mode 100644
index 0000000000..891533e26c
--- /dev/null
+++ b/applications/sasquatch/charts/square-events/templates/ook-topics.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaTopic
+metadata:
+ name: "lsst.square-events.ook.ingest"
+ labels:
+ strimzi.io/cluster: {{ .Values.cluster.name }}
+spec:
+ partitions: 4
+ replicas: 3
+ config:
+ # http://kafka.apache.org/documentation/#topicconfigs
+ retention.ms: 604800000 # 1 week
diff --git a/applications/sasquatch/charts/square-events/templates/ook-user.yaml b/applications/sasquatch/charts/square-events/templates/ook-user.yaml
new file mode 100644
index 0000000000..0c3bb352cc
--- /dev/null
+++ b/applications/sasquatch/charts/square-events/templates/ook-user.yaml
@@ -0,0 +1,45 @@
+---
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaUser
+metadata:
+ name: ook
+ labels:
+ strimzi.io/cluster: {{ .Values.cluster.name }}
+spec:
+ template:
+ secret:
+ metadata:
+ annotations:
+ replicator.v1.mittwald.de/replication-allowed: "true"
+ replicator.v1.mittwald.de/replication-allowed-namespaces: "ook"
+ authentication:
+ type: tls
+ authorization:
+ type: simple
+ acls:
+ - resource:
+ type: group
+ name: "ook"
+ patternType: literal
+ operations:
+ - "Read"
+ host: "*"
+ - resource:
+ type: topic
+ name: "lsst.square-events.ook.ingest"
+ patternType: literal
+ operations:
+ - "Describe"
+ - "Read"
+ - "Write"
+ host: "*"
+ - resource:
+ type: topic
+ name: "lsst.square-events.squarebot.slack.app.mention"
+ patternType: literal
+ type: allow
+ host: "*"
+ operations:
+ - "Read"
+ - "Describe"
+ host: "*"
diff --git a/applications/sasquatch/charts/strimzi-kafka/README.md b/applications/sasquatch/charts/strimzi-kafka/README.md
index 3147c450a1..b3f18c46ce 100644
--- a/applications/sasquatch/charts/strimzi-kafka/README.md
+++ b/applications/sasquatch/charts/strimzi-kafka/README.md
@@ -8,7 +8,7 @@ A subchart to deploy Strimzi Kafka components for Sasquatch.
|-----|------|---------|-------------|
| cluster.name | string | `"sasquatch"` | Name used for the Kafka cluster, and used by Strimzi for many annotations. |
| connect.enabled | bool | `true` | Enable Kafka Connect. |
-| connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.34.0-kafka-3.3.1:1.1.0"` | Custom strimzi-kafka image with connector plugins used by sasquatch. |
+| connect.image | string | `"ghcr.io/lsst-sqre/strimzi-0.35.1-kafka-3.4.0:1.2.0"` | Custom strimzi-kafka image with connector plugins used by sasquatch. |
| connect.replicas | int | `3` | Number of Kafka Connect replicas to run. |
| kafka.affinity | object | `{}` | Node affinity for Kafka broker pod assignment. |
| kafka.config."log.retention.bytes" | string | `"429496729600"` | Maximum retained number of bytes for a topic's data. |
@@ -30,19 +30,25 @@ A subchart to deploy Strimzi Kafka components for Sasquatch.
| kafka.storage.size | string | `"500Gi"` | Size of the backing storage disk for each of the Kafka brokers. |
| kafka.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. |
| kafka.tolerations | list | `[]` | Tolerations for Kafka broker pod assignment. |
-| kafka.version | string | `"3.3.1"` | Version of Kafka to deploy. |
+| kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. |
| mirrormaker2.enabled | bool | `false` | Enable replication in the target (passive) cluster. |
+| mirrormaker2.replication.policy.class | string | IdentityReplicationPolicy | Replication policy. |
+| mirrormaker2.replication.policy.separator | string | "" | Separator used to rename topics when the DefaultReplicationPolicy is used. Defaults to "" when the IdentityReplicationPolicy is used. |
| mirrormaker2.source.bootstrapServer | string | `""` | Source (active) cluster to replicate from. |
| mirrormaker2.source.topicsPattern | string | `"registry-schemas, lsst.sal.*"` | Topic replication from the source cluster defined as a comma-separated list or regular expression pattern. |
+| mirrormaker2.sourceConnect.enabled | bool | `false` | Whether to deploy another Connect cluster for topics replicated from the source cluster. Requires the sourceRegistry to be enabled. |
+| mirrormaker2.sourceRegistry.enabled | bool | `false` | Whether to deploy another Schema Registry for the schemas replicated from the source cluster. |
+| mirrormaker2.sourceRegistry.schemaTopic | string | `"source.registry-schemas"` | Name of the Schema Registry schemas topic replicated from the source cluster |
| registry.schemaTopic | string | `"registry-schemas"` | Name of the topic used by the Schema Registry |
| superusers | list | `["kafka-admin"]` | A list of usernames for users who should have global admin permissions. These users will be created, along with their credentials. |
| users.kafdrop.enabled | bool | `true` | Enable user Kafdrop (deployed by parent Sasquatch chart). |
| users.kafkaConnectManager.enabled | bool | `true` | Enable user kafka-connect-manager |
| users.promptProcessing.enabled | bool | `true` | Enable user prompt-processing |
+| users.replicator.enabled | bool | `false` | Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters) |
| users.telegraf.enabled | bool | `true` | Enable user telegraf (deployed by parent Sasquatch chart) |
| users.tsSalKafka.enabled | bool | `true` | Enable user ts-salkafka. |
| zookeeper.affinity | object | `{}` | Node affinity for Zookeeper pod assignment. |
| zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. |
| zookeeper.storage.size | string | `"100Gi"` | Size of the backing storage disk for each of the Zookeeper instances. |
| zookeeper.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes. |
-| zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. |
+| zookeeper.tolerations | list | `[]` | Tolerations for Zookeeper pod assignment. |
\ No newline at end of file
diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml
index ea6d63ffd5..39ae427957 100644
--- a/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml
+++ b/applications/sasquatch/charts/strimzi-kafka/templates/mirrormaker2.yaml
@@ -55,9 +55,10 @@ spec:
# The frequency to check for new topics.
refresh.topics.interval.seconds: 60
# Policy to define the remote topic naming convention.
- # This setting will preserve topic names in the target cluster.
- replication.policy.separator: ""
- replication.policy.class: "org.apache.kafka.connect.mirror.IdentityReplicationPolicy"
+ # The default is to preserve topic names in the target cluster.
+ # To add the source cluster alias as a prefix to the topic name, use replication.policy.separator="." and replication.policy.class="org.apache.kafka.connect.mirror.DefaultReplicationPolicy"
+ replication.policy.separator: {{ default "" .Values.mirrormaker2.replication.policy.separator }}
+ replication.policy.class: {{ default "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" .Values.mirrormaker2.replication.policy.class }}
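+ # For example, assuming the source cluster alias is "source",
+ # DefaultReplicationPolicy with separator "." replicates lsst.sal.Test
+ # as source.lsst.sal.Test in the target cluster, while
+ # IdentityReplicationPolicy keeps the topic name unchanged.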
# Handling high volumes of messages
# By increasing the batch size, produce requests are delayed and more messages are
# added to the batch and sent to brokers at the same time.
@@ -76,7 +77,6 @@ spec:
# Increase request timeout
producer.request.timeout.ms: 120000
consumer.request.timeout.ms: 120000
-
heartbeatConnector:
config:
heartbeats.topic.replication.factor: 3
@@ -91,9 +91,7 @@ spec:
sync.group.offsets.interval.seconds: 60
# The frequency of checks for offset tracking.
emit.checkpoints.interval.seconds: 60
- # Policy to define the remote topic naming convention.
- # This setting will preserve topic names in the target cluster.
- replication.policy.class: "org.apache.kafka.connect.mirror.IdentityReplicationPolicy"
+ replication.policy.class: {{ default "org.apache.kafka.connect.mirror.IdentityReplicationPolicy" .Values.mirrormaker2.replication.policy.class }}
# Topic replication from the source cluster defined as a comma-separated list
# or regular expression pattern.
topicsPattern: {{ .Values.mirrormaker2.source.topicsPattern }}
diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/source-connect.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/source-connect.yaml
new file mode 100644
index 0000000000..c9eb89148a
--- /dev/null
+++ b/applications/sasquatch/charts/strimzi-kafka/templates/source-connect.yaml
@@ -0,0 +1,83 @@
+{{- if .Values.mirrormaker2.sourceConnect.enabled }}
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaConnect
+metadata:
+ name: {{ .Values.cluster.name }}-source
+ annotations:
+ # Use Connect REST API to configure connectors
+ strimzi.io/use-connector-resources: "false"
+spec:
+ image: {{ .Values.connect.image | quote }}
+ replicas: {{ .Values.connect.replicas }}
+ bootstrapServers: {{ .Values.cluster.name }}-kafka-bootstrap:9093
+ tls:
+ trustedCertificates:
+ - secretName: {{ .Values.cluster.name }}-cluster-ca-cert
+ certificate: ca.crt
+ authentication:
+ type: tls
+ certificateAndKey:
+ secretName: {{ .Values.cluster.name }}-source-connect
+ certificate: user.crt
+ key: user.key
+ config:
+ group.id: {{ .Values.cluster.name }}-source-connect
+ offset.storage.topic: {{ .Values.cluster.name }}-source-connect-offsets
+ config.storage.topic: {{ .Values.cluster.name }}-source-connect-configs
+ status.storage.topic: {{ .Values.cluster.name }}-source-connect-status
+ # -1 means it will use the default replication factor configured in the broker
+ config.storage.replication.factor: -1
+ offset.storage.replication.factor: -1
+ status.storage.replication.factor: -1
+ key.converter: io.confluent.connect.avro.AvroConverter
+ key.converter.schemas.enable: true
+ key.converter.schema.registry.url: http://sasquatch-source-schema-registry.sasquatch:8081
+ value.converter: io.confluent.connect.avro.AvroConverter
+ value.converter.schemas.enable: true
+ value.converter.schema.registry.url: http://sasquatch-source-schema-registry.sasquatch:8081
+ request.timeout.ms: 120000
+ resources:
+ requests:
+ cpu: "2"
+ memory: 4Gi
+ limits:
+ cpu: "8"
+ memory: 24Gi
+ jvmOptions:
+ "-Xmx": "8g"
+ "-Xms": "8g"
+---
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaUser
+metadata:
+ name: {{ .Values.cluster.name }}-source-connect
+ labels:
+ strimzi.io/cluster: {{ .Values.cluster.name }}
+spec:
+ authentication:
+ type: tls
+ authorization:
+ type: simple
+ acls:
+ - resource:
+ type: group
+ name: {{ .Values.cluster.name }}-source-connect
+ operation: Read
+ - resource:
+ type: group
+ name: "*"
+ patternType: literal
+ operation: All
+ - resource:
+ type: topic
+ name: "*"
+ patternType: literal
+ type: allow
+ host: "*"
+ operation: All
+ quotas:
+ producerByteRate: 1073741824
+ consumerByteRate: 1073741824
+ requestPercentage: 90
+ controllerMutationRate: 1000
+{{- end }}
diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/source-schema-registry.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/source-schema-registry.yaml
new file mode 100644
index 0000000000..5917abe19b
--- /dev/null
+++ b/applications/sasquatch/charts/strimzi-kafka/templates/source-schema-registry.yaml
@@ -0,0 +1,60 @@
+{{- if .Values.mirrormaker2.sourceRegistry.enabled }}
+---
+apiVersion: roundtable.lsst.codes/v1beta1
+kind: StrimziSchemaRegistry
+metadata:
+ name: {{ .Values.cluster.name }}-source-schema-registry
+spec:
+ listener: tls
+ compatibilityLevel: none
+---
+apiVersion: kafka.strimzi.io/v1beta2
+kind: KafkaUser
+metadata:
+ name: {{ .Values.cluster.name }}-source-schema-registry
+ labels:
+ strimzi.io/cluster: {{ .Values.cluster.name }}
+spec:
+ authentication:
+ type: tls
+ authorization:
+ # Official docs on authorizations required for the Schema Registry:
+ # https://docs.confluent.io/current/schema-registry/security/index.html#authorizing-access-to-the-schemas-topic
+ type: simple
+ acls:
+ # Allow Read, Write and DescribeConfigs operations on the
+ # schemas topic
+ - resource:
+ type: topic
+ name: {{ .Values.mirrormaker2.sourceRegistry.schemaTopic }}
+ patternType: literal
+ operation: Read
+ type: allow
+ - resource:
+ type: topic
+ name: {{ .Values.mirrormaker2.sourceRegistry.schemaTopic }}
+ patternType: literal
+ operation: Write
+ type: allow
+ - resource:
+ type: topic
+ name: {{ .Values.mirrormaker2.sourceRegistry.schemaTopic }}
+ patternType: literal
+ operation: DescribeConfigs
+ type: allow
+ # Allow all operations on the schema-registry* group
+ - resource:
+ type: group
+ name: schema-registry
+ patternType: prefix
+ operation: All
+ type: allow
+ # Allow Describe on the __consumer_offsets topic
+ # (The official docs also mention DescribeConfigs?)
+ - resource:
+ type: topic
+ name: "__consumer_offsets"
+ patternType: literal
+ operation: Describe
+ type: allow
+{{- end }}
diff --git a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml
index cab285b05a..f3bcddfa85 100644
--- a/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml
+++ b/applications/sasquatch/charts/strimzi-kafka/templates/users.yaml
@@ -30,7 +30,7 @@ spec:
requestPercentage: 90
controllerMutationRate: 1000
{{- end }}
-{{- if .Values.mirrormaker2.enabled -}}
+{{- if .Values.users.replicator.enabled }}
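+# The replicator user is gated on its own flag rather than
+# mirrormaker2.enabled: Mirror Maker 2 needs this user on both the source
+# and target clusters, while mirrormaker2 itself is only enabled on the
+# target (passive) cluster.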
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaUser
diff --git a/applications/sasquatch/charts/strimzi-kafka/values.yaml b/applications/sasquatch/charts/strimzi-kafka/values.yaml
index 4b0347bec0..6c1fefe589 100644
--- a/applications/sasquatch/charts/strimzi-kafka/values.yaml
+++ b/applications/sasquatch/charts/strimzi-kafka/values.yaml
@@ -5,7 +5,7 @@ cluster:
kafka:
# -- Version of Kafka to deploy.
- version: "3.3.1"
+ version: "3.4.0"
# -- Number of Kafka broker replicas to run.
replicas: 3
storage:
@@ -99,7 +99,7 @@ connect:
# -- Enable Kafka Connect.
enabled: true
# -- Custom strimzi-kafka image with connector plugins used by sasquatch.
- image: ghcr.io/lsst-sqre/strimzi-0.34.0-kafka-3.3.1:1.1.0
+ image: ghcr.io/lsst-sqre/strimzi-0.35.1-kafka-3.4.0:1.2.0
# -- Number of Kafka Connect replicas to run.
replicas: 3
@@ -113,6 +113,10 @@ superusers:
- kafka-admin
users:
+ replicator:
+ # -- Enable user replicator (used by Mirror Maker 2 and required at both source and target clusters)
+ enabled: false
+
tsSalKafka:
# -- Enable user ts-salkafka.
enabled: true
@@ -141,3 +145,20 @@ mirrormaker2:
bootstrapServer: ""
# -- Topic replication from the source cluster defined as a comma-separated list or regular expression pattern.
topicsPattern: "registry-schemas, lsst.sal.*"
+ replication:
+ policy:
+ # -- Separator used to rename topics when the DefaultReplicationPolicy is used. Defaults to "" when the IdentityReplicationPolicy is used.
+ # @default -- ""
+ separator: ""
+ # -- Replication policy.
+ # @default -- IdentityReplicationPolicy
+ class: "org.apache.kafka.connect.mirror.IdentityReplicationPolicy"
+ sourceRegistry:
+ # -- Whether to deploy another Schema Registry for the schemas replicated from the source cluster.
+ enabled: false
+ # -- Name of the Schema Registry schemas topic replicated from the source cluster
+ schemaTopic: "source.registry-schemas"
+ sourceConnect:
+ # -- Whether to deploy another Connect cluster for topics replicated from the source cluster.
+ # Requires the sourceRegistry to be enabled.
+ enabled: false
diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md
index 6e50cc4eae..9c8021077e 100644
--- a/applications/sasquatch/charts/telegraf-kafka-consumer/README.md
+++ b/applications/sasquatch/charts/telegraf-kafka-consumer/README.md
@@ -18,15 +18,16 @@ Telegraf is an agent written in Go for collecting, processing, aggregating, and
| env[1].valueFrom.secretKeyRef.name | string | `"sasquatch"` | |
| image.pullPolicy | string | IfNotPresent | Image pull policy. |
| image.repo | string | `"lsstsqre/telegraf"` | Telegraf image repository. |
-| image.tag | string | `"kafka-regexp"` | Telegraf image tag. |
+| image.tag | string | `"refreshregex"` | Telegraf image tag. |
| imagePullSecrets | list | `[]` | Secret names to use for Docker pulls. |
| influxdb2.bucket | string | `"telegraf-kafka-consumer"` | Name of the InfluxDB v2 bucket to write to. |
| kafkaConsumers.test.enabled | bool | `false` | Enable the Telegraf Kafka consumer. |
| kafkaConsumers.test.flush_interval | string | `"1s"` | Default data flushing interval to InfluxDB. |
| kafkaConsumers.test.interval | string | `"1s"` | Data collection interval for the Kafka consumer. |
+| kafkaConsumers.test.topicRefreshInterval | string | `"60s"` | Default interval for refreshing topics to check for new or removed regexp matches |
| kafkaConsumers.test.topicRegexps | string | `"[ \".*Test\" ]\n"` | List of regular expressions to specify the Kafka topics consumed by this agent. |
| nodeSelector | object | `{}` | Node labels for pod assignment. |
| podAnnotations | object | `{}` | Annotations for telegraf-kafka-consumers pods. |
| podLabels | object | `{}` | Labels for telegraf-kafka-consumer pods. |
| resources | object | `{}` | Kubernetes resources requests and limits. |
-| tolerations | list | `[]` | Tolerations for pod assignment. |
+| tolerations | list | `[]` | Tolerations for pod assignment. |
\ No newline at end of file
diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml
index 4d8f32c7eb..55bfef6591 100644
--- a/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml
+++ b/applications/sasquatch/charts/telegraf-kafka-consumer/templates/configmap.yaml
@@ -16,7 +16,7 @@ data:
flush_jitter = "0s"
interval = {{ default "1s" $value.interval | quote }}
logfile = ""
- metric_batch_size = 1000
+ metric_batch_size = {{ default 1000 $value.metric_batch_size }}
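+ # metric_batch_size falls back to 1000 but can be raised per consumer,
+ # e.g. the m1m3 consumer in values-summit.yaml sets it to 5000.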
metric_buffer_limit = 10000
omit_hostname = true
precision = ""
@@ -40,11 +40,11 @@ data:
]
consumer_group = "telegraf-kafka-consumer-{{ $key }}"
data_format = "avro"
- max_message_len = 1000000
+ max_processing_time = "5s"
sasl_mechanism = "SCRAM-SHA-512"
sasl_password = "$TELEGRAF_PASSWORD"
sasl_username = "telegraf"
- topic_refresh_interval = "60s"
+ topic_refresh_interval = {{ default "60s" $value.topicRefreshInterval | quote }}
topic_regexps = {{ $value.topicRegexps }}
offset = "newest"
consumer_fetch_default = "20MB"
diff --git a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml
index f0bf7f8e31..aeeb3a5701 100644
--- a/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml
+++ b/applications/sasquatch/charts/telegraf-kafka-consumer/values.yaml
@@ -8,7 +8,7 @@ image:
# -- Telegraf image repository.
repo: "lsstsqre/telegraf"
# -- Telegraf image tag.
- tag: "kafka-regexp"
+ tag: "refreshregex"
# -- Image pull policy.
# @default -- IfNotPresent
pullPolicy: "Always"
@@ -55,6 +55,9 @@ kafkaConsumers:
# -- List of regular expressions to specify the Kafka topics consumed by this agent.
topicRegexps: |
[ ".*Test" ]
+ # -- Default interval for refreshing topics to check for new or
+ # removed regexp matches
+ topicRefreshInterval: "60s"
influxdb2:
# -- Name of the InfluxDB v2 bucket to write to.
diff --git a/applications/sasquatch/values-base.yaml b/applications/sasquatch/values-base.yaml
index 2dfdf2180d..13393be76c 100644
--- a/applications/sasquatch/values-base.yaml
+++ b/applications/sasquatch/values-base.yaml
@@ -1,7 +1,29 @@
strimzi-kafka:
+ mirrormaker2:
+ enabled: true
+ source:
+ bootstrapServer: sasquatch-summit-kafka-bootstrap.lsst.codes:9094
+ topicsPattern: "lsst.sal.*, registry-schemas"
+ replication:
+ policy:
+ separator: "."
+ class: "org.apache.kafka.connect.mirror.DefaultReplicationPolicy"
+ sourceRegistry:
+ enabled: true
+ schemaTopic: source.registry-schemas
+ sourceConnect:
+ enabled: true
+ resources:
+ requests:
+ cpu: 2
+ memory: 4Gi
+ limits:
+ cpu: 4
+ memory: 8Gi
kafka:
storage:
storageClassName: rook-ceph-block
+ size: 1Ti
externalListener:
tls:
enabled: true
@@ -18,6 +40,9 @@ strimzi-kafka:
zookeeper:
storage:
storageClassName: rook-ceph-block
+ users:
+ replicator:
+ enabled: true
influxdb:
persistence:
@@ -26,6 +51,25 @@ influxdb:
enabled: true
hostname: base-lsp.lsst.codes
+influxdb-staging:
+ enabled: true
+ persistence:
+ storageClass: rook-ceph-block
+ size: 5Ti
+ ingress:
+ enabled: true
+ hostname: base-lsp.lsst.codes
+
+source-influxdb:
+ enabled: true
+ persistence:
+ storageClass: rook-ceph-block
+ size: 10Ti
+ ingress:
+ enabled: true
+ hostname: base-lsp.lsst.codes
+
kafka-connect-manager:
influxdbSink:
# Based on the kafka producers configuration for the BTS
@@ -33,55 +77,143 @@ kafka-connect-manager:
connectors:
auxtel:
enabled: true
- topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS"
+ topicsRegex: "lsst.sal.ATAOS|lsst.sal.ATDome|lsst.sal.ATDomeTrajectory|lsst.sal.ATHexapod|lsst.sal.ATPneumatics|lsst.sal.ATPtg|lsst.sal.ATMCS"
maintel:
enabled: true
- topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg"
+ topicsRegex: "lsst.sal.MTAOS|lsst.sal.MTDome|lsst.sal.MTDomeTrajectory|lsst.sal.MTPtg"
mtmount:
enabled: true
- topicsRegex: ".*MTMount"
+ topicsRegex: "lsst.sal.MTMount"
+ tasksMax: "8"
comcam:
enabled: true
- topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS"
+ topicsRegex: "lsst.sal.CCCamera|lsst.sal.CCHeaderService|lsst.sal.CCOODS"
eas:
enabled: true
- topicsRegex: ".*DIMM|.*DSM|.*ESS|.*WeatherForecast"
+ topicsRegex: "lsst.sal.DIMM|lsst.sal.DSM|lsst.sal.ESS|lsst.sal.WeatherForecast"
latiss:
enabled: true
- topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph"
+ topicsRegex: "lsst.sal.ATCamera|lsst.sal.ATHeaderService|lsst.sal.ATOODS|lsst.sal.ATSpectrograph"
m1m3:
enabled: true
- topicsRegex: ".*MTM1M3"
+ topicsRegex: "lsst.sal.MTM1M3"
+ tasksMax: "8"
m2:
enabled: true
- topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator"
+ topicsRegex: "lsst.sal.MTHexapod|lsst.sal.MTM2|lsst.sal.MTRotator"
obssys:
enabled: true
- topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher"
+ topicsRegex: "lsst.sal.Scheduler|lsst.sal.Script|lsst.sal.ScriptQueue|lsst.sal.Watcher"
ocps:
enabled: true
- topicsRegex: ".*OCPS"
+ topicsRegex: "lsst.sal.OCPS"
test:
enabled: true
- topicsRegex: ".*Test"
+ topicsRegex: "lsst.sal.Test"
pmd:
enabled: true
- topicsRegex: ".*PMD"
+ topicsRegex: "lsst.sal.PMD"
calsys:
enabled: true
- topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser"
+ topicsRegex: "lsst.sal.ATMonochromator|lsst.sal.ATWhiteLight|lsst.sal.CBP|lsst.sal.Electrometer|lsst.sal.FiberSpectrograph|lsst.sal.LinearStage|lsst.sal.TunableLaser"
mtaircompressor:
enabled: true
- topicsRegex: ".*MTAirCompressor"
+ topicsRegex: "lsst.sal.MTAirCompressor"
authorize:
enabled: true
- topicsRegex: ".*Authorize"
+ topicsRegex: "lsst.sal.Authorize"
lasertracker:
enabled: true
- topicsRegex: ".*LaserTracker"
+ topicsRegex: "lsst.sal.LaserTracker"
genericcamera:
enabled: true
- topicsRegex: ".*GCHeaderService|.*GenericCamera"
+ topicsRegex: "lsst.sal.GCHeaderService|lsst.sal.GenericCamera"
+
+# This needs to follow the kafka-connect-manager configuration for the summit
+# environment, from which data is replicated.
+# We need to remove the "source." prefix from the topic name before writing to InfluxDB.
+source-kafka-connect-manager:
+ enabled: true
+ influxdbSink:
+ connectInfluxUrl: "http://sasquatch-influxdb-staging.sasquatch:8086"
+ connectInfluxDb: "efd"
+ connectors:
+ source-auxtel:
+ enabled: true
+ topicsRegex: "source.lsst.sal.ATAOS|source.lsst.sal.ATDome|source.lsst.sal.ATDomeTrajectory|source.lsst.sal.ATHexapod|source.lsst.sal.ATPneumatics|source.lsst.sal.ATPtg|source.lsst.sal.ATMCS"
+ removePrefix: "source."
+ source-maintel:
+ enabled: true
+ topicsRegex: "source.lsst.sal.MTAOS|source.lsst.sal.MTDome|source.lsst.sal.MTDomeTrajectory|source.lsst.sal.MTPtg"
+ removePrefix: "source."
+ source-mtmount:
+ enabled: true
+ topicsRegex: "source.lsst.sal.MTMount"
+ removePrefix: "source."
+ source-comcam:
+ enabled: true
+ topicsRegex: "source.lsst.sal.CCCamera|source.lsst.sal.CCHeaderService|source.lsst.sal.CCOODS"
+ removePrefix: "source."
+ source-eas:
+ enabled: true
+ topicsRegex: "source.lsst.sal.DIMM|source.lsst.sal.DSM|source.lsst.sal.ESS|source.lsst.sal.WeatherForecast"
+ removePrefix: "source."
+ source-latiss:
+ enabled: true
+ topicsRegex: "source.lsst.sal.ATCamera|source.lsst.sal.ATHeaderService|source.lsst.sal.ATOODS|source.lsst.sal.ATSpectrograph"
+ removePrefix: "source."
+ source-m1m3:
+ enabled: true
+ topicsRegex: "source.lsst.sal.MTM1M3"
+ removePrefix: "source."
+ source-m2:
+ enabled: true
+ topicsRegex: "source.lsst.sal.MTHexapod|source.lsst.sal.MTM2|source.lsst.sal.MTRotator"
+ removePrefix: "source."
+ source-obssys:
+ enabled: true
+ topicsRegex: "source.lsst.sal.Scheduler|source.lsst.sal.Script|source.lsst.sal.ScriptQueue|source.lsst.sal.Watcher"
+ removePrefix: "source."
+ source-ocps:
+ enabled: true
+ topicsRegex: "source.lsst.sal.OCPS"
+ removePrefix: "source."
+ source-test:
+ enabled: true
+ topicsRegex: "source.lsst.sal.Test"
+ removePrefix: "source."
+ source-pmd:
+ enabled: true
+ topicsRegex: "source.lsst.sal.PMD"
+ removePrefix: "source."
+ source-calsys:
+ enabled: true
+ topicsRegex: "source.lsst.sal.ATMonochromator|source.lsst.sal.ATWhiteLight|source.lsst.sal.CBP|source.lsst.sal.Electrometer|source.lsst.sal.FiberSpectrograph|source.lsst.sal.LinearStage|source.lsst.sal.TunableLaser"
+ removePrefix: "source."
+ source-mtaircompressor:
+ enabled: true
+ topicsRegex: "source.lsst.sal.MTAirCompressor"
+ removePrefix: "source."
+ source-authorize:
+ enabled: true
+ topicsRegex: "source.lsst.sal.Authorize"
+ removePrefix: "source."
+ source-genericcamera:
+ enabled: true
+ topicsRegex: "source.lsst.sal.GCHeaderService|source.lsst.sal.GenericCamera"
+ removePrefix: "source."
+ source-gis:
+ enabled: true
+ topicsRegex: "source.lsst.sal.GIS"
+ removePrefix: "source."
+ source-mtvms:
+ enabled: true
+ topicsRegex: "source.lsst.sal.MTVMS"
+ removePrefix: "source."
+ source-lasertracker:
+ enabled: true
+ topicsRegex: "source.lsst.sal.LaserTracker"
+ removePrefix: "source."
kafdrop:
ingress:
diff --git a/applications/sasquatch/values-idfdev.yaml b/applications/sasquatch/values-idfdev.yaml
index 86412958b5..0a1d965f07 100644
--- a/applications/sasquatch/values-idfdev.yaml
+++ b/applications/sasquatch/values-idfdev.yaml
@@ -13,33 +13,48 @@ strimzi-kafka:
host: sasquatch-dev-kafka-1.lsst.cloud
- loadBalancerIP: "35.184.86.132"
host: sasquatch-dev-kafka-2.lsst.cloud
+ users:
+ replicator:
+ enabled: true
influxdb:
ingress:
enabled: true
hostname: data-dev.lsst.cloud
+ resources:
+ requests:
+ memory: 16Gi
+ cpu: 2
+ limits:
+ memory: 16Gi
+ cpu: 2
influxdb2:
- enabled: false
+ enabled: true
ingress:
enabled: true
hostname: data-dev.lsst.cloud
telegraf-kafka-consumer:
- enabled: false
+ enabled: true
+ image:
+ tag: "refreshregex"
kafkaConsumers:
test:
enabled: true
replicaCount: 1
+ refresh_interval: "60s"
topicRegexps: |
[ ".*Test" ]
atmcs:
enabled: true
replicaCount: 1
+ refresh_interval: "60s"
topicRegexps: |
[ ".*ATMCS" ]
kafdrop:
+ cmdArgs: "--message.format=AVRO --topic.deleteEnabled=true --topic.createEnabled=true"
ingress:
enabled: true
hostname: data-dev.lsst.cloud
@@ -55,6 +70,7 @@ rest-proxy:
topicPrefixes:
- test
- lsst.dm
+ - lsst.Test
chronograf:
ingress:
diff --git a/applications/sasquatch/values-idfint.yaml b/applications/sasquatch/values-idfint.yaml
index cdfe4b4b60..12d9e206f3 100644
--- a/applications/sasquatch/values-idfint.yaml
+++ b/applications/sasquatch/values-idfint.yaml
@@ -26,11 +26,21 @@ strimzi-kafka:
limits:
cpu: 4
memory: 8Gi
+ users:
+ replicator:
+ enabled: true
influxdb:
ingress:
enabled: true
hostname: data-int.lsst.cloud
+ resources:
+ requests:
+ memory: 16Gi
+ cpu: 2
+ limits:
+ memory: 16Gi
+ cpu: 2
kafka-connect-manager:
influxdbSink:
diff --git a/applications/sasquatch/values-summit.yaml b/applications/sasquatch/values-summit.yaml
index 551a6cbd59..d9cb8fd8dc 100644
--- a/applications/sasquatch/values-summit.yaml
+++ b/applications/sasquatch/values-summit.yaml
@@ -18,6 +18,11 @@ strimzi-kafka:
zookeeper:
storage:
storageClassName: rook-ceph-block
+ users:
+ tsSalKafka:
+ enabled: true
+ replicator:
+ enabled: true
influxdb:
persistence:
@@ -27,6 +32,15 @@ influxdb:
enabled: true
hostname: summit-lsp.lsst.codes
+influxdb2:
+ enabled: true
+ persistence:
+ storageClass: rook-ceph-block
+ size: 5Ti
+ ingress:
+ enabled: true
+ hostname: summit-lsp.lsst.codes
+
kafka-connect-manager:
influxdbSink:
# Based on the kafka producers configuration for the Summit
@@ -99,6 +113,89 @@ kafka-connect-manager:
enabled: true
repairerConnector: false
topicsRegex: ".*MTVMS"
+ lasertracker:
+ enabled: true
+ repairerConnector: false
+ topicsRegex: ".*LaserTracker"
+
+telegraf-kafka-consumer:
+ enabled: true
+ kafkaConsumers:
+ auxtel:
+ enabled: true
+ topicRegexps: |
+ [ ".*ATAOS", ".*ATDome", ".*ATDomeTrajectory", ".*ATHexapod", ".*ATPneumatics", ".*ATPtg", ".*ATMCS" ]
+ maintel:
+ enabled: true
+ topicRegexps: |
+ [ ".*MTAOS", ".*MTDome", ".*MTDomeTrajectory", ".*MTPtg" ]
+ mtmount:
+ enabled: true
+ topicRegexps: |
+ [ ".*MTMount" ]
+ comcam:
+ enabled: true
+ topicRegexps: |
+ [ ".*CCCamera", ".*CCHeaderService", ".*CCOODS" ]
+ eas:
+ enabled: true
+ topicRegexps: |
+ [ ".*DIMM", ".*DSM", ".*ESS", ".*HVAC", ".*WeatherForecast" ]
+ latiss:
+ enabled: true
+ topicRegexps: |
+ [ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ]
+ m1m3:
+ enabled: true
+ flush_interval: "1s"
+ metric_batch_size: 5000
+ interval: "0.1s"
+ topicRegexps: |
+ [ ".*MTM1M3" ]
+ m2:
+ enabled: true
+ topicRegexps: |
+ [ ".*MTHexapod", ".*MTM2", ".*MTRotator" ]
+ obssys:
+ enabled: true
+ topicRegexps: |
+ [ ".*Scheduler", ".*Script", ".*ScriptQueue", ".*Watcher" ]
+ ocps:
+ enabled: true
+ topicRegexps: |
+ [ ".*OCPS" ]
+ test:
+ enabled: true
+ topicRegexps: |
+ [ "lsst.sal.Test" ]
+ pmd:
+ enabled: true
+ topicRegexps: |
+ [ ".*PMD" ]
+ calsys:
+ enabled: true
+ topicRegexps: |
+ [ ".*ATMonochromator", ".*ATWhiteLight", ".*CBP", ".*Electrometer", ".*FiberSpectrograph", ".*LinearStage", ".*TunableLaser" ]
+ mtaircompressor:
+ enabled: true
+ topicRegexps: |
+ [ ".*MTAirCompressor" ]
+ genericcamera:
+ enabled: true
+ topicRegexps: |
+ [ ".*GCHeaderService", ".*GenericCamera" ]
+ gis:
+ enabled: true
+ topicRegexps: |
+ [ ".*GIS" ]
+ mtvms:
+ enabled: true
+ topicRegexps: |
+ [ ".*MTVMS" ]
+ lasertracker:
+ enabled: true
+ topicRegexps: |
+ [ ".*LaserTracker" ]
kafdrop:
ingress:
diff --git a/applications/sasquatch/values-tucson-teststand.yaml b/applications/sasquatch/values-tucson-teststand.yaml
index 56126adfa6..e3f34eb553 100644
--- a/applications/sasquatch/values-tucson-teststand.yaml
+++ b/applications/sasquatch/values-tucson-teststand.yaml
@@ -64,7 +64,8 @@ telegraf-kafka-consumer:
[ ".*ATCamera", ".*ATHeaderService", ".*ATOODS", ".*ATSpectrograph" ]
m1m3:
enabled: true
- flush_interval: "0.1s"
+ flush_interval: "1s"
+ metric_batch_size: 5000
interval: "0.1s"
topicRegexps: |
[ ".*MTM1M3" ]
diff --git a/applications/sasquatch/values-usdfdev.yaml b/applications/sasquatch/values-usdfdev.yaml
index 1bb60f2d56..fcd3053fbf 100644
--- a/applications/sasquatch/values-usdfdev.yaml
+++ b/applications/sasquatch/values-usdfdev.yaml
@@ -11,6 +11,9 @@ strimzi-kafka:
limits:
cpu: 4
memory: 8Gi
+ users:
+ replicator:
+ enabled: true
influxdb:
ingress:
@@ -108,6 +111,18 @@ kafka-connect-manager:
timestamp: "timestamp"
connectInfluxDb: "lsst.camera"
topicsRegex: "lsst.camera.*"
+ lsstverify:
+ enabled: true
+ timestamp: "timestamp"
+ connectInfluxDb: "lsst.verify"
+ topicsRegex: "lsst.verify.*"
+ tags: dataset_tag,band,instrument,skymap,detector,physical_filter,tract,exposure,patch,visit,run
+ lsstlf:
+ enabled: true
+ timestamp: "timestamp"
+ connectInfluxDb: "lsst.lf"
+ topicsRegex: "lsst.lf.*"
+ tags: benchmark_env,module,benchmark_type
kafdrop:
ingress:
@@ -129,6 +144,8 @@ rest-proxy:
- lsst.example
- lsst.rubintv
- lsst.camera
+ - lsst.verify
+ - lsst.lf
chronograf:
ingress:
diff --git a/applications/sasquatch/values-usdfprod.yaml b/applications/sasquatch/values-usdfprod.yaml
index 734f5952a2..4d52d13fd8 100644
--- a/applications/sasquatch/values-usdfprod.yaml
+++ b/applications/sasquatch/values-usdfprod.yaml
@@ -11,6 +11,9 @@ strimzi-kafka:
limits:
cpu: 4
memory: 8Gi
+ users:
+ replicator:
+ enabled: true
influxdb:
ingress:
@@ -20,6 +23,15 @@ influxdb:
enabled: true
size: 15Ti
+source-influxdb:
+ enabled: true
+ ingress:
+ enabled: true
+ hostname: usdf-rsp.slac.stanford.edu
+ persistence:
+ enabled: true
+ size: 15Ti
+
kafka-connect-manager:
influxdbSink:
# Based on the kafka producers configuration for the Summit
@@ -27,52 +39,74 @@ kafka-connect-manager:
connectors:
auxtel:
enabled: true
+ repairerConnector: false
topicsRegex: ".*ATAOS|.*ATDome|.*ATDomeTrajectory|.*ATHexapod|.*ATPneumatics|.*ATPtg|.*ATMCS"
maintel:
enabled: true
+ repairerConnector: false
topicsRegex: ".*MTAOS|.*MTDome|.*MTDomeTrajectory|.*MTPtg"
mtmount:
enabled: true
+ repairerConnector: false
topicsRegex: ".*MTMount"
+ tasksMax: "8"
comcam:
enabled: true
+ repairerConnector: false
topicsRegex: ".*CCCamera|.*CCHeaderService|.*CCOODS"
eas:
enabled: true
+ repairerConnector: false
topicsRegex: ".*DIMM|.*DSM|.*ESS|.*HVAC|.*WeatherForecast|.*WeatherStation"
latiss:
enabled: true
+ repairerConnector: false
topicsRegex: ".*ATCamera|.*ATHeaderService|.*ATOODS|.*ATSpectrograph"
m1m3:
enabled: true
+ repairerConnector: false
topicsRegex: ".*MTM1M3"
+ tasksMax: "8"
m2:
enabled: true
+ repairerConnector: false
topicsRegex: ".*MTHexapod|.*MTM2|.*MTRotator"
obssys:
enabled: true
+ repairerConnector: false
topicsRegex: ".*Scheduler|.*Script|.*ScriptQueue|.*Watcher"
ocps:
enabled: true
+ repairerConnector: false
topicsRegex: ".*OCPS"
test:
enabled: true
+ repairerConnector: false
topicsRegex: ".*Test"
pmd:
enabled: true
+ repairerConnector: false
topicsRegex: ".*PMD"
calsys:
enabled: true
+ repairerConnector: false
topicsRegex: ".*ATMonochromator|.*ATWhiteLight|.*CBP|.*Electrometer|.*FiberSpectrograph|.*LinearStage|.*TunableLaser"
mtaircompressor:
enabled: true
+ repairerConnector: false
topicsRegex: ".*MTAirCompressor"
genericcamera:
enabled: true
+ repairerConnector: false
topicsRegex: ".*GCHeaderService|.*GenericCamera"
gis:
enabled: true
+ repairerConnector: false
topicsRegex: ".*GIS"
+ lasertracker:
+ enabled: true
+ repairerConnector: false
+ topicsRegex: ".*LaserTracker"
kafdrop:
ingress:
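The ``tasksMax: "8"`` settings above raise the ceiling on parallel sink tasks for the highest-volume connectors (``mtmount`` and ``m1m3``). A simplified sketch of why that helps, which is not Kafka Connect's actual assignment algorithm: partitions are spread across at most ``tasks.max`` tasks, so more tasks means fewer partitions per task.

.. code-block:: python

   # Simplified sketch: spread topic partitions over at most tasks_max tasks.
   # Kafka Connect's real partition assignment is more involved.
   def assign(partitions: list[str], tasks_max: int) -> dict[int, list[str]]:
       n_tasks = max(1, min(tasks_max, len(partitions)))
       assignment: dict[int, list[str]] = {i: [] for i in range(n_tasks)}
       for i, partition in enumerate(partitions):
           assignment[i % n_tasks].append(partition)
       return assignment

   # Invented partition names for the high-volume MTM1M3 topics.
   partitions = [f"lsst.sal.MTM1M3.forceActuatorData-{i}" for i in range(16)]
   print(assign(partitions, tasks_max=8))  # 8 tasks, 2 partitions each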
diff --git a/applications/sasquatch/values.yaml b/applications/sasquatch/values.yaml
index cffe084c1a..2ae3a8e006 100644
--- a/applications/sasquatch/values.yaml
+++ b/applications/sasquatch/values.yaml
@@ -42,6 +42,7 @@ influxdb:
config:
data:
cache-max-memory-size: 0
+ # Duration a write will wait before fsyncing. This is useful for slower disks or when WAL write contention is present.
wal-fsync-delay: "100ms"
trace-logging-enabled: true
http:
@@ -66,14 +67,142 @@ influxdb:
# init.iql: |+
resources:
requests:
- memory: 1Gi
- cpu: 1
+ memory: 96Gi
+ cpu: 8
+ limits:
+ memory: 96Gi
+ cpu: 8
+
+influxdb-staging:
+ # -- Enable InfluxDB staging deployment.
+ enabled: false
+ # -- InfluxDB image tag.
+ image:
+ tag: "1.8.10"
+ persistence:
+ # -- Enable persistent volume claim.
+ # By default, storageClass is undefined, which selects the default provisioner (standard on GKE).
+ enabled: true
+ # -- Persistent volume size.
+ # @default -- 1Ti for teststand deployments
+ size: 1Ti
+ # -- Default InfluxDB user, use influxdb-user and influxdb-password keys from secret.
+ setDefaultUser:
+ enabled: true
+ user:
+ existingSecret: sasquatch
+ # -- InfluxDB ingress configuration.
+ # @default -- disabled
+ ingress:
+ enabled: false
+ tls: false
+ hostname: ""
+ annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: /$2
+ className: "nginx"
+ path: /influxdb-staging(/|$)(.*)
+ # -- Override InfluxDB configuration.
+ # See https://docs.influxdata.com/influxdb/v1.8/administration/config
+ config:
+ data:
+ cache-max-memory-size: 0
+ wal-fsync-delay: "100ms"
+ trace-logging-enabled: true
+ http:
+ enabled: true
+ flux-enabled: true
+ auth-enabled: true
+ max-row-limit: 0
+ coordinator:
+ write-timeout: "1h"
+ max-concurrent-queries: 0
+ query-timeout: "0s"
+ log-queries-after: "15s"
+ continuous_queries:
+ enabled: false
+ logging:
+ level: "debug"
+ initScripts:
+ # -- Enable InfluxDB custom initialization script.
+ enabled: false
+ # scripts:
+ # # -- InfluxDB custom initialization script.
+ # init.iql: |+
+ resources:
+ requests:
+ memory: 96Gi
+ cpu: 8
+ limits:
+ memory: 96Gi
+ cpu: 8
+
+source-influxdb:
+ # -- Enable InfluxDB source deployment.
+ enabled: false
+ # -- InfluxDB image tag.
+ image:
+ tag: "1.8.10"
+ persistence:
+ # -- Enable persistent volume claim.
+ # By default, storageClass is undefined, which selects the default provisioner (standard on GKE).
+ enabled: true
+ # -- Persistent volume size.
+ # @default -- 1Ti for teststand deployments
+ size: 1Ti
+ # -- Default InfluxDB user, use influxdb-user and influxdb-password keys from secret.
+ setDefaultUser:
+ enabled: true
+ user:
+ existingSecret: sasquatch
+ # -- InfluxDB ingress configuration.
+ # @default -- disabled
+ ingress:
+ enabled: false
+ tls: false
+ hostname: ""
+ annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: /$2
+ className: "nginx"
+ path: /source-influxdb(/|$)(.*)
+ # -- Override InfluxDB configuration.
+ # See https://docs.influxdata.com/influxdb/v1.8/administration/config
+ config:
+ data:
+ cache-max-memory-size: 0
+ wal-fsync-delay: "100ms"
+ trace-logging-enabled: true
+ http:
+ enabled: true
+ flux-enabled: true
+ auth-enabled: true
+ max-row-limit: 0
+ coordinator:
+ write-timeout: "1h"
+ max-concurrent-queries: 0
+ query-timeout: "0s"
+ log-queries-after: "15s"
+ continuous_queries:
+ enabled: false
+ logging:
+ level: "debug"
+ initScripts:
+ # -- Enable InfluxDB custom initialization script.
+ enabled: false
+ # scripts:
+ # # -- InfluxDB custom initialization script.
+ # init.iql: |+
+ resources:
+ requests:
+ memory: 96Gi
+ cpu: 8
limits:
memory: 96Gi
cpu: 8
influxdb2:
enabled: false
+ image:
+ tag: 2.7.1-alpine
adminUser:
# -- Admin default organization.
organization: "default"
@@ -114,8 +243,8 @@ influxdb2:
influx bucket create --name telegraf-kafka-consumer --org default
resources:
requests:
- memory: 1Gi
- cpu: 1
+ memory: 16Gi
+ cpu: 8
limits:
memory: 96Gi
cpu: 8
@@ -123,6 +252,12 @@ influxdb2:
# -- Override kafka-connect-manager configuration.
kafka-connect-manager: {}
+# -- Override source-kafka-connect-manager configuration.
+source-kafka-connect-manager:
+ enabled: false
+ env:
+ kafkaConnectUrl: "http://sasquatch-source-connect-api.sasquatch:8083"
+
# -- Override telegraf-kafka-consumer configuration.
telegraf-kafka-consumer:
enabled: false
@@ -159,10 +294,10 @@ chronograf:
envFromSecret: "sasquatch"
resources:
requests:
- memory: 1Gi
+ memory: 4Gi
cpu: 1
limits:
- memory: 16Gi
+ memory: 64Gi
cpu: 4
kapacitor:
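The ``influxdb-staging`` and ``source-influxdb`` ingresses above pair a capture-group path with nginx's ``rewrite-target: /$2`` annotation, so the deployment-specific prefix is stripped before a request reaches InfluxDB. A quick illustration with Python's ``re`` module, on the assumption that nginx's ``$2`` corresponds to the second capture group of the path regex:

.. code-block:: python

   import re

   # Path regex from the influxdb-staging ingress above.
   path = r"/influxdb-staging(/|$)(.*)"

   # rewrite-target: /$2 keeps only the second capture group.
   for url in ["/influxdb-staging/query", "/influxdb-staging"]:
       m = re.match(path, url)
       print(url, "->", "/" + m.group(2))
   # /influxdb-staging/query -> /query
   # /influxdb-staging -> /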
diff --git a/applications/semaphore/README.md b/applications/semaphore/README.md
index ea3233aef9..8df8f5ff2f 100644
--- a/applications/semaphore/README.md
+++ b/applications/semaphore/README.md
@@ -40,4 +40,4 @@ Semaphore is the user notification and messaging service for the Rubin Science P
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `false` | Specifies whether a service account should be created. |
| serviceAccount.name | string | `""` | |
-| tolerations | list | `[]` | |
+| tolerations | list | `[]` | |
\ No newline at end of file
diff --git a/applications/semaphore/values-usdfdev.yaml b/applications/semaphore/values-usdfdev.yaml
index 12ae9f3476..1cb6319f9f 100644
--- a/applications/semaphore/values-usdfdev.yaml
+++ b/applications/semaphore/values-usdfdev.yaml
@@ -1,9 +1,9 @@
semaphore:
- config:
- phalanx_env: "usdfdev"
- github_app_id: "131502"
- enable_github_app: "True"
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
+config:
+ phalanx_env: "usdfdev"
+ github_app_id: "337324"
+ enable_github_app: "True"
diff --git a/applications/semaphore/values-usdfprod.yaml b/applications/semaphore/values-usdfprod.yaml
index a4dba3f052..b6473f3f72 100644
--- a/applications/semaphore/values-usdfprod.yaml
+++ b/applications/semaphore/values-usdfprod.yaml
@@ -1,2 +1,9 @@
+semaphore:
+ ingress:
+ enabled: true
+ annotations:
+ kubernetes.io/ingress.class: nginx
config:
phalanx_env: "usdfprod"
+ github_app_id: "337324"
+ enable_github_app: "True"
diff --git a/applications/sherlock/README.md b/applications/sherlock/README.md
index 459c2c462d..c54fc8c7bb 100644
--- a/applications/sherlock/README.md
+++ b/applications/sherlock/README.md
@@ -31,4 +31,4 @@ Application ingress status and metrics
| replicaCount | int | `1` | Number of web deployment pods to start |
| resources | object | `{}` | Resource limits and requests for the sherlock deployment pod |
| serviceAccount.name | string | `""` | |
-| tolerations | list | `[]` | Tolerations for the sherlock deployment pod |
+| tolerations | list | `[]` | Tolerations for the sherlock deployment pod |
\ No newline at end of file
diff --git a/applications/sherlock/secrets.yaml b/applications/sherlock/secrets.yaml
new file mode 100644
index 0000000000..401e4d83a3
--- /dev/null
+++ b/applications/sherlock/secrets.yaml
@@ -0,0 +1,7 @@
+publish_key:
+ description: >-
+ Secret used to publish Sherlock data to the central collector service.
+ Whenever this secret is changed, it will need to be updated in the
+ central collector service.
+ generate:
+ type: password
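The ``generate`` stanza with ``type: password`` asks the Phalanx secrets tooling to create a random value instead of requiring one to be supplied by hand. Purely as an illustration of the idea (the actual generator in the secrets tooling may differ):

.. code-block:: python

   import secrets

   # Illustration only: one plausible way to generate a password-type secret.
   publish_key = secrets.token_hex(32)
   print(publish_key)  # 64 hexadecimal characters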
diff --git a/applications/sqlproxy-cross-project/README.md b/applications/sqlproxy-cross-project/README.md
index 4b132f8b03..fcb9080470 100644
--- a/applications/sqlproxy-cross-project/README.md
+++ b/applications/sqlproxy-cross-project/README.md
@@ -19,10 +19,10 @@ GCP SQL Proxy as a service
| fullnameOverride | string | `""` | Override the full name for resources (includes the release name) |
| image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the Cloud SQL Proxy image |
| image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Proxy image to use |
-| image.tag | string | `"1.33.7"` | Tag of Cloud SQL Proxy image to use |
+| image.tag | string | `"1.33.9"` | Tag of Cloud SQL Proxy image to use |
| nameOverride | string | `""` | Override the base name for resources |
| nodeSelector | object | `{}` | Node selector rules for the Cloud SQL Proxy pod |
| podAnnotations | object | `{}` | Annotations for the Cloud SQL Proxy pod |
| replicaCount | int | `1` | Number of pods to start |
| resources | object | `{}` | Resource limits and requests for the Cloud SQL Proxy pod |
-| tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod |
+| tolerations | list | `[]` | Tolerations for the Cloud SQL Proxy pod |
\ No newline at end of file
diff --git a/applications/sqlproxy-cross-project/values.yaml b/applications/sqlproxy-cross-project/values.yaml
index b2b3aac43b..aee2f73071 100644
--- a/applications/sqlproxy-cross-project/values.yaml
+++ b/applications/sqlproxy-cross-project/values.yaml
@@ -14,7 +14,7 @@ image:
repository: "gcr.io/cloudsql-docker/gce-proxy"
# -- Tag of Cloud SQL Proxy image to use
- tag: "1.33.7"
+ tag: "1.33.9"
# -- Pull policy for the Cloud SQL Proxy image
pullPolicy: "IfNotPresent"
diff --git a/applications/squarebot/Chart.yaml b/applications/squarebot/Chart.yaml
index 70d23309d5..6eca1c0571 100644
--- a/applications/squarebot/Chart.yaml
+++ b/applications/squarebot/Chart.yaml
@@ -10,3 +10,8 @@ sources:
maintainers:
- name: jonathansick
url: https://github.com/jonathansick
+annotations:
+ phalanx.lsst.io/docs: |
+ - id: "SQR-075"
+ title: "Shared Pydantic schemas as the basis for Kafka/Avro messages in SQuaRE Roundtable"
+ url: "https://sqr-076.lsst.io/"
diff --git a/applications/squarebot/README.md b/applications/squarebot/README.md
index 51b5a0593c..c890062ccb 100644
--- a/applications/squarebot/README.md
+++ b/applications/squarebot/README.md
@@ -46,4 +46,4 @@ Squarebot feeds events from services like Slack and GitHub into the SQuaRE Event
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
| serviceAccount.name | string | `""` | |
-| tolerations | list | `[]` | |
+| tolerations | list | `[]` | |
\ No newline at end of file
diff --git a/applications/squareone/README.md b/applications/squareone/README.md
index 544c9c7c56..98391ce5b4 100644
--- a/applications/squareone/README.md
+++ b/applications/squareone/README.md
@@ -44,4 +44,4 @@ Squareone is the homepage UI for the Rubin Science Platform.
| podAnnotations | object | `{}` | Annotations for squareone pods |
| replicaCount | int | `1` | Number of squareone pods to run in the deployment. |
| resources | object | `{}` | |
-| tolerations | list | `[]` | |
+| tolerations | list | `[]` | |
\ No newline at end of file
diff --git a/applications/squareone/templates/ingress-times-square.yaml b/applications/squareone/templates/ingress-times-square.yaml
new file mode 100644
index 0000000000..1d09045cab
--- /dev/null
+++ b/applications/squareone/templates/ingress-times-square.yaml
@@ -0,0 +1,31 @@
+apiVersion: gafaelfawr.lsst.io/v1alpha1
+kind: GafaelfawrIngress
+metadata:
+ name: {{ template "squareone.fullname" . }}-times-square
+ labels:
+ {{- include "squareone.labels" . | nindent 4 }}
+config:
+ baseUrl: {{ .Values.global.baseUrl | quote }}
+ scopes:
+ all:
+ - "exec:notebook"
+ loginRedirect: true
+template:
+ metadata:
+ name: {{ template "squareone.fullname" . }}-times-square
+ {{- with .Values.ingress.annotations }}
+ annotations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ spec:
+ rules:
+ - host: {{ required "global.host must be set" .Values.global.host | quote }}
+ http:
+ paths:
+ - path: "/times-square"
+ pathType: "Prefix"
+ backend:
+ service:
+ name: {{ template "squareone.fullname" . }}
+ port:
+ number: 80
diff --git a/applications/squareone/values-usdfdev.yaml b/applications/squareone/values-usdfdev.yaml
index b9ae4d7f26..c0d3946711 100644
--- a/applications/squareone/values-usdfdev.yaml
+++ b/applications/squareone/values-usdfdev.yaml
@@ -2,3 +2,4 @@ replicaCount: 3
config:
siteName: "Rubin Science Platform"
semaphoreUrl: "https://usdf-rsp-dev.slac.stanford.edu/semaphore"
+ timesSquareUrl: "https://usdf-rsp-dev.slac.stanford.edu/times-square/api"
diff --git a/applications/squareone/values.yaml b/applications/squareone/values.yaml
index 60bcfed8fd..abab9d546a 100644
--- a/applications/squareone/values.yaml
+++ b/applications/squareone/values.yaml
@@ -124,12 +124,17 @@ config:
### Data Preview 0.2 (DP0.2)
DP0.2 is the second phase of the Data Preview 0 program using
- precursor data (simulated images from the DESC DC2 data
- challenge). For the first time, all the derived data products
- have been generated “in-house” on an early version of the Rubin
- processing infrastructure using version 23.0 of the LSST Science
- Pipelines. As a result, the data model is significantly
- different from the DP0.1 dataset.
+ simulated images from the DESC DC2 data challenge processed with
+ version 23.0 of the LSST Science Pipelines.
+
+
+
+
+
+ ### DP0.2 Tutorials
+
+ Tutorials for exploring the DP0.2 dataset on the Rubin Science
+ Platform.
@@ -153,10 +158,8 @@ config:
### Rubin Science Platform
- The Notebook aspect is a powerful data analysis environment with
- Jupyter Notebooks and terminals in the browser.
- Documentation for the Rubin Science Platform, including account set up,
- portal, notebooks, and API aspects.
+ Guides for setting up an account on the Rubin Science Platform
+ and using the Portal, Notebook, and API Aspects.
@@ -175,7 +178,7 @@ config:
The Science Pipelines include the Butler for accessing LSST data
and a pipeline framework for processing data. The LSST Science
- Pipelines Python package is preinstalled in the Notebook aspect.
+ Pipelines is preinstalled in the Notebook Aspect.
diff --git a/applications/ssotap/Chart.yaml b/applications/ssotap/Chart.yaml
index 990e2d0670..eff6e39966 100644
--- a/applications/ssotap/Chart.yaml
+++ b/applications/ssotap/Chart.yaml
@@ -5,4 +5,4 @@ description: IVOA TAP service
sources:
- https://github.com/lsst-sqre/tap-postgres
- https://github.com/opencadc/tap
-appVersion: "1.10"
+appVersion: "1.12"
diff --git a/applications/ssotap/README.md b/applications/ssotap/README.md
index ee0b20be14..06aea4d46f 100644
--- a/applications/ssotap/README.md
+++ b/applications/ssotap/README.md
@@ -18,7 +18,7 @@ IVOA TAP service
| config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) |
| config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket |
| config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. |
-| config.tapSchemaAddress | string | `"tap-schema-db.tap-schema.svc.cluster.local:3306"` | Address to a MySQL database containing TAP schema data |
+| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data |
| fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) |
| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
| global.host | string | Set by Argo CD | Host name for ingress |
@@ -45,6 +45,14 @@ IVOA TAP service
| podAnnotations | object | `{}` | Annotations for the Gafaelfawr frontend pod |
| replicaCount | int | `1` | Number of pods to start |
| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod |
+| tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod |
+| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image |
+| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to use. This must be overridden by each environment with the TAP schema for that environment. |
+| tapSchema.image.tag | string | `"2.1"` | Tag of TAP schema image |
+| tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod |
+| tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod |
+| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod |
+| tapSchema.tolerations | list | `[]` | Tolerations for the TAP schema database pod |
| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod |
| uws.affinity | object | `{}` | Affinity rules for the UWS database pod |
| uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image |
@@ -54,4 +62,4 @@ IVOA TAP service
| uws.podAnnotations | object | `{}` | Annotations for the UWS database pod |
| uws.resources | object | `{}` | Resource limits and requests for the UWS database pod |
| uws.tolerations | list | `[]` | Tolerations for the UWS database pod |
-| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) |
+| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) |
\ No newline at end of file
diff --git a/applications/ssotap/secrets.yaml b/applications/ssotap/secrets.yaml
new file mode 100644
index 0000000000..4280c602a3
--- /dev/null
+++ b/applications/ssotap/secrets.yaml
@@ -0,0 +1,4 @@
+"google_creds.json":
+ description: >-
+ Google service account credentials used to write async job output to
+ Google Cloud Storage.
diff --git a/applications/ssotap/templates/tap-schema-db-deployment.yaml b/applications/ssotap/templates/tap-schema-db-deployment.yaml
new file mode 100644
index 0000000000..0623cf1c9a
--- /dev/null
+++ b/applications/ssotap/templates/tap-schema-db-deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "cadc-tap.fullname" . }}-tap-schema-db
+ labels:
+ {{- include "cadc-tap.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "cadc-tap.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.tapSchema.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "cadc-tap.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: "schema-db"
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: "tap-schema-db"
+ env:
+ - name: MYSQL_DATABASE
+ value: "TAP_SCHEMA"
+ - name: MYSQL_USER
+ value: "TAP_SCHEMA"
+ - name: MYSQL_PASSWORD
+ value: "TAP_SCHEMA"
+ - name: MYSQL_ROOT_HOST
+ value: "%"
+ image: "{{ .Values.tapSchema.image.repository }}:{{ .Values.tapSchema.image.tag}}"
+ imagePullPolicy: {{ .Values.tapSchema.image.pullPolicy | quote }}
+ ports:
+ - containerPort: 3306
+ protocol: "TCP"
+ {{- with .Values.tapSchema.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ imagePullSecrets:
+ - name: "pull-secret"
+ {{- with .Values.tapSchema.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tapSchema.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tapSchema.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/applications/ssotap/templates/tap-schema-db-service.yaml b/applications/ssotap/templates/tap-schema-db-service.yaml
new file mode 100644
index 0000000000..e5b9dd0856
--- /dev/null
+++ b/applications/ssotap/templates/tap-schema-db-service.yaml
@@ -0,0 +1,15 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ template "cadc-tap.fullname" . }}-schema-db
+ labels:
+ {{- include "cadc-tap.labels" . | nindent 4 }}
+spec:
+ type: "ClusterIP"
+ ports:
+ - protocol: "TCP"
+ port: 3306
+ targetPort: 3306
+ selector:
+ {{- include "cadc-tap.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: "schema-db"
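The new Service narrows its selector with ``app.kubernetes.io/component: "schema-db"``, the extra label applied to the schema database pods, so traffic never lands on other pods that share the chart's common selector labels. A toy model of Kubernetes label selection, with stand-in values for what ``cadc-tap.selectorLabels`` expands to:

.. code-block:: python

   # Toy model: a Service selects pods whose labels are a superset of its
   # selector. The label values here stand in for the chart's selectorLabels.
   selector = {
       "app.kubernetes.io/name": "cadc-tap",
       "app.kubernetes.io/component": "schema-db",
   }
   pods = {
       "cadc-tap-frontend": {"app.kubernetes.io/name": "cadc-tap"},
       "cadc-tap-tap-schema-db": {
           "app.kubernetes.io/name": "cadc-tap",
           "app.kubernetes.io/component": "schema-db",
       },
   }
   matching = [
       name for name, labels in pods.items() if selector.items() <= labels.items()
   ]
   print(matching)  # ['cadc-tap-tap-schema-db']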
diff --git a/applications/ssotap/values-idfdev.yaml b/applications/ssotap/values-idfdev.yaml
index 9516c1dd94..866cac4322 100644
--- a/applications/ssotap/values-idfdev.yaml
+++ b/applications/ssotap/values-idfdev.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfdev-sso"
+
resources:
requests:
cpu: 2.0
@@ -8,12 +12,13 @@ resources:
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
pg:
mock:
enabled: false
database: "dp03_catalogs"
+ # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23
host: "usdf-pg-catalogs.slac.stanford.edu:5432"
username: "dp03"
diff --git a/applications/ssotap/values-idfint.yaml b/applications/ssotap/values-idfint.yaml
index 1d65e85abf..cdf447c971 100644
--- a/applications/ssotap/values-idfint.yaml
+++ b/applications/ssotap/values-idfint.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfint-sso"
+
resources:
requests:
cpu: 2.0
@@ -10,12 +14,13 @@ replicaCount: 2
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
pg:
mock:
enabled: false
database: "dp03_catalogs"
+ # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23
host: "usdf-pg-catalogs.slac.stanford.edu:5432"
username: "dp03"
diff --git a/applications/ssotap/values-idfprod.yaml b/applications/ssotap/values-idfprod.yaml
index edef98d5b7..3ea3ff015b 100644
--- a/applications/ssotap/values-idfprod.yaml
+++ b/applications/ssotap/values-idfprod.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfprod-sso"
+
resources:
requests:
cpu: 2.0
@@ -10,13 +14,14 @@ replicaCount: 2
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
pg:
mock:
enabled: false
database: "dp03_catalogs"
+ # Redirect to interim cloud Postgres during USDF outage 6/25/23-7/4/23
host: "usdf-pg-catalogs.slac.stanford.edu:5432"
username: "dp03"
diff --git a/applications/ssotap/values-minikube.yaml b/applications/ssotap/values-minikube.yaml
index 6e3f1aca1e..732b70bbdb 100644
--- a/applications/ssotap/values-minikube.yaml
+++ b/applications/ssotap/values-minikube.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfprod-sso"
+
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
diff --git a/applications/ssotap/values-usdfdev.yaml b/applications/ssotap/values-usdfdev.yaml
index a8802d4c5e..9a73e3d658 100644
--- a/applications/ssotap/values-usdfdev.yaml
+++ b/applications/ssotap/values-usdfdev.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-usdf-dev-sso"
+
resources:
requests:
cpu: 2.0
@@ -6,25 +10,14 @@ resources:
cpu: 8.0
memory: "32G"
-replicaCount: 2
-
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
pg:
mock:
enabled: false
- database: "lsstdb1"
- host: "usdf-butler.slac.stanford.edu:5432"
- username: "rubin"
-
-uws:
- resources:
- requests:
- cpu: 0.25
- memory: "1G"
- limits:
- cpu: 2.0
- memory: "4G"
+ database: "dp03_catalogs"
+ host: "usdf-pg-catalogs.slac.stanford.edu:5432"
+ username: "dp03"
diff --git a/applications/ssotap/values-usdfprod.yaml b/applications/ssotap/values-usdfprod.yaml
index a8802d4c5e..279ebf4f79 100644
--- a/applications/ssotap/values-usdfprod.yaml
+++ b/applications/ssotap/values-usdfprod.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-usdf-prod-sso"
+
resources:
requests:
cpu: 2.0
@@ -10,15 +14,15 @@ replicaCount: 2
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
pg:
mock:
enabled: false
- database: "lsstdb1"
- host: "usdf-butler.slac.stanford.edu:5432"
- username: "rubin"
+ database: "dp03_catalogs"
+ host: "usdf-pg-catalogs.slac.stanford.edu:5432"
+ username: "dp03"
uws:
resources:
diff --git a/applications/ssotap/values.yaml b/applications/ssotap/values.yaml
index fbf2c73154..619108e8aa 100644
--- a/applications/ssotap/values.yaml
+++ b/applications/ssotap/values.yaml
@@ -53,7 +53,7 @@ vaultSecretsPath: ""
config:
# -- Address to a MySQL database containing TAP schema data
- tapSchemaAddress: "tap-schema-db.tap-schema.svc.cluster.local:3306"
+ tapSchemaAddress: "cadc-tap-schema-db:3306"
# -- Datalink payload URL
datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"
@@ -120,6 +120,33 @@ pg:
# -- Affinity rules for the mock postgres pod
affinity: {}
+tapSchema:
+ image:
+ # -- TAP schema image to use. This must be overridden by each environment
+ # with the TAP schema for that environment.
+ repository: "lsstsqre/tap-schema-mock"
+
+ # -- Pull policy for the TAP schema image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of TAP schema image
+ tag: "2.1"
+
+ # -- Resource limits and requests for the TAP schema database pod
+ resources: {}
+
+ # -- Annotations for the TAP schema database pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the TAP schema database pod
+ nodeSelector: {}
+
+ # -- Tolerations for the TAP schema database pod
+ tolerations: []
+
+ # -- Affinity rules for the TAP schema database pod
+ affinity: {}
+
uws:
image:
# -- UWS database image to use
diff --git a/applications/strimzi-access-operator/README.md b/applications/strimzi-access-operator/README.md
index 7b3410f2be..aa6ae9b69a 100644
--- a/applications/strimzi-access-operator/README.md
+++ b/applications/strimzi-access-operator/README.md
@@ -13,4 +13,4 @@ Strimzi Access Operator
| image.tag | string | The appVersion of the chart | Tag of the image |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |
-| serviceAccount.name | string | `""` | |
+| serviceAccount.name | string | `""` | |
\ No newline at end of file
diff --git a/applications/strimzi/Chart.yaml b/applications/strimzi/Chart.yaml
index c2a93f8082..24139cc212 100644
--- a/applications/strimzi/Chart.yaml
+++ b/applications/strimzi/Chart.yaml
@@ -7,5 +7,5 @@ home: https://strimzi.io
appVersion: "0.26.0"
dependencies:
- name: strimzi-kafka-operator
- version: "0.34.0"
+ version: "0.35.1"
repository: https://strimzi.io/charts/
diff --git a/applications/tap-schema/README.md b/applications/tap-schema/README.md
index 4c53f0c3ef..0d651cce04 100644
--- a/applications/tap-schema/README.md
+++ b/applications/tap-schema/README.md
@@ -20,4 +20,4 @@ The TAP_SCHEMA database
| nodeSelector | object | `{}` | Node selector rules for the MySQL pod |
| podAnnotations | object | `{}` | Annotations for the MySQL pod |
| resources | object | `{}` | Resource limits and requests for the MySQL pod |
-| tolerations | list | `[]` | Tolerations for the MySQL pod |
+| tolerations | list | `[]` | Tolerations for the MySQL pod |
\ No newline at end of file
diff --git a/applications/tap/README.md b/applications/tap/README.md
index f709fc5a44..de764134af 100644
--- a/applications/tap/README.md
+++ b/applications/tap/README.md
@@ -18,7 +18,7 @@ IVOA TAP service
| config.gcsBucketType | string | GCS | GCS bucket type (GCS or S3) |
| config.gcsBucketUrl | string | None, must be set | Base URL for results stored in GCS bucket |
| config.jvmMaxHeapSize | string | `"4G"` | Java heap size, which will set the maximum size of the heap. Otherwise Java would determine it based on how much memory is available and black maths. |
-| config.tapSchemaAddress | string | `"tap-schema-db.tap-schema.svc.cluster.local:3306"` | Address to a MySQL database containing TAP schema data |
+| config.tapSchemaAddress | string | `"cadc-tap-schema-db:3306"` | Address to a MySQL database containing TAP schema data |
| fullnameOverride | string | `"cadc-tap"` | Override the full name for resources (includes the release name) |
| global.baseUrl | string | Set by Argo CD | Base URL for the environment |
| global.host | string | Set by Argo CD | Host name for ingress |
@@ -43,6 +43,14 @@ IVOA TAP service
| qserv.mock.tolerations | list | `[]` | Tolerations for the mock QServ pod |
| replicaCount | int | `1` | Number of pods to start |
| resources | object | `{}` | Resource limits and requests for the Gafaelfawr frontend pod |
+| tapSchema.affinity | object | `{}` | Affinity rules for the TAP schema database pod |
+| tapSchema.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the TAP schema image |
+| tapSchema.image.repository | string | `"lsstsqre/tap-schema-mock"` | TAP schema image to use. This must be overridden by each environment with the TAP schema for that environment. |
+| tapSchema.image.tag | string | `"2.0.2"` | Tag of TAP schema image |
+| tapSchema.nodeSelector | object | `{}` | Node selection rules for the TAP schema database pod |
+| tapSchema.podAnnotations | object | `{}` | Annotations for the TAP schema database pod |
+| tapSchema.resources | object | `{}` | Resource limits and requests for the TAP schema database pod |
+| tapSchema.tolerations | list | `[]` | Tolerations for the TAP schema database pod |
| tolerations | list | `[]` | Tolerations for the Gafaelfawr frontend pod |
| uws.affinity | object | `{}` | Affinity rules for the UWS database pod |
| uws.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for the UWS database image |
@@ -52,4 +60,4 @@ IVOA TAP service
| uws.podAnnotations | object | `{}` | Annotations for the UWS database pod |
| uws.resources | object | `{}` | Resource limits and requests for the UWS database pod |
| uws.tolerations | list | `[]` | Tolerations for the UWS database pod |
-| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) |
+| vaultSecretsPath | string | None, must be set | Path to the Vault secret (`secret/k8s_operator//tap`, for example) |
\ No newline at end of file
diff --git a/applications/tap/secrets.yaml b/applications/tap/secrets.yaml
new file mode 100644
index 0000000000..4280c602a3
--- /dev/null
+++ b/applications/tap/secrets.yaml
@@ -0,0 +1,4 @@
+"google_creds.json":
+ description: >-
+ Google service account credentials used to write async job output to
+ Google Cloud Storage.
diff --git a/applications/tap/templates/tap-schema-db-deployment.yaml b/applications/tap/templates/tap-schema-db-deployment.yaml
new file mode 100644
index 0000000000..0623cf1c9a
--- /dev/null
+++ b/applications/tap/templates/tap-schema-db-deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ template "cadc-tap.fullname" . }}-tap-schema-db
+ labels:
+ {{- include "cadc-tap.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "cadc-tap.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ {{- with .Values.tapSchema.podAnnotations }}
+ annotations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "cadc-tap.selectorLabels" . | nindent 8 }}
+ app.kubernetes.io/component: "schema-db"
+ spec:
+ automountServiceAccountToken: false
+ containers:
+ - name: "tap-schema-db"
+ env:
+ - name: MYSQL_DATABASE
+ value: "TAP_SCHEMA"
+ - name: MYSQL_USER
+ value: "TAP_SCHEMA"
+ - name: MYSQL_PASSWORD
+ value: "TAP_SCHEMA"
+ - name: MYSQL_ROOT_HOST
+ value: "%"
+ image: "{{ .Values.tapSchema.image.repository }}:{{ .Values.tapSchema.image.tag}}"
+ imagePullPolicy: {{ .Values.tapSchema.image.pullPolicy | quote }}
+ ports:
+ - containerPort: 3306
+ protocol: "TCP"
+ {{- with .Values.tapSchema.resources }}
+ resources:
+ {{- toYaml . | nindent 12 }}
+ {{- end }}
+ imagePullSecrets:
+ - name: "pull-secret"
+ {{- with .Values.tapSchema.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tapSchema.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tapSchema.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/applications/tap/templates/tap-schema-db-service.yaml b/applications/tap/templates/tap-schema-db-service.yaml
new file mode 100644
index 0000000000..e5b9dd0856
--- /dev/null
+++ b/applications/tap/templates/tap-schema-db-service.yaml
@@ -0,0 +1,15 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ template "cadc-tap.fullname" . }}-schema-db
+ labels:
+ {{- include "cadc-tap.labels" . | nindent 4 }}
+spec:
+ type: "ClusterIP"
+ ports:
+ - protocol: "TCP"
+ port: 3306
+ targetPort: 3306
+ selector:
+ {{- include "cadc-tap.selectorLabels" . | nindent 4 }}
+ app.kubernetes.io/component: "schema-db"
diff --git a/applications/tap/values-ccin2p3.yaml b/applications/tap/values-ccin2p3.yaml
index c573f85a6c..23363bbc93 100644
--- a/applications/tap/values-ccin2p3.yaml
+++ b/applications/tap/values-ccin2p3.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfprod-tap"
+
config:
gcsBucket: "async-results.lsst.codes"
gcsBucketUrl: "https://cccephs3.in2p3.fr:8080"
diff --git a/applications/tap/values-idfdev.yaml b/applications/tap/values-idfdev.yaml
index b0a7af3d2f..13247032ca 100644
--- a/applications/tap/values-idfdev.yaml
+++ b/applications/tap/values-idfdev.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfdev-tap"
+
resources:
requests:
cpu: 2.0
@@ -8,7 +12,7 @@ resources:
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
qserv:
diff --git a/applications/tap/values-idfint.yaml b/applications/tap/values-idfint.yaml
index 11bab7d2a0..635c879fe1 100644
--- a/applications/tap/values-idfint.yaml
+++ b/applications/tap/values-idfint.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfint-tap"
+
resources:
requests:
cpu: 2.0
@@ -10,7 +14,7 @@ replicaCount: 2
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
qserv:
diff --git a/applications/tap/values-idfprod.yaml b/applications/tap/values-idfprod.yaml
index a96be3b075..d451c4d94a 100644
--- a/applications/tap/values-idfprod.yaml
+++ b/applications/tap/values-idfprod.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfprod-tap"
+
resources:
requests:
cpu: 2.0
@@ -10,7 +14,7 @@ replicaCount: 2
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
jvmMaxHeapSize: "31G"
qserv:
diff --git a/applications/tap/values-minikube.yaml b/applications/tap/values-minikube.yaml
index 6e3f1aca1e..67040a8f0c 100644
--- a/applications/tap/values-minikube.yaml
+++ b/applications/tap/values-minikube.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-idfprod-tap"
+
config:
gcsBucket: "async-results.lsst.codes"
- gcsBucketUrl: "http://async-results.lsst.codes"
+ gcsBucketUrl: "https://tap-files.lsst.codes"
diff --git a/applications/tap/values-roe.yaml b/applications/tap/values-roe.yaml
index 9f302b709c..34a231353d 100644
--- a/applications/tap/values-roe.yaml
+++ b/applications/tap/values-roe.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "stvoutsin/tap-schema-roe"
+
config:
gcsBucket: "async"
gcsBucketUrl: "https://somerville.ed.ac.uk:6780"
diff --git a/applications/tap/values-usdfdev.yaml b/applications/tap/values-usdfdev.yaml
index 83a2b11731..026cc54a10 100644
--- a/applications/tap/values-usdfdev.yaml
+++ b/applications/tap/values-usdfdev.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-usdf-dev-tap"
+
qserv:
host: "172.24.49.51:4040"
mock:
diff --git a/applications/tap/values-usdfprod.yaml b/applications/tap/values-usdfprod.yaml
index 83a2b11731..e274871f19 100644
--- a/applications/tap/values-usdfprod.yaml
+++ b/applications/tap/values-usdfprod.yaml
@@ -1,3 +1,7 @@
+tapSchema:
+ image:
+ repository: "lsstsqre/tap-schema-usdf-prod-tap"
+
qserv:
host: "172.24.49.51:4040"
mock:
diff --git a/applications/tap/values.yaml b/applications/tap/values.yaml
index 1aec4d4528..9132115129 100644
--- a/applications/tap/values.yaml
+++ b/applications/tap/values.yaml
@@ -53,7 +53,7 @@ vaultSecretsPath: ""
config:
# -- Address to a MySQL database containing TAP schema data
- tapSchemaAddress: "tap-schema-db.tap-schema.svc.cluster.local:3306"
+ tapSchemaAddress: "cadc-tap-schema-db:3306"
# -- Datalink payload URL
datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"
@@ -114,6 +114,33 @@ qserv:
# -- Affinity rules for the mock QServ pod
affinity: {}
+tapSchema:
+ image:
+ # -- TAP schema image to use. This must be overridden by each environment
+ # with the TAP schema for that environment.
+ repository: "lsstsqre/tap-schema-mock"
+
+ # -- Pull policy for the TAP schema image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of TAP schema image
+ tag: "2.0.2"
+
+ # -- Resource limits and requests for the TAP schema database pod
+ resources: {}
+
+ # -- Annotations for the TAP schema database pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the TAP schema database pod
+ nodeSelector: {}
+
+ # -- Tolerations for the TAP schema database pod
+ tolerations: []
+
+ # -- Affinity rules for the TAP schema database pod
+ affinity: {}
+
uws:
image:
# -- UWS database image to use
diff --git a/applications/telegraf-ds/Chart.yaml b/applications/telegraf-ds/Chart.yaml
index 48e744058b..e344e67a9e 100644
--- a/applications/telegraf-ds/Chart.yaml
+++ b/applications/telegraf-ds/Chart.yaml
@@ -8,7 +8,7 @@ sources:
- https://github.com/influxdata/helm-charts
dependencies:
- name: telegraf-ds
- version: 1.1.9
+ version: 1.1.11
repository: https://helm.influxdata.com/
annotations:
phalanx.lsst.io/docs: |
diff --git a/applications/telegraf-ds/README.md b/applications/telegraf-ds/README.md
index ccd81dc59f..e46dc546ea 100644
--- a/applications/telegraf-ds/README.md
+++ b/applications/telegraf-ds/README.md
@@ -27,4 +27,4 @@ Kubernetes node telemetry collection service
| telegraf-ds.resources.limits.memory | string | `"512Mi"` | |
| telegraf-ds.serviceAccount.name | string | `"telegraf-ds"` | |
| telegraf-ds.volumes[0].configMap.name | string | `"telegraf-generated-config"` | |
-| telegraf-ds.volumes[0].name | string | `"telegraf-generated-config"` | |
+| telegraf-ds.volumes[0].name | string | `"telegraf-generated-config"` | |
\ No newline at end of file
diff --git a/applications/telegraf-ds/secrets.yaml b/applications/telegraf-ds/secrets.yaml
new file mode 100644
index 0000000000..e348603dd9
--- /dev/null
+++ b/applications/telegraf-ds/secrets.yaml
@@ -0,0 +1,12 @@
+influx-token:
+ description: >-
+ Authentication token used to send data to the central InfluxDB 2 database
+ for monitoring information. This secret can be changed at any time.
+ copy:
+ application: telegraf
+ key: influx-token
+org-id:
+ description: "Organization identity to which monitoring data should be sent."
+ copy:
+ application: telegraf
+ key: org-id
diff --git a/applications/telegraf/Chart.yaml b/applications/telegraf/Chart.yaml
index 8a7cd52436..9579821110 100644
--- a/applications/telegraf/Chart.yaml
+++ b/applications/telegraf/Chart.yaml
@@ -8,7 +8,7 @@ sources:
- https://github.com/influxdata/helm-charts
dependencies:
- name: telegraf
- version: 1.8.27
+ version: 1.8.29
repository: https://helm.influxdata.com/
annotations:
phalanx.lsst.io/docs: |
diff --git a/applications/telegraf/README.md b/applications/telegraf/README.md
index d54785fc49..5c3dae0ff8 100644
--- a/applications/telegraf/README.md
+++ b/applications/telegraf/README.md
@@ -34,4 +34,4 @@ Application telemetry collection service
| telegraf.service.enabled | bool | `false` | |
| telegraf.tplVersion | int | `2` | |
| telegraf.volumes[0].configMap.name | string | `"telegraf-generated-config"` | |
-| telegraf.volumes[0].name | string | `"telegraf-generated-config"` | |
+| telegraf.volumes[0].name | string | `"telegraf-generated-config"` | |
\ No newline at end of file
diff --git a/applications/telegraf/secrets.yaml b/applications/telegraf/secrets.yaml
new file mode 100644
index 0000000000..15e3b21954
--- /dev/null
+++ b/applications/telegraf/secrets.yaml
@@ -0,0 +1,7 @@
+influx-token:
+ description: >-
+ Authentication token used to send data to the central InfluxDB 2 database
+ for monitoring information. This secret can be changed at any time.
+org-id:
+ description: "Organization identity to which monitoring data should be sent."
+ value: "square"
diff --git a/applications/times-square/Chart.yaml b/applications/times-square/Chart.yaml
index 3caa5359cb..17c186befe 100644
--- a/applications/times-square/Chart.yaml
+++ b/applications/times-square/Chart.yaml
@@ -8,11 +8,11 @@ sources:
type: application
# The default version tag of the times-square docker image
-appVersion: "0.7.0"
+appVersion: "0.9.1"
dependencies:
- name: redis
- version: 1.0.5
+ version: 1.0.6
repository: https://lsst-sqre.github.io/charts/
annotations:
diff --git a/applications/times-square/README.md b/applications/times-square/README.md
index 832f05a38a..fac927192e 100644
--- a/applications/times-square/README.md
+++ b/applications/times-square/README.md
@@ -18,12 +18,13 @@ An API service for managing and rendering parameterized Jupyter notebooks.
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud |
| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images |
| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use |
-| cloudsql.image.tag | string | `"1.33.7"` | Cloud SQL Auth Proxy tag to use |
+| cloudsql.image.tag | string | `"1.33.9"` | Cloud SQL Auth Proxy tag to use |
| cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance |
| cloudsql.serviceAccount | string | `""` | The Google service account that has an IAM binding to the `times-square` Kubernetes service accounts and has the `cloudsql.client` role |
| config.databaseUrl | string | None, must be set | URL for the PostgreSQL database |
| config.enableGitHubApp | string | `"False"` | Toggle to enable the GitHub App functionality |
| config.githubAppId | string | `""` | GitHub application ID |
+| config.githubOrgs | string | `"lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst"` | GitHub organizations that can sync repos to Times Square (comma-separated). |
| config.logLevel | string | `"INFO"` | Logging level: "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" |
| config.name | string | `"times-square"` | Name of the service. |
| config.profile | string | `"production"` | Run profile: "production" or "development" |
@@ -59,4 +60,4 @@ An API service for managing and rendering parameterized Jupyter notebooks.
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. If CloudSQL is in use, the annotation specifying the Google service account will also be added. |
| serviceAccount.create | bool | `false` | Force creation of a service account. Normally, no service account is used or mounted. If CloudSQL is enabled, a service account is always created regardless of this value. |
| serviceAccount.name | string | Name based on the fullname template | Name of the service account to use |
-| tolerations | list | `[]` | Tolerations for the times-square deployment pod |
+| tolerations | list | `[]` | Tolerations for the times-square deployment pod |
\ No newline at end of file
diff --git a/applications/times-square/templates/configmap.yaml b/applications/times-square/templates/configmap.yaml
index 4ee03962d6..739914e85b 100644
--- a/applications/times-square/templates/configmap.yaml
+++ b/applications/times-square/templates/configmap.yaml
@@ -15,3 +15,4 @@ data:
TS_REDIS_QUEUE_URL: {{ required "config.redisQueueUrl must be set" .Values.config.redisQueueUrl | quote }}
TS_ENABLE_GITHUB_APP: {{ .Values.config.enableGitHubApp | quote }}
TS_GITHUB_APP_ID: {{ .Values.config.githubAppId | quote }}
+ TS_GITHUB_ORGS: {{ .Values.config.githubOrgs | quote }}
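The new ``TS_GITHUB_ORGS`` setting is a comma-separated allow-list of GitHub organizations. A hypothetical consumer would split it along these lines (Times Square's actual parsing may differ):

.. code-block:: python

   import os

   # Hypothetical parsing of the comma-separated TS_GITHUB_ORGS value.
   os.environ["TS_GITHUB_ORGS"] = "lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst"
   allowed_orgs = {org.strip() for org in os.environ["TS_GITHUB_ORGS"].split(",")}
   print("lsst-sqre" in allowed_orgs)  # True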
diff --git a/applications/times-square/templates/gafaelfawrtoken.yaml b/applications/times-square/templates/gafaelfawrtoken.yaml
index f173ea4fa2..0971a2dcf1 100644
--- a/applications/times-square/templates/gafaelfawrtoken.yaml
+++ b/applications/times-square/templates/gafaelfawrtoken.yaml
@@ -9,3 +9,4 @@ spec:
scopes:
- "admin:token"
- "exec:admin"
+ - "exec:notebook"
diff --git a/applications/times-square/templates/ingress.yaml b/applications/times-square/templates/ingress.yaml
index 8fd58c6eab..2ff6a6c077 100644
--- a/applications/times-square/templates/ingress.yaml
+++ b/applications/times-square/templates/ingress.yaml
@@ -8,7 +8,7 @@ config:
baseUrl: {{ .Values.global.baseUrl | quote }}
scopes:
all:
- - "exec:admin"
+ - "exec:notebook"
loginRedirect: true
template:
metadata:
diff --git a/applications/times-square/values-usdfdev.yaml b/applications/times-square/values-usdfdev.yaml
new file mode 100644
index 0000000000..bafe7055cc
--- /dev/null
+++ b/applications/times-square/values-usdfdev.yaml
@@ -0,0 +1,12 @@
+image:
+ pullPolicy: Always
+config:
+ logLevel: "DEBUG"
+ databaseUrl: "postgresql://timessquare@postgres.postgres/timessquare"
+ githubAppId: "327289"
+ enableGitHubApp: "True"
+cloudsql:
+ enabled: false
+redis:
+ persistence:
+ storageClass: "wekafs--sdf-k8s01"
diff --git a/applications/times-square/values.yaml b/applications/times-square/values.yaml
index 080268f60b..56260c16f3 100644
--- a/applications/times-square/values.yaml
+++ b/applications/times-square/values.yaml
@@ -113,6 +113,9 @@ config:
# -- Toggle to enable the GitHub App functionality
enableGitHubApp: "False"
+ # -- GitHub organizations that can sync repos to Times Square (comma-separated).
+ githubOrgs: "lsst,lsst-sqre,lsst-dm,lsst-ts,lsst-sitcom,lsst-pst"
+
cloudsql:
# -- Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases
# on Google Cloud
@@ -123,7 +126,7 @@ cloudsql:
repository: "gcr.io/cloudsql-docker/gce-proxy"
# -- Cloud SQL Auth Proxy tag to use
- tag: "1.33.7"
+ tag: "1.33.9"
# -- Pull policy for Cloud SQL Auth Proxy images
pullPolicy: "IfNotPresent"
diff --git a/applications/vault-secrets-operator/Chart.yaml b/applications/vault-secrets-operator/Chart.yaml
index ab0467b701..b85db7d0ca 100644
--- a/applications/vault-secrets-operator/Chart.yaml
+++ b/applications/vault-secrets-operator/Chart.yaml
@@ -5,7 +5,7 @@ sources:
- https://github.com/ricoberger/vault-secrets-operator
dependencies:
- name: vault-secrets-operator
- version: 2.4.0
+ version: 2.5.1
repository: https://ricoberger.github.io/helm-charts/
annotations:
phalanx.lsst.io/docs: |
diff --git a/applications/vault-secrets-operator/README.md b/applications/vault-secrets-operator/README.md
index b3e48c2406..9ffebd8f42 100644
--- a/applications/vault-secrets-operator/README.md
+++ b/applications/vault-secrets-operator/README.md
@@ -11,4 +11,4 @@
| vault-secrets-operator.environmentVars | list | Set `VAULT_TOKEN` and `VAULT_TOKEN_LEASE_DURATION` from secret | Additional environment variables used to configure the operator |
| vault-secrets-operator.serviceAccount.createSecret | bool | `false` | Disable creation of a secret for the service account. It shouldn't be needed and it conflicts with the secret we create that contains the credentials for talking to Vault. |
| vault-secrets-operator.vault.address | string | `"https://vault.lsst.codes"` | URL of the underlying Vault implementation |
-| vault-secrets-operator.vault.reconciliationTime | int | `60` | Sync secrets from vault on this cadence |
+| vault-secrets-operator.vault.reconciliationTime | int | `60` | Sync secrets from vault on this cadence |
\ No newline at end of file
diff --git a/applications/vault-secrets-operator/values-usdf-tel-rsp.yaml b/applications/vault-secrets-operator/values-usdf-tel-rsp.yaml
new file mode 100644
index 0000000000..bfb0f3700f
--- /dev/null
+++ b/applications/vault-secrets-operator/values-usdf-tel-rsp.yaml
@@ -0,0 +1,22 @@
+vault-secrets-operator:
+ environmentVars:
+ - name: VAULT_AUTH_METHOD
+ value: approle
+ - name: VAULT_ROLE_ID
+ valueFrom:
+ secretKeyRef:
+ name: vault-secrets-operator
+ key: VAULT_ROLE_ID
+ - name: VAULT_SECRET_ID
+ valueFrom:
+ secretKeyRef:
+ name: vault-secrets-operator
+ key: VAULT_SECRET_ID
+ - name: VAULT_TOKEN_MAX_TTL
+ valueFrom:
+ secretKeyRef:
+ name: vault-secrets-operator
+ key: VAULT_TOKEN_MAX_TTL
+ vault:
+ address: "https://vault.slac.stanford.edu"
+ authMethod: approle
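With ``VAULT_AUTH_METHOD`` set to ``approle``, the operator authenticates with a role ID and secret ID instead of a static token. A rough sketch of that exchange against Vault's documented AppRole login endpoint, with placeholder credentials standing in for the values from the ``vault-secrets-operator`` secret:

.. code-block:: python

   import requests

   VAULT_ADDR = "https://vault.slac.stanford.edu"

   # Placeholder credentials; in the cluster these come from the
   # vault-secrets-operator secret referenced above.
   payload = {"role_id": "<VAULT_ROLE_ID>", "secret_id": "<VAULT_SECRET_ID>"}

   # Vault's AppRole login endpoint returns a short-lived client token.
   resp = requests.post(f"{VAULT_ADDR}/v1/auth/approle/login", json=payload)
   client_token = resp.json()["auth"]["client_token"]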
diff --git a/applications/vo-cutouts/Chart.yaml b/applications/vo-cutouts/Chart.yaml
index 6ff5db7f38..76f821973d 100644
--- a/applications/vo-cutouts/Chart.yaml
+++ b/applications/vo-cutouts/Chart.yaml
@@ -8,7 +8,7 @@ appVersion: 1.0.0
dependencies:
- name: redis
- version: 1.0.5
+ version: 1.0.6
repository: https://lsst-sqre.github.io/charts/
annotations:
diff --git a/applications/vo-cutouts/README.md b/applications/vo-cutouts/README.md
index fad03120b2..985623fe3f 100644
--- a/applications/vo-cutouts/README.md
+++ b/applications/vo-cutouts/README.md
@@ -14,7 +14,7 @@ Image cutout service complying with IVOA SODA
| cloudsql.enabled | bool | `false` | Enable the Cloud SQL Auth Proxy sidecar, used with CloudSQL databases on Google Cloud |
| cloudsql.image.pullPolicy | string | `"IfNotPresent"` | Pull policy for Cloud SQL Auth Proxy images |
| cloudsql.image.repository | string | `"gcr.io/cloudsql-docker/gce-proxy"` | Cloud SQL Auth Proxy image to use |
-| cloudsql.image.tag | string | `"1.33.7"` | Cloud SQL Auth Proxy tag to use |
+| cloudsql.image.tag | string | `"1.33.9"` | Cloud SQL Auth Proxy tag to use |
| cloudsql.instanceConnectionName | string | `""` | Instance connection name for a CloudSQL PostgreSQL instance |
| cloudsql.serviceAccount | string | None, must be set | The Google service account that has an IAM binding to the `vo-cutouts` Kubernetes service accounts and has the `cloudsql.client` role, access to the GCS bucket, and ability to sign URLs as itself |
| config.databaseUrl | string | None, must be set | URL for the PostgreSQL database |
@@ -64,4 +64,4 @@ Image cutout service complying with IVOA SODA
| redis.tolerations | list | `[]` | Tolerations for the Redis pod |
| replicaCount | int | `1` | Number of web frontend pods to start |
| resources | object | `{}` | Resource limits and requests for the vo-cutouts frontend pod |
-| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod |
+| tolerations | list | `[]` | Tolerations for the vo-cutouts frontend pod |
\ No newline at end of file
diff --git a/applications/vo-cutouts/secrets-idfdev.yaml b/applications/vo-cutouts/secrets-idfdev.yaml
new file mode 100644
index 0000000000..57998942f8
--- /dev/null
+++ b/applications/vo-cutouts/secrets-idfdev.yaml
@@ -0,0 +1,20 @@
+aws-credentials:
+ description: >-
+ Google Cloud Storage credentials to the Butler data store, formatted using
+ AWS syntax for use with boto.
+ copy:
+ application: nublado
+ key: "aws-credentials.ini"
+google-credentials:
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the native
+ Google syntax, containing the private asymmetric key.
+ copy:
+ application: nublado
+ key: "butler-gcs-idf-creds.json"
+postgres-credentials:
+ description: >-
+ PostgreSQL credentials in its pgpass format for the Butler database.
+ copy:
+ application: nublado
+ key: "postgres-credentials.txt"
diff --git a/applications/vo-cutouts/secrets.yaml b/applications/vo-cutouts/secrets.yaml
new file mode 100644
index 0000000000..197cd339df
--- /dev/null
+++ b/applications/vo-cutouts/secrets.yaml
@@ -0,0 +1,13 @@
+database-password:
+ description: >-
+ Password used to authenticate to the PostgreSQL database used to store
+ vo-cutouts job information. This password may be changed at any time.
+redis-password:
+ description: >-
+ Password used to authenticate vo-cutouts to its internal Redis server,
+ deployed as part of the same Argo CD application and used to manage the
+ work queue. This secret can be changed at any time, but both the Redis
+ server and all vo-cutouts deployments will then have to be restarted to
+ pick up the new value.
+ generate:
+ type: password
diff --git a/applications/vo-cutouts/values.yaml b/applications/vo-cutouts/values.yaml
index 99bd762f47..6b93573ab6 100644
--- a/applications/vo-cutouts/values.yaml
+++ b/applications/vo-cutouts/values.yaml
@@ -75,7 +75,7 @@ cloudsql:
repository: "gcr.io/cloudsql-docker/gce-proxy"
# -- Cloud SQL Auth Proxy tag to use
- tag: "1.33.7"
+ tag: "1.33.9"
# -- Pull policy for Cloud SQL Auth Proxy images
pullPolicy: "IfNotPresent"
diff --git a/docs/Makefile b/docs/Makefile
deleted file mode 100644
index 02d05fdc68..0000000000
--- a/docs/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# Makefile for Sphinx documentation.
-# Use tox -e docs,docs-linkcheck to build the docs.
-
-.PHONY: help
-help:
- @echo "Please use \`make ' where is one of"
- @echo " clean delete builds"
-
-.PHONY: clean
-clean:
- rm -rf _build/*
diff --git a/docs/about/contributing-docs.rst b/docs/about/contributing-docs.rst
index 1ace4686d7..9a48d4af89 100644
--- a/docs/about/contributing-docs.rst
+++ b/docs/about/contributing-docs.rst
@@ -64,9 +64,9 @@ Submitting a pull request and sharing documentation drafts
Members of the `lsst-sqre/phalanx`_ repository should submit pull requests following the `Data Management workflow guide`_.
Note that GitHub Actions builds the documentation and uploads a draft edition of the documentation to the web.
-You can find your branch's development edition at https://phalanx.lsst.io/v.
+You can find your branch's development edition at `the list of available versions <https://phalanx.lsst.io/v>`__.
-If you are submitting a GitHub pull request from a fork, the documentation will build as a check, however the draft won't upload for public staging.
+If you are submitting a GitHub pull request from a fork, the documentation will build as a check, but the draft won't upload for public staging.
More information on writing documentation
=========================================
diff --git a/docs/about/introduction.rst b/docs/about/introduction.rst
index 186eda79bc..83be13fdbd 100644
--- a/docs/about/introduction.rst
+++ b/docs/about/introduction.rst
@@ -65,7 +65,7 @@ In Phalanx, the word *application* specifically refers to a Helm chart located i
That Helm chart directory includes the Kubernetes templates and Docker image references to deploy the application, as well as values files to configure the application for each environment.
Argo CD
-=======
+========
`Argo CD`_ manages the Kubernetes deployments of each application's Helm chart from the Phalanx repository.
Each environment runs its own instance of Argo CD (as Argo CD is itself an application in Phalanx).
diff --git a/docs/applications/argo-cd/index.rst b/docs/applications/argo-cd/index.rst
index 92ed9114c9..ea807af5a1 100644
--- a/docs/applications/argo-cd/index.rst
+++ b/docs/applications/argo-cd/index.rst
@@ -20,4 +20,5 @@ Guides
bootstrap
authentication
upgrade
+ troubleshoot
values
diff --git a/docs/applications/argo-cd/troubleshoot.rst b/docs/applications/argo-cd/troubleshoot.rst
new file mode 100644
index 0000000000..e64d1a98db
--- /dev/null
+++ b/docs/applications/argo-cd/troubleshoot.rst
@@ -0,0 +1,46 @@
+.. px-app-troubleshooting:: argocd
+
+######################
+Troubleshooting argocd
+######################
+
+.. _argocd-fix-corrupt-git-index:
+
+Fixing a corrupt git index
+==========================
+
+If an Argo CD app shows the following error, the Git index for the cloned repository that controls the Argo CD apps is corrupted:
+
+.. code-block:: shell
+
+ rpc error: code = Internal desc = Failed to fetch default: `git fetch origin --tags --force --prune` failed exit status 128: fatal: .git/index: index file smaller than expected
+
+The corrupt Git index file needs to be removed and re-created by following these steps:
+
+#. Find the ``argocd-repo-server`` pod and grep the logs:
+
+ .. code-block:: shell
+
+ pod=$(kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-repo-server | grep argocd | awk '{print $1}')
+ kubectl logs -n argocd $pod | grep -B1 "index file smaller than expected" | grep -B1 "execID"
+
+#. In the output of that grep, find the ``dir`` field. For example:
+
+ .. code-block:: shell
+
+ {"dir":"/tmp/_argocd-repo/35fe76f8-488a-4871-baaa-5f81d81331b1","execID":"a98af","level":"info","msg":"git fetch origin --tags --force --prune","time":"2023-06-13T18:48:12Z"}
+ {"execID":"a98af","level":"error","msg":"`git fetch origin --tags --force --prune` failed exit status 128: fatal: .git/index: index file smaller than expected","time":"2023-06-13T18:48:12Z"}
+
+#. Exec into the repo server pod:
+
+ .. code-block:: shell
+
+ kubectl exec -it -n argocd $pod -- /bin/bash
+
+#. Using the directory found from the logs, execute:
+
+ .. code-block:: shell
+
+ rm /path/from/log/.git/index
+
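+(Steps 3 and 4 can also be combined without an interactive shell: ``kubectl exec -n argocd $pod -- rm /path/from/log/.git/index``.)
+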
+The system will refresh itself automatically, so all that needs to be done now is to wait and see whether the error clears.
\ No newline at end of file
diff --git a/docs/applications/argo-workflows/index.rst b/docs/applications/argo-workflows/index.rst
index cde4b808bb..da6c4b6846 100644
--- a/docs/applications/argo-workflows/index.rst
+++ b/docs/applications/argo-workflows/index.rst
@@ -1,8 +1,8 @@
.. px-app:: argo-workflows
-#############################################
-production-tools — Data Production monitoring
-#############################################
+###############################
+argo-workflows — Argo workflows
+###############################
Argo Workflows is a workflow engine for job parallelization on Kubernetes.
diff --git a/docs/applications/cert-manager/add-new-hostname.rst b/docs/applications/cert-manager/add-new-hostname.rst
new file mode 100644
index 0000000000..e8aadf1ccf
--- /dev/null
+++ b/docs/applications/cert-manager/add-new-hostname.rst
@@ -0,0 +1,30 @@
+#######################################
+Add TLS certificates for a new hostname
+#######################################
+
+Every hostname served by an instance of the Rubin Science Platform must have its own TLS certificate, generated by cert-manager, unless that environment is configured to use external certificates.
+When adding a new hostname to an existing environment, one must therefore repeat part of the instructions in :px-app-bootstrap:`cert-manager`.
+Here are those abbreviated instructions.
+
+These instructions are specific to environments that use Route 53.
+
+The hostname must be in the same domain that is already used as the primary hostname of the environment.
+See :doc:`route53-setup` and :px-app-bootstrap:`cert-manager` for the details of that setup.
+
+Instructions
+============
+
+In the following, ``<hostname>`` is the new hostname that needs a TLS certificate.
+``<domain>`` is the domain shared by that hostname and the primary fully-qualified domain name for that cluster.
+For example, when adding ``alert-stream-int-broker-0.lsst.cloud``, the hostname is ``alert-stream-int-broker-0`` and the domain name is ``lsst.cloud``.
+
+#. Go to the Route 53 UI for the domain used by this RSP environment.
+ In the above example, that would be ``lsst.cloud``.
+
+#. Create a CNAME named ``_acme-challenge.<hostname>`` whose value is ``_acme-challenge.tls.<domain>``.
+ Do this by adding a new record, selecting :guilabel:`CNAME` from the lower drop-down menu, and then selecting :guilabel:`IP address or other value` from the top drop-down menu.
+ Then, enter ``_acme-challenge.tls.<domain>`` as the CNAME target.
+
+This should be all that's required to allow cert-manager to create certificates for that hostname.
+You will then need to configure the ``tls`` portion of the relevant ``Ingress`` or ``GafaelfawrIngress`` object.
+For more information on how to do that, see :px-app-notes:`cert-manager`.
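+
+If you prefer the command line, the same record can be created with the AWS CLI (a sketch only; ``<zone-id>`` is a placeholder for the hosted zone ID):
+
+.. code-block:: shell
+
+   aws route53 change-resource-record-sets \
+     --hosted-zone-id <zone-id> \
+     --change-batch '{
+       "Changes": [{
+         "Action": "UPSERT",
+         "ResourceRecordSet": {
+           "Name": "_acme-challenge.<hostname>.<domain>",
+           "Type": "CNAME",
+           "TTL": 300,
+           "ResourceRecords": [{"Value": "_acme-challenge.tls.<domain>"}]
+         }
+       }]
+     }'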
diff --git a/docs/applications/cert-manager/bootstrap.rst b/docs/applications/cert-manager/bootstrap.rst
index d7dd489a21..e80d4dc530 100644
--- a/docs/applications/cert-manager/bootstrap.rst
+++ b/docs/applications/cert-manager/bootstrap.rst
@@ -21,6 +21,9 @@ Select **CNAME** from the lower drop-down menu and then **IP address or other va
For example, if the cluster name is ``data-dev.lsst.cloud``, create a CNAME record at ``_acme-challenge.data-dev.lsst.cloud`` whose value is ``_acme-challenge.tls.lsst.cloud``.
In the Route 53 console, the name of the record you create in the ``lsst.cloud`` hosted zone will be ``_acme-challenge.data-dev`` (yes, including the period).
+This will need to be done for each hostname served by this instance of the RSP.
+See :doc:`add-new-hostname` for a shorter version of these instructions to follow for each new hostname added.
+
Add the following to the ``values-*.yaml`` file for an environment:
.. code-block:: yaml
diff --git a/docs/applications/cert-manager/index.rst b/docs/applications/cert-manager/index.rst
index 6bffb9f54b..c9658c6676 100644
--- a/docs/applications/cert-manager/index.rst
+++ b/docs/applications/cert-manager/index.rst
@@ -17,8 +17,9 @@ Guides
.. toctree::
- notes
bootstrap
+ add-new-hostname
route53-setup
upgrade
+ notes
values
diff --git a/docs/applications/giftless/index.rst b/docs/applications/giftless/index.rst
new file mode 100644
index 0000000000..13746c3abb
--- /dev/null
+++ b/docs/applications/giftless/index.rst
@@ -0,0 +1,21 @@
+.. px-app:: giftless
+
+#########################
+Giftless — Git LFS server
+#########################
+
+Giftless, a Git LFS server provided by Datopian, is the Rubin Observatory provider of Git LFS services.
+This implementation provides both read-only and read-write endpoints for Git LFS.
+
+See `Datopian's documentation `__ for details.
+
+.. jinja:: giftless
+ :file: applications/_summary.rst.jinja
+
+Guides
+======
+
+.. toctree::
+ :maxdepth: 1
+
+ values
diff --git a/docs/applications/giftless/values.md b/docs/applications/giftless/values.md
new file mode 100644
index 0000000000..f2566e36dc
--- /dev/null
+++ b/docs/applications/giftless/values.md
@@ -0,0 +1,12 @@
+```{px-app-values} giftless
+```
+
+# Giftless Helm values reference
+
+Helm values reference table for the {px-app}`giftless` application.
+
+```{include} ../../../applications/giftless/README.md
+---
+start-after: "## Values"
+---
+```
diff --git a/docs/applications/index.rst b/docs/applications/index.rst
index cb21da0bde..1b1ef2ca93 100644
--- a/docs/applications/index.rst
+++ b/docs/applications/index.rst
@@ -13,7 +13,6 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde
:caption: Cluster infrastructure
argo-cd/index
- argo-workflows/index
cert-manager/index
ingress-nginx/index
gafaelfawr/index
@@ -28,12 +27,12 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde
datalinker/index
hips/index
linters/index
+ livetap/index
mobu/index
moneypenny/index
noteburst/index
nublado/index
nublado2/index
- obstap/index
portal/index
semaphore/index
sherlock/index
@@ -49,9 +48,11 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde
:maxdepth: 1
:caption: RSP+
+ argo-workflows/index
alert-stream-broker/index
exposurelog/index
narrativelog/index
+ obsloctap/index
plot-navigator/index
production-tools/index
sasquatch/index
@@ -65,5 +66,8 @@ To learn how to develop applications for Phalanx, see the :doc:`/developers/inde
:maxdepth: 1
:caption: Roundtable
+ giftless/index
kubernetes-replicator/index
+ monitoring/index
+ ook/index
squarebot/index
diff --git a/docs/applications/obstap/index.rst b/docs/applications/livetap/index.rst
similarity index 56%
rename from docs/applications/obstap/index.rst
rename to docs/applications/livetap/index.rst
index 86c9be1e34..ef6c41b229 100644
--- a/docs/applications/obstap/index.rst
+++ b/docs/applications/livetap/index.rst
@@ -1,12 +1,12 @@
-.. px-app:: obstap
+.. px-app:: livetap
-###########################################
-obstap — IVOA OBSCore Table Access Protocol
-###########################################
+############################################
+livetap — IVOA livetap Table Access Protocol
+############################################
-OBSTAP_ (OBSCore Table Access Protocol) is an IVOA_ service that provides access to the ObsCore table which is hosted on postgres.
+LiveTAP (Live ObsCore Table Access Protocol) is an IVOA_ service that provides access to the live ObsCore table, which is hosted in Postgres.
On the Rubin Science Platform, it is provided by `tap-postgres `__, which is derived from the `CADC TAP service `__.
-This service provides access to the ObsTAP tables that are created and served by the butler.
+This service provides access to the ObsCore tables that are created and served by the butler and updated live.
The TAP data itself, apart from schema queries, comes from Postgres.
The TAP schema is provided by the separate :px-app:`tap-schema` application.
diff --git a/docs/applications/obstap/notebook-tap.py b/docs/applications/livetap/notebook-tap.py
similarity index 100%
rename from docs/applications/obstap/notebook-tap.py
rename to docs/applications/livetap/notebook-tap.py
diff --git a/docs/applications/livetap/notes.rst b/docs/applications/livetap/notes.rst
new file mode 100644
index 0000000000..74b82e3a41
--- /dev/null
+++ b/docs/applications/livetap/notes.rst
@@ -0,0 +1,11 @@
+.. px-app-notes:: livetap
+
+##############################
+LiveTAP architecture and notes
+##############################
+
+The ``livetap`` application consists of the TAP Java web application, a PostgreSQL database used to track user job submissions (the backing store for the UWS_ protocol), and (on development deployments) a mock version of Postgres. The butler updates a table in this database to keep a live version of the ObsCore table.
+
+.. diagrams:: notebook-tap.py
+
+.. diagrams:: portal-tap.py
diff --git a/docs/applications/obstap/portal-tap.py b/docs/applications/livetap/portal-tap.py
similarity index 100%
rename from docs/applications/obstap/portal-tap.py
rename to docs/applications/livetap/portal-tap.py
diff --git a/docs/applications/livetap/values.md b/docs/applications/livetap/values.md
new file mode 100644
index 0000000000..0f4d697630
--- /dev/null
+++ b/docs/applications/livetap/values.md
@@ -0,0 +1,12 @@
+```{px-app-values} livetap
+```
+
+# livetap Helm values reference
+
+Helm values reference table for the {px-app}`livetap` application.
+
+```{include} ../../../applications/livetap/README.md
+---
+start-after: "## Values"
+---
+```
diff --git a/docs/applications/monitoring/index.rst b/docs/applications/monitoring/index.rst
new file mode 100644
index 0000000000..133f031725
--- /dev/null
+++ b/docs/applications/monitoring/index.rst
@@ -0,0 +1,19 @@
+.. px-app:: monitoring
+
+########################
+Monitoring Chronograf UI
+########################
+
+Monitoring is an implementation of the Chronograf UI for monitoring the
+health and resource usage of Phalanx applications.
+
+.. jinja:: monitoring
+ :file: applications/_summary.rst.jinja
+
+Guides
+======
+
+.. toctree::
+ :maxdepth: 1
+
+ values
diff --git a/docs/applications/monitoring/values.md b/docs/applications/monitoring/values.md
new file mode 100644
index 0000000000..cad9e5bc7f
--- /dev/null
+++ b/docs/applications/monitoring/values.md
@@ -0,0 +1,12 @@
+```{px-app-values} monitoring
+```
+
+# Monitoring Helm values reference
+
+Helm values reference table for the {px-app}`monitoring` application.
+
+```{include} ../../../applications/monitoring/README.md
+---
+start-after: "## Values"
+---
+```
diff --git a/docs/applications/obsloctap/index.rst b/docs/applications/obsloctap/index.rst
new file mode 100644
index 0000000000..889cf8770c
--- /dev/null
+++ b/docs/applications/obsloctap/index.rst
@@ -0,0 +1,23 @@
+.. px-app:: obsloctap
+
+####################################
+obsloctap — serve observing schedule
+####################################
+
+obsloctap looks up and reformats ``lsst.sal.Scheduler.logevent_predictedSchedule`` and returns a JSON file of the future observations.
+To do: also track which observations were made, and implement the ObsLocTAP_ IVOA standard.
+
+.. _ObsLocTAP: https://www.ivoa.net/documents/ObsLocTAP/
+
+
+.. jinja:: obsloctap
+ :file: applications/_summary.rst.jinja
+
+Guides
+======
+
+.. toctree::
+ :maxdepth: 1
+
+ values
diff --git a/docs/applications/obsloctap/values.md b/docs/applications/obsloctap/values.md
new file mode 100644
index 0000000000..2630a7302f
--- /dev/null
+++ b/docs/applications/obsloctap/values.md
@@ -0,0 +1,12 @@
+```{px-app-values} obsloctap
+```
+
+# Helm values reference
+
+Helm values reference table for the {px-app}`obsloctap` application.
+
+```{include} ../../../applications/obsloctap/README.md
+---
+start-after: "## Values"
+---
+```
diff --git a/docs/applications/obstap/notes.rst b/docs/applications/obstap/notes.rst
deleted file mode 100644
index 56fe36427a..0000000000
--- a/docs/applications/obstap/notes.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. px-app-notes:: obstap
-
-#############################
-OBSTAP architecture and notes
-#############################
-
-The ``obstap`` application consists of the TAP Java web application, a PostgreSQL database used to track user job submissions (the backing store for the UWS_ protocol), and (on development deployments) a mock version of postgres.
-
-.. diagrams:: notebook-tap.py
-
-.. diagrams:: portal-tap.py
diff --git a/docs/applications/obstap/values.md b/docs/applications/obstap/values.md
deleted file mode 100644
index ca5d90c35e..0000000000
--- a/docs/applications/obstap/values.md
+++ /dev/null
@@ -1,12 +0,0 @@
-```{px-app-values} obstap
-```
-
-# obstap Helm values reference
-
-Helm values reference table for the {px-app}`obstap` application.
-
-```{include} ../../../applications/obstap/README.md
----
-start-after: "## Values"
----
-```
diff --git a/docs/applications/ook/index.rst b/docs/applications/ook/index.rst
new file mode 100644
index 0000000000..9228055682
--- /dev/null
+++ b/docs/applications/ook/index.rst
@@ -0,0 +1,19 @@
+.. px-app:: ook
+
+############################
+ook — Documentation indexing
+############################
+
+Ook is the librarian service for Rubin Observatory.
+Ook indexes documentation content into the Algolia search engine that powers the Rubin Observatory documentation portal, https://www.lsst.io.
+
+.. jinja:: ook
+ :file: applications/_summary.rst.jinja
+
+Guides
+======
+
+.. toctree::
+ :maxdepth: 1
+
+ values
diff --git a/docs/applications/ook/values.md b/docs/applications/ook/values.md
new file mode 100644
index 0000000000..25c5b97f83
--- /dev/null
+++ b/docs/applications/ook/values.md
@@ -0,0 +1,12 @@
+```{px-app-values} ook
+```
+
+# Ook Helm values reference
+
+Helm values reference table for the {px-app}`ook` application.
+
+```{include} ../../../applications/ook/README.md
+---
+start-after: "## Values"
+---
+```
diff --git a/docs/conf.py b/docs/conf.py
index 9d4279b46a..fe8a311691 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -32,3 +32,6 @@
linkcheck_exclude_documents = [
r"applications/.*/values",
]
+
+# Include JSON schemas in the documentation output tree.
+html_extra_path = ["extras"]
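+# Sphinx copies the contents of docs/extras verbatim into the root of the
+# built site, so docs/extras/schemas/secrets.json is published at
+# https://phalanx.lsst.io/schemas/secrets.json (matching its $id).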
diff --git a/docs/developers/add-application.rst b/docs/developers/add-application.rst
index b809c1c27d..37d133771f 100644
--- a/docs/developers/add-application.rst
+++ b/docs/developers/add-application.rst
@@ -33,7 +33,13 @@ You will need to make at least the following changes to the default Helm chart t
See `Gafaelfawr's documentation on Ingress configurations `__ for more information, and see :dmtn:`235` for a guide to what scopes to use to protect the application.
-- If your application exposes Prometheus endpoints, you will want to configure these in the `telegraf application's prometheus_config `__.
+Other Phalanx configuration
+---------------------------
+
+If the application needs to listen on hostnames other than the normal cluster-wide hostname, you will need to configure :px-app:`cert-manager` so that it can generate a TLS certificate for that hostname.
+See :doc:`/applications/cert-manager/add-new-hostname` for more details.
+
+If your application exposes Prometheus endpoints, you will want to configure these in the `telegraf application's prometheus_config `__.
Documentation
-------------
@@ -41,11 +47,16 @@ Documentation
Phalanx uses `helm-docs`_ to generate documentation for Helm charts.
This produces a nice Markdown README file that documents all the chart options, but it requires special formatting of the ``values.yaml`` file that is not present in the default Helm template.
+Documentation is **REQUIRED**.
+Every new application added to Phalanx must have a corresponding folder in the `docs/applications directory `__ containing at least an ``index.rst`` file and a ``values.md`` file.
+The ``values.md`` file is boilerplate to incorporate the documentation of the ``values.yaml`` file for the new application.
+
+For a simple example that you can copy if desired, see the `docs for the HIPS service `__.
+
Publication
-----------
Rubin-developed Helm charts for the Science Platform are stored as part of the `phalanx repository `__. They can be found in the `applications directory `__.
-
Examples
--------
diff --git a/docs/developers/create-an-application.rst b/docs/developers/create-an-application.rst
index c9159e804d..516cf11a85 100644
--- a/docs/developers/create-an-application.rst
+++ b/docs/developers/create-an-application.rst
@@ -22,9 +22,8 @@ Select ``FastAPI application (Safir)`` from the list of project types.
This will create a new GitHub repository with the basic framework of a FastAPI_ application that will work well inside the Rubin Science Platform.
The template uses Safir_ to simplify and regularize many parts of your FastAPI_ application, from logger to database handling.
-Any Python application destined for the RSP should regularly update its dependencies to pick up any security fixes.
-If your application follows the code layout of the FastAPI template, use `neophile `__ to automatically create PRs to update your dependencies.
-To add your application to the list of repositories that neophile updates, submit a PR to add the repository owner and name to `neophile's configuration `__.
+Any Python application destined for the RSP must regularly update its dependencies to pick up any security fixes and make new releases with those updated dependencies.
+If you use the template as described above, GitHub Actions CI will warn you when application dependencies are out of date.
Each release of your application must be tagged.
The tag should use `semantic versioning`_ (for example, ``1.3.2``).
diff --git a/docs/developers/deploy-from-a-branch.rst b/docs/developers/deploy-from-a-branch.rst
index 1eb2fc813c..50de6bcef9 100644
--- a/docs/developers/deploy-from-a-branch.rst
+++ b/docs/developers/deploy-from-a-branch.rst
@@ -21,7 +21,7 @@ Through this process it is possible to develop an application in a fairly tight
Preparing and pushing a branch
==============================
-Start by creating a branch of the `phalanx repository`_ and editing your appliation.
+Start by creating a branch of the `phalanx repository`_ and editing your application.
You can make many types of edits to the application.
The most straightforward changes are updates to your application's Docker images or the Helm sub-charts the application depends on.
diff --git a/docs/documenteer.toml b/docs/documenteer.toml
index 69ba7e0d65..68c4b5ae13 100644
--- a/docs/documenteer.toml
+++ b/docs/documenteer.toml
@@ -31,6 +31,8 @@ ignore = [
'^https://roundtable.lsst.cloud',
'^https://usdf-rsp.slac.stanford.edu',
'^https://usdf-rsp-dev.slac.stanford.edu',
+ '^https://usdf-tel-rsp.slac.stanford.edu',
'^https://github.com/lsst-sqre/phalanx/blob/main/applications/strimzi/values.yaml',
'^https://github.com/orgs/',
+ '^https://ook.lsst.io/', # FIXME re-add when Ook docs are published
]
diff --git a/docs/environments/index.rst b/docs/environments/index.rst
index 6f28095e71..a6d39fdcfc 100644
--- a/docs/environments/index.rst
+++ b/docs/environments/index.rst
@@ -25,3 +25,4 @@ To learn more about operating a Phalanx environment, see the :doc:`/admin/index`
tucson-teststand/index
usdfdev/index
usdfprod/index
+ usdf-tel-rsp/index
diff --git a/docs/environments/usdf-tel-rsp/index.rst b/docs/environments/usdf-tel-rsp/index.rst
new file mode 100644
index 0000000000..67daf7a522
--- /dev/null
+++ b/docs/environments/usdf-tel-rsp/index.rst
@@ -0,0 +1,10 @@
+.. px-env:: usdf-tel-rsp
+
+############################################################
+usdf-tel-rsp — usdf-tel-rsp.slac.stanford.edu (Dev for USDF)
+############################################################
+
+``usdf-tel-rsp`` is a development environment for the Rubin Science Platform at the United States Data Facility (USDF) hosted at SLAC.
+
+.. jinja:: usdf-tel-rsp
+ :file: environments/_summary.rst.jinja
diff --git a/docs/extras/schemas/secrets.json b/docs/extras/schemas/secrets.json
new file mode 100644
index 0000000000..3aebc02f71
--- /dev/null
+++ b/docs/extras/schemas/secrets.json
@@ -0,0 +1,135 @@
+{
+ "$id": "https://phalanx.lsst.io/schemas/secrets.json",
+ "title": "Phalanx application secret definitions",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/ConditionalSecretConfig"
+ },
+ "definitions": {
+ "ConditionalSecretCopyRules": {
+ "title": "ConditionalSecretCopyRules",
+ "description": "Possibly conditional rules for copying a secret value from another.",
+ "type": "object",
+ "properties": {
+ "if": {
+ "title": "Condition",
+ "description": "Configuration only applies if this Helm chart setting is set to a true value",
+ "type": "string"
+ },
+ "application": {
+ "title": "Application",
+ "type": "string"
+ },
+ "key": {
+ "title": "Key",
+ "type": "string"
+ }
+ },
+ "required": [
+ "application",
+ "key"
+ ],
+ "additionalProperties": false
+ },
+ "ConditionalSimpleSecretGenerateRules": {
+ "title": "ConditionalSimpleSecretGenerateRules",
+ "description": "Conditional rules for generating a secret value with no source.",
+ "type": "object",
+ "properties": {
+ "if": {
+ "title": "Condition",
+ "description": "Configuration only applies if this Helm chart setting is set to a true value",
+ "type": "string"
+ },
+ "type": {
+ "title": "Type",
+ "enum": [
+ "password",
+ "gafaelfawr-token",
+ "fernet-key",
+ "rsa-private-key"
+ ],
+ "type": "string"
+ }
+ },
+ "required": [
+ "type"
+ ],
+ "additionalProperties": false
+ },
+ "ConditionalSourceSecretGenerateRules": {
+ "title": "ConditionalSourceSecretGenerateRules",
+ "description": "Conditional rules for generating a secret from another secret.",
+ "type": "object",
+ "properties": {
+ "if": {
+ "title": "Condition",
+ "description": "Configuration only applies if this Helm chart setting is set to a true value",
+ "type": "string"
+ },
+ "type": {
+ "title": "Type",
+ "enum": [
+ "bcrypt-password-hash",
+ "mtime"
+ ],
+ "type": "string"
+ },
+ "source": {
+ "title": "Source",
+ "type": "string"
+ }
+ },
+ "required": [
+ "type",
+ "source"
+ ]
+ },
+ "ConditionalSecretConfig": {
+ "title": "ConditionalSecretConfig",
+ "description": "Possibly conditional specification for an application secret.",
+ "type": "object",
+ "properties": {
+ "if": {
+ "title": "Condition",
+ "description": "Configuration only applies if this Helm chart setting is set to a true value",
+ "type": "string"
+ },
+ "description": {
+ "title": "Description",
+ "type": "string"
+ },
+ "copy": {
+ "title": "Copy",
+ "description": "Rules for where the secret should be copied from",
+ "allOf": [
+ {
+ "$ref": "#/definitions/ConditionalSecretCopyRules"
+ }
+ ]
+ },
+ "generate": {
+ "title": "Generate",
+ "anyOf": [
+ {
+ "$ref": "#/definitions/ConditionalSimpleSecretGenerateRules"
+ },
+ {
+ "$ref": "#/definitions/ConditionalSourceSecretGenerateRules"
+ }
+ ]
+ },
+ "value": {
+ "title": "Value",
+ "type": "string",
+ "writeOnly": true,
+ "format": "password"
+ }
+ },
+ "required": [
+ "description"
+ ],
+ "additionalProperties": false
+ }
+ }
+}
\ No newline at end of file
diff --git a/environments/README.md b/environments/README.md
index c9e91660fb..755e90de14 100644
--- a/environments/README.md
+++ b/environments/README.md
@@ -14,19 +14,22 @@
| exposurelog.enabled | bool | `false` | |
| fqdn | string | None, must be set | Fully-qualified domain name where the environment is running |
| gafaelfawr.enabled | bool | `false` | |
+| giftless.enabled | bool | `false` | |
| hips.enabled | bool | `false` | |
| ingress-nginx.enabled | bool | `false` | |
| kubernetes-replicator.enabled | bool | `false` | |
| linters.enabled | bool | `false` | |
+| livetap.enabled | bool | `false` | |
| mobu.enabled | bool | `false` | |
| moneypenny.enabled | bool | `false` | |
+| monitoring.enabled | bool | `false` | |
| narrativelog.enabled | bool | `false` | |
| noteburst.enabled | bool | `false` | |
| nublado.enabled | bool | `false` | |
| nublado2.enabled | bool | `false` | |
| obsloctap.enabled | bool | `false` | |
-| obstap.enabled | bool | `false` | |
| onepasswordUuid | string | `"dg5afgiadsffeklfr6jykqymeu"` | UUID of the 1Password item in which to find Vault tokens |
+| ook.enabled | bool | `false` | |
| plot-navigator.enabled | bool | `false` | |
| portal.enabled | bool | `false` | |
| postgres.enabled | bool | `false` | |
@@ -51,4 +54,4 @@
| times-square.enabled | bool | `false` | |
| vault-secrets-operator.enabled | bool | `false` | |
| vaultPathPrefix | string | None, must be set | Prefix for Vault secrets for this environment |
-| vo-cutouts.enabled | bool | `false` | |
+| vo-cutouts.enabled | bool | `false` | |
\ No newline at end of file
diff --git a/environments/templates/giftless-application.yaml b/environments/templates/giftless-application.yaml
new file mode 100644
index 0000000000..940d35b335
--- /dev/null
+++ b/environments/templates/giftless-application.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.giftless.enabled -}}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: "giftless"
+spec:
+ finalizers:
+ - "kubernetes"
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: "giftless"
+ namespace: "argocd"
+ finalizers:
+ - "resources-finalizer.argocd.argoproj.io"
+spec:
+ destination:
+ namespace: "giftless"
+ server: "https://kubernetes.default.svc"
+ project: "default"
+ source:
+ path: "applications/giftless"
+ repoURL: {{ .Values.repoURL | quote }}
+ targetRevision: {{ .Values.targetRevision | quote }}
+ helm:
+ parameters:
+ - name: "global.host"
+ value: {{ .Values.fqdn | quote }}
+ - name: "global.baseUrl"
+ value: "https://{{ .Values.fqdn }}"
+ - name: "global.vaultSecretsPath"
+ value: {{ .Values.vaultPathPrefix | quote }}
+ valueFiles:
+ - "values.yaml"
+ - "values-{{ .Values.environment }}.yaml"
+{{- end -}}
diff --git a/environments/templates/obstap-application.yaml b/environments/templates/livetap-application.yaml
similarity index 86%
rename from environments/templates/obstap-application.yaml
rename to environments/templates/livetap-application.yaml
index 0816365292..b9b4161242 100644
--- a/environments/templates/obstap-application.yaml
+++ b/environments/templates/livetap-application.yaml
@@ -1,8 +1,8 @@
-{{- if .Values.obstap.enabled -}}
+{{- if .Values.livetap.enabled -}}
apiVersion: v1
kind: Namespace
metadata:
- name: obstap
+ name: livetap
spec:
finalizers:
- kubernetes
@@ -10,17 +10,17 @@ spec:
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
- name: obstap
+ name: livetap
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
- namespace: obstap
+ namespace: livetap
server: https://kubernetes.default.svc
project: default
source:
- path: applications/obstap
+ path: applications/livetap
repoURL: {{ .Values.repoURL }}
targetRevision: {{ .Values.targetRevision }}
helm:
diff --git a/environments/templates/monitoring-application.yaml b/environments/templates/monitoring-application.yaml
new file mode 100644
index 0000000000..a985fc3790
--- /dev/null
+++ b/environments/templates/monitoring-application.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.monitoring.enabled -}}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: "monitoring"
+spec:
+ finalizers:
+ - "kubernetes"
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: "monitoring"
+ namespace: "argocd"
+ finalizers:
+ - "resources-finalizer.argocd.argoproj.io"
+spec:
+ destination:
+ namespace: "monitoring"
+ server: "https://kubernetes.default.svc"
+ project: "default"
+ source:
+ path: "applications/monitoring"
+ repoURL: {{ .Values.repoURL | quote }}
+ targetRevision: {{ .Values.targetRevision | quote }}
+ helm:
+ parameters:
+ - name: "global.host"
+ value: {{ .Values.fqdn | quote }}
+ - name: "global.baseUrl"
+ value: "https://{{ .Values.fqdn }}"
+ - name: "global.vaultSecretsPath"
+ value: {{ .Values.vaultPathPrefix | quote }}
+ valueFiles:
+ - "values.yaml"
+ - "values-{{ .Values.environment }}.yaml"
+{{- end -}}
diff --git a/environments/templates/ook-application.yaml b/environments/templates/ook-application.yaml
new file mode 100644
index 0000000000..65d244bdd3
--- /dev/null
+++ b/environments/templates/ook-application.yaml
@@ -0,0 +1,37 @@
+{{- if .Values.ook.enabled -}}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: "ook"
+spec:
+ finalizers:
+ - "kubernetes"
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: "ook"
+ namespace: "argocd"
+ finalizers:
+ - "resources-finalizer.argocd.argoproj.io"
+spec:
+ destination:
+ namespace: "ook"
+ server: "https://kubernetes.default.svc"
+ project: "default"
+ source:
+ path: "applications/ook"
+ repoURL: {{ .Values.repoURL | quote }}
+ targetRevision: {{ .Values.targetRevision | quote }}
+ helm:
+ parameters:
+ - name: "global.host"
+ value: {{ .Values.fqdn | quote }}
+ - name: "global.baseUrl"
+ value: "https://{{ .Values.fqdn }}"
+ - name: "global.vaultSecretsPathPrefix"
+ value: {{ .Values.vaultPathPrefix | quote }}
+ valueFiles:
+ - "values.yaml"
+ - "values-{{ .Values.environment }}.yaml"
+{{- end -}}
diff --git a/environments/values-base.yaml b/environments/values-base.yaml
index aae83667d5..b5aefd97cc 100644
--- a/environments/values-base.yaml
+++ b/environments/values-base.yaml
@@ -5,7 +5,7 @@ vaultPathPrefix: secret/k8s_operator/base-lsp.lsst.codes
alert-stream-broker:
enabled: false
argo-workflows:
- enabled: false
+ enabled: true
cachemachine:
enabled: true
cert-manager:
@@ -26,12 +26,16 @@ mobu:
enabled: false
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: true
noteburst:
enabled: false
nublado:
enabled: false
+ook:
+ enabled: false
nublado2:
enabled: true
plot-navigator:
diff --git a/environments/values-ccin2p3.yaml b/environments/values-ccin2p3.yaml
index 0325ad4e15..3bf358195a 100644
--- a/environments/values-ccin2p3.yaml
+++ b/environments/values-ccin2p3.yaml
@@ -24,6 +24,8 @@ ingress-nginx:
enabled: true
kubernetes-replicator:
enabled: false
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
@@ -32,6 +34,8 @@ nublado:
enabled: false
nublado2:
enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
diff --git a/environments/values-idfdev.yaml b/environments/values-idfdev.yaml
index 79543ea23a..bfdc4fcd34 100644
--- a/environments/values-idfdev.yaml
+++ b/environments/values-idfdev.yaml
@@ -8,7 +8,7 @@ alert-stream-broker:
argo-workflows:
enabled: true
cachemachine:
- enabled: true
+ enabled: false
cert-manager:
enabled: true
datalinker:
@@ -26,7 +26,9 @@ kubernetes-replicator:
mobu:
enabled: true
moneypenny:
- enabled: true
+ enabled: false
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
@@ -34,7 +36,9 @@ noteburst:
nublado:
enabled: true
nublado2:
- enabled: true
+ enabled: false
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
@@ -49,6 +53,8 @@ semaphore:
enabled: true
sherlock:
enabled: true
+ssotap:
+ enabled: true
squareone:
enabled: true
squarebot:
diff --git a/environments/values-idfint.yaml b/environments/values-idfint.yaml
index 3b419a2bf8..b3a9fb0790 100644
--- a/environments/values-idfint.yaml
+++ b/environments/values-idfint.yaml
@@ -29,6 +29,8 @@ mobu:
enabled: true
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
@@ -37,8 +39,8 @@ nublado:
enabled: true
nublado2:
enabled: true
-obstap:
- enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: true
portal:
diff --git a/environments/values-idfprod.yaml b/environments/values-idfprod.yaml
index d05bc8932e..9cd950d111 100644
--- a/environments/values-idfprod.yaml
+++ b/environments/values-idfprod.yaml
@@ -27,14 +27,18 @@ mobu:
enabled: true
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
enabled: false
nublado:
- enabled: false
+ enabled: true
nublado2:
enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
@@ -55,6 +59,8 @@ squareone:
enabled: true
squash-api:
enabled: false
+ssotap:
+ enabled: true
strimzi:
enabled: false
strimzi-access-operator:
diff --git a/environments/values-minikube.yaml b/environments/values-minikube.yaml
index 5a513cad05..680818694d 100644
--- a/environments/values-minikube.yaml
+++ b/environments/values-minikube.yaml
@@ -7,7 +7,7 @@ alert-stream-broker:
argo-workflows:
enabled: false
cachemachine:
- enabled: true
+ enabled: false
cert-manager:
enabled: true
datalinker:
@@ -25,15 +25,19 @@ kubernetes-replicator:
mobu:
enabled: true
moneypenny:
- enabled: true
+ enabled: false
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
- enabled: true
+ enabled: false
nublado:
enabled: false
nublado2:
enabled: false
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
diff --git a/environments/values-roe.yaml b/environments/values-roe.yaml
index ce1ed5c4fc..8cee958cfa 100644
--- a/environments/values-roe.yaml
+++ b/environments/values-roe.yaml
@@ -26,6 +26,8 @@ mobu:
enabled: true
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
@@ -34,6 +36,8 @@ nublado:
enabled: false
nublado2:
enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
diff --git a/environments/values-roundtable-dev.yaml b/environments/values-roundtable-dev.yaml
index 4d176ccd45..c3b346eb4e 100644
--- a/environments/values-roundtable-dev.yaml
+++ b/environments/values-roundtable-dev.yaml
@@ -16,6 +16,8 @@ exposurelog:
enabled: false
gafaelfawr:
enabled: true
+giftless:
+ enabled: true
hips:
enabled: false
ingress-nginx:
@@ -26,6 +28,8 @@ mobu:
enabled: false
moneypenny:
enabled: false
+monitoring:
+ enabled: true
narrativelog:
enabled: false
noteburst:
@@ -34,6 +38,8 @@ nublado:
enabled: false
nublado2:
enabled: false
+ook:
+ enabled: true
plot-navigator:
enabled: false
portal:
diff --git a/environments/values-roundtable-prod.yaml b/environments/values-roundtable-prod.yaml
index ca564b0db7..fc12e79520 100644
--- a/environments/values-roundtable-prod.yaml
+++ b/environments/values-roundtable-prod.yaml
@@ -26,6 +26,8 @@ mobu:
enabled: false
moneypenny:
enabled: false
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
@@ -34,6 +36,8 @@ nublado:
enabled: false
nublado2:
enabled: false
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
diff --git a/environments/values-summit.yaml b/environments/values-summit.yaml
index 91a8766a88..4628e29617 100644
--- a/environments/values-summit.yaml
+++ b/environments/values-summit.yaml
@@ -26,6 +26,8 @@ mobu:
enabled: false
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: true
noteburst:
@@ -34,6 +36,8 @@ nublado:
enabled: false
nublado2:
enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
diff --git a/environments/values-tucson-teststand.yaml b/environments/values-tucson-teststand.yaml
index 910147557b..093415d77e 100644
--- a/environments/values-tucson-teststand.yaml
+++ b/environments/values-tucson-teststand.yaml
@@ -26,6 +26,8 @@ mobu:
enabled: false
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: true
noteburst:
@@ -34,6 +36,8 @@ nublado:
enabled: false
nublado2:
enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: false
portal:
diff --git a/environments/values-usdf-tel-rsp.yaml b/environments/values-usdf-tel-rsp.yaml
new file mode 100644
index 0000000000..c54bf09e0e
--- /dev/null
+++ b/environments/values-usdf-tel-rsp.yaml
@@ -0,0 +1,83 @@
+environment: usdf-tel-rsp
+fqdn: usdf-tel-rsp.slac.stanford.edu
+vaultPathPrefix: secret/rubin/usdf-tel-rsp
+# butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-repos.yaml"
+repoURL: https://github.com/lsst-sqre/phalanx.git
+
+alert-stream-broker:
+ enabled: false
+argo-workflows:
+ enabled: false
+cachemachine:
+ enabled: false
+cert-manager:
+ enabled: false
+datalinker:
+ enabled: false
+exposurelog:
+ enabled: false
+gafaelfawr:
+ enabled: true
+hips:
+ enabled: false
+ingress-nginx:
+ enabled: false
+kubernetes-replicator:
+ enabled: false
+mobu:
+ enabled: false
+moneypenny:
+ enabled: false
+monitoring:
+ enabled: false
+narrativelog:
+ enabled: false
+noteburst:
+ enabled: false
+nublado:
+ enabled: false
+nublado2:
+ enabled: false
+plot-navigator:
+ enabled: false
+portal:
+ enabled: false
+postgres:
+ enabled: true
+sasquatch:
+ enabled: false
+production-tools:
+ enabled: false
+semaphore:
+ enabled: false
+sherlock:
+ enabled: false
+ssotap:
+ enabled: false
+squarebot:
+ enabled: false
+squareone:
+ enabled: false
+squash-api:
+ enabled: false
+strimzi:
+ enabled: false
+strimzi-access-operator:
+ enabled: false
+strimzi-registry-operator:
+ enabled: false
+tap:
+ enabled: false
+tap-schema:
+ enabled: false
+telegraf:
+ enabled: false
+telegraf-ds:
+ enabled: false
+times-square:
+ enabled: false
+vault-secrets-operator:
+ enabled: true
+vo-cutouts:
+ enabled: false
+# comment to test pre-commit
diff --git a/environments/values-usdfdev.yaml b/environments/values-usdfdev.yaml
index 614f37169b..c95712319d 100644
--- a/environments/values-usdfdev.yaml
+++ b/environments/values-usdfdev.yaml
@@ -24,22 +24,28 @@ ingress-nginx:
enabled: false
kubernetes-replicator:
enabled: false
+livetap:
+ enabled: true
mobu:
enabled: true
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
- enabled: false
+ enabled: true
nublado:
enabled: true
nublado2:
enabled: true
obsloctap:
- enabled: false
+ enabled: true
obstap:
enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: true
portal:
@@ -54,6 +60,8 @@ semaphore:
enabled: true
sherlock:
enabled: false
+ssotap:
+ enabled: true
squarebot:
enabled: false
squareone:
@@ -75,7 +83,7 @@ telegraf:
telegraf-ds:
enabled: false
times-square:
- enabled: false
+ enabled: true
vault-secrets-operator:
enabled: true
vo-cutouts:
diff --git a/environments/values-usdfprod.yaml b/environments/values-usdfprod.yaml
index 6142490c7f..bde5f1f196 100644
--- a/environments/values-usdfprod.yaml
+++ b/environments/values-usdfprod.yaml
@@ -24,20 +24,24 @@ ingress-nginx:
enabled: false
kubernetes-replicator:
enabled: false
+livetap:
+ enabled: true
mobu:
enabled: true
moneypenny:
enabled: true
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
enabled: false
nublado:
- enabled: false
-nublado2:
enabled: true
-obstap:
+nublado2:
enabled: true
+ook:
+ enabled: false
plot-navigator:
enabled: true
portal:
@@ -52,6 +56,8 @@ semaphore:
enabled: true
sherlock:
enabled: false
+ssotap:
+ enabled: true
squarebot:
enabled: false
squareone:
diff --git a/environments/values.yaml b/environments/values.yaml
index 3946a1256f..d8df8a7d06 100644
--- a/environments/values.yaml
+++ b/environments/values.yaml
@@ -30,6 +30,8 @@ exposurelog:
enabled: false
gafaelfawr:
enabled: false
+giftless:
+ enabled: false
hips:
enabled: false
ingress-nginx:
@@ -38,10 +40,14 @@ kubernetes-replicator:
enabled: false
linters:
enabled: false
+livetap:
+ enabled: false
mobu:
enabled: false
moneypenny:
enabled: false
+monitoring:
+ enabled: false
narrativelog:
enabled: false
noteburst:
@@ -50,9 +56,9 @@ nublado:
enabled: false
nublado2:
enabled: false
-obsloctap:
+ook:
enabled: false
-obstap:
+obsloctap:
enabled: false
plot-navigator:
enabled: false
diff --git a/installer/generate_secrets.py b/installer/generate_secrets.py
index 67d06ad0dc..df5b407e17 100755
--- a/installer/generate_secrets.py
+++ b/installer/generate_secrets.py
@@ -6,7 +6,7 @@
import os
import secrets
from collections import defaultdict
-from datetime import datetime, timezone
+from datetime import UTC, datetime
from pathlib import Path
import bcrypt
@@ -31,7 +31,7 @@ class SecretGenerator:
will be regenerated.
"""
- def __init__(self, environment, regenerate):
+ def __init__(self, environment, regenerate) -> None:
self.secrets = defaultdict(dict)
self.environment = environment
self.regenerate = regenerate
@@ -109,7 +109,7 @@ def input_file(self, component, name, description):
fname = input(prompt_string)
if fname:
- with open(fname, "r") as f:
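+ # open() defaults to text mode "r", so the explicit mode was redundant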
+ with open(fname) as f:
self.secrets[component][name] = f.read()
@staticmethod
@@ -334,9 +334,7 @@ def _argocd(self):
h = bcrypt.hashpw(
new_pw.encode("ascii"), bcrypt.gensalt(rounds=15)
).decode("ascii")
- now_time = datetime.now(timezone.utc).strftime(
- "%Y-%m-%dT%H:%M:%SZ"
- )
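+ # datetime.UTC (added in Python 3.11) is an alias for timezone.utc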
+ now_time = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
self._set("argocd", "admin.password", h)
self._set("argocd", "admin.passwordMtime", now_time)
@@ -426,7 +424,7 @@ class OnePasswordSecretGenerator(SecretGenerator):
will be regenerated.
"""
- def __init__(self, environment, regenerate):
+ def __init__(self, environment, regenerate) -> None:
super().__init__(environment, regenerate)
self.op_secrets = {}
self.op = new_client_from_environment()
@@ -517,7 +515,7 @@ def generate(self):
"""
super().generate()
- for composite_key, secret_value in self.op_secrets.items():
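+ # the secret value itself is unused in this loop; the leading
+ # underscore marks it as intentionally unused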
+ for composite_key, _secret_value in self.op_secrets.items():
item_component, item_name = composite_key.split()
# Special case for components that may not be present in every
# environment, but nonetheless might be 1Password secrets (see
diff --git a/installer/install.sh b/installer/install.sh
index 09ced44135..6ad1832ef9 100755
--- a/installer/install.sh
+++ b/installer/install.sh
@@ -1,8 +1,9 @@
#!/bin/bash -e
-USAGE="Usage: ./install.sh ENVIRONMENT VAULT_TOKEN"
+USAGE="Usage: ./install.sh ENVIRONMENT VAULT_TOKEN [VAULT_ADDR] [VAULT_TOKEN_LEASE_DURATION]"
ENVIRONMENT=${1:?$USAGE}
export VAULT_TOKEN=${2:?$USAGE}
-export VAULT_ADDR=https://vault.lsst.ac.uk
+export VAULT_ADDR=${3:-https://vault.lsst.codes}
+export VAULT_TOKEN_LEASE_DURATION=${4:-31536000}
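+# Example invocation (hypothetical values); the last two arguments are
+# optional and default to vault.lsst.codes and a one-year lease:
+#   ./install.sh idfdev "$VAULT_TOKEN" https://vault.example.com 7776000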
VAULT_PATH_PREFIX=`yq -r .vaultPathPrefix ../environments/values-$ENVIRONMENT.yaml`
ARGOCD_PASSWORD=`vault kv get --field=argocd.admin.plaintext_password $VAULT_PATH_PREFIX/installer`
@@ -18,7 +19,7 @@ kubectl create ns vault-secrets-operator || true
kubectl create secret generic vault-secrets-operator \
--namespace vault-secrets-operator \
--from-literal=VAULT_TOKEN=$VAULT_TOKEN \
- --from-literal=VAULT_TOKEN_LEASE_DURATION=31536000 \
+ --from-literal=VAULT_TOKEN_LEASE_DURATION=$VAULT_TOKEN_LEASE_DURATION \
--dry-run=client -o yaml | kubectl apply -f -
echo "Set up docker pull secret for vault-secrets-operator..."
@@ -28,7 +29,6 @@ kubectl create secret generic pull-secret -n vault-secrets-operator \
--type=kubernetes.io/dockerconfigjson \
--dry-run=client -o yaml | kubectl apply -f -
-
echo "Update / install vault-secrets-operator..."
# ArgoCD depends on pull-secret, which depends on vault-secrets-operator.
helm dependency update ../applications/vault-secrets-operator
diff --git a/installer/vault_key.py b/installer/vault_key.py
index f7f47b4bad..6e60759ae8 100755
--- a/installer/vault_key.py
+++ b/installer/vault_key.py
@@ -7,7 +7,7 @@
class VaultKeyRetriever:
- def __init__(self):
+ def __init__(self) -> None:
self.op = new_client_from_environment()
vault_keys = self.op.get_item(
os.environ["VAULT_DOC_UUID"], "RSP-Vault"
@@ -23,6 +23,7 @@ def retrieve_key(self, environment, key_type):
for e in self.vault_keys:
if env_key in e:
return e[env_key][key_type]["id"]
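+ # no matching environment entry was found; return None explicitly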
+ return None
if __name__ == "__main__":
diff --git a/pyproject.toml b/pyproject.toml
index 55bca21138..18e3f7377a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ name = "phalanx"
version = "1.0.0"
description = "Python support code for the Rubin Phalanx platform."
license = {file = "LICENSE"}
-readme= "README.rst"
+readme= "README.md"
keywords = [
"rubin",
"lsst",
@@ -18,28 +18,11 @@ classifiers = [
"Intended Audience :: Developers",
"Operating System :: POSIX",
]
-requires-python = ">=3.8"
-dependencies = [
- "PyYAML",
- "GitPython",
-]
-
-[project.optional-dependencies]
-dev = [
- # Testing
- "coverage[toml]",
- "pytest",
- "pre-commit",
- "mypy",
- "types-PyYAML",
- # Documentation
- "documenteer[guide]>=0.7.0b4",
- "sphinx-diagrams",
- "sphinx-jinja",
-]
+requires-python = ">=3.11"
[project.scripts]
expand-charts = "phalanx.testing.expandcharts:main"
+phalanx = "phalanx.cli:main"
[project.urls]
Homepage = "https://phalanx.lsst.io"
@@ -55,6 +38,25 @@ build-backend = "setuptools.build_meta"
[tool.setuptools_scm]
+[tool.black]
+line-length = 79
+target-version = ["py311"]
+exclude = '''
+/(
+ \.eggs
+ | \.git
+ | \.mypy_cache
+ | \.ruff_cache
+ | \.tox
+ | \.venv
+ | _build
+ | build
+ | dist
+)/
+'''
+# Use single-quoted strings so TOML treats the string like a Python r-string
+# Multi-line strings are implicitly treated by black as regular expressions
+
[tool.coverage.run]
parallel = true
branch = true
@@ -77,26 +79,121 @@ exclude_lines = [
"if TYPE_CHECKING:"
]
-[tool.black]
+[tool.mypy]
+disallow_untyped_defs = true
+disallow_incomplete_defs = true
+ignore_missing_imports = true
+local_partial_types = true
+no_implicit_reexport = true
+plugins = ["pydantic.mypy"]
+show_error_codes = true
+strict_equality = true
+warn_redundant_casts = true
+warn_unreachable = true
+warn_unused_ignores = true
+
+[tool.pydantic-mypy]
+init_forbid_extra = true
+init_typed = true
+warn_required_dynamic_aliases = true
+warn_untyped_fields = true
+
+# The rule used with Ruff configuration is to disable every lint that has
+# legitimate exceptions that are not dodgy code, rather than cluttering code
+# with noqa markers. This is therefore a relatively relaxed configuration that
+# errs on the side of disabling legitimate lints.
+#
+# Reference for settings: https://beta.ruff.rs/docs/settings/
+# Reference for rules: https://beta.ruff.rs/docs/rules/
+[tool.ruff]
+exclude = [
+ "docs/**",
+ "installer/**",
+]
line-length = 79
-target-version = ['py38']
-exclude = '''
-/(
- \.eggs
- | \.git
- | \.mypy_cache
- | \.tox
- | \.venv
- | _build
- | build
- | dist
-)/
-'''
-# Use single-quoted strings so TOML treats the string like a Python r-string
-# Multi-line strings are implicitly treated by black as regular expressions
+ignore = [
+ "ANN101", # self should not have a type annotation
+ "ANN102", # cls should not have a type annotation
+ "ANN401", # sometimes Any is the right type
+ "ARG001", # unused function arguments are often legitimate
+ "ARG002", # unused method arguments are often legitimate
+ "ARG005", # unused lambda arguments are often legitimate
+ "BLE001", # we want to catch and report Exception in background tasks
+ "C414", # nested sorted is how you sort by multiple keys with reverse
+ "COM812", # omitting trailing commas allows black autoreformatting
+ "D102", # sometimes we use docstring inheritence
+ "D104", # don't see the point of documenting every package
+ "D105", # our style doesn't require docstrings for magic methods
+ "D106", # Pydantic uses a nested Config class that doesn't warrant docs
+ "EM101", # justification (duplicate string in traceback) is silly
+ "EM102", # justification (duplicate string in traceback) is silly
+ "FBT003", # positional booleans are normal for Pydantic field defaults
+ "G004", # forbidding logging f-strings is appealing, but not our style
+ "PD011", # false positive with non-NumPY code that uses .values
+ "PLR0911", # way too strict of a function complexity constraint
+ "PLR0913", # factory pattern uses constructors with many arguments
+ "PLR2004", # too aggressive about magic values
+ "RET505", # disagree that omitting else always makes code more readable
+ "S105", # good idea but too many false positives on non-passwords
+ "S106", # good idea but too many false positives on non-passwords
+ "SIM102", # sometimes the formatting of nested if statements is clearer
+ "SIM114", # sometimes or conditions result in long lines and awkward code
+ "SIM117", # sometimes nested with contexts are clearer
+ "TCH001", # we decided to not maintain separate TYPE_CHECKING blocks
+ "TCH002", # we decided to not maintain separate TYPE_CHECKING blocks
+ "TCH003", # we decided to not maintain separate TYPE_CHECKING blocks
+ "TID252", # if we're going to use relative imports, use them always
+ "TRY003", # good general advice but lint is way too aggressive
+
+ # Phalanx-specific exclusions.
+ "T201", # print makes sense to use because Phalanx is interactive
+]
+select = ["ALL"]
+target-version = "py311"
+
+[tool.ruff.per-file-ignores]
+"tests/**" = [
+ "D103", # tests don't need docstrings
+ "PLR0915", # tests are allowed to be long, sometimes that's convenient
+ "PT012", # way too aggressive about limiting pytest.raises blocks
+ "S101", # tests should use assert
+ "SLF001", # tests are allowed to access private members
+]
+
+[tool.ruff.isort]
+known-first-party = ["gafaelfawr", "tests"]
+split-on-trailing-comma = false
+
+[tool.ruff.flake8-bugbear]
+extend-immutable-calls = [
+ "fastapi.Form",
+ "fastapi.Header",
+ "fastapi.Depends",
+ "fastapi.Path",
+ "fastapi.Query",
+]
+
+# These are too useful as attributes or methods to allow the conflict with the
+# built-in to rule out their use.
+[tool.ruff.flake8-builtins]
+builtins-ignorelist = [
+ "all",
+ "any",
+ "help",
+ "id",
+ "list",
+ "type",
+]
+
+[tool.ruff.flake8-pytest-style]
+fixture-parentheses = false
+mark-parentheses = false
+
+[tool.ruff.pep8-naming]
+classmethod-decorators = [
+ "pydantic.root_validator",
+ "pydantic.validator",
+]
-[tool.isort]
-include_trailing_comma = true
-multi_line_output = 3
-known_first_party = []
-skip = ["docs/conf.py"]
+[tool.ruff.pydocstyle]
+convention = "numpy"
diff --git a/renovate.json b/renovate.json
index 76bb9056af..226aabab4b 100644
--- a/renovate.json
+++ b/renovate.json
@@ -1,9 +1,17 @@
{
"extends": [
- "config:base"
+ "config:recommended"
],
- "prConcurrentLimit": 5,
+ "configMigration": true,
+ "packageRules": [
+ {
+ "matchPackageNames": ["lsstsqre/tap-schema-mock"],
+ "allowedVersions": "<10"
+ }
+ ],
+ "rebaseWhen": "conflicted",
"schedule": [
"before 6am on Monday"
- ]
+ ],
+ "timezone": "America/Los_Angeles"
}
diff --git a/requirements/dev.in b/requirements/dev.in
new file mode 100644
index 0000000000..5f81da8931
--- /dev/null
+++ b/requirements/dev.in
@@ -0,0 +1,23 @@
+# Editable development dependencies. Add direct development, test, and
+# documentation dependencies here, as well as implicit dev dependencies
+# with constrained versions.
+#
+# After editing, update requirements/dev.txt by running:
+# make update-deps
+
+-c main.txt
+
+# Testing
+coverage[toml]
+mypy
+pre-commit
+pytest
+pytest-cov
+ruff
+types-PyYAML
+
+# Documentation
+documenteer[guide]>=0.7.0,<1
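+# pydantic is held below 2 here; presumably the documentation toolchain
+# above does not yet support the Pydantic 2 API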
+pydantic<2
+sphinx-diagrams
+sphinx-jinja
diff --git a/requirements/dev.txt b/requirements/dev.txt
new file mode 100644
index 0000000000..da6822d4d1
--- /dev/null
+++ b/requirements/dev.txt
@@ -0,0 +1,992 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --allow-unsafe --config=pyproject.toml --generate-hashes --output-file=requirements/dev.txt requirements/dev.in
+#
+alabaster==0.7.13 \
+ --hash=sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3 \
+ --hash=sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2
+ # via sphinx
+babel==2.12.1 \
+ --hash=sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610 \
+ --hash=sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455
+ # via sphinx
+beautifulsoup4==4.12.2 \
+ --hash=sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da \
+ --hash=sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a
+ # via pydata-sphinx-theme
+certifi==2023.7.22 \
+ --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \
+ --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9
+ # via
+ # -c requirements/main.txt
+ # requests
+cfgv==3.3.1 \
+ --hash=sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426 \
+ --hash=sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736
+ # via pre-commit
+charset-normalizer==3.2.0 \
+ --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+ --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+ --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+ --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+ --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+ --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+ --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+ --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+ --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+ --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+ --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+ --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+ --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+ --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+ --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+ --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+ --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+ --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+ --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+ --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+ --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+ --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+ --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+ --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+ --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+ --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+ --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+ --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+ --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+ --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+ --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+ --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+ --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+ --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+ --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+ --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+ --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+ --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+ --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+ --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+ --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+ --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+ --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+ --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+ --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+ --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+ --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+ --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+ --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+ --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+ --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+ --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+ --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+ --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+ --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+ --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+ --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+ --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+ --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+ --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+ --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+ --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+ --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+ --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+ --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+ --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+ --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+ --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+ --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+ --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+ --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+ --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+ --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+ --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+ --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
+ # via
+ # -c requirements/main.txt
+ # requests
+click==8.1.6 \
+ --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \
+ --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5
+ # via
+ # -c requirements/main.txt
+ # documenteer
+contourpy==1.1.0 \
+ --hash=sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e \
+ --hash=sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104 \
+ --hash=sha256:0b6616375d7de55797d7a66ee7d087efe27f03d336c27cf1f32c02b8c1a5ac70 \
+ --hash=sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882 \
+ --hash=sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f \
+ --hash=sha256:17cfaf5ec9862bc93af1ec1f302457371c34e688fbd381f4035a06cd47324f48 \
+ --hash=sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e \
+ --hash=sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a \
+ --hash=sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37 \
+ --hash=sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a \
+ --hash=sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2 \
+ --hash=sha256:25ae46595e22f93592d39a7eac3d638cda552c3e1160255258b695f7b58e5655 \
+ --hash=sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545 \
+ --hash=sha256:2b836d22bd2c7bb2700348e4521b25e077255ebb6ab68e351ab5aa91ca27e027 \
+ --hash=sha256:30f511c05fab7f12e0b1b7730ebdc2ec8deedcfb505bc27eb570ff47c51a8f15 \
+ --hash=sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94 \
+ --hash=sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439 \
+ --hash=sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d \
+ --hash=sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa \
+ --hash=sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae \
+ --hash=sha256:62013a2cf68abc80dadfd2307299bfa8f5aa0dcaec5b2954caeb5fa094171103 \
+ --hash=sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc \
+ --hash=sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa \
+ --hash=sha256:911ff4fd53e26b019f898f32db0d4956c9d227d51338fb3b03ec72ff0084ee5f \
+ --hash=sha256:9382a1c0bc46230fb881c36229bfa23d8c303b889b788b939365578d762b5c18 \
+ --hash=sha256:9f2931ed4741f98f74b410b16e5213f71dcccee67518970c42f64153ea9313b9 \
+ --hash=sha256:a67259c2b493b00e5a4d0f7bfae51fb4b3371395e47d079a4446e9b0f4d70e76 \
+ --hash=sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493 \
+ --hash=sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9 \
+ --hash=sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed \
+ --hash=sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4 \
+ --hash=sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f \
+ --hash=sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3 \
+ --hash=sha256:e53046c3863828d21d531cc3b53786e6580eb1ba02477e8681009b6aa0870b21 \
+ --hash=sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e \
+ --hash=sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1 \
+ --hash=sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a \
+ --hash=sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002 \
+ --hash=sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256
+ # via matplotlib
+coverage[toml]==7.2.7 \
+ --hash=sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f \
+ --hash=sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2 \
+ --hash=sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a \
+ --hash=sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a \
+ --hash=sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01 \
+ --hash=sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6 \
+ --hash=sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7 \
+ --hash=sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f \
+ --hash=sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02 \
+ --hash=sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c \
+ --hash=sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063 \
+ --hash=sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a \
+ --hash=sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5 \
+ --hash=sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959 \
+ --hash=sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97 \
+ --hash=sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6 \
+ --hash=sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f \
+ --hash=sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9 \
+ --hash=sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5 \
+ --hash=sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f \
+ --hash=sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562 \
+ --hash=sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe \
+ --hash=sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9 \
+ --hash=sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f \
+ --hash=sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb \
+ --hash=sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb \
+ --hash=sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1 \
+ --hash=sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb \
+ --hash=sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250 \
+ --hash=sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e \
+ --hash=sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511 \
+ --hash=sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5 \
+ --hash=sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59 \
+ --hash=sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2 \
+ --hash=sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d \
+ --hash=sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3 \
+ --hash=sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4 \
+ --hash=sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de \
+ --hash=sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9 \
+ --hash=sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833 \
+ --hash=sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0 \
+ --hash=sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9 \
+ --hash=sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d \
+ --hash=sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050 \
+ --hash=sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d \
+ --hash=sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6 \
+ --hash=sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353 \
+ --hash=sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb \
+ --hash=sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e \
+ --hash=sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8 \
+ --hash=sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495 \
+ --hash=sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2 \
+ --hash=sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd \
+ --hash=sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27 \
+ --hash=sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1 \
+ --hash=sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818 \
+ --hash=sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4 \
+ --hash=sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e \
+ --hash=sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850 \
+ --hash=sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3
+ # via
+ # -r requirements/dev.in
+ # pytest-cov
+cycler==0.11.0 \
+ --hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \
+ --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f
+ # via matplotlib
+diagrams==0.23.3 \
+ --hash=sha256:543c707c36a2c896dfdf8f23e993a9c7ae48bb1a667f6baf19151eb98e57a134 \
+ --hash=sha256:c497094f9d3600a94bdcfb62b6daf331d2eb7f9b355246e548dae7a4b5c97be0
+ # via sphinx-diagrams
+distlib==0.3.7 \
+ --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \
+ --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8
+ # via virtualenv
+documenteer[guide]==0.8.4 \
+ --hash=sha256:c92a0766766bcdcbbbd3b06fbb251b5c2dbad41f81be37677cc61fbd58604594 \
+ --hash=sha256:f54553006cc2416613163644a93b20c7f1934e9026de08d52a7e35055cb37e19
+ # via -r requirements/dev.in
+docutils==0.19 \
+ --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \
+ --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc
+ # via
+ # myst-parser
+ # pybtex-docutils
+ # pydata-sphinx-theme
+ # sphinx
+ # sphinx-jinja
+ # sphinxcontrib-bibtex
+filelock==3.12.2 \
+ --hash=sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81 \
+ --hash=sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec
+ # via virtualenv
+fonttools==4.41.1 \
+ --hash=sha256:1df1b6f4c7c4bc8201eb47f3b268adbf2539943aa43c400f84556557e3e109c0 \
+ --hash=sha256:2a22b2c425c698dcd5d6b0ff0b566e8e9663172118db6fd5f1941f9b8063da9b \
+ --hash=sha256:33191f062549e6bb1a4782c22a04ebd37009c09360e2d6686ac5083774d06d95 \
+ --hash=sha256:38cdecd8f1fd4bf4daae7fed1b3170dfc1b523388d6664b2204b351820aa78a7 \
+ --hash=sha256:3ae64303ba670f8959fdaaa30ba0c2dabe75364fdec1caeee596c45d51ca3425 \
+ --hash=sha256:3d1f9471134affc1e3b1b806db6e3e2ad3fa99439e332f1881a474c825101096 \
+ --hash=sha256:4e3334d51f0e37e2c6056e67141b2adabc92613a968797e2571ca8a03bd64773 \
+ --hash=sha256:4edc795533421e98f60acee7d28fc8d941ff5ac10f44668c9c3635ad72ae9045 \
+ --hash=sha256:547ab36a799dded58a46fa647266c24d0ed43a66028cd1cd4370b246ad426cac \
+ --hash=sha256:59eba8b2e749a1de85760da22333f3d17c42b66e03758855a12a2a542723c6e7 \
+ --hash=sha256:704bccd69b0abb6fab9f5e4d2b75896afa48b427caa2c7988792a2ffce35b441 \
+ --hash=sha256:73ef0bb5d60eb02ba4d3a7d23ada32184bd86007cb2de3657cfcb1175325fc83 \
+ --hash=sha256:7763316111df7b5165529f4183a334aa24c13cdb5375ffa1dc8ce309c8bf4e5c \
+ --hash=sha256:849ec722bbf7d3501a0e879e57dec1fc54919d31bff3f690af30bb87970f9784 \
+ --hash=sha256:891cfc5a83b0307688f78b9bb446f03a7a1ad981690ac8362f50518bc6153975 \
+ --hash=sha256:952cb405f78734cf6466252fec42e206450d1a6715746013f64df9cbd4f896fa \
+ --hash=sha256:a7bbb290d13c6dd718ec2c3db46fe6c5f6811e7ea1e07f145fd8468176398224 \
+ --hash=sha256:a9b3cc10dc9e0834b6665fd63ae0c6964c6bc3d7166e9bc84772e0edd09f9fa2 \
+ --hash=sha256:aaaef294d8e411f0ecb778a0aefd11bb5884c9b8333cc1011bdaf3b58ca4bd75 \
+ --hash=sha256:afce2aeb80be72b4da7dd114f10f04873ff512793d13ce0b19d12b2a4c44c0f0 \
+ --hash=sha256:b0938ebbeccf7c80bb9a15e31645cf831572c3a33d5cc69abe436e7000c61b14 \
+ --hash=sha256:b2d1ee95be42b80d1f002d1ee0a51d7a435ea90d36f1a5ae331be9962ee5a3f1 \
+ --hash=sha256:b927e5f466d99c03e6e20961946314b81d6e3490d95865ef88061144d9f62e38 \
+ --hash=sha256:bdd729744ae7ecd7f7311ad25d99da4999003dcfe43b436cf3c333d4e68de73d \
+ --hash=sha256:c2071267deaa6d93cb16288613419679c77220543551cbe61da02c93d92df72f \
+ --hash=sha256:cac73bbef7734e78c60949da11c4903ee5837168e58772371bd42a75872f4f82 \
+ --hash=sha256:da2c2964bdc827ba6b8a91dc6de792620be4da3922c4cf0599f36a488c07e2b2 \
+ --hash=sha256:e16a9449f21a93909c5be2f5ed5246420f2316e94195dbfccb5238aaa38f9751 \
+ --hash=sha256:e5c2b0a95a221838991e2f0e455dec1ca3a8cc9cd54febd68cc64d40fdb83669 \
+ --hash=sha256:ec453a45778524f925a8f20fd26a3326f398bfc55d534e37bab470c5e415caa1 \
+ --hash=sha256:edee0900cf0eedb29d17c7876102d6e5a91ee333882b1f5abc83e85b934cadb5 \
+ --hash=sha256:f14f3ccea4cc7dd1b277385adf3c3bf18f9860f87eab9c2fb650b0af16800f55 \
+ --hash=sha256:f240d9adf0583ac8fc1646afe7f4ac039022b6f8fa4f1575a2cfa53675360b69 \
+ --hash=sha256:f48602c0b3fd79cd83a34c40af565fe6db7ac9085c8823b552e6e751e3a5b8be
+ # via matplotlib
+gitdb==4.0.10 \
+ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \
+ --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7
+ # via
+ # -c requirements/main.txt
+ # gitpython
+gitpython==3.1.32 \
+ --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \
+ --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f
+ # via
+ # -c requirements/main.txt
+ # documenteer
+graphviz==0.20.1 \
+ --hash=sha256:587c58a223b51611c0cf461132da386edd896a029524ca61a1462b880bf97977 \
+ --hash=sha256:8c58f14adaa3b947daf26c19bc1e98c4e0702cdc31cf99153e6f06904d492bf8
+ # via diagrams
+identify==2.5.26 \
+ --hash=sha256:7243800bce2f58404ed41b7c002e53d4d22bcf3ae1b7900c2d7aefd95394bf7f \
+ --hash=sha256:c22a8ead0d4ca11f1edd6c9418c3220669b3b7533ada0a0ffa6cc0ef85cf9b54
+ # via pre-commit
+idna==3.4 \
+ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+ --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
+ # via
+ # -c requirements/main.txt
+ # requests
+imagesize==1.4.1 \
+ --hash=sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b \
+ --hash=sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a
+ # via sphinx
+iniconfig==2.0.0 \
+ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \
+ --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374
+ # via pytest
+jinja2==3.1.2 \
+ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
+ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
+ # via
+ # diagrams
+ # myst-parser
+ # sphinx
+ # sphinx-jinja
+kiwisolver==1.4.4 \
+ --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
+ --hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \
+ --hash=sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c \
+ --hash=sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c \
+ --hash=sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0 \
+ --hash=sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4 \
+ --hash=sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9 \
+ --hash=sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286 \
+ --hash=sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767 \
+ --hash=sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c \
+ --hash=sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6 \
+ --hash=sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b \
+ --hash=sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004 \
+ --hash=sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf \
+ --hash=sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494 \
+ --hash=sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac \
+ --hash=sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626 \
+ --hash=sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766 \
+ --hash=sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514 \
+ --hash=sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6 \
+ --hash=sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f \
+ --hash=sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d \
+ --hash=sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191 \
+ --hash=sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d \
+ --hash=sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51 \
+ --hash=sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f \
+ --hash=sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8 \
+ --hash=sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454 \
+ --hash=sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb \
+ --hash=sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da \
+ --hash=sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8 \
+ --hash=sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de \
+ --hash=sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a \
+ --hash=sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9 \
+ --hash=sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008 \
+ --hash=sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3 \
+ --hash=sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32 \
+ --hash=sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938 \
+ --hash=sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1 \
+ --hash=sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9 \
+ --hash=sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d \
+ --hash=sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824 \
+ --hash=sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b \
+ --hash=sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd \
+ --hash=sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2 \
+ --hash=sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5 \
+ --hash=sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69 \
+ --hash=sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3 \
+ --hash=sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae \
+ --hash=sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597 \
+ --hash=sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e \
+ --hash=sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955 \
+ --hash=sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca \
+ --hash=sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a \
+ --hash=sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea \
+ --hash=sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede \
+ --hash=sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4 \
+ --hash=sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6 \
+ --hash=sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686 \
+ --hash=sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408 \
+ --hash=sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871 \
+ --hash=sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29 \
+ --hash=sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750 \
+ --hash=sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897 \
+ --hash=sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0 \
+ --hash=sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2 \
+ --hash=sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09 \
+ --hash=sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c
+ # via matplotlib
+latexcodec==2.0.1 \
+ --hash=sha256:2aa2551c373261cefe2ad3a8953a6d6533e68238d180eb4bb91d7964adb3fe9a \
+ --hash=sha256:c277a193638dc7683c4c30f6684e3db728a06efb0dc9cf346db8bd0aa6c5d271
+ # via pybtex
+linkify-it-py==2.0.2 \
+ --hash=sha256:19f3060727842c254c808e99d465c80c49d2c7306788140987a1a7a29b0d6ad2 \
+ --hash=sha256:a3a24428f6c96f27370d7fe61d2ac0be09017be5190d68d8658233171f1b6541
+ # via markdown-it-py
+markdown-it-py[linkify]==3.0.0 \
+ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \
+ --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb
+ # via
+ # documenteer
+ # mdit-py-plugins
+ # myst-parser
+markupsafe==2.1.3 \
+ --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \
+ --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \
+ --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \
+ --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \
+ --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \
+ --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \
+ --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \
+ --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \
+ --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \
+ --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \
+ --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \
+ --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \
+ --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \
+ --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \
+ --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \
+ --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \
+ --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \
+ --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \
+ --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \
+ --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \
+ --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \
+ --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \
+ --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \
+ --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \
+ --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \
+ --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \
+ --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \
+ --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \
+ --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \
+ --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \
+ --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \
+ --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \
+ --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \
+ --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \
+ --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \
+ --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \
+ --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \
+ --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \
+ --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \
+ --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \
+ --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \
+ --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \
+ --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \
+ --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \
+ --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \
+ --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \
+ --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \
+ --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \
+ --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \
+ --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2
+ # via jinja2
+matplotlib==3.7.2 \
+ --hash=sha256:070f8dddd1f5939e60aacb8fa08f19551f4b0140fab16a3669d5cd6e9cb28fc8 \
+ --hash=sha256:0c3cca3e842b11b55b52c6fb8bd6a4088693829acbfcdb3e815fa9b7d5c92c1b \
+ --hash=sha256:0f506a1776ee94f9e131af1ac6efa6e5bc7cb606a3e389b0ccb6e657f60bb676 \
+ --hash=sha256:12f01b92ecd518e0697da4d97d163b2b3aa55eb3eb4e2c98235b3396d7dad55f \
+ --hash=sha256:152ee0b569a37630d8628534c628456b28686e085d51394da6b71ef84c4da201 \
+ --hash=sha256:1c308b255efb9b06b23874236ec0f10f026673ad6515f602027cc8ac7805352d \
+ --hash=sha256:1cd120fca3407a225168238b790bd5c528f0fafde6172b140a2f3ab7a4ea63e9 \
+ --hash=sha256:20f844d6be031948148ba49605c8b96dfe7d3711d1b63592830d650622458c11 \
+ --hash=sha256:23fb1750934e5f0128f9423db27c474aa32534cec21f7b2153262b066a581fd1 \
+ --hash=sha256:2699f7e73a76d4c110f4f25be9d2496d6ab4f17345307738557d345f099e07de \
+ --hash=sha256:26bede320d77e469fdf1bde212de0ec889169b04f7f1179b8930d66f82b30cbc \
+ --hash=sha256:2ecb5be2b2815431c81dc115667e33da0f5a1bcf6143980d180d09a717c4a12e \
+ --hash=sha256:2f8e4a49493add46ad4a8c92f63e19d548b2b6ebbed75c6b4c7f46f57d36cdd1 \
+ --hash=sha256:305e3da477dc8607336ba10bac96986d6308d614706cae2efe7d3ffa60465b24 \
+ --hash=sha256:30e1409b857aa8a747c5d4f85f63a79e479835f8dffc52992ac1f3f25837b544 \
+ --hash=sha256:318c89edde72ff95d8df67d82aca03861240512994a597a435a1011ba18dbc7f \
+ --hash=sha256:35d74ebdb3f71f112b36c2629cf32323adfbf42679e2751252acd468f5001c07 \
+ --hash=sha256:50e0a55ec74bf2d7a0ebf50ac580a209582c2dd0f7ab51bc270f1b4a0027454e \
+ --hash=sha256:5dea00b62d28654b71ca92463656d80646675628d0828e08a5f3b57e12869e13 \
+ --hash=sha256:60c521e21031632aa0d87ca5ba0c1c05f3daacadb34c093585a0be6780f698e4 \
+ --hash=sha256:6515e878f91894c2e4340d81f0911857998ccaf04dbc1bba781e3d89cbf70608 \
+ --hash=sha256:6d2ff3c984b8a569bc1383cd468fc06b70d7b59d5c2854ca39f1436ae8394117 \
+ --hash=sha256:71667eb2ccca4c3537d9414b1bc00554cb7f91527c17ee4ec38027201f8f1603 \
+ --hash=sha256:717157e61b3a71d3d26ad4e1770dc85156c9af435659a25ee6407dc866cb258d \
+ --hash=sha256:71f7a8c6b124e904db550f5b9fe483d28b896d4135e45c4ea381ad3b8a0e3256 \
+ --hash=sha256:936bba394682049919dda062d33435b3be211dc3dcaa011e09634f060ec878b2 \
+ --hash=sha256:a1733b8e84e7e40a9853e505fe68cc54339f97273bdfe6f3ed980095f769ddc7 \
+ --hash=sha256:a2c1590b90aa7bd741b54c62b78de05d4186271e34e2377e0289d943b3522273 \
+ --hash=sha256:a7e28d6396563955f7af437894a36bf2b279462239a41028323e04b85179058b \
+ --hash=sha256:a8035ba590658bae7562786c9cc6ea1a84aa49d3afab157e414c9e2ea74f496d \
+ --hash=sha256:a8cdb91dddb04436bd2f098b8fdf4b81352e68cf4d2c6756fcc414791076569b \
+ --hash=sha256:ac60daa1dc83e8821eed155796b0f7888b6b916cf61d620a4ddd8200ac70cd64 \
+ --hash=sha256:af4860132c8c05261a5f5f8467f1b269bf1c7c23902d75f2be57c4a7f2394b3e \
+ --hash=sha256:bc221ffbc2150458b1cd71cdd9ddd5bb37962b036e41b8be258280b5b01da1dd \
+ --hash=sha256:ce55289d5659b5b12b3db4dc9b7075b70cef5631e56530f14b2945e8836f2d20 \
+ --hash=sha256:d9881356dc48e58910c53af82b57183879129fa30492be69058c5b0d9fddf391 \
+ --hash=sha256:dbcf59334ff645e6a67cd5f78b4b2cdb76384cdf587fa0d2dc85f634a72e1a3e \
+ --hash=sha256:ebf577c7a6744e9e1bd3fee45fc74a02710b214f94e2bde344912d85e0c9af7c \
+ --hash=sha256:f081c03f413f59390a80b3e351cc2b2ea0205839714dbc364519bcf51f4b56ca \
+ --hash=sha256:fdbb46fad4fb47443b5b8ac76904b2e7a66556844f33370861b4788db0f8816a \
+ --hash=sha256:fdcd28360dbb6203fb5219b1a5658df226ac9bebc2542a9e8f457de959d713d0
+ # via sphinxext-opengraph
+mdit-py-plugins==0.4.0 \
+ --hash=sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9 \
+ --hash=sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b
+ # via myst-parser
+mdurl==0.1.2 \
+ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
+ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
+ # via markdown-it-py
+mypy==1.4.1 \
+ --hash=sha256:01fd2e9f85622d981fd9063bfaef1aed6e336eaacca00892cd2d82801ab7c042 \
+ --hash=sha256:0dde1d180cd84f0624c5dcaaa89c89775550a675aff96b5848de78fb11adabcd \
+ --hash=sha256:141dedfdbfe8a04142881ff30ce6e6653c9685b354876b12e4fe6c78598b45e2 \
+ --hash=sha256:16f0db5b641ba159eff72cff08edc3875f2b62b2fa2bc24f68c1e7a4e8232d01 \
+ --hash=sha256:190b6bab0302cec4e9e6767d3eb66085aef2a1cc98fe04936d8a42ed2ba77bb7 \
+ --hash=sha256:2460a58faeea905aeb1b9b36f5065f2dc9a9c6e4c992a6499a2360c6c74ceca3 \
+ --hash=sha256:34a9239d5b3502c17f07fd7c0b2ae6b7dd7d7f6af35fbb5072c6208e76295816 \
+ --hash=sha256:43b592511672017f5b1a483527fd2684347fdffc041c9ef53428c8dc530f79a3 \
+ --hash=sha256:43d24f6437925ce50139a310a64b2ab048cb2d3694c84c71c3f2a1626d8101dc \
+ --hash=sha256:45d32cec14e7b97af848bddd97d85ea4f0db4d5a149ed9676caa4eb2f7402bb4 \
+ --hash=sha256:470c969bb3f9a9efcedbadcd19a74ffb34a25f8e6b0e02dae7c0e71f8372f97b \
+ --hash=sha256:566e72b0cd6598503e48ea610e0052d1b8168e60a46e0bfd34b3acf2d57f96a8 \
+ --hash=sha256:5703097c4936bbb9e9bce41478c8d08edd2865e177dc4c52be759f81ee4dd26c \
+ --hash=sha256:7549fbf655e5825d787bbc9ecf6028731973f78088fbca3a1f4145c39ef09462 \
+ --hash=sha256:8207b7105829eca6f3d774f64a904190bb2231de91b8b186d21ffd98005f14a7 \
+ --hash=sha256:8c4d8e89aa7de683e2056a581ce63c46a0c41e31bd2b6d34144e2c80f5ea53dc \
+ --hash=sha256:98324ec3ecf12296e6422939e54763faedbfcc502ea4a4c38502082711867258 \
+ --hash=sha256:9bbcd9ab8ea1f2e1c8031c21445b511442cc45c89951e49bbf852cbb70755b1b \
+ --hash=sha256:9d40652cc4fe33871ad3338581dca3297ff5f2213d0df345bcfbde5162abf0c9 \
+ --hash=sha256:a2746d69a8196698146a3dbe29104f9eb6a2a4d8a27878d92169a6c0b74435b6 \
+ --hash=sha256:ae704dcfaa180ff7c4cfbad23e74321a2b774f92ca77fd94ce1049175a21c97f \
+ --hash=sha256:bfdca17c36ae01a21274a3c387a63aa1aafe72bff976522886869ef131b937f1 \
+ --hash=sha256:c482e1246726616088532b5e964e39765b6d1520791348e6c9dc3af25b233828 \
+ --hash=sha256:ca637024ca67ab24a7fd6f65d280572c3794665eaf5edcc7e90a866544076878 \
+ --hash=sha256:e02d700ec8d9b1859790c0475df4e4092c7bf3272a4fd2c9f33d87fac4427b8f \
+ --hash=sha256:e5952d2d18b79f7dc25e62e014fe5a23eb1a3d2bc66318df8988a01b1a037c5b
+ # via -r requirements/dev.in
+mypy-extensions==1.0.0 \
+ --hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
+ --hash=sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782
+ # via mypy
+myst-parser==2.0.0 \
+ --hash=sha256:7c36344ae39c8e740dad7fdabf5aa6fc4897a813083c6cc9990044eb93656b14 \
+ --hash=sha256:ea929a67a6a0b1683cdbe19b8d2e724cd7643f8aa3e7bb18dd65beac3483bead
+ # via documenteer
+nodeenv==1.8.0 \
+ --hash=sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2 \
+ --hash=sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec
+ # via pre-commit
+numpy==1.25.2 \
+ --hash=sha256:0d60fbae8e0019865fc4784745814cff1c421df5afee233db6d88ab4f14655a2 \
+ --hash=sha256:1a1329e26f46230bf77b02cc19e900db9b52f398d6722ca853349a782d4cff55 \
+ --hash=sha256:1b9735c27cea5d995496f46a8b1cd7b408b3f34b6d50459d9ac8fe3a20cc17bf \
+ --hash=sha256:2792d23d62ec51e50ce4d4b7d73de8f67a2fd3ea710dcbc8563a51a03fb07b01 \
+ --hash=sha256:3e0746410e73384e70d286f93abf2520035250aad8c5714240b0492a7302fdca \
+ --hash=sha256:4c3abc71e8b6edba80a01a52e66d83c5d14433cbcd26a40c329ec7ed09f37901 \
+ --hash=sha256:5883c06bb92f2e6c8181df7b39971a5fb436288db58b5a1c3967702d4278691d \
+ --hash=sha256:5c97325a0ba6f9d041feb9390924614b60b99209a71a69c876f71052521d42a4 \
+ --hash=sha256:60e7f0f7f6d0eee8364b9a6304c2845b9c491ac706048c7e8cf47b83123b8dbf \
+ --hash=sha256:76b4115d42a7dfc5d485d358728cdd8719be33cc5ec6ec08632a5d6fca2ed380 \
+ --hash=sha256:7dc869c0c75988e1c693d0e2d5b26034644399dd929bc049db55395b1379e044 \
+ --hash=sha256:834b386f2b8210dca38c71a6e0f4fd6922f7d3fcff935dbe3a570945acb1b545 \
+ --hash=sha256:8b77775f4b7df768967a7c8b3567e309f617dd5e99aeb886fa14dc1a0791141f \
+ --hash=sha256:90319e4f002795ccfc9050110bbbaa16c944b1c37c0baeea43c5fb881693ae1f \
+ --hash=sha256:b79e513d7aac42ae918db3ad1341a015488530d0bb2a6abcbdd10a3a829ccfd3 \
+ --hash=sha256:bb33d5a1cf360304754913a350edda36d5b8c5331a8237268c48f91253c3a364 \
+ --hash=sha256:bec1e7213c7cb00d67093247f8c4db156fd03075f49876957dca4711306d39c9 \
+ --hash=sha256:c5462d19336db4560041517dbb7759c21d181a67cb01b36ca109b2ae37d32418 \
+ --hash=sha256:c5652ea24d33585ea39eb6a6a15dac87a1206a692719ff45d53c5282e66d4a8f \
+ --hash=sha256:d7806500e4f5bdd04095e849265e55de20d8cc4b661b038957354327f6d9b295 \
+ --hash=sha256:db3ccc4e37a6873045580d413fe79b68e47a681af8db2e046f1dacfa11f86eb3 \
+ --hash=sha256:dfe4a913e29b418d096e696ddd422d8a5d13ffba4ea91f9f60440a3b759b0187 \
+ --hash=sha256:eb942bfb6f84df5ce05dbf4b46673ffed0d3da59f13635ea9b926af3deb76926 \
+ --hash=sha256:f08f2e037bba04e707eebf4bc934f1972a315c883a9e0ebfa8a7756eabf9e357 \
+ --hash=sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760
+ # via
+ # contourpy
+ # matplotlib
+packaging==23.1 \
+ --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \
+ --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f
+ # via
+ # matplotlib
+ # pydata-sphinx-theme
+ # pytest
+ # sphinx
+pillow==10.0.0 \
+ --hash=sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5 \
+ --hash=sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530 \
+ --hash=sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d \
+ --hash=sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca \
+ --hash=sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891 \
+ --hash=sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992 \
+ --hash=sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7 \
+ --hash=sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3 \
+ --hash=sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba \
+ --hash=sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3 \
+ --hash=sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3 \
+ --hash=sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f \
+ --hash=sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538 \
+ --hash=sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3 \
+ --hash=sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d \
+ --hash=sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c \
+ --hash=sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017 \
+ --hash=sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3 \
+ --hash=sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223 \
+ --hash=sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e \
+ --hash=sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3 \
+ --hash=sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6 \
+ --hash=sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640 \
+ --hash=sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334 \
+ --hash=sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1 \
+ --hash=sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba \
+ --hash=sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa \
+ --hash=sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0 \
+ --hash=sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396 \
+ --hash=sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d \
+ --hash=sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485 \
+ --hash=sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf \
+ --hash=sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43 \
+ --hash=sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37 \
+ --hash=sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2 \
+ --hash=sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd \
+ --hash=sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86 \
+ --hash=sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967 \
+ --hash=sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629 \
+ --hash=sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568 \
+ --hash=sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed \
+ --hash=sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f \
+ --hash=sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551 \
+ --hash=sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3 \
+ --hash=sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614 \
+ --hash=sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff \
+ --hash=sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d \
+ --hash=sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883 \
+ --hash=sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684 \
+ --hash=sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0 \
+ --hash=sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de \
+ --hash=sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b \
+ --hash=sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3 \
+ --hash=sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199 \
+ --hash=sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51 \
+ --hash=sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90
+ # via matplotlib
+platformdirs==3.10.0 \
+ --hash=sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d \
+ --hash=sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d
+ # via virtualenv
+pluggy==1.2.0 \
+ --hash=sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849 \
+ --hash=sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3
+ # via pytest
+pre-commit==3.3.3 \
+ --hash=sha256:10badb65d6a38caff29703362271d7dca483d01da88f9d7e05d0b97171c136cb \
+ --hash=sha256:a2256f489cd913d575c145132ae196fe335da32d91a8294b7afe6622335dd023
+ # via -r requirements/dev.in
+pybtex==0.24.0 \
+ --hash=sha256:818eae35b61733e5c007c3fcd2cfb75ed1bc8b4173c1f70b56cc4c0802d34755 \
+ --hash=sha256:e1e0c8c69998452fea90e9179aa2a98ab103f3eed894405b7264e517cc2fcc0f
+ # via
+ # pybtex-docutils
+ # sphinxcontrib-bibtex
+pybtex-docutils==1.0.2 \
+ --hash=sha256:43aa353b6d498fd5ac30f0073a98e332d061d34fe619d3d50d1761f8fd4aa016 \
+ --hash=sha256:6f9e3c25a37bcaac8c4f69513272706ec6253bb708a93d8b4b173f43915ba239
+ # via sphinxcontrib-bibtex
+pydantic==1.10.12 \
+ --hash=sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303 \
+ --hash=sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe \
+ --hash=sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47 \
+ --hash=sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494 \
+ --hash=sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33 \
+ --hash=sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86 \
+ --hash=sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d \
+ --hash=sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c \
+ --hash=sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a \
+ --hash=sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565 \
+ --hash=sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb \
+ --hash=sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62 \
+ --hash=sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62 \
+ --hash=sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0 \
+ --hash=sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523 \
+ --hash=sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d \
+ --hash=sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405 \
+ --hash=sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f \
+ --hash=sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b \
+ --hash=sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718 \
+ --hash=sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed \
+ --hash=sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb \
+ --hash=sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5 \
+ --hash=sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc \
+ --hash=sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942 \
+ --hash=sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe \
+ --hash=sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246 \
+ --hash=sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350 \
+ --hash=sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303 \
+ --hash=sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09 \
+ --hash=sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33 \
+ --hash=sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8 \
+ --hash=sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a \
+ --hash=sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1 \
+ --hash=sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6 \
+ --hash=sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d
+ # via
+ # -c requirements/main.txt
+ # -r requirements/dev.in
+ # documenteer
+pydata-sphinx-theme==0.12.0 \
+ --hash=sha256:7a07c3ac1fb1cfbb5f7d1e147a9500fb120e329d610e0fa2caac4a645141bdd9 \
+ --hash=sha256:c17dbab67a3774f06f34f6378e896fcd0668cc8b5da1c1ba017e65cf1df0af58
+ # via documenteer
+pygments==2.15.1 \
+ --hash=sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c \
+ --hash=sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1
+ # via
+ # pydata-sphinx-theme
+ # sphinx
+ # sphinx-prompt
+pyparsing==3.0.9 \
+ --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \
+ --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc
+ # via matplotlib
+pytest==7.4.0 \
+ --hash=sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32 \
+ --hash=sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a
+ # via
+ # -r requirements/dev.in
+ # pytest-cov
+pytest-cov==4.1.0 \
+ --hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \
+ --hash=sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a
+ # via -r requirements/dev.in
+python-dateutil==2.8.2 \
+ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
+ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
+ # via matplotlib
+pyyaml==6.0.1 \
+ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
+ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \
+ --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \
+ --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \
+ --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \
+ --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \
+ --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \
+ --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \
+ --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \
+ --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \
+ --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \
+ --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \
+ --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \
+ --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \
+ --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \
+ --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \
+ --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \
+ --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \
+ --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \
+ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \
+ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \
+ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \
+ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \
+ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \
+ --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \
+ --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \
+ --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \
+ --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \
+ --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \
+ --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \
+ --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \
+ --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \
+ --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \
+ --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \
+ --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \
+ --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \
+ --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \
+ --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
+ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
+ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
+ # via
+ # -c requirements/main.txt
+ # documenteer
+ # myst-parser
+ # pre-commit
+ # pybtex
+requests==2.31.0 \
+ --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+ --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
+ # via
+ # -c requirements/main.txt
+ # documenteer
+ # sphinx
+ruff==0.0.282 \
+ --hash=sha256:01b76309ddab16eb258dabc5e86e73e6542f59f3ea6b4ab886ecbcfc80ce062c \
+ --hash=sha256:0710ea2cadc504b96c1d94c414a7802369d0fff2ab7c94460344bba69135cb40 \
+ --hash=sha256:1f05f5e6d6df6f8b1974c08f963c33f0a4d8cfa15cba12d35ca3ece8e9be5b1f \
+ --hash=sha256:2ca52536e1c7603fe4cbb5ad9dc141df47c3200df782f5ec559364716ea27f96 \
+ --hash=sha256:3f30c9958ab9cb02bf0c574c629e87c19454cbbdb82750e49e3d1559a5a8f216 \
+ --hash=sha256:47a7a9366ab8e4ee20df9339bef172eec7b2e9e123643bf3ede005058f5b114e \
+ --hash=sha256:5374b40b6d860d334d28678a53a92f0bf04b53acdf0395900361ad54ce71cd1d \
+ --hash=sha256:826e4de98e91450a6fe699a4e4a7cf33b9a90a2c5c270dc5b202241c37359ff8 \
+ --hash=sha256:aab9ed5bfba6b0a2242a7ec9a72858c802ceeaf0076fe72b2ad455639275f22c \
+ --hash=sha256:bd25085c42ebaffe336ed7bda8a0ae7b6c454a5f386ec8b2299503f79bd12bdf \
+ --hash=sha256:d1ccbceb44e94fe2205b63996166e98a513a19ed23ec01d7193b7494b94ba30d \
+ --hash=sha256:d99758f8bbcb8f8da99acabf711ffad5e7a015247adf27211100b3586777fd56 \
+ --hash=sha256:e177cbb6dc0b1dbef5e999900d798b73e33602abf9b6c62d5d2cbe101026d931 \
+ --hash=sha256:eee9c8c50bc77eb9c0811c91d9d67ff39fe4f394c2f44ada37dac6d45e50c9f1 \
+ --hash=sha256:ef677c26bae756e4c98af6d8972da83caea550bc92ffef97a6e939ca5b24ad06 \
+ --hash=sha256:f03fba9621533d67d7ab995847467d78b9337e3697779ef2cea6f1deaee5fbef \
+ --hash=sha256:f51bbb64f8f29e444c16d21b269ba82e25f8d536beda3df7c9fe1816297e508e
+ # via -r requirements/dev.in
+six==1.16.0 \
+ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
+ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254
+ # via
+ # latexcodec
+ # pybtex
+ # python-dateutil
+smmap==5.0.0 \
+ --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \
+ --hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936
+ # via
+ # -c requirements/main.txt
+ # gitdb
+snowballstemmer==2.2.0 \
+ --hash=sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1 \
+ --hash=sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a
+ # via sphinx
+soupsieve==2.4.1 \
+ --hash=sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8 \
+ --hash=sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea
+ # via beautifulsoup4
+sphinx==6.2.1 \
+ --hash=sha256:6d56a34697bb749ffa0152feafc4b19836c755d90a7c59b72bc7dfd371b9cc6b \
+ --hash=sha256:97787ff1fa3256a3eef9eda523a63dbf299f7b47e053cfcf684a1c2a8380c912
+ # via
+ # documenteer
+ # myst-parser
+ # pydata-sphinx-theme
+ # sphinx-autodoc-typehints
+ # sphinx-automodapi
+ # sphinx-copybutton
+ # sphinx-design
+ # sphinx-jinja
+ # sphinx-prompt
+ # sphinxcontrib-bibtex
+ # sphinxcontrib-jquery
+ # sphinxext-opengraph
+sphinx-autodoc-typehints==1.22 \
+ --hash=sha256:71fca2d5eee9b034204e4c686ab20b4d8f5eb9409396216bcae6c87c38e18ea6 \
+ --hash=sha256:ef4a8b9d52de66065aa7d3adfabf5a436feb8a2eff07c2ddc31625d8807f2b69
+ # via documenteer
+sphinx-automodapi==0.15.0 \
+ --hash=sha256:06848f261fb127b25d35f27c2c4fddb041e76498733da064504f8077cbd27bec \
+ --hash=sha256:fd5871e054df7f3e299dde959afffa849f4d01c6eac274c366b06472afcb06aa
+ # via documenteer
+sphinx-copybutton==0.5.2 \
+ --hash=sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd \
+ --hash=sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e
+ # via documenteer
+sphinx-design==0.5.0 \
+ --hash=sha256:1af1267b4cea2eedd6724614f19dcc88fe2e15aff65d06b2f6252cee9c4f4c1e \
+ --hash=sha256:e8e513acea6f92d15c6de3b34e954458f245b8e761b45b63950f65373352ab00
+ # via documenteer
+sphinx-diagrams==0.4.0 \
+ --hash=sha256:3cf2e0179bdd9ccdb28164fcfcae9b167999a1abe40e159e0c26a225490074d1 \
+ --hash=sha256:4860291cb04d6361f898d20ba28dca7345f757cdc240caf144c8bf20c73067a0
+ # via -r requirements/dev.in
+sphinx-jinja==2.0.2 \
+ --hash=sha256:705ebeb9b7a6018ca3f93724315a7c1effa6ba3db44d630e7eaaa15e4ac081a8 \
+ --hash=sha256:c6232b59a894139770be1dc6d0b00a379e4288ce78157904e1f8473dea3e0718
+ # via -r requirements/dev.in
+sphinx-prompt==1.5.0 \
+ --hash=sha256:fa4e90d8088b5a996c76087d701fc7e31175f8b9dc4aab03a507e45051067162
+ # via documenteer
+sphinxcontrib-applehelp==1.0.4 \
+ --hash=sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228 \
+ --hash=sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e
+ # via sphinx
+sphinxcontrib-bibtex==2.5.0 \
+ --hash=sha256:71b42e5db0e2e284f243875326bf9936aa9a763282277d75048826fef5b00eaa \
+ --hash=sha256:748f726eaca6efff7731012103417ef130ecdcc09501b4d0c54283bf5f059f76
+ # via documenteer
+sphinxcontrib-devhelp==1.0.2 \
+ --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \
+ --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4
+ # via sphinx
+sphinxcontrib-htmlhelp==2.0.1 \
+ --hash=sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff \
+ --hash=sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903
+ # via sphinx
+sphinxcontrib-jquery==4.1 \
+ --hash=sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a \
+ --hash=sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae
+ # via documenteer
+sphinxcontrib-jsmath==1.0.1 \
+ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \
+ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8
+ # via sphinx
+sphinxcontrib-mermaid==0.9.2 \
+ --hash=sha256:252ef13dd23164b28f16d8b0205cf184b9d8e2b714a302274d9f59eb708e77af \
+ --hash=sha256:6795a72037ca55e65663d2a2c1a043d636dc3d30d418e56dd6087d1459d98a5d
+ # via documenteer
+sphinxcontrib-qthelp==1.0.3 \
+ --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \
+ --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6
+ # via sphinx
+sphinxcontrib-serializinghtml==1.1.5 \
+ --hash=sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd \
+ --hash=sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952
+ # via sphinx
+sphinxext-opengraph==0.8.2 \
+ --hash=sha256:45a693b6704052c426576f0a1f630649c55b4188bc49eb63e9587e24a923db39 \
+ --hash=sha256:6a05bdfe5176d9dd0a1d58a504f17118362ab976631213cd36fb44c4c40544c9
+ # via documenteer
+typed-ast==1.5.5 \
+ --hash=sha256:042eb665ff6bf020dd2243307d11ed626306b82812aba21836096d229fdc6a10 \
+ --hash=sha256:045f9930a1550d9352464e5149710d56a2aed23a2ffe78946478f7b5416f1ede \
+ --hash=sha256:0635900d16ae133cab3b26c607586131269f88266954eb04ec31535c9a12ef1e \
+ --hash=sha256:118c1ce46ce58fda78503eae14b7664163aa735b620b64b5b725453696f2a35c \
+ --hash=sha256:16f7313e0a08c7de57f2998c85e2a69a642e97cb32f87eb65fbfe88381a5e44d \
+ --hash=sha256:1efebbbf4604ad1283e963e8915daa240cb4bf5067053cf2f0baadc4d4fb51b8 \
+ --hash=sha256:2188bc33d85951ea4ddad55d2b35598b2709d122c11c75cffd529fbc9965508e \
+ --hash=sha256:2b946ef8c04f77230489f75b4b5a4a6f24c078be4aed241cfabe9cbf4156e7e5 \
+ --hash=sha256:335f22ccb244da2b5c296e6f96b06ee9bed46526db0de38d2f0e5a6597b81155 \
+ --hash=sha256:381eed9c95484ceef5ced626355fdc0765ab51d8553fec08661dce654a935db4 \
+ --hash=sha256:429ae404f69dc94b9361bb62291885894b7c6fb4640d561179548c849f8492ba \
+ --hash=sha256:44f214394fc1af23ca6d4e9e744804d890045d1643dd7e8229951e0ef39429b5 \
+ --hash=sha256:48074261a842acf825af1968cd912f6f21357316080ebaca5f19abbb11690c8a \
+ --hash=sha256:4bc1efe0ce3ffb74784e06460f01a223ac1f6ab31c6bc0376a21184bf5aabe3b \
+ --hash=sha256:57bfc3cf35a0f2fdf0a88a3044aafaec1d2f24d8ae8cd87c4f58d615fb5b6311 \
+ --hash=sha256:597fc66b4162f959ee6a96b978c0435bd63791e31e4f410622d19f1686d5e769 \
+ --hash=sha256:5f7a8c46a8b333f71abd61d7ab9255440d4a588f34a21f126bbfc95f6049e686 \
+ --hash=sha256:5fe83a9a44c4ce67c796a1b466c270c1272e176603d5e06f6afbc101a572859d \
+ --hash=sha256:61443214d9b4c660dcf4b5307f15c12cb30bdfe9588ce6158f4a005baeb167b2 \
+ --hash=sha256:622e4a006472b05cf6ef7f9f2636edc51bda670b7bbffa18d26b255269d3d814 \
+ --hash=sha256:6eb936d107e4d474940469e8ec5b380c9b329b5f08b78282d46baeebd3692dc9 \
+ --hash=sha256:7f58fabdde8dcbe764cef5e1a7fcb440f2463c1bbbec1cf2a86ca7bc1f95184b \
+ --hash=sha256:83509f9324011c9a39faaef0922c6f720f9623afe3fe220b6d0b15638247206b \
+ --hash=sha256:8c524eb3024edcc04e288db9541fe1f438f82d281e591c548903d5b77ad1ddd4 \
+ --hash=sha256:94282f7a354f36ef5dbce0ef3467ebf6a258e370ab33d5b40c249fa996e590dd \
+ --hash=sha256:b445c2abfecab89a932b20bd8261488d574591173d07827c1eda32c457358b18 \
+ --hash=sha256:be4919b808efa61101456e87f2d4c75b228f4e52618621c77f1ddcaae15904fa \
+ --hash=sha256:bfd39a41c0ef6f31684daff53befddae608f9daf6957140228a08e51f312d7e6 \
+ --hash=sha256:c631da9710271cb67b08bd3f3813b7af7f4c69c319b75475436fcab8c3d21bee \
+ --hash=sha256:cc95ffaaab2be3b25eb938779e43f513e0e538a84dd14a5d844b8f2932593d88 \
+ --hash=sha256:d09d930c2d1d621f717bb217bf1fe2584616febb5138d9b3e8cdd26506c3f6d4 \
+ --hash=sha256:d40c10326893ecab8a80a53039164a224984339b2c32a6baf55ecbd5b1df6431 \
+ --hash=sha256:d41b7a686ce653e06c2609075d397ebd5b969d821b9797d029fccd71fdec8e04 \
+ --hash=sha256:d5c0c112a74c0e5db2c75882a0adf3133adedcdbfd8cf7c9d6ed77365ab90a1d \
+ --hash=sha256:e1a976ed4cc2d71bb073e1b2a250892a6e968ff02aa14c1f40eba4f365ffec02 \
+ --hash=sha256:e48bf27022897577d8479eaed64701ecaf0467182448bd95759883300ca818c8 \
+ --hash=sha256:ed4a1a42df8a3dfb6b40c3d2de109e935949f2f66b19703eafade03173f8f437 \
+ --hash=sha256:f0aefdd66f1784c58f65b502b6cf8b121544680456d1cebbd300c2c813899274 \
+ --hash=sha256:fc2b8c4e1bc5cd96c1a823a885e6b158f8451cf6f5530e1829390b4d27d0807f \
+ --hash=sha256:fd946abf3c31fb50eee07451a6aedbfff912fcd13cf357363f5b4e834cc5e71a \
+ --hash=sha256:fe58ef6a764de7b4b36edfc8592641f56e69b7163bba9f9c8089838ee596bfb2
+ # via diagrams
+types-pyyaml==6.0.12.11 \
+ --hash=sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b \
+ --hash=sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d
+ # via -r requirements/dev.in
+typing-extensions==4.7.1 \
+ --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \
+ --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2
+ # via
+ # -c requirements/main.txt
+ # mypy
+ # pydantic
+uc-micro-py==1.0.2 \
+ --hash=sha256:30ae2ac9c49f39ac6dce743bd187fcd2b574b16ca095fa74cd9396795c954c54 \
+ --hash=sha256:8c9110c309db9d9e87302e2f4ad2c3152770930d88ab385cd544e7a7e75f3de0
+ # via linkify-it-py
+urllib3==2.0.4 \
+ --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+ --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
+ # via
+ # -c requirements/main.txt
+ # requests
+virtualenv==20.24.2 \
+ --hash=sha256:43a3052be36080548bdee0b42919c88072037d50d56c28bd3f853cbe92b953ff \
+ --hash=sha256:fd8a78f46f6b99a67b7ec5cf73f92357891a7b3a40fd97637c27f854aae3b9e0
+ # via pre-commit
+
+# The following packages are considered to be unsafe in a requirements file:
+setuptools==68.0.0 \
+ --hash=sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f \
+ --hash=sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235
+ # via nodeenv
diff --git a/requirements/main.in b/requirements/main.in
new file mode 100644
index 0000000000..f17684e983
--- /dev/null
+++ b/requirements/main.in
@@ -0,0 +1,15 @@
+# Editable runtime dependencies (equivalent to project.dependencies).
+# Add direct runtime dependencies here, as well as implicit dependencies
+# with constrained versions. These should be sufficient to run the phalanx
+# command-line tool.
+#
+# After editing, update requirements/main.txt by running:
+# make update-deps
+
+bcrypt
+click
+cryptography
+GitPython
+hvac
+PyYAML
+safir
diff --git a/requirements/main.txt b/requirements/main.txt
new file mode 100644
index 0000000000..1a626f61f2
--- /dev/null
+++ b/requirements/main.txt
@@ -0,0 +1,392 @@
+#
+# This file is autogenerated by pip-compile with Python 3.11
+# by the following command:
+#
+# pip-compile --allow-unsafe --config=pyproject.toml --generate-hashes --output-file=requirements/main.txt requirements/main.in
+#
+anyio==3.7.1 \
+ --hash=sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780 \
+ --hash=sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5
+ # via
+ # httpcore
+ # starlette
+bcrypt==4.0.1 \
+ --hash=sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535 \
+ --hash=sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0 \
+ --hash=sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410 \
+ --hash=sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd \
+ --hash=sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665 \
+ --hash=sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab \
+ --hash=sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71 \
+ --hash=sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215 \
+ --hash=sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b \
+ --hash=sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda \
+ --hash=sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9 \
+ --hash=sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a \
+ --hash=sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344 \
+ --hash=sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f \
+ --hash=sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d \
+ --hash=sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c \
+ --hash=sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c \
+ --hash=sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2 \
+ --hash=sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d \
+ --hash=sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e \
+ --hash=sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3
+ # via -r requirements/main.in
+certifi==2023.7.22 \
+ --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \
+ --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9
+ # via
+ # httpcore
+ # httpx
+ # requests
+cffi==1.15.1 \
+ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \
+ --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \
+ --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \
+ --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \
+ --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \
+ --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \
+ --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \
+ --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \
+ --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \
+ --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \
+ --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \
+ --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \
+ --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \
+ --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \
+ --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \
+ --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \
+ --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \
+ --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \
+ --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \
+ --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \
+ --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \
+ --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \
+ --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \
+ --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \
+ --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \
+ --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \
+ --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \
+ --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \
+ --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \
+ --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \
+ --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \
+ --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \
+ --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \
+ --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \
+ --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \
+ --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \
+ --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \
+ --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \
+ --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \
+ --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \
+ --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \
+ --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \
+ --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \
+ --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \
+ --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \
+ --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \
+ --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \
+ --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \
+ --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \
+ --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \
+ --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \
+ --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \
+ --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \
+ --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \
+ --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \
+ --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \
+ --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \
+ --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \
+ --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \
+ --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \
+ --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \
+ --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \
+ --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \
+ --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0
+ # via cryptography
+charset-normalizer==3.2.0 \
+ --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
+ --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
+ --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
+ --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
+ --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
+ --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
+ --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
+ --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
+ --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
+ --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
+ --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
+ --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
+ --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
+ --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
+ --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
+ --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
+ --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
+ --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
+ --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
+ --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
+ --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
+ --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
+ --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
+ --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
+ --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
+ --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
+ --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
+ --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
+ --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
+ --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
+ --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
+ --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
+ --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
+ --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
+ --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
+ --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
+ --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
+ --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
+ --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
+ --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
+ --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
+ --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
+ --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
+ --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
+ --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
+ --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
+ --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
+ --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
+ --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
+ --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
+ --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
+ --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
+ --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
+ --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
+ --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
+ --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
+ --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
+ --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
+ --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
+ --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
+ --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
+ --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
+ --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
+ --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
+ --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
+ --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
+ --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
+ --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
+ --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
+ --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
+ --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
+ --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
+ --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
+ --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
+ --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
+ # via requests
+click==8.1.6 \
+ --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \
+ --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5
+ # via -r requirements/main.in
+cryptography==41.0.2 \
+ --hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \
+ --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \
+ --hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \
+ --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \
+ --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \
+ --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \
+ --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \
+ --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \
+ --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \
+ --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \
+ --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \
+ --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \
+ --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \
+ --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \
+ --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \
+ --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \
+ --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \
+ --hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \
+ --hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \
+ --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \
+ --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \
+ --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \
+ --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14
+ # via
+ # -r requirements/main.in
+ # pyjwt
+ # safir
+fastapi==0.100.1 \
+ --hash=sha256:522700d7a469e4a973d92321ab93312448fbe20fca9c8da97effc7e7bc56df23 \
+ --hash=sha256:ec6dd52bfc4eff3063cfcd0713b43c87640fefb2687bbbe3d8a08d94049cdf32
+ # via safir
+gidgethub==5.3.0 \
+ --hash=sha256:4dd92f2252d12756b13f9dd15cde322bfb0d625b6fb5d680da1567ec74b462c0 \
+ --hash=sha256:9ece7d37fbceb819b80560e7ed58f936e48a65d37ec5f56db79145156b426a25
+ # via safir
+gitdb==4.0.10 \
+ --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \
+ --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7
+ # via gitpython
+gitpython==3.1.32 \
+ --hash=sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6 \
+ --hash=sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f
+ # via -r requirements/main.in
+h11==0.14.0 \
+ --hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \
+ --hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761
+ # via httpcore
+httpcore==0.17.3 \
+ --hash=sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888 \
+ --hash=sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87
+ # via httpx
+httpx==0.24.1 \
+ --hash=sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd \
+ --hash=sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd
+ # via safir
+hvac==1.1.1 \
+ --hash=sha256:466e883665b4082933106b292649f9fba3bc0709a1ec1729e9e35b29477164b3 \
+ --hash=sha256:f9dbcc46b98b250c785eb1050aa11ee34a0c8b6616b75218cf1346a9817992f9
+ # via -r requirements/main.in
+idna==3.4 \
+ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
+ --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
+ # via
+ # anyio
+ # httpx
+ # requests
+pycparser==2.21 \
+ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \
+ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206
+ # via cffi
+pydantic==1.10.12 \
+ --hash=sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303 \
+ --hash=sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe \
+ --hash=sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47 \
+ --hash=sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494 \
+ --hash=sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33 \
+ --hash=sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86 \
+ --hash=sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d \
+ --hash=sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c \
+ --hash=sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a \
+ --hash=sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565 \
+ --hash=sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb \
+ --hash=sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62 \
+ --hash=sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62 \
+ --hash=sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0 \
+ --hash=sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523 \
+ --hash=sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d \
+ --hash=sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405 \
+ --hash=sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f \
+ --hash=sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b \
+ --hash=sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718 \
+ --hash=sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed \
+ --hash=sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb \
+ --hash=sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5 \
+ --hash=sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc \
+ --hash=sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942 \
+ --hash=sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe \
+ --hash=sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246 \
+ --hash=sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350 \
+ --hash=sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303 \
+ --hash=sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09 \
+ --hash=sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33 \
+ --hash=sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8 \
+ --hash=sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a \
+ --hash=sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1 \
+ --hash=sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6 \
+ --hash=sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d
+ # via
+ # fastapi
+ # safir
+pyhcl==0.4.4 \
+ --hash=sha256:2d9b9dcdf1023d812bfed561ba72c99104c5b3f52e558d595130a44ce081b003
+ # via hvac
+pyjwt[crypto]==2.8.0 \
+ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \
+ --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320
+ # via gidgethub
+pyyaml==6.0.1 \
+ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
+ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \
+ --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \
+ --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \
+ --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \
+ --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \
+ --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \
+ --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \
+ --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \
+ --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \
+ --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \
+ --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \
+ --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \
+ --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \
+ --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \
+ --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \
+ --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \
+ --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \
+ --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \
+ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \
+ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \
+ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \
+ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \
+ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \
+ --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \
+ --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \
+ --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \
+ --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \
+ --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \
+ --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \
+ --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \
+ --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \
+ --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \
+ --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \
+ --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \
+ --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \
+ --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \
+ --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
+ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
+ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
+ # via -r requirements/main.in
+requests==2.31.0 \
+ --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
+ --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
+ # via hvac
+safir==4.3.1 \
+ --hash=sha256:6d1fcb7aba10e02fd456076d29e38aaa8699574f52b0fc2f326a9ee3958b41ea \
+ --hash=sha256:da473520785428ae3b9da80406403054d46c089a34d0beceeb88c4cb78925cd3
+ # via -r requirements/main.in
+smmap==5.0.0 \
+ --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \
+ --hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936
+ # via gitdb
+sniffio==1.3.0 \
+ --hash=sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101 \
+ --hash=sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384
+ # via
+ # anyio
+ # httpcore
+ # httpx
+starlette==0.27.0 \
+ --hash=sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75 \
+ --hash=sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91
+ # via
+ # fastapi
+ # safir
+structlog==23.1.0 \
+ --hash=sha256:270d681dd7d163c11ba500bc914b2472d2b50a8ef00faa999ded5ff83a2f906b \
+ --hash=sha256:79b9e68e48b54e373441e130fa447944e6f87a05b35de23138e475c05d0f7e0e
+ # via safir
+typing-extensions==4.7.1 \
+ --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \
+ --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2
+ # via
+ # fastapi
+ # pydantic
+uritemplate==4.1.1 \
+ --hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \
+ --hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e
+ # via gidgethub
+urllib3==2.0.4 \
+ --hash=sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11 \
+ --hash=sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4
+ # via requests
diff --git a/src/phalanx/__init__.py b/src/phalanx/__init__.py
index a48ec8244e..8b6d52e3cf 100644
--- a/src/phalanx/__init__.py
+++ b/src/phalanx/__init__.py
@@ -1,6 +1,4 @@
-"""The phalanx package provides support tooling for Phalanx, SQuaRE's
-application deployment platform.
-"""
+"""Support tooling for Phalanx, SQuaRE's application development platform."""
__all__ = ["__version__"]
diff --git a/src/phalanx/cli.py b/src/phalanx/cli.py
new file mode 100644
index 0000000000..6585058b67
--- /dev/null
+++ b/src/phalanx/cli.py
@@ -0,0 +1,124 @@
+"""Phalanx command-line interface."""
+
+from __future__ import annotations
+
+import json
+import sys
+from pathlib import Path
+
+import click
+from pydantic.tools import schema_of
+
+from .factory import Factory
+from .models.secrets import ConditionalSecretConfig
+
+__all__ = [
+ "help",
+ "secrets_audit",
+ "secrets_list",
+ "secrets_schema",
+ "secrets_static_template",
+ "secrets_vault_secrets",
+]
+
+
+@click.group(context_settings={"help_option_names": ["-h", "--help"]})
+@click.version_option(message="%(version)s")
+def main() -> None:
+ """Administrative command-line interface for gafaelfawr."""
+
+
+@main.command()
+@click.argument("topic", default=None, required=False, nargs=1)
+@click.pass_context
+def help(ctx: click.Context, topic: str | None) -> None:
+ """Show help for any command."""
+ # The help command implementation is taken from
+ # https://www.burgundywall.com/post/having-click-help-subcommand
+ if topic:
+ if topic in main.commands:
+ click.echo(main.commands[topic].get_help(ctx))
+ else:
+ raise click.UsageError(f"Unknown help topic {topic}", ctx)
+ else:
+ if not ctx.parent:
+ raise RuntimeError("help called without topic or parent")
+ click.echo(ctx.parent.get_help())
+
+
+@main.group()
+def secrets() -> None:
+ """Secret manipulation commands."""
+
+
+@secrets.command("audit")
+@click.argument("environment")
+def secrets_audit(environment: str) -> None:
+ """Audit the secrets for the given environment for inconsistencies."""
+ factory = Factory()
+ secrets_service = factory.create_secrets_service()
+ sys.stdout.write(secrets_service.audit(environment))
+
+
+@secrets.command("list")
+@click.argument("environment")
+def secrets_list(environment: str) -> None:
+ """List all secrets required for a given environment."""
+ factory = Factory()
+ secrets_service = factory.create_secrets_service()
+ secrets = secrets_service.list_secrets(environment)
+ for secret in secrets:
+ print(secret.application, secret.key)
+
+
+@secrets.command("schema")
+@click.option(
+ "-o",
+ "--output",
+ type=click.Path(path_type=Path),
+ default=None,
+ help="Path to which to write schema.",
+)
+def secrets_schema(*, output: Path | None) -> None:
+ """Generate schema for application secret definition."""
+ schema = schema_of(
+ dict[str, ConditionalSecretConfig],
+ title="Phalanx application secret definitions",
+ )
+
+ # Pydantic v1 doesn't have any way that I can find to add attributes to
+ # the top level of a schema that isn't generated from a model, and the
+ # top-level secrets schema is a dict, so manually add in the $id attribute
+ # pointing to the canonical URL. Do this in a slightly odd way so that the
+ # $id attribute will be at the top of the file, not at the bottom.
+ schema = {"$id": "https://phalanx.lsst.io/schemas/secrets.json", **schema}
+
+ json_schema = json.dumps(schema, indent=2)
+ if output:
+ output.write_text(json_schema)
+ else:
+ sys.stdout.write(json_schema)
+
+
+@secrets.command("static-template")
+@click.argument("environment")
+def secrets_static_template(environment: str) -> None:
+ """Generate a template for providing static secrets for an environment."""
+ factory = Factory()
+ secrets_service = factory.create_secrets_service()
+ sys.stdout.write(secrets_service.generate_static_template(environment))
+
+
+@secrets.command("vault-secrets")
+@click.argument("environment")
+@click.argument("output", type=click.Path(path_type=Path))
+def secrets_vault_secrets(environment: str, output: Path) -> None:
+ """Write the Vault secrets for the given environment.
+
+ One JSON file per application with secrets will be created in the output
+ directory, containing the secrets for that application. If the value of a
+ secret is not known, it will be written as null.
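+
+    For example, a hypothetical application ``someapp`` with a single secret
+    whose value is unknown would produce :file:`someapp.json` in the output
+    directory containing ``{"some-key": null}``.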
+ """
+ factory = Factory()
+ secrets_service = factory.create_secrets_service()
+ secrets_service.save_vault_secrets(environment, output)
diff --git a/src/phalanx/docs/models.py b/src/phalanx/docs/models.py
index 592ce7e5a0..aa28adf245 100644
--- a/src/phalanx/docs/models.py
+++ b/src/phalanx/docs/models.py
@@ -6,7 +6,7 @@
from dataclasses import dataclass, field
from functools import cached_property
from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any
import yaml
@@ -19,8 +19,10 @@
@dataclass(kw_only=True)
class DocLink:
- """A model describing a document link, based on an individual array item
- in the ``phalanx.lsst.io/docs`` chart annotation.
+ """A model describing a document link.
+
+ This is based on an individual array item in the ``phalanx.lsst.io/docs``
+ chart annotation.
"""
url: str
@@ -29,11 +31,11 @@ class DocLink:
title: str
"""Document title."""
- id: Optional[str]
+ id: str | None
"""Document identifier."""
def __str__(self) -> str:
- """A reStructuredText-formatted link."""
+ """Format as a reStructuredText-formatted link."""
if self.id is not None:
label = f"{self.id}: {self.title}"
else:
@@ -52,13 +54,13 @@ class Application:
This name is used to label directories, etc.
"""
- values: Dict[str, Dict]
+ values: dict[str, dict]
"""The parsed Helm values for each environment."""
- chart: Dict[str, Any]
+ chart: dict[str, Any]
"""The parsed Helm Chart.yaml file."""
- active_environments: List[str] = field(default_factory=list)
+ active_environments: list[str] = field(default_factory=list)
"""Environments where this application is active."""
namespace: str
@@ -68,7 +70,7 @@ class Application:
"""Contents of the README.md from the applications Phalanx directory."""
@property
- def homepage_url(self) -> Optional[str]:
+ def homepage_url(self) -> str | None:
"""The Helm home field, typically used for the app's docs."""
if "home" in self.chart:
return self.chart["home"]
@@ -76,7 +78,7 @@ def homepage_url(self) -> Optional[str]:
return None
@property
- def source_urls(self) -> Optional[List[str]]:
+ def source_urls(self) -> list[str] | None:
"""Application source URLs, typically from the Helm sources field."""
if "sources" in self.chart:
return self.chart["sources"]
@@ -85,8 +87,9 @@ def source_urls(self) -> Optional[List[str]]:
@property
def values_table_md(self) -> str:
- """The markdown-formatted Helm values documenation generated by
- helm-docs in the README.
+ """Markdown-formatted Helm values documenation.
+
+ Generated by :command:`helm-docs` in the :file:`README.md`.
"""
lines = self.readme.splitlines()
for i, line in enumerate(lines):
@@ -95,19 +98,18 @@ def values_table_md(self) -> str:
return ""
@cached_property
- def doc_links(self) -> List[DocLink]:
+ def doc_links(self) -> list[DocLink]:
"""reStructuredText-formatted list of links."""
key = "phalanx.lsst.io/docs"
if "annotations" in self.chart and key in self.chart["annotations"]:
docs_data = yaml.safe_load(self.chart["annotations"][key])
- docs = [DocLink(**d) for d in docs_data]
- return docs
+ return [DocLink(**d) for d in docs_data]
else:
return []
@classmethod
def load(
- cls, *, app_dir: Path, root_dir: Path, env_values: Dict[str, Dict]
+ cls, *, app_dir: Path, root_dir: Path, env_values: dict[str, dict]
) -> Application:
"""Load an application from the Phalanx repository.
@@ -124,10 +126,7 @@ def load(
# Open the chart's README
readme_path = app_dir.joinpath("README.md")
- if readme_path.is_file():
- readme = readme_path.read_text()
- else:
- readme = ""
+ readme = readme_path.read_text() if readme_path.is_file() else ""
# Open the chart's Chart.yaml
chart_path = app_dir.joinpath("Chart.yaml")
@@ -137,14 +136,14 @@ def load(
chart = {}
# Load the app's values files for each environment
- values: Dict[str, Dict] = {}
+ values: dict[str, dict] = {}
for values_path in app_dir.glob("values-*.yaml"):
env_name = values_path.stem.removeprefix("values-")
values[env_name] = yaml.safe_load(values_path.read_text())
# Determine what environments use this app based on the environment's
# values file.
- active_environments: List[str] = []
+ active_environments: list[str] = []
for env_name, env_configs in env_values.items():
if app_name == "argocd":
active_environments.append(env_name)
@@ -200,11 +199,11 @@ class Environment:
vault_path_prefix: str
"""The Vault key prefix for this environment."""
- apps: List[Application]
+ apps: list[Application]
"""The applications that are enabled for this service."""
@property
- def argocd_url(self) -> Optional[str]:
+ def argocd_url(self) -> str | None:
"""Path to the Argo CD UI."""
argocd = self.get_app("argocd")
if argocd is None:
@@ -219,7 +218,7 @@ def argocd_url(self) -> Optional[str]:
return "N/A"
@property
- def argocd_rbac_csv(self) -> Optional[List[str]]:
+ def argocd_rbac_csv(self) -> list[str] | None:
"""The Argo CD RBAC table, as a list of CSV lines."""
argocd = self.get_app("argocd")
if argocd is None:
@@ -234,10 +233,11 @@ def argocd_rbac_csv(self) -> Optional[List[str]]:
for line in rbac_csv.splitlines()
]
print(lines)
- return lines
except KeyError:
# Some environments may not configure an RBAC
return None
+ else:
+ return lines
@property
def identity_provider(self) -> str:
@@ -259,13 +259,13 @@ def identity_provider(self) -> str:
return "Unknown"
@property
- def gafaelfawr_roles(self) -> List[Tuple[str, List[str]]]:
+ def gafaelfawr_roles(self) -> list[tuple[str, list[str]]]:
"""Gafaelfawr role mapping (reStructuredText).
Group strings may be formatted as reStructuredText links to GitHub
teams.
"""
- roles: List[Tuple[str, List[str]]] = []
+ roles: list[tuple[str, list[str]]] = []
gafaelfawr = self.get_app("gafaelfawr")
if gafaelfawr is None:
@@ -280,7 +280,7 @@ def gafaelfawr_roles(self) -> List[Tuple[str, List[str]]]:
role_names = sorted(group_mapping.keys())
for role_name in role_names:
- groups: List[str] = []
+ groups: list[str] = []
for group in group_mapping[role_name]:
if isinstance(group, str):
# e.g. a comanage group
@@ -297,7 +297,7 @@ def gafaelfawr_roles(self) -> List[Tuple[str, List[str]]]:
return roles
- def get_app(self, name) -> Optional[Application]:
+ def get_app(self, name: str) -> Application | None:
"""Get the named application."""
for app in self.apps:
if app.name == name:
@@ -306,14 +306,14 @@ def get_app(self, name) -> Optional[Application]:
@classmethod
def load(
- cls, *, values: Dict[str, Any], applications: List[Application]
+ cls, *, values: dict[str, Any], applications: list[Application]
) -> Environment:
"""Load an environment by inspecting the Phalanx repository."""
# Extract name from dir/values-envname.yaml
name = values["environment"]
# Get Application instances active in this environment
- apps: List[Application] = []
+ apps: list[Application] = []
for app in applications:
if app.name == "argocd":
# argocd is a special case because it's not toggled per env
@@ -334,10 +334,10 @@ def load(
class Phalanx:
"""Root container for Phalanx data."""
- environments: List[Environment] = field(default_factory=list)
+ environments: list[Environment] = field(default_factory=list)
"""Phalanx environments."""
- apps: List[Application] = field(default_factory=list)
+ apps: list[Application] = field(default_factory=list)
"""Phalanx applications."""
@classmethod
@@ -355,11 +355,11 @@ def load_phalanx(cls, root_dir: Path) -> Phalanx:
A model of the Phalanx platform, including environment and
application configuration.
"""
- apps: List[Application] = []
- envs: List[Environment] = []
+ apps: list[Application] = []
+ envs: list[Environment] = []
# Pre-load the values files for each environment
- env_values: Dict[str, Dict[str, Any]] = {}
+ env_values: dict[str, dict[str, Any]] = {}
for env_values_path in root_dir.joinpath(ENVIRONMENTS_DIR).glob(
"values-*.yaml"
):
@@ -380,7 +380,7 @@ def load_phalanx(cls, root_dir: Path) -> Phalanx:
apps.sort(key=lambda a: a.name)
# Gather environments
- for env_name, values in env_values.items():
+ for values in env_values.values():
env = Environment.load(values=values, applications=apps)
envs.append(env)
diff --git a/src/phalanx/exceptions.py b/src/phalanx/exceptions.py
new file mode 100644
index 0000000000..acb5322d3a
--- /dev/null
+++ b/src/phalanx/exceptions.py
@@ -0,0 +1,79 @@
+"""Exceptions for the Phalanx command-line tool."""
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+
+from .models.secrets import Secret
+
+__all__ = [
+ "InvalidEnvironmentConfigError",
+ "InvalidSecretConfigError",
+ "UnknownEnvironmentError",
+ "UnresolvedSecretsError",
+]
+
+
+class InvalidEnvironmentConfigError(Exception):
+ """Configuration for an environment is invalid.
+
+ Parameters
+ ----------
+ name
+ Name of the environment.
+ error
+ Error message.
+ """
+
+ def __init__(self, name: str, error: str) -> None:
+ msg = "Invalid configuration for environment {name}: {error}"
+ super().__init__(msg)
+
+
+class InvalidSecretConfigError(Exception):
+ """Secret configuration is invalid.
+
+ Parameters
+ ----------
+ application
+ Name of the application.
+ key
+ Secret key.
+ error
+ Error message.
+ """
+
+ def __init__(self, application: str, key: str, error: str) -> None:
+ name = f"{application}/{key}"
+ msg = f"Invalid configuration for secret {name}: {error}"
+ super().__init__(msg)
+
+
+class UnresolvedSecretsError(Exception):
+ """Some secrets could not be resolved.
+
+ Parameters
+ ----------
+ secrets
+ Secrets that could not be resolved.
+ """
+
+ def __init__(self, secrets: Iterable[Secret]) -> None:
+ names = [f"{u.application}/{u.key}" for u in secrets]
+ names_str = ", ".join(names)
+ msg = f"Some secrets could not be resolved: {names_str}"
+ super().__init__(msg)
+
+
+class UnknownEnvironmentError(Exception):
+ """No configuration found for an environment name.
+
+ Parameters
+ ----------
+ name
+ Name of the environment.
+ """
+
+ def __init__(self, name: str) -> None:
+ msg = f"No configuration found for environment {name}"
+ super().__init__(msg)
diff --git a/src/phalanx/factory.py b/src/phalanx/factory.py
new file mode 100644
index 0000000000..b3cd820534
--- /dev/null
+++ b/src/phalanx/factory.py
@@ -0,0 +1,35 @@
+"""Factory for Phalanx support code components."""
+
+from __future__ import annotations
+
+from .services.secrets import SecretsService
+from .storage.config import ConfigStorage
+from .storage.vault import VaultStorage
+
+__all__ = ["Factory"]
+
+
+class Factory:
+ """Factory to create Phalanx components."""
+
+ def create_config_storage(self) -> ConfigStorage:
+ """Create storage layer for the Phalanx configuration.
+
+ Returns
+ -------
+ ConfigStorage
+ Storage service for loading the Phalanx configuration.
+ """
+ return ConfigStorage()
+
+ def create_secrets_service(self) -> SecretsService:
+ """Create service for manipulating Phalanx secrets.
+
+ Returns
+ -------
+ SecretsService
+ Service for manipulating secrets.
+ """
+ config_storage = self.create_config_storage()
+ vault_storage = VaultStorage()
+ return SecretsService(config_storage, vault_storage)
diff --git a/src/phalanx/models/__init__.py b/src/phalanx/models/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/phalanx/models/applications.py b/src/phalanx/models/applications.py
new file mode 100644
index 0000000000..65ef052758
--- /dev/null
+++ b/src/phalanx/models/applications.py
@@ -0,0 +1,75 @@
+"""Pydantic models for Phalanx applications."""
+
+from __future__ import annotations
+
+from typing import Any
+
+from pydantic import BaseModel
+
+from .secrets import ConditionalSecretConfig, Secret
+
+__all__ = [
+ "Application",
+ "ApplicationInstance",
+]
+
+
+class Application(BaseModel):
+ """A Phalanx application."""
+
+ name: str
+ """Name of the application."""
+
+ values: dict[str, Any]
+ """Base Helm chart values."""
+
+ environment_values: dict[str, dict[str, Any]]
+ """Per-environment Helm chart overrides by environment name."""
+
+ secrets: dict[str, ConditionalSecretConfig]
+ """Secrets for the application, by secret key."""
+
+ environment_secrets: dict[str, dict[str, ConditionalSecretConfig]]
+ """Per-environment secrets for the application, by secret key."""
+
+
+class ApplicationInstance(BaseModel):
+ """A Phalanx application as configured for a specific environment."""
+
+ name: str
+ """Name of the application."""
+
+ environment: str
+ """Name of the environment for which the application is configured."""
+
+ values: dict[str, Any]
+ """Merged Helm values for the application in this environment."""
+
+ secrets: list[Secret] = []
+ """Secrets required for this application in this environment."""
+
+ def is_values_setting_true(self, setting: str) -> bool:
+ """Determine whether a given Helm values setting is true.
+
+ The values setting is considered true if the corresponding values
+ parameter is present and set to a true value (a non-empty array or
+ dictionary or a string, number, or boolean value that evaluates to
+ true in Python).
+
+ Parameters
+ ----------
+ setting
+ Setting to check.
+
+ Returns
+ -------
+ bool
+ `True` if the setting was set to a true value, `False` otherwise.
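+
+        Examples
+        --------
+        With merged values ``{"config": {"debug": true}}`` (a hypothetical
+        layout), ``is_values_setting_true("config.debug")`` returns `True`
+        and ``is_values_setting_true("config.missing")`` returns `False`.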
+ """
+ path = setting.split(".")
+ values = self.values
+ for key in path:
+ if key not in values:
+ return False
+ values = values[key]
+ return bool(values)
diff --git a/src/phalanx/models/environments.py b/src/phalanx/models/environments.py
new file mode 100644
index 0000000000..4d15824f49
--- /dev/null
+++ b/src/phalanx/models/environments.py
@@ -0,0 +1,67 @@
+"""Pydantic models for Phalanx environments."""
+
+from __future__ import annotations
+
+from pydantic import BaseModel, Field
+from safir.pydantic import CamelCaseModel
+
+from .applications import ApplicationInstance
+from .secrets import Secret
+
+__all__ = [
+ "Environment",
+ "EnvironmentConfig",
+]
+
+
+class EnvironmentConfig(CamelCaseModel):
+ """Configuration for a Phalanx environment.
+
+ This is a partial model for the environment :file:`values.yaml` file.
+ """
+
+ environment: str
+ """Name of the environment."""
+
+ vault_url: str
+ """URL of Vault server for this environment."""
+
+ vault_path_prefix: str
+ """Prefix of Vault paths, including the Kv2 mount point."""
+
+ applications: list[str] = Field(
+ [], description="List of enabled applications"
+ )
+
+
+class Environment(BaseModel):
+ """A Phalanx environment and its associated settings."""
+
+ name: str
+ """Name of the environment."""
+
+ vault_url: str
+ """URL of Vault server for this environment."""
+
+ vault_path_prefix: str
+ """Prefix of Vault paths, including the Kv2 mount point."""
+
+ applications: dict[str, ApplicationInstance]
+ """Applications enabled for that environment, by name."""
+
+ def all_applications(self) -> list[ApplicationInstance]:
+ """Return enabled applications in sorted order."""
+ return sorted(self.applications.values(), key=lambda a: a.name)
+
+ def all_secrets(self) -> list[Secret]:
+ """Return a list of all secrets regardless of application.
+
+ Returns
+ -------
+ list of Secret
+ All secrets from all applications.
+ """
+ secrets = []
+ for application in self.all_applications():
+ secrets.extend(application.secrets)
+ return secrets
diff --git a/src/phalanx/models/secrets.py b/src/phalanx/models/secrets.py
new file mode 100644
index 0000000000..14b295bae0
--- /dev/null
+++ b/src/phalanx/models/secrets.py
@@ -0,0 +1,257 @@
+"""Pydantic models for Phalanx application secrets."""
+
+from __future__ import annotations
+
+import os
+import secrets
+from base64 import urlsafe_b64encode
+from datetime import UTC, datetime
+from enum import Enum
+from typing import Any, Literal
+
+import bcrypt
+from cryptography.fernet import Fernet
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
+from pydantic import BaseModel, Extra, Field, SecretStr, validator
+
+__all__ = [
+ "ConditionalMixin",
+ "ConditionalSecretConfig",
+ "ConditionalSecretCopyRules",
+ "ConditionalSecretGenerateRules",
+ "ConditionalSimpleSecretGenerateRules",
+ "ConditionalSourceSecretGenerateRules",
+ "ResolvedSecret",
+ "Secret",
+ "SecretConfig",
+ "SecretCopyRules",
+ "SecretGenerateRules",
+ "SecretGenerateType",
+ "SimpleSecretGenerateRules",
+ "SourceSecretGenerateRules",
+]
+
+
+class ConditionalMixin(BaseModel):
+ """Mix-in class for elements that may have a condition."""
+
+ condition: str | None = Field(
+ None,
+ description=(
+ "Configuration only applies if this Helm chart setting is set to a"
+ " true value"
+ ),
+ title="Condition",
+ alias="if",
+ )
+
+
+class SecretCopyRules(BaseModel):
+ """Rules for copying a secret value from another secret."""
+
+ application: str
+ """Application from which the secret should be copied."""
+
+ key: str
+ """Secret key from which the secret should be copied."""
+
+ class Config:
+ allow_population_by_field_name = True
+ extra = Extra.forbid
+
+
+class ConditionalSecretCopyRules(SecretCopyRules, ConditionalMixin):
+ """Possibly conditional rules for copying a secret value from another."""
+
+
+class SecretGenerateType(str, Enum):
+ """Type of secret for generated secrets."""
+
+ password = "password"
+ gafaelfawr_token = "gafaelfawr-token"
+ fernet_key = "fernet-key"
+ rsa_private_key = "rsa-private-key"
+ bcrypt_password_hash = "bcrypt-password-hash"
+ mtime = "mtime"
+
+
+class SimpleSecretGenerateRules(BaseModel):
+ """Rules for generating a secret value with no source information."""
+
+ type: Literal[
+ SecretGenerateType.password,
+ SecretGenerateType.gafaelfawr_token,
+ SecretGenerateType.fernet_key,
+ SecretGenerateType.rsa_private_key,
+ ]
+ """Type of secret."""
+
+ class Config:
+ allow_population_by_field_name = True
+ extra = Extra.forbid
+
+ def generate(self) -> SecretStr:
+ """Generate a new secret following these rules."""
+ match self.type:
+ case SecretGenerateType.password:
+ return SecretStr(secrets.token_hex(32))
+ case SecretGenerateType.gafaelfawr_token:
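+                # Gafaelfawr tokens take the form gt-<key>.<secret>, where
+                # both components are 128 bits of random data encoded as
+                # URL-safe base64 with the padding stripped.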
+ key = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=")
+ secret = urlsafe_b64encode(os.urandom(16)).decode().rstrip("=")
+ return SecretStr(f"gt-{key}.{secret}")
+ case SecretGenerateType.fernet_key:
+ return SecretStr(Fernet.generate_key().decode())
+ case SecretGenerateType.rsa_private_key:
+ private_key = rsa.generate_private_key(
+ backend=default_backend(),
+ public_exponent=65537,
+ key_size=2048,
+ )
+ private_key_bytes = private_key.private_bytes(
+ serialization.Encoding.PEM,
+ serialization.PrivateFormat.PKCS8,
+ serialization.NoEncryption(),
+ )
+ return SecretStr(private_key_bytes.decode())
+
+
+class ConditionalSimpleSecretGenerateRules(
+ SimpleSecretGenerateRules, ConditionalMixin
+):
+ """Conditional rules for generating a secret value with no source."""
+
+
+class SourceSecretGenerateRules(BaseModel):
+ """Rules for generating a secret from another secret."""
+
+ type: Literal[
+ SecretGenerateType.bcrypt_password_hash,
+ SecretGenerateType.mtime,
+ ]
+ """Type of secret."""
+
+ source: str
+ """Key of secret on which this secret is based.
+
+    This may only be set for secrets of type ``bcrypt-password-hash`` or
+ ``mtime``.
+ """
+
+ def generate(self, source: SecretStr) -> SecretStr:
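+        """Generate a new secret following these rules.
+
+        Parameters
+        ----------
+        source
+            Value of the source secret from which this secret is derived.
+        """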
+ match self.type:
+ case SecretGenerateType.bcrypt_password_hash:
+ password_hash = bcrypt.hashpw(
+ source.get_secret_value().encode(),
+ bcrypt.gensalt(rounds=15),
+ )
+ return SecretStr(password_hash.decode())
+ case SecretGenerateType.mtime:
+ date = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
+ return SecretStr(date)
+
+
+class ConditionalSourceSecretGenerateRules(
+ SourceSecretGenerateRules, ConditionalMixin
+):
+ """Conditional rules for generating a secret from another secret."""
+
+
+SecretGenerateRules = SimpleSecretGenerateRules | SourceSecretGenerateRules
+ConditionalSecretGenerateRules = (
+ ConditionalSimpleSecretGenerateRules | ConditionalSourceSecretGenerateRules
+)
+
+
+class SecretConfig(BaseModel):
+ """Specification for an application secret."""
+
+ description: str
+ """Description of the secret."""
+
+ copy_rules: SecretCopyRules | None = Field(
+ None,
+ description="Rules for where the secret should be copied from",
+ alias="copy",
+ )
+
+ generate: SecretGenerateRules | None = None
+ """Rules for how the secret should be generated."""
+
+ value: SecretStr | None = None
+ """Secret value."""
+
+ class Config:
+ allow_population_by_field_name = True
+ extra = Extra.forbid
+
+
+class ConditionalSecretConfig(SecretConfig, ConditionalMixin):
+ """Possibly conditional specification for an application secret."""
+
+ copy_rules: ConditionalSecretCopyRules | None = Field(
+ None,
+ description="Rules for where the secret should be copied from",
+ alias="copy",
+ )
+
+ generate: ConditionalSecretGenerateRules | None = None
+ """Rules for how the secret should be generated."""
+
+ @validator("generate")
+ def _validate_generate(
+ cls,
+ v: ConditionalSecretGenerateRules | None,
+ values: dict[str, Any],
+ ) -> ConditionalSecretGenerateRules | None:
+ has_copy = "copy" in values and "condition" not in values["copy"]
+ if v and has_copy:
+ msg = "both copy and generate may not be set for the same secret"
+ raise ValueError(msg)
+ return v
+
+ @validator("value")
+ def _validate_value(
+ cls, v: SecretStr | None, values: dict[str, Any]
+ ) -> SecretStr | None:
+ has_copy = values.get("copy") and "condition" not in values["copy"]
+ has_generate = (
+ values.get("generate") and "condition" not in values["generate"]
+ )
+ if v and (has_copy or has_generate):
+ msg = "value may not be set if copy or generate is set"
+ raise ValueError(msg)
+ return v
+
+
+class Secret(SecretConfig):
+ """Specification for an application secret for a specific environment.
+
+ The same as `SecretConfig` except augmented with the secret application
+ and key for internal convenience.
+ """
+
+ key: str
+ """Key of the secret."""
+
+ application: str
+ """Application of the secret."""
+
+
+class ResolvedSecret(BaseModel):
+ """A secret that has been resolved for a given application instance.
+
+ Secret resolution means that the configuration has been translated into
+ either a secret value or knowledge that the secret is a static secret that
+ must come from elsewhere.
+ """
+
+ key: str
+ """Key of the secret."""
+
+ application: str
+ """Application for which the secret is required."""
+
+ value: SecretStr | None = None
+ """Value of the secret if known."""
diff --git a/src/phalanx/services/__init__.py b/src/phalanx/services/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/phalanx/services/secrets.py b/src/phalanx/services/secrets.py
new file mode 100644
index 0000000000..ead4b40138
--- /dev/null
+++ b/src/phalanx/services/secrets.py
@@ -0,0 +1,293 @@
+"""Service to manipulate Phalanx secrets."""
+
+from __future__ import annotations
+
+import json
+from collections import defaultdict
+from pathlib import Path
+
+import yaml
+from pydantic import SecretStr
+
+from ..exceptions import UnresolvedSecretsError
+from ..models.applications import ApplicationInstance
+from ..models.environments import Environment
+from ..models.secrets import ResolvedSecret, Secret, SourceSecretGenerateRules
+from ..storage.config import ConfigStorage
+from ..storage.vault import VaultStorage
+from ..yaml import YAMLFoldedString
+
+__all__ = ["SecretsService"]
+
+
+class SecretsService:
+ """Service to manipulate Phalanx secrets.
+
+ Parameters
+ ----------
+ config_storage
+ Storage object for the Phalanx configuration.
+ vault_storage
+ Storage object for Vault.
+ """
+
+ def __init__(
+ self, config_storage: ConfigStorage, vault_storage: VaultStorage
+ ) -> None:
+ self._config = config_storage
+ self._vault = vault_storage
+
+ def audit(self, env_name: str) -> str:
+ """Compare existing secrets to configuration and report problems.
+
+ Parameters
+ ----------
+ env_name
+ Name of the environment to audit.
+
+ Returns
+ -------
+ str
+ Audit report as a text document.
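+
+        Examples
+        --------
+        The report contains one section per problem category, each listing
+        the affected application and secret key (hypothetical examples)::
+
+            Missing secrets:
+            • gafaelfawr ldap-password
+            Unknown secrets in Vault:
+            • gafaelfawr old-token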
+ """
+ environment = self._config.load_environment(env_name)
+ vault_client = self._vault.get_vault_client(environment)
+
+        # Retrieve all current secrets from Vault and resolve the secret
+        # configuration against them.
+ secrets = environment.all_secrets()
+ vault_secrets = vault_client.get_environment_secrets(environment)
+ resolved = self._resolve_secrets(secrets, environment, vault_secrets)
+
+ # Compare the resolved secrets to the Vault data.
+ missing = []
+ mismatch = []
+ unknown = []
+ for app_name, values in resolved.items():
+ for key, value in values.items():
+ if key in vault_secrets[app_name]:
+ if value.value:
+ expected = value.value.get_secret_value()
+ else:
+ expected = None
+ vault = vault_secrets[app_name][key].get_secret_value()
+                    if expected != vault:
+                        mismatch.append(f"{app_name} {key}")
+                    del vault_secrets[app_name][key]
+ else:
+ missing.append(f"{app_name} {key}")
+ unknown = [f"{a} {k}" for a, lv in vault_secrets.items() for k in lv]
+
+ # Generate the textual report.
+ report = ""
+ if missing:
+ report += "Missing secrets:\n• " + "\n• ".join(missing) + "\n"
+ if mismatch:
+ report += "Incorrect secrets:\n• " + "\n• ".join(mismatch) + "\n"
+ if unknown:
+ unknown_str = "\n ".join(unknown)
+ report += "Unknown secrets in Vault:\n• " + unknown_str + "\n"
+ return report
+
+ def generate_static_template(self, env_name: str) -> str:
+ """Generate a template for providing static secrets.
+
+ The template provides space for all static secrets required for a
+ given environment. The resulting file, once the values have been
+ added, can be used as input to other secret commands instead of an
+ external secret source such as 1Password.
+
+ Parameters
+ ----------
+ env_name
+ Name of the environment.
+
+ Returns
+ -------
+        str
+ YAML template the user can fill out, as a string.
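+
+        Examples
+        --------
+        The template resembles the following (hypothetical application and
+        key)::
+
+            gafaelfawr:
+              ldap-password:
+                description: >-
+                  Password to authenticate to the LDAP server via simple
+                  binds to retrieve user and group information.
+                value: null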
+ """
+ secrets = self.list_secrets(env_name)
+ template: defaultdict[str, dict[str, dict[str, str | None]]]
+ template = defaultdict(dict)
+ for secret in secrets:
+ static = not (secret.copy_rules or secret.generate or secret.value)
+ if static:
+ template[secret.application][secret.key] = {
+ "description": YAMLFoldedString(secret.description),
+ "value": None,
+ }
+ return yaml.dump(template, width=72)
+
+ def list_secrets(self, env_name: str) -> list[Secret]:
+ """List all required secrets for the given environment.
+
+ Parameters
+ ----------
+ env_name
+ Name of the environment.
+
+ Returns
+ -------
+ list of Secret
+ Secrets required for the given environment.
+ """
+ environment = self._config.load_environment(env_name)
+ return environment.all_secrets()
+
+ def save_vault_secrets(self, env_name: str, path: Path) -> None:
+ """Generate JSON files containing the Vault secrets for an environment.
+
+ One file per application with secrets will be written to the provided
+ path. Each file will be named after the application with ``.json``
+ appended, and will contain the secret values for that application.
+ Secrets that are required but have no known value will be written as
+ null.
+
+ Parameters
+ ----------
+ env_name
+ Name of the environment.
+ path
+ Output path.
+ """
+ environment = self._config.load_environment(env_name)
+ vault_client = self._vault.get_vault_client(environment)
+ vault_secrets = vault_client.get_environment_secrets(environment)
+ for app_name, values in vault_secrets.items():
+ app_secrets: dict[str, str | None] = {}
+ for key, secret in values.items():
+ if secret:
+ app_secrets[key] = secret.get_secret_value()
+ else:
+ app_secrets[key] = None
+ with (path / f"{app_name}.json").open("w") as fh:
+ json.dump(app_secrets, fh, indent=2)
+
+ def _resolve_secrets(
+ self,
+ secrets: list[Secret],
+ environment: Environment,
+ vault_secrets: dict[str, dict[str, SecretStr]],
+ ) -> dict[str, dict[str, ResolvedSecret]]:
+ """Resolve the secrets for a Phalanx environment.
+
+ Resolving secrets is the process where the secret configuration is
+ resolved using per-environment Helm chart values to generate the list
+ of secrets required for a given environment and their values.
+
+ Parameters
+ ----------
+ secrets
+            Secret configurations to resolve for the environment.
+ environment
+ Phalanx environment for which to resolve secrets.
+ vault_secrets
+ Current values from Vault. These will be used if compatible with
+ the secret definitions.
+
+ Returns
+ -------
+ dict
+ Resolved secrets by application and secret key.
+
+ Raises
+ ------
+ UnresolvedSecretsError
+ Raised if some secrets could not be resolved.
+ """
+ resolved: defaultdict[str, dict[str, ResolvedSecret]]
+ resolved = defaultdict(dict)
+ unresolved = list(secrets)
+ left = len(unresolved)
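+        # Secrets may be copied or generated from other secrets, so
+        # resolution may take multiple passes. Stop if a pass makes no
+        # progress, since the remaining secrets can then never resolve.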
+ while unresolved:
+ secrets = unresolved
+ unresolved = []
+ for config in secrets:
+ vault_values = vault_secrets[config.application]
+ secret = self._resolve_secret(
+ config=config,
+ instance=environment.applications[config.application],
+ resolved=resolved,
+ current_value=vault_values.get(config.key),
+ )
+ if secret:
+ resolved[secret.application][secret.key] = secret
+ else:
+ unresolved.append(config)
+ if len(unresolved) >= left:
+ raise UnresolvedSecretsError(unresolved)
+ left = len(unresolved)
+ return resolved
+
+ def _resolve_secret(
+ self,
+ *,
+ config: Secret,
+ instance: ApplicationInstance,
+ resolved: dict[str, dict[str, ResolvedSecret]],
+ current_value: SecretStr | None,
+ ) -> ResolvedSecret | None:
+ """Resolve a single secret.
+
+ Parameters
+ ----------
+ config
+ Configuration of the secret.
+ instance
+ Application instance owning this secret.
+ resolved
+ Other secrets for that environment that have already been
+ resolved.
+ current_value
+ Current secret value in Vault, if known.
+
+ Returns
+ -------
+ ResolvedSecret or None
+ Resolved value of the secret, or `None` if the secret cannot yet
+ be resolved (because, for example, the secret from which it is
+ copied has not yet been resolved).
+ """
+ # If a value was already provided, this is the easy case.
+ if config.value:
+ return ResolvedSecret(
+ key=config.key,
+ application=config.application,
+ value=config.value,
+ )
+
+ # Do copying or generation if configured.
+ if config.copy_rules:
+ application = config.copy_rules.application
+ other = resolved.get(application, {}).get(config.copy_rules.key)
+ if not other:
+ return None
+ return ResolvedSecret(
+ key=config.key,
+ application=config.application,
+ value=other.value,
+ )
+ if config.generate and not current_value:
+ if isinstance(config.generate, SourceSecretGenerateRules):
+ other_key = config.generate.source
+ other = resolved.get(config.application, {}).get(other_key)
+ if not (other and other.value):
+ return None
+ value = config.generate.generate(other.value)
+ else:
+ value = config.generate.generate()
+ return ResolvedSecret(
+ key=config.key,
+ application=config.application,
+ value=value,
+ )
+
+ # The remaining case is that the secret is a static secret or a
+ # generated secret for which we already have a value.
+ return ResolvedSecret(
+ key=config.key, application=config.application, value=current_value
+ )
diff --git a/src/phalanx/storage/__init__.py b/src/phalanx/storage/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/src/phalanx/storage/config.py b/src/phalanx/storage/config.py
new file mode 100644
index 0000000000..6bc838f06a
--- /dev/null
+++ b/src/phalanx/storage/config.py
@@ -0,0 +1,298 @@
+"""Parsing and analysis of Phalanx configuration."""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+from ..exceptions import InvalidSecretConfigError, UnknownEnvironmentError
+from ..models.applications import Application, ApplicationInstance
+from ..models.environments import Environment, EnvironmentConfig
+from ..models.secrets import ConditionalSecretConfig, Secret
+
+__all__ = ["ConfigStorage"]
+
+
+def _merge_overrides(
+ base: dict[str, Any], overrides: dict[str, Any]
+) -> dict[str, Any]:
+ """Merge values settings with overrides.
+
+ Parameters
+ ----------
+ base
+ Base settings.
+ overrides
+ Overrides that should take precedence.
+
+ Returns
+ -------
+ dict
+ Merged dictionary.
+ """
+    for key, value in overrides.items():
+        if (
+            key in base
+            and isinstance(base[key], dict)
+            and isinstance(value, dict)
+        ):
+            _merge_overrides(base[key], value)
+        else:
+            base[key] = value
+ return base
+
+
+class ConfigStorage:
+ """Analyze Phalanx configuration and convert it to models."""
+
+ def __init__(self) -> None:
+ self._path = Path.cwd()
+
+ def load_environment(self, environment_name: str) -> Environment:
+ """Load the configuration of a Phalanx environment from disk.
+
+ Parameters
+ ----------
+ environment_name
+ Name of the environment.
+
+ Returns
+ -------
+ Environment
+ Environment configuration.
+
+ Raises
+ ------
+ UnknownEnvironmentError
+ Raised if the named environment has no configuration.
+ """
+ config = self._load_environment_config(environment_name)
+ applications = [self._load_application(a) for a in config.applications]
+ instances = {
+ a.name: self._resolve_application(a, environment_name)
+ for a in applications
+ }
+ return Environment(
+ name=config.environment,
+ vault_url=config.vault_url,
+ vault_path_prefix=config.vault_path_prefix,
+ applications=instances,
+ )
+
+ def _is_condition_satisfied(
+ self, instance: ApplicationInstance, condition: str | None
+ ) -> bool:
+ """Evaluate a secret condition on an application instance.
+
+ This is a convenience wrapper around
+        `ApplicationInstance.is_values_setting_true` that also treats a
+ `None` condition parameter as true.
+
+ Parameters
+ ----------
+ instance
+ Application instance for a specific environment.
+ condition
+ Condition, or `None` if there is no condition.
+
+ Returns
+ -------
+ bool
+ `True` if condition is `None` or corresponds to a values setting
+ whose value is true, `False` otherwise.
+ """
+ if not condition:
+ return True
+ else:
+ return instance.is_values_setting_true(condition)
+
+ def _load_application(self, name: str) -> Application:
+ """Load the configuration for an application from disk.
+
+ Parameters
+ ----------
+ name
+ Name of the application.
+
+ Returns
+ -------
+ Application
+ Application data.
+ """
+        base_path = self._path / "applications" / name
+
+ # Load main values file.
+ values_path = base_path / "values.yaml"
+ if values_path.exists():
+ with values_path.open("r") as fh:
+ values = yaml.safe_load(fh)
+ else:
+ values = {}
+
+ # Load environment-specific values files.
+ environment_values = {}
+ for path in base_path.glob("values-*.yaml"):
+ env_name = path.stem[len("values-") :]
+ with path.open("r") as fh:
+ env_values = yaml.safe_load(fh)
+ if env_values:
+ environment_values[env_name] = env_values
+
+ # Load the secrets configuration.
+ secrets_path = base_path / "secrets.yaml"
+ secrets = {}
+ if secrets_path.exists():
+ with secrets_path.open("r") as fh:
+ raw_secrets = yaml.safe_load(fh)
+ secrets = {
+ k: ConditionalSecretConfig.parse_obj(s)
+ for k, s in raw_secrets.items()
+ }
+
+ # Load the environment-specific secrets configuration.
+ environment_secrets = {}
+ for path in base_path.glob("secrets-*.yaml"):
+ env_name = path.stem[len("secrets-") :]
+ with path.open("r") as fh:
+ raw_secrets = yaml.safe_load(fh)
+ environment_secrets[env_name] = {
+ k: ConditionalSecretConfig.parse_obj(s)
+ for k, s in raw_secrets.items()
+ }
+
+ # Return the resulting application.
+ return Application(
+ name=name,
+ values=values,
+ environment_values=environment_values,
+ secrets=secrets,
+ environment_secrets=environment_secrets,
+ )
+
+ def _load_environment_config(
+ self, environment_name: str
+ ) -> EnvironmentConfig:
+ """Load the configuration for a Phalanx environment.
+
+ Parameters
+ ----------
+ environment_name
+            Name of the environment.
+
+ Returns
+ -------
+ Environment
+ Loaded environment.
+
+ Raises
+ ------
+ InvalidEnvironmentConfigError
+ Raised if the configuration for an environment is invalid.
+ UnknownEnvironmentError
+ Raised if the named environment has no configuration.
+ """
+ values_name = f"values-{environment_name}.yaml"
+        values_path = self._path / "environments" / values_name
+ if not values_path.exists():
+ raise UnknownEnvironmentError(environment_name)
+ with values_path.open() as fh:
+ values = yaml.safe_load(fh)
+ environment = EnvironmentConfig.parse_obj(values)
+
+ # Eventually this will have more structure and will be parsed directly
+ # by Pydantic, but for now assume any key whose value is a dictionary
+ # with an enabled key is indicating an application that is or is not
+ # enabled.
+ applications = []
+        for key, value in values.items():
+            if isinstance(value, dict) and value.get("enabled"):
+                applications.append(key)
+
+ # For now, this is hard-coded, but we'll eventually figure it out from
+ # the Argo CD Application resource templates.
+ applications.append("argocd")
+
+ # Return the configuration.
+ environment.applications = sorted(applications)
+ return environment
+
+ def _resolve_application(
+ self, application: Application, environment_name: str
+ ) -> ApplicationInstance:
+ """Resolve an application to its environment-specific configuration.
+
+ Parameters
+ ----------
+ application
+ Application to resolve.
+ environment_name
+ Name of the environment the application should be configured for.
+
+ Returns
+ -------
+ ApplicationInstance
+ Resolved application.
+
+ Raises
+ ------
+ InvalidSecretConfigError
+ Raised if the secret configuration has conflicting rules.
+ """
+ # Merge values with any environment overrides.
+ values = application.values
+ if environment_name in application.environment_values:
+ env_values = application.environment_values[environment_name]
+ values = _merge_overrides(values, env_values)
+
+ # Create an initial application instance without secrets so that we
+ # can use its class methods.
+ instance = ApplicationInstance(
+ name=application.name,
+ environment=environment_name,
+ values=values,
+ )
+
+ # Merge secrets with any environment secrets.
+ secrets = application.secrets
+ if environment_name in application.environment_secrets:
+ secrets = application.secrets.copy()
+ secrets.update(application.environment_secrets[environment_name])
+
+ # Evaluate the conditions on all of the secrets. Both the top-level
+ # condition and any conditions on the copy and generate rules will be
+ # resolved, so that any subsequent processing based on the instance no
+ # longer needs to worry about conditions.
+ required_secrets = []
+ for key, config in secrets.items():
+ if not self._is_condition_satisfied(instance, config.condition):
+ continue
+ copy = config.copy_rules
+ if copy:
+ condition = copy.condition
+ if not self._is_condition_satisfied(instance, condition):
+ copy = None
+ generate = config.generate
+ if generate:
+ condition = generate.condition
+ if not self._is_condition_satisfied(instance, condition):
+ generate = None
+ if copy and generate:
+ msg = "Copy and generate rules conflict"
+ raise InvalidSecretConfigError(instance.name, key, msg)
+ secret = Secret(
+ application=application.name,
+ key=key,
+ description=config.description,
+ copy_rules=copy,
+ generate=generate,
+ value=config.value,
+ )
+ required_secrets.append(secret)
+
+ # Add the secrets to the new instance and return it.
+ instance.secrets = sorted(
+ required_secrets, key=lambda s: (s.application, s.key)
+ )
+ return instance
diff --git a/src/phalanx/storage/vault.py b/src/phalanx/storage/vault.py
new file mode 100644
index 0000000000..20266ce221
--- /dev/null
+++ b/src/phalanx/storage/vault.py
@@ -0,0 +1,94 @@
+"""Store, retrieve, and manipulate data stored in Vault."""
+
+from __future__ import annotations
+
+import hvac
+from pydantic import SecretStr
+
+from ..models.environments import Environment
+
+__all__ = ["VaultClient", "VaultStorage"]
+
+
+class VaultClient:
+ """Store, retrieve, and manipulate data stored in Vault.
+
+ The Vault authentication token is taken from either the ``VAULT_TOKEN``
+ environment variable or a :file:`.vault-token` file in the user's home
+ directory.
+
+ Parameters
+ ----------
+ url
+ URL of the Vault server.
+ path
+ Path within that Vault server where secrets for an environment are
+ stored.
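+
+    Examples
+    --------
+    A minimal sketch, assuming a hypothetical Vault server URL and path
+    prefix:
+
+    >>> client = VaultClient(
+    ...     "https://vault.example.com", "secret/phalanx/idfdev"
+    ... )
+    >>> secrets = client.get_application_secrets("gafaelfawr")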
+ """
+
+ def __init__(self, url: str, path: str) -> None:
+ mount, path = path.split("/", 1)
+ self._vault = hvac.Client(url)
+ self._vault.secrets.kv.default_kv_version = 2
+ self._path = path
+
+ def get_application_secrets(
+ self, application: str
+ ) -> dict[str, SecretStr]:
+ """Get the secrets for an application currently stored in Vault.
+
+ Parameters
+ ----------
+ application
+ Name of the application.
+
+ Returns
+ -------
+ dict of pydantic.SecretStr
+ Mapping from secret key to its secret from Vault.
+ """
+ path = f"{self._path}/{application}"
+        r = self._vault.secrets.kv.read_secret(
+            path=path,
+            mount_point=self._mount,
+            raise_on_deleted_version=True,
+        )
+ return {k: SecretStr(v) for k, v in r["data"]["data"].items()}
+
+ def get_environment_secrets(
+ self, environment: Environment
+ ) -> dict[str, dict[str, SecretStr]]:
+ """Get the secrets for an environment currently stored in Vault.
+
+ Parameters
+ ----------
+ environment
+            Environment whose secrets should be retrieved.
+
+ Returns
+ -------
+ dict of dict
+ Mapping from application to secret key to its secret from Vault.
+ """
+ vault_secrets = {}
+ for application in environment.all_applications():
+ vault_secret = self.get_application_secrets(application.name)
+ vault_secrets[application.name] = vault_secret
+ return vault_secrets
+
+
+class VaultStorage:
+ """Create Vault clients for specific environments."""
+
+ def get_vault_client(self, env: Environment) -> VaultClient:
+ """Return a Vault client configured for the given environment.
+
+ Parameters
+ ----------
+ env
+ Phalanx environment.
+
+ Returns
+ -------
+ VaultClient
+ Vault client configured to manage secrets for that environment.
+ """
+ return VaultClient(env.vault_url, env.vault_path_prefix)
diff --git a/src/phalanx/testing/expandcharts.py b/src/phalanx/testing/expandcharts.py
index fcfd03a817..6e6714ae96 100644
--- a/src/phalanx/testing/expandcharts.py
+++ b/src/phalanx/testing/expandcharts.py
@@ -14,13 +14,14 @@
from pathlib import Path
from typing import TYPE_CHECKING
-from git import DiffIndex, Repo
+from git import DiffIndex
+from git.repo import Repo
if TYPE_CHECKING:
- from typing import List, Sequence
+ from collections.abc import Sequence
-def get_changed_charts() -> List[str]:
+def get_changed_charts() -> list[str]:
"""Get a list of charts that have changed relative to main."""
repo = Repo(str(Path.cwd()))
@@ -29,7 +30,10 @@ def get_changed_charts() -> List[str]:
if (path / "Chart.yaml").exists():
diff = repo.head.commit.diff("origin/main", paths=[str(path)])
for change_type in DiffIndex.change_type:
- if any(diff.iter_change_type(change_type)): # type: ignore
+ changes = diff.iter_change_type(
+ change_type # type: ignore[arg-type]
+ )
+ if any(changes):
print("Found changed chart", path.name)
charts.append(path.name)
break
@@ -37,7 +41,7 @@ def get_changed_charts() -> List[str]:
return charts
-def get_environments() -> List[str]:
+def get_environments() -> list[str]:
"""Get the list of supported environments."""
science_platform_path = Path.cwd() / "environments"
@@ -74,6 +78,7 @@ def expand_chart(chart: str, environments: Sequence[str]) -> None:
def main() -> None:
+ """Entry point for expand-charts command."""
expanded_path = Path.cwd() / "applications-expanded"
if expanded_path.exists():
shutil.rmtree(expanded_path)
diff --git a/src/phalanx/yaml.py b/src/phalanx/yaml.py
new file mode 100644
index 0000000000..3ff6dc3091
--- /dev/null
+++ b/src/phalanx/yaml.py
@@ -0,0 +1,32 @@
+"""Utility functions for manipulating YAML.
+
+In several places in the Phalanx code, we want to be able to wrap long strings
+to make them more readable or be able to dump `collections.defaultdict`
+objects without adding special object tagging. This module collects utility
+functions to make this easier.
+"""
+
+from __future__ import annotations
+
+from collections import defaultdict
+
+import yaml
+from yaml.representer import Representer
+
+__all__ = ["YAMLFoldedString"]
+
+
+class YAMLFoldedString(str):
+ """A string that will be folded when encoded in YAML."""
+
+ __slots__ = ()
+
+
+def _folded_string_representer(
+ dumper: yaml.Dumper, data: YAMLFoldedString
+) -> yaml.Node:
+ return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=">")
+
+
+yaml.add_representer(YAMLFoldedString, _folded_string_representer)
+yaml.add_representer(defaultdict, Representer.represent_dict)
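+
+# Once these representers are registered, yaml.dump emits YAMLFoldedString
+# values as folded block scalars (">") and serializes defaultdict like a
+# plain dict rather than with a Python-specific object tag.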
diff --git a/starters/web-service/README.md b/starters/web-service/README.md
index 6e0e4b91e0..4eb9053bb3 100644
--- a/starters/web-service/README.md
+++ b/starters/web-service/README.md
@@ -24,4 +24,4 @@ Helm starter chart for a new RSP service.
| podAnnotations | object | `{}` | Annotations for the deployment pod |
| replicaCount | int | `1` | Number of web deployment pods to start |
| resources | object | `{}` | Resource limits and requests for the deployment pod |
-| tolerations | list | `[]` | Tolerations for the deployment pod |
+| tolerations | list | `[]` | Tolerations for the deployment pod |
\ No newline at end of file
diff --git a/tests/cli/__init__.py b/tests/cli/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/cli/secrets_test.py b/tests/cli/secrets_test.py
new file mode 100644
index 0000000000..ddc5555265
--- /dev/null
+++ b/tests/cli/secrets_test.py
@@ -0,0 +1,95 @@
+"""Tests for the secrets command-line subcommand."""
+
+from __future__ import annotations
+
+import json
+import os
+from pathlib import Path
+
+from click.testing import CliRunner
+from phalanx.cli import main
+from phalanx.factory import Factory
+
+from ..support.data import phalanx_test_path, read_output_data
+from ..support.vault import MockVaultClient
+
+
+def test_audit(mock_vault: MockVaultClient) -> None:
+ input_path = phalanx_test_path()
+ os.chdir(str(input_path))
+ input_path / "vault" / "idfdev"
+ factory = Factory()
+ config_storage = factory.create_config_storage()
+ environment = config_storage.load_environment("idfdev")
+ mock_vault.load_test_data(environment.vault_path_prefix, "idfdev")
+
+ runner = CliRunner()
+ result = runner.invoke(
+ main, ["secrets", "audit", "idfdev"], catch_exceptions=False
+ )
+ assert result.exit_code == 0
+ assert result.output == read_output_data("idfdev", "secrets-audit")
+
+
+def test_list() -> None:
+ input_path = phalanx_test_path()
+ os.chdir(str(input_path))
+ runner = CliRunner()
+ result = runner.invoke(
+ main, ["secrets", "list", "idfdev"], catch_exceptions=False
+ )
+ assert result.exit_code == 0
+ assert result.output == read_output_data("idfdev", "secrets-list")
+
+
+def test_schema() -> None:
+ runner = CliRunner()
+ result = runner.invoke(main, ["secrets", "schema"], catch_exceptions=False)
+ assert result.exit_code == 0
+ current = (
+ Path(__file__).parent.parent.parent
+ / "docs"
+ / "extras"
+ / "schemas"
+ / "secrets.json"
+ )
+ assert result.output == current.read_text()
+
+
+def test_static_template() -> None:
+ input_path = phalanx_test_path()
+ os.chdir(str(input_path))
+ runner = CliRunner()
+ result = runner.invoke(
+ main, ["secrets", "static-template", "idfdev"], catch_exceptions=False
+ )
+ assert result.exit_code == 0
+ assert result.output == read_output_data("idfdev", "static-secrets.yaml")
+
+
+def test_vault_secrets(tmp_path: Path, mock_vault: MockVaultClient) -> None:
+ input_path = phalanx_test_path()
+ vault_input_path = input_path / "vault" / "idfdev"
+ os.chdir(str(input_path))
+ factory = Factory()
+ config_storage = factory.create_config_storage()
+ environment = config_storage.load_environment("idfdev")
+ mock_vault.load_test_data(environment.vault_path_prefix, "idfdev")
+
+ runner = CliRunner()
+ result = runner.invoke(
+ main,
+ ["secrets", "vault-secrets", "idfdev", str(tmp_path)],
+ catch_exceptions=False,
+ )
+ assert result.exit_code == 0
+ assert result.output == ""
+
+ expected_files = {p.name for p in vault_input_path.iterdir()}
+ output_files = {p.name for p in tmp_path.iterdir()}
+ assert expected_files == output_files
+ for expected_path in vault_input_path.iterdir():
+ with expected_path.open() as fh:
+ expected = json.load(fh)
+ with (tmp_path / expected_path.name).open() as fh:
+ assert expected == json.load(fh)
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000000..cb8acf4222
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,15 @@
+"""Test fixtures."""
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+
+import pytest
+
+from .support.vault import MockVaultClient, patch_vault
+
+
+@pytest.fixture
+def mock_vault() -> Iterator[MockVaultClient]:
+ """Mock out the HVAC Vault client API."""
+ yield from patch_vault()
diff --git a/tests/data/input/applications/argocd/secrets.yaml b/tests/data/input/applications/argocd/secrets.yaml
new file mode 100644
index 0000000000..8c32f231ab
--- /dev/null
+++ b/tests/data/input/applications/argocd/secrets.yaml
@@ -0,0 +1,34 @@
+"admin.plaintext_password":
+ description: >-
+ Admin password for Argo CD. This password is normally not used because
+ Argo CD is configured to use Google or GitHub authentication, but it is
+ used by the installer (which cannot use external authentication) and is
+ useful as a fallback if external authentication is not working for some
+ reason. This secret can be changed at any time.
+ generate:
+ type: password
+"admin.password":
+ description: >-
+ bcrypt hash of the admin password. This is the only version of the admin
+ password exposed to the running Argo CD pod. It will be updated
+ automatically if the admin password is changed.
+ generate:
+ type: bcrypt-password-hash
+ source: admin.plaintext_password
+"admin.passwordMtime":
+ description: "Last modification time for the admin password."
+ generate:
+ type: mtime
+ source: admin.plaintext_password
+"dex.clientSecret":
+ description: >-
+ OAuth 2 or OpenID Connect client secret, used to authenticate to GitHub or
+ Google as part of the authentication flow. This secret can be changed at
+ any time.
+"server.secretkey":
+ description: >-
+ Key used to validate user session cookies. Argo CD will generate this
+ secret if it is missing, but we provide it because the Argo CD secret is
+ managed via a VaultSecret.
+ generate:
+ type: password
diff --git a/tests/data/input/applications/argocd/values-idfdev.yaml b/tests/data/input/applications/argocd/values-idfdev.yaml
new file mode 100644
index 0000000000..8586477a32
--- /dev/null
+++ b/tests/data/input/applications/argocd/values-idfdev.yaml
@@ -0,0 +1,61 @@
+argo-cd:
+  # At some point we may want to play with this more, but currently we're
+ # just using GafaelfawrIngress to protect Argo Workflows and requiring
+ # 'exec:admin' scope. It is theoretically possible to piggyback
+ # Workflows off of Dex SSO, but how to actually hook up the RBAC is
+ # going to need a lot of experimentation, creating service tokens, etc.
+
+ #dex:
+ # env:
+ # - name: ARGO_WORKFLOWS_SSO_CLIENT_SECRET
+ # valueFrom:
+ # secretKeyRef:
+ # name: argo-sso-secret
+ # key: client-secret
+
+ server:
+ ingress:
+ hosts:
+ - "data-dev.lsst.cloud"
+
+ config:
+ url: "https://data-dev.lsst.cloud/argo-cd"
+ dex.config: |
+ connectors:
+ # Auth using Google.
+ # See https://dexidp.io/docs/connectors/google/
+ - type: google
+ id: google
+ name: Google
+ config:
+ clientID: 176818997517-o2tu9978r099fnsnh1acd608gkmopfhu.apps.googleusercontent.com
+ clientSecret: $dex.clientSecret
+ hostedDomains:
+ - lsst.cloud
+ redirectURI: https://data-dev.lsst.cloud/argo-cd/api/dex/callback
+
+ # Again, change this if we want to use SSO
+
+ # staticClients:
+ # - id: argo-workflows-sso
+ # name: Argo Workflow
+ # redirectURIs:
+ # - https://data-dev-workflows.lsst.cloud/oauth2/callback
+ # secretEnv: ARGO_WORKFLOWS_SSO_CLIENT_SECRET
+
+ rbacConfig:
+ policy.csv: |
+ g, adam@lsst.cloud, role:admin
+ g, afausti@lsst.cloud, role:admin
+ g, christine@lsst.cloud, role:admin
+ g, dspeck@lsst.cloud, role:admin
+ g, frossie@lsst.cloud, role:admin
+ g, jsick@lsst.cloud, role:admin
+ g, krughoff@lsst.cloud, role:admin
+ g, rra@lsst.cloud, role:admin
+ g, gpdf@lsst.cloud, role:admin
+ g, loi@lsst.cloud, role:admin
+ g, roby@lsst.cloud, role:admin
+ g, kkoehler@lsst.cloud, role:admin
+ g, fritzm@lsst.cloud, role:admin
+ scopes: "[email]"
diff --git a/tests/data/input/applications/argocd/values.yaml b/tests/data/input/applications/argocd/values.yaml
new file mode 100644
index 0000000000..dcb51f1c8c
--- /dev/null
+++ b/tests/data/input/applications/argocd/values.yaml
@@ -0,0 +1,83 @@
+# Argo CD configuration
+# https://github.com/argoproj/argo-helm/blob/main/charts/argo-cd/values.yaml
+
+argo-cd:
+ global:
+ logging:
+ # -- Set the global logging format. Either: `text` or `json`
+ format: "json"
+
+ redis:
+ metrics:
+ # -- Enable Redis metrics service
+ enabled: true
+
+ controller:
+ metrics:
+ # -- Enable controller metrics service
+ enabled: true
+
+ applicationLabels:
+ # -- Enable adding additional labels to `argocd_app_labels` metric
+ enabled: true
+
+ # -- Labels to add to `argocd_app_labels` metric
+ labels: ["name", "instance"]
+
+ repoServer:
+ metrics:
+ # -- Enable repo server metrics service
+ enabled: true
+
+ notifications:
+ metrics:
+ # -- Enable notifications metrics service
+ enabled: true
+
+ server:
+ metrics:
+ # -- Enable server metrics service
+ enabled: true
+
+ ingress:
+ # -- Create an ingress for the Argo CD server
+ enabled: true
+
+ # -- Additional annotations to add to the Argo CD ingress
+ # @default -- Rewrite requests to remove `/argo-cd/` prefix
+ annotations:
+ nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+
+ # -- Ingress class to use for Argo CD ingress
+ ingressClassName: "nginx"
+
+ # -- Paths to route to Argo CD
+ paths:
+ - "/argo-cd(/|$)(.*)"
+
+ # -- Type of path expression for Argo CD ingress
+ pathType: "ImplementationSpecific"
+
+ configs:
+ cm:
+ # -- Configure resource comparison
+ resource.compareoptions: |
+ ignoreAggregatedRoles: true
+
+ params:
+ # -- Do not use TLS (this is terminated at the ingress)
+ server.insecure: true
+
+ # -- Base href for `index.html` when running under a reverse proxy
+ server.basehref: "/argo-cd"
+
+ secret:
+ # -- Create the Argo CD secret (we manage this with Vault)
+ createSecret: false
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
diff --git a/tests/data/input/applications/gafaelfawr/secrets.yaml b/tests/data/input/applications/gafaelfawr/secrets.yaml
new file mode 100644
index 0000000000..972698fe99
--- /dev/null
+++ b/tests/data/input/applications/gafaelfawr/secrets.yaml
@@ -0,0 +1,81 @@
+bootstrap-token:
+ description: >-
+ Token with admin access, regardless of any other scopes or configuration,
+ which can be used to add new Gafaelfawr administrators and bootstrap
+ creation of other tokens with arbitrary scopes. To use this token,
+ retrieve it from the Kubernetes secret and then use it in API calls like
+ any other Gafaelfawr token. This secret can be changed at any time.
+ generate:
+ type: gafaelfawr-token
+cilogon-client-secret:
+ description: >-
+ Secret used to authenticate to CILogon as part of the OpenID Connect login
+ protocol to obtain an identity token for the user. This secret can be
+ changed at any time.
+ if: config.cilogon.clientId
+database-password:
+ description: >-
+ Password used to authenticate to the PostgreSQL database used to store
+ Gafaelfawr data. This password may be changed at any time.
+ generate:
+ if: config.internalDatabase
+ type: password
+forgerock-password:
+ description: >-
+ Password used to authenticate to a ForgeRock Identity server using HTTP
+ Basic authentication to retrieve GID mappings for groups.
+ if: config.forgerock.url
+github-client-secret:
+ description: >-
+ GitHub OAuth App secret used to authenticate to GitHub as part of the
+ OAuth 2 login protocol to obtain an identity token for the user. This
+ secret can be changed at any time.
+ if: config.github.clientId
+ldap-keytab:
+ description: >-
+ Kerberos keytab used to authenticate to the LDAP server via GSSAPI binds
+ to retrieve user and group information. This keytab can be changed at any
+ time.
+ if: config.ldap.kerberosConfig
+ldap-password:
+ description: >-
+ Password to authenticate to the LDAP server via simple binds to retrieve
+ user and group information. This password can be changed at any time.
+ if: config.ldap.userDn
+oidc-client-secret:
+ description: >-
+ Secret used to authenticate to a remote OpenID Connect authentication
+ server. This secret can be changed at any time.
+ if: config.oidc.clientId
+redis-password:
+ description: >-
+ Password used to authenticate Gafaelfawr to its internal Redis server,
+ deployed as part of the same Argo CD application. This secret can be
+ changed at any time, but both the Redis server and all Gafaelfawr
+ deployments will then have to be restarted to pick up the new value.
+ generate:
+ type: password
+session-secret:
+ description: >-
+ Encryption key used to encrypt the contents of Redis and the cookie data
+ stored in user web browsers that holds their session token and related
+ information. Changing this secret will invalidate all existing Redis data
+ and all user authentication cookies.
+ generate:
+ type: fernet-key
+signing-key:
+ description: >-
+ RSA private key used to sign JWTs issued by Gafaelfawr when it acts as an
+ OpenID Connect server. Changing this secret will invalidate all existing
+ issued OpenID Connect JWTs.
+ if: config.oidcServer.enabled
+ generate:
+ type: rsa-private-key
+slack-webhook:
+ description: >-
+ Slack web hook used to report internal errors to Slack. This secret may be
+ changed at any time.
+ if: config.slackAlerts
+ copy:
+ application: mobu
+ key: app-alert-webhook
diff --git a/tests/data/input/applications/gafaelfawr/values-idfdev.yaml b/tests/data/input/applications/gafaelfawr/values-idfdev.yaml
new file mode 100644
index 0000000000..fc371da3c2
--- /dev/null
+++ b/tests/data/input/applications/gafaelfawr/values-idfdev.yaml
@@ -0,0 +1,72 @@
+# Use the CSI storage class so that we can use snapshots.
+redis:
+ persistence:
+ storageClass: "standard-rwo"
+
+config:
+ logLevel: "DEBUG"
+ slackAlerts: true
+
+ cilogon:
+ clientId: "cilogon:/client_id/46f9ae932fd30e9fb1b246972a3c0720"
+ enrollmentUrl: "https://id-dev.lsst.cloud/registry/co_petitions/start/coef:6"
+ test: true
+ usernameClaim: "username"
+
+ ldap:
+ url: "ldaps://ldap-test.cilogon.org"
+ userDn: "uid=readonly_user,ou=system,o=LSST,o=CO,dc=lsst_dev,dc=org"
+ groupBaseDn: "ou=groups,o=LSST,o=CO,dc=lsst_dev,dc=org"
+ groupObjectClass: "eduMember"
+ groupMemberAttr: "hasMember"
+ userBaseDn: "ou=people,o=LSST,o=CO,dc=lsst_dev,dc=org"
+ userSearchAttr: "voPersonApplicationUID"
+ addUserGroup: true
+
+ firestore:
+ project: "rsp-firestore-dev-31c4"
+
+ # Support OpenID Connect clients like Chronograf.
+ oidcServer:
+ enabled: true
+
+ # User quota settings for services.
+ quota:
+ default:
+ notebook:
+ cpu: 4.0
+ memory: 16
+
+ groupMapping:
+ "admin:jupyterlab":
+ - "g_admins"
+ "admin:provision":
+ - "g_admins"
+ "exec:admin":
+ - "g_admins"
+ "exec:internal-tools":
+ - "g_users"
+ "exec:notebook":
+ - "g_users"
+ "exec:portal":
+ - "g_users"
+ "read:image":
+ - "g_users"
+ "read:tap":
+ - "g_users"
+ "write:sasquatch":
+ - "g_admins"
+
+ initialAdmins:
+ - "adam"
+ - "afausti"
+ - "cbanek"
+ - "frossie"
+ - "jsick"
+ - "rra"
+ - "simonkrughoff"
+
+cloudsql:
+ enabled: true
+ instanceConnectionName: "science-platform-dev-7696:us-central1:science-platform-dev-e9e11de2"
+ serviceAccount: "gafaelfawr@science-platform-dev-7696.iam.gserviceaccount.com"
diff --git a/tests/data/input/applications/gafaelfawr/values.yaml b/tests/data/input/applications/gafaelfawr/values.yaml
new file mode 100644
index 0000000000..42bb9e5064
--- /dev/null
+++ b/tests/data/input/applications/gafaelfawr/values.yaml
@@ -0,0 +1,429 @@
+# Default values for Gafaelfawr.
+
+# -- Override the base name for resources
+nameOverride: ""
+
+# -- Override the full name for resources (includes the release name)
+fullnameOverride: ""
+
+# -- Number of web frontend pods to start
+replicaCount: 1
+
+image:
+ # -- Gafaelfawr image to use
+ repository: "ghcr.io/lsst-sqre/gafaelfawr"
+
+ # -- Pull policy for the Gafaelfawr image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of Gafaelfawr image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+# -- Resource limits and requests for the Gafaelfawr frontend pod
+resources: {}
+
+# -- Annotations for the Gafaelfawr frontend pod
+podAnnotations: {}
+
+# -- Node selector rules for the Gafaelfawr frontend pod
+nodeSelector: {}
+
+# -- Tolerations for the Gafaelfawr frontend pod
+tolerations: []
+
+# -- Affinity rules for the Gafaelfawr frontend pod
+affinity: {}
+
+config:
+ # -- Whether to use the PostgreSQL server internal to the Kubernetes cluster
+ internalDatabase: false
+
+ # -- URL for the PostgreSQL database
+ # @default -- None, must be set if neither `cloudsql.enabled` nor
+ # `config.internalDatabase` are true
+ databaseUrl: ""
+
+ # -- Choose from the text form of Python logging levels
+ logLevel: "INFO"
+
+ # -- Session length and token expiration (in minutes)
+ # @default -- `43200` (30 days)
+ tokenLifetimeMinutes: 43200
+
+ # -- List of netblocks used for internal Kubernetes IP addresses, used to
+ # determine the true client IP for logging
+ # @default -- [`10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`]
+ proxies:
+ - "10.0.0.0/8"
+ - "172.16.0.0/12"
+ - "192.168.0.0/16"
+
+ # -- HTML footer to add to any login error page (will be enclosed in a
+ # tag).
+ errorFooter: ""
+
+ # -- Whether to send certain serious alerts to Slack. If `true`, the
+ # `slack-webhook` secret must also be set.
+ slackAlerts: false
+
+ github:
+ # -- GitHub client ID. One and only one of this, `config.cilogon.clientId`,
+ # or `config.oidc.clientId` must be set.
+ clientId: ""
+
+ cilogon:
+ # -- CILogon client ID. One and only one of this,
+ # `config.github.clientId`, or `config.oidc.clientId` must be set.
+ clientId: ""
+
+ # -- Where to send the user if their username cannot be found in LDAP
+ # @default -- Login fails with an error
+ enrollmentUrl: ""
+
+ # -- Whether to use the test instance of CILogon
+ test: false
+
+ # -- Additional parameters to add
+ loginParams:
+ skin: "LSST"
+
+ # -- Claim from which to get the username
+ # @default -- `"uid"`
+ usernameClaim: ""
+
+ # -- Claim from which to get the numeric UID (only used if not retrieved
+ # from LDAP or Firestore)
+ # @default -- `"uidNumber"`
+ uidClaim: ""
+
+ # -- Claim from which to get the primary GID (only used if not retrieved
+ # from LDAP or Firestore)
+ # @default -- Do not set a primary GID
+ gidClaim: ""
+
+ # -- Claim from which to get the group membership (only used if not
+ # retrieved from LDAP)
+ # @default -- `"isMemberOf"`
+ groupsClaim: ""
+
+ oidc:
+ # -- Client ID for generic OpenID Connect support. One and only one of
+ # this, `config.cilogon.clientId`, or `config.github.clientId` must be set.
+ clientId: ""
+
+ # -- Audience for the JWT token
+ # @default -- Value of `config.oidc.clientId`
+ audience: ""
+
+ # -- URL to which to redirect the user for authorization
+ # @default -- None, must be set
+ loginUrl: ""
+
+ # -- Additional parameters to add to the login request
+ loginParams: {}
+
+ # -- URL from which to retrieve the token for the user
+ # @default -- None, must be set
+ tokenUrl: ""
+
+ # -- Where to send the user if their username cannot be found in LDAP
+ # @default -- Login fails with an error
+ enrollmentUrl: ""
+
+ # -- Issuer for the JWT token
+ # @default -- None, must be set
+ issuer: ""
+
+ # -- Scopes to request from the OpenID Connect provider
+ scopes:
+ - "openid"
+
+ # -- Claim from which to get the username
+ # @default -- `"sub"`
+ usernameClaim: ""
+
+ # -- Claim from which to get the numeric UID (only used if not retrieved
+ # from LDAP or Firestore)
+ # @default -- `"uidNumber"`
+ uidClaim: ""
+
+ # -- Claim from which to get the primary GID (only used if not retrieved
+ # from LDAP or Firestore)
+ # @default -- Do not set a primary GID
+ gidClaim: ""
+
+ # -- Claim from which to get the group membership (only used if not
+ # retrieved from LDAP)
+ # @default -- `"isMemberOf"`
+ groupsClaim: ""
+
+ ldap:
+ # -- LDAP server URL from which to retrieve user group information
+ # @default -- Do not use LDAP
+ url: ""
+
+ # -- Bind DN for simple bind authentication. If set, `ldap-secret` must be
+ # set in the Gafaelfawr Vault secret. Set this or `kerberosConfig`, not
+ # both.
+ # @default -- Use anonymous binds
+ userDn: ""
+
+ # -- Enable GSSAPI (Kerberos) binds to LDAP using this `krb5.conf` file.
+ # If set, `ldap-keytab` must be set in the Gafaelfawr Vault secret. Set
+ # either this or `userDn`, not both.
+ # @default -- Use anonymous binds
+ kerberosConfig: ""
+
+ # -- Base DN for the LDAP search to find a user's groups
+ # @default -- None, must be set
+ groupBaseDn: ""
+
+ # -- Object class containing group information
+ groupObjectClass: "posixGroup"
+
+ # -- Member attribute of the object class. Values must match the username
+ # returned in the token from the OpenID Connect authentication server.
+ groupMemberAttr: "member"
+
+ # -- Base DN for the LDAP search to find a user's entry
+ # @default -- Get user metadata from the upstream authentication provider
+ userBaseDn: ""
+
+ # -- Search attribute containing the user's username
+ userSearchAttr: "uid"
+
+ # -- Attribute containing the user's full name
+ nameAttr: "displayName"
+
+ # -- Attribute containing the user's email address
+ emailAttr: "mail"
+
+ # -- Attribute containing the user's UID number (set to `uidNumber` for
+ # most LDAP servers)
+ # @default -- Get UID from upstream authentication provider
+ uidAttr: ""
+
+ # -- Attribute containing the user's primary GID (set to `gidNumber` for
+ # most LDAP servers)
+ # @default -- Use GID of user private group
+ gidAttr: ""
+
+ # -- Whether to synthesize a user private group for each user with a GID
+ # equal to their UID
+ addUserGroup: false
+
+ firestore:
+ # -- If set, assign UIDs and GIDs using Google Firestore in the given
+ # project. Cloud SQL must be enabled and the Cloud SQL service account
+ # must have read/write access to that Firestore instance.
+ # @default -- Firestore support is disabled
+ project: ""
+
+ forgerock:
+ # -- If set, obtain the GIDs for groups from this ForgeRock Identity
+ # Management server.
+ # @default -- ForgeRock Identity Management support is disabled
+ url: ""
+
+ # -- Username to use for HTTP Basic authentication to ForgeRock Identity
+    # Management. The corresponding password must be in the
+    # `forgerock-password` key of the Gafaelfawr Vault secret.
+ # @default -- None, must be set if `config.forgerock.url` is set
+ username: ""
+
+ oidcServer:
+ # -- Whether to support OpenID Connect clients. If set to true,
+ # `oidc-server-secrets` must be set in the Gafaelfawr secret.
+ enabled: false
+
+ # -- Quota settings (see
+ # [Quotas](https://gafaelfawr.lsst.io/user-guide/helm.html#quotas)).
+ quota: {}
+
+ # -- Usernames to add as administrators when initializing a new database.
+ # Used only if there are no administrators.
+ initialAdmins: []
+
+ # -- Names and descriptions of all scopes in use. This is used to populate
+ # the new token creation page. Only scopes listed here will be options when
+ # creating a new token. See [DMTN-235](https://dmtn-235.lsst.io/).
+ # @default -- See the `values.yaml` file
+ knownScopes:
+ "admin:jupyterlab": >-
+ Can create and destroy labs for any user
+ "admin:token": >-
+ Can create and modify tokens for any user
+ "admin:provision": >-
+ Can perform privileged user provisioning
+ "exec:admin": >-
+ Administrative access to all APIs
+ "exec:internal-tools": >-
+ Use project-internal tools.
+ "exec:notebook": >-
+ Use the Notebook Aspect
+ "exec:portal": >-
+ Use the Portal Aspect
+ "read:alertdb": >-
+ Retrieve alert packets and schemas from the alert archive database
+ "read:image": >-
+ Retrieve images from project datasets
+ "read:tap": >-
+ Execute SELECT queries in the TAP interface on project datasets
+ "write:sasquatch": >-
+ "Write access to the Sasquatch telemetry service"
+ "user:token": >-
+ Can create and modify user tokens
+
+ # -- Defines a mapping of scopes to groups that provide that scope. See
+ # [DMTN-235](https://dmtn-235.lsst.io/) for more details on scopes.
+ groupMapping: {}
+
+ingress:
+ # -- Defines additional FQDNs for Gafaelfawr. This doesn't work for
+ # cookie or browser authentication, but for token-based services like
+ # git-lfs or the webdav server it does.
+ additionalHosts: []
+
+cloudsql:
+ # -- Enable the Cloud SQL Auth Proxy, used with CloudSQL databases on Google
+ # Cloud. This will be run as a sidecar for the main Gafaelfawr pods, and as
+ # a separate service (behind a `NetworkPolicy`) for other, lower-traffic
+ # services.
+ enabled: false
+
+ image:
+ # -- Cloud SQL Auth Proxy image to use
+ repository: "gcr.io/cloudsql-docker/gce-proxy"
+
+ # -- Cloud SQL Auth Proxy tag to use
+ tag: "1.33.8"
+
+ # -- Pull policy for Cloud SQL Auth Proxy images
+ pullPolicy: "IfNotPresent"
+
+ # -- Instance connection name for a CloudSQL PostgreSQL instance
+ # @default -- None, must be set if Cloud SQL Auth Proxy is enabled
+ instanceConnectionName: ""
+
+ # -- The Google service account that has an IAM binding to the `gafaelfawr`
+ # Kubernetes service account and has the `cloudsql.client` role
+ # @default -- None, must be set if Cloud SQL Auth Proxy is enabled
+ serviceAccount: ""
+
+ # -- Resource limits and requests for the Cloud SQL Proxy pod
+ resources: {}
+
+ # -- Annotations for the Cloud SQL Proxy pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the Cloud SQL Proxy pod
+ nodeSelector: {}
+
+ # -- Tolerations for the Cloud SQL Proxy pod
+ tolerations: []
+
+ # -- Affinity rules for the Cloud SQL Proxy pod
+ affinity: {}
+
+maintenance:
+ # -- Cron schedule string for Gafaelfawr data consistency audit (in UTC)
+ auditSchedule: "30 3 * * *"
+
+ # -- Cron schedule string for Gafaelfawr periodic maintenance (in UTC)
+ maintenanceSchedule: "5 * * * *"
+
+ # -- Resource limits and requests for Gafaelfawr maintenance and audit pods
+ resources: {}
+
+ # -- Annotations for Gafaelfawr maintenance and audit pods
+ podAnnotations: {}
+
+ # -- Node selection rules for Gafaelfawr maintenance and audit pods
+ nodeSelector: {}
+
+ # -- Tolerations for Gafaelfawr maintenance and audit pods
+ tolerations: []
+
+ # -- Affinity rules for Gafaelfawr maintenance and audit pods
+ affinity: {}
+
+operator:
+ # -- Resource limits and requests for the Gafaelfawr Kubernetes operator
+ resources: {}
+
+ # -- Annotations for the token management pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the token management pod
+ nodeSelector: {}
+
+ # -- Tolerations for the token management pod
+ tolerations: []
+
+ # -- Affinity rules for the token management pod
+ affinity: {}
+
+redis:
+ config:
+ # -- Name of secret containing Redis password (may require changing if
+ # fullnameOverride is set)
+ secretName: "gafaelfawr-secret"
+
+ # -- Key inside secret from which to get the Redis password (do not
+ # change)
+ secretKey: "redis-password"
+
+ persistence:
+ # -- Whether to persist Redis storage and thus tokens. Setting this to
+ # false will use `emptyDir` and reset all tokens on every restart. Only
+ # use this for a test deployment.
+ enabled: true
+
+ # -- Amount of persistent storage to request
+ size: "1Gi"
+
+ # -- Class of storage to request
+ storageClass: ""
+
+ # -- Access mode of storage to request
+ accessMode: "ReadWriteOnce"
+
+ # -- Use an existing PVC, not dynamic provisioning. If this is set, the
+ # size, storageClass, and accessMode settings are ignored.
+ volumeClaimName: ""
+
+ # -- Resource limits and requests for the Redis pod
+ # @default -- See `values.yaml`
+ resources:
+ limits:
+ cpu: "1"
+ requests:
+ cpu: "100m"
+
+ # -- Pod annotations for the Redis pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the Redis pod
+ nodeSelector: {}
+
+ # -- Tolerations for the Redis pod
+ tolerations: []
+
+ # -- Affinity rules for the Redis pod
+ affinity: {}
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+ # -- Base URL for the environment
+ # @default -- Set by Argo CD
+ baseUrl: ""
+
+ # -- Host name for ingress
+ # @default -- Set by Argo CD
+ host: ""
+
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
diff --git a/tests/data/input/applications/mobu/secrets.yaml b/tests/data/input/applications/mobu/secrets.yaml
new file mode 100644
index 0000000000..87f8c046fd
--- /dev/null
+++ b/tests/data/input/applications/mobu/secrets.yaml
@@ -0,0 +1,11 @@
+app-alert-webhook:
+ description: >-
+ Slack web hook to which to post internal application alerts. This secret
+ is not used directly by mobu, but is copied from here to all of the
+ applications that report internal problems to Slack. It should normally be
+    separate from mobu's own web hook, since the separate identities attached
+    to the messages help make the type of message clearer, but the same web
+ hook as mobu's own alerts can be used in a pinch.
+ALERT_HOOK:
+ description: >-
+ Slack web hook to which mobu should report failures and daily status.
diff --git a/tests/data/input/applications/mobu/values.yaml b/tests/data/input/applications/mobu/values.yaml
new file mode 100644
index 0000000000..241d73d661
--- /dev/null
+++ b/tests/data/input/applications/mobu/values.yaml
@@ -0,0 +1,67 @@
+# Default values for mobu.
+
+# -- Override the base name for resources
+nameOverride: ""
+
+# -- Override the full name for resources (includes the release name)
+fullnameOverride: ""
+
+image:
+ # -- mobu image to use
+ repository: "ghcr.io/lsst-sqre/mobu"
+
+ # -- Pull policy for the mobu image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of mobu image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+ingress:
+ # -- Additional annotations to add to the ingress
+ annotations: {}
+
+config:
+ # -- Autostart specification. Must be a list of mobu flock specifications.
+ # Each flock listed will be automatically started when mobu is started.
+ autostart: []
+
+ # -- If set to true, include the output from all flocks in the main mobu log
+ # and disable structured JSON logging.
+ debug: false
+
+ # -- If set to true, do not configure mobu to send alerts to Slack.
+ disableSlackAlerts: false
+
+ # -- Prefix for mobu's API routes.
+ pathPrefix: "/mobu"
+
+# -- Resource limits and requests for the mobu frontend pod
+resources: {}
+
+# -- Annotations for the mobu frontend pod
+podAnnotations: {}
+
+# -- Node selector rules for the mobu frontend pod
+nodeSelector: {}
+
+# -- Tolerations for the mobu frontend pod
+tolerations: []
+
+# -- Affinity rules for the mobu frontend pod
+affinity: {}
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+ # -- Base URL for the environment
+ # @default -- Set by Argo CD
+ baseUrl: ""
+
+ # -- Host name for ingress
+ # @default -- Set by Argo CD
+ host: ""
+
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
diff --git a/tests/data/input/applications/nublado/secrets-idfdev.yaml b/tests/data/input/applications/nublado/secrets-idfdev.yaml
new file mode 100644
index 0000000000..97d5af3ca8
--- /dev/null
+++ b/tests/data/input/applications/nublado/secrets-idfdev.yaml
@@ -0,0 +1,15 @@
+"aws-credentials.ini":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store, formatted using
+ AWS syntax for use with boto.
+"butler-gcs-idf-creds.json":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the native
+ Google syntax, containing the private asymmetric key.
+"butler-hmac-idf-creds.json":
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the private
+ key syntax used for HMACs.
+"postgres-credentials.txt":
+ description: >-
+ PostgreSQL credentials in its pgpass format for the Butler database.
diff --git a/tests/data/input/applications/nublado/secrets.yaml b/tests/data/input/applications/nublado/secrets.yaml
new file mode 100644
index 0000000000..f0e2c52cee
--- /dev/null
+++ b/tests/data/input/applications/nublado/secrets.yaml
@@ -0,0 +1,23 @@
+cryptkeeper_key:
+ description: "Encryption key for internal key management."
+ generate:
+ type: password
+crypto_key:
+ description: "Encryption key for JupyterHub stored state."
+ generate:
+ type: password
+hub_db_password:
+ description: "Password to authenticate to the JupyterHub session database."
+ generate:
+ type: password
+ if: hub.internalDatabase
+proxy_token:
+ description: "Token authenticating JupyterHub to the proxy server."
+ generate:
+ type: password
+slack_webhook:
+ description: "Slack web hook to which to post alerts."
+ if: controller.slackAlerts
+ copy:
+ application: mobu
+ key: app-alert-webhook
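The `generate`, `if`, and `copy` directives in these secrets.yaml files form a small specification language: a secret may be conditional on a configuration flag, generated fresh, or copied from another application. A rough sketch of the resolution semantics follows; the function and its signature are illustrative, not the actual Phalanx implementation.

```python
import secrets
from typing import Any


def resolve_secret(
    name: str,
    spec: dict[str, Any],
    conditions: dict[str, bool],
    resolved: dict[str, dict[str, str]],
) -> str | None:
    """Resolve one secret specification (illustrative sketch only).

    conditions maps "if" expressions such as hub.internalDatabase to
    booleans, and resolved holds already-resolved secrets by
    application so that "copy" directives can be satisfied.
    """
    # A secret guarded by an "if" whose condition is false is skipped.
    if "if" in spec and not conditions.get(spec["if"], False):
        return None
    # "copy" pulls the value from another application's secret, as
    # nublado's slack_webhook does from mobu's app-alert-webhook.
    if "copy" in spec:
        source = spec["copy"]
        return resolved[source["application"]][source["key"]]
    # "generate" creates a new random value of the requested type.
    if spec.get("generate", {}).get("type") == "password":
        return secrets.token_hex(32)
    # Anything else is a static secret that must already be in Vault.
    raise KeyError(f"static secret {name} must be provided")
```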
diff --git a/tests/data/input/applications/nublado/values-idfdev.yaml b/tests/data/input/applications/nublado/values-idfdev.yaml
new file mode 100644
index 0000000000..d886967228
--- /dev/null
+++ b/tests/data/input/applications/nublado/values-idfdev.yaml
@@ -0,0 +1,72 @@
+controller:
+ googleServiceAccount: "nublado-controller@science-platform-dev-7696.iam.gserviceaccount.com"
+ slackAlerts: true
+ config:
+ safir:
+ logLevel: "DEBUG"
+ fileserver:
+ enabled: true
+ timeout: 43200
+ images:
+ source:
+ type: "google"
+ location: "us-central1"
+ projectId: "rubin-shared-services-71ec"
+ repository: "sciplat"
+ image: "sciplat-lab"
+ recommendedTag: "recommended"
+ numReleases: 1
+ numWeeklies: 2
+ numDailies: 3
+ lab:
+ env:
+ AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod,https://github.com/rubin-dp0/tutorial-notebooks@prod"
+ AWS_SHARED_CREDENTIALS_FILE: "/opt/lsst/software/jupyterlab/secrets/aws-credentials.ini"
+ PGPASSFILE: "/opt/lsst/software/jupyterlab/secrets/postgres-credentials.txt"
+ DAF_BUTLER_REPOSITORY_INDEX: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml"
+ GOOGLE_APPLICATION_CREDENTIALS: "/opt/lsst/software/jupyterlab/secrets/butler-gcs-idf-creds.json"
+ S3_ENDPOINT_URL: "https://storage.googleapis.com"
+ initContainers:
+ - name: "initdir"
+ image: "ghcr.io/lsst-sqre/initdir:0.0.4"
+ privileged: true
+ volumes:
+ - containerPath: "/home"
+ mode: "rw"
+ source:
+ type: nfs
+ serverPath: "/share1/home"
+ server: "10.87.86.26"
+
+ secrets:
+ - secretName: "nublado-lab-secret"
+ secretKey: "aws-credentials.ini"
+ - secretName: "nublado-lab-secret"
+ secretKey: "butler-gcs-idf-creds.json"
+ - secretName: "nublado-lab-secret"
+ secretKey: "butler-hmac-idf-creds.json"
+ - secretName: "nublado-lab-secret"
+ secretKey: "postgres-credentials.txt"
+ volumes:
+ - containerPath: "/home"
+ mode: "rw"
+ source:
+ type: nfs
+ serverPath: "/share1/home"
+ server: "10.87.86.26"
+ - containerPath: "/project"
+ mode: "rw"
+ source:
+ type: nfs
+ serverPath: "/share1/project"
+ server: "10.87.86.26"
+ - containerPath: "/scratch"
+ mode: "rw"
+ source:
+ type: nfs
+ serverPath: "/share1/scratch"
+ server: "10.87.86.26"
+jupyterhub:
+ hub:
+ db:
+ url: "postgresql://nublado3@postgres.postgres/nublado3"
diff --git a/tests/data/input/applications/nublado/values.yaml b/tests/data/input/applications/nublado/values.yaml
new file mode 100644
index 0000000000..0f89b35a56
--- /dev/null
+++ b/tests/data/input/applications/nublado/values.yaml
@@ -0,0 +1,462 @@
+# Default values for Nublado.
+
+controller:
+ image:
+ # -- nublado image to use
+ repository: ghcr.io/lsst-sqre/jupyterlab-controller
+
+ # -- Pull policy for the nublado image
+ pullPolicy: IfNotPresent
+
+ # -- Tag of nublado image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+ # -- Affinity rules for the lab controller pod
+ affinity: {}
+
+ # -- Node selector rules for the lab controller pod
+ nodeSelector: {}
+
+ # -- Annotations for the lab controller pod
+ podAnnotations: {}
+
+ # -- Resource limits and requests for the lab controller pod
+ resources: {}
+
+ # -- Tolerations for the lab controller pod
+ tolerations: []
+
+ ingress:
+ # -- Additional annotations to add for the lab controller pod ingress
+ annotations: {}
+
+ # -- If Google Artifact Registry is used as the image source, the Google
+ # service account that has an IAM binding to the `nublado-controller`
+ # Kubernetes service account and has the Artifact Registry reader role
+ # @default -- None, must be set when using Google Artifact Registry
+ googleServiceAccount: ""
+
+ # -- Whether to enable Slack alerts. If set to true, `slack_webhook` must be
+ # set in the corresponding Nublado Vault secret.
+ slackAlerts: false
+
+ # Passed as YAML to the lab controller.
+ config:
+ fileserver:
+ # -- Enable fileserver management
+ enabled: false
+
+ # -- Image for fileserver container
+ image: ghcr.io/lsst-sqre/worblehat
+
+ # -- Tag for fileserver container
+ tag: 0.1.0
+
+ # -- Pull policy for fileserver container
+ pullPolicy: IfNotPresent
+
+ # -- Timeout for user fileservers, in seconds
+ timeout: 3600
+
+ # -- Namespace for user fileservers
+ namespace: fileservers
+
+ images:
+ # -- Source for prepulled images. For Docker, set `type` to `docker`,
+ # `registry` to the hostname and `repository` to the name of the
+ # repository. For Google Artifact Registry, set `type` to `google`,
+ # `location` to the region, `projectId` to the Google project,
+ # `repository` to the name of the repository, and `image` to the name of
+ # the image.
+ # @default -- None, must be specified
+ source: {}
+
+ # -- Tag marking the recommended image (shown first in the menu)
+ recommendedTag: "recommended"
+
+ # -- Number of most-recent releases to prepull.
+ numReleases: 1
+
+ # -- Number of most-recent weeklies to prepull.
+ numWeeklies: 2
+
+ # -- Number of most-recent dailies to prepull.
+ numDailies: 3
+
+ # -- Restrict images to this SAL cycle, if given.
+ cycle: null
+
+ # -- List of additional image tags to prepull. When using a Docker image
+ # source, also list the recommended image here so that its name can be
+ # expanded properly in the menu.
+ pin: []
+
+ # -- Additional tags besides `recommendedTag` that should be recognized
+ # as aliases.
+ aliasTags: []
+
+ lab:
+ # -- Environment variables to set for every user lab.
+ # @default -- See `values.yaml`
+ env:
+ API_ROUTE: "/api"
+ AUTO_REPO_SPECS: "https://github.com/lsst-sqre/system-test@prod"
+ CULL_KERNEL_IDLE_TIMEOUT: "432000" # These might be set from group?
+ CULL_KERNEL_CONNECTED: "True"
+ CULL_KERNEL_INTERVAL: "300"
+ FIREFLY_ROUTE: "/portal/app"
+ HUB_ROUTE: "/nb/hub"
+ NO_ACTIVITY_TIMEOUT: "432000" # Also from group?
+ TAP_ROUTE: "/api/tap"
+
+ # -- Containers run as init containers with each user pod. Each should
+ # set `name`, `image` (a Docker image reference), and `privileged`, and
+ # may contain `volumes` (similar to the main `volumes`
+ # configuration). If `privileged` is true, the container will run as
+ # root with `allowPrivilegeEscalation` true. Otherwise it will run as
+ # UID 1000.
+ initContainers: []
+
+ # -- Pull secret to use for labs. Set to the string `pull-secret` to use
+ # the normal pull secret from Vault.
+ # @default -- Do not use a pull secret
+ pullSecret: null
+
+ # -- Secrets to set in the user pods. Each should have a `secretName`
+ # key pointing to a secret in the same namespace as the controller
+ # (generally `nublado-secret`) and a `secretKey` key pointing to a field
+ # in that secret.
+ secrets: []
+
+ # -- Available lab sizes. Names must be chosen from `fine`,
+ # `diminutive`, `tiny`, `small`, `medium`, `large`, `huge`,
+ # `gargantuan`, and `colossal` in that order. Each should specify the
+ # maximum CPU equivalents and memory. SI prefixes for memory are
+ # supported.
+ # @default -- See `values.yaml` (specifies `small`, `medium`, and
+ # `large`)
+ sizes:
+ small:
+ cpu: 1.0
+ memory: 4Gi
+ medium:
+ cpu: 2.0
+ memory: 8Gi
+ large:
+ cpu: 4.0
+ memory: 16Gi
+
+ # -- Volumes that should be mounted in lab pods. This supports NFS,
+ # HostPath, and PVC volume types (differentiated in source.type)
+ volumes: []
+ # volumes:
+ # - containerPath: "/project"
+ # mode: "rw"
+ # source:
+ # type: nfs
+ # serverPath: "/share1/project"
+ # server: "10.87.86.26"
+
+ # -- Files to be mounted as ConfigMaps inside the user lab pod.
+ # `contents` contains the file contents. Set `modify` to true to make
+ # the file writable in the pod.
+ # @default -- See `values.yaml`
+ files:
+ /etc/passwd:
+ modify: true
+ contents: |
+ root:x:0:0:root:/root:/bin/bash
+ bin:x:1:1:bin:/bin:/sbin/nologin
+ daemon:x:2:2:daemon:/sbin:/sbin/nologin
+ adm:x:3:4:adm:/var/adm:/sbin/nologin
+ lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin
+ sync:x:5:0:sync:/sbin:/bin/sync
+ shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown
+ halt:x:7:0:halt:/sbin:/sbin/halt
+ mail:x:8:12:mail:/var/spool/mail:/sbin/nologin
+ operator:x:11:0:operator:/root:/sbin/nologin
+ games:x:12:100:games:/usr/games:/sbin/nologin
+ ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin
+ tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin
+ dbus:x:81:81:System message bus:/:/sbin/nologin
+ nobody:x:99:99:Nobody:/:/sbin/nologin
+ systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin
+ lsst_lcl:x:1000:1000::/home/lsst_lcl:/bin/bash
+ /etc/group:
+ modify: true
+ contents: |
+ root:x:0:
+ bin:x:1:
+ daemon:x:2:
+ sys:x:3:
+ adm:x:4:
+ tty:x:5:
+ disk:x:6:
+ lp:x:7:
+ mem:x:8:
+ kmem:x:9:
+ wheel:x:10:
+ cdrom:x:11:
+ mail:x:12:
+ man:x:15:
+ dialout:x:18:
+ floppy:x:19:
+ games:x:20:
+ utmp:x:22:
+ tape:x:33:
+ utempter:x:35:
+ video:x:39:
+ ftp:x:50:
+ lock:x:54:
+ tss:x:59:
+ audio:x:63:
+ dbus:x:81:
+ screen:x:84:
+ nobody:x:99:
+ users:x:100:
+ systemd-journal:x:190:
+ systemd-network:x:192:
+ cgred:x:997:
+ ssh_keys:x:998:
+ input:x:999:
+ /opt/lsst/software/jupyterlab/lsst_dask.yml:
+ modify: false
+ contents: |
+ # No longer used, but preserves compatibility with runlab.sh
+ dask_worker.yml: |
+ enabled: false
+ /opt/lsst/software/jupyterlab/panda/idds.cfg.client.template:
+ modify: false
+ contents: |
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # You may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Authors:
+ # - Wen Guan, , 2020
+ [common]
+ # if logdir is configured, idds will write to idds.log in this directory.
+ # else idds will go to stdout/stderr.
+ # With supervisord, it's good to write to stdout/stderr, then supervisord can manage and rotate logs.
+ # logdir = /var/log/idds
+ loglevel = INFO
+ [rest]
+ host = https://iddsserver.cern.ch:443/idds
+ #url_prefix = /idds
+ #cacher_dir = /tmp
+ cacher_dir = /data/idds
+
+ safir:
+ # -- Level of Python logging
+ logLevel: "INFO"
+
+ # -- Path prefix that will be routed to the controller
+ pathPrefix: "/nublado"
+
+# JupyterHub configuration handled directly by this chart rather than by Zero
+# to JupyterHub.
+hub:
+ # -- Whether to use the cluster-internal PostgreSQL server instead of an
+ # external server. This is not used directly by the Nublado chart, but
+ # controls how the database password is managed.
+ internalDatabase: true
+
+ timeout:
+ # -- Timeout for the Kubernetes spawn process in seconds. (Allow long
+ # enough to pull uncached images if needed.)
+ spawn: 600
+
+ # -- Timeout for JupyterLab to start. Currently this sometimes takes over
+ # 60 seconds for reasons we don't understand.
+ startup: 90
+
+# JupyterHub proxy configuration handled directly by this chart rather than by
+# Zero to JupyterHub.
+proxy:
+ ingress:
+ # -- Additional annotations to add to the proxy ingress (also used to talk
+ # to JupyterHub and all user labs)
+ # @default -- Increase `proxy-read-timeout` and `proxy-send-timeout` to 5m
+ annotations:
+ nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
+ nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
+
+# Configuration for the Zero to JupyterHub subchart.
+jupyterhub:
+ hub:
+ # -- Whether to require metrics requests to be authenticated
+ authenticatePrometheus: false
+
+ image:
+ # -- Image to use for JupyterHub
+ name: ghcr.io/lsst-sqre/rsp-restspawner
+
+ # -- Tag of image to use for JupyterHub
+ tag: 0.3.2
+
+ # -- Resource limits and requests
+ resources:
+ limits:
+ cpu: 900m
+ memory: 1Gi # Should support about 200 users
+
+ db:
+ # -- Type of database to use
+ type: "postgres"
+
+ # -- Database password (not used)
+ # @default -- Comes from nublado-secret
+ password: "true"
+
+ # -- URL of PostgreSQL server
+ # @default -- Use the in-cluster PostgreSQL installed by Phalanx
+ url: "postgresql://jovyan@postgres.postgres/jupyterhub"
+
+ # -- Security context for JupyterHub container
+ containerSecurityContext:
+ runAsUser: 768
+ runAsGroup: 768
+ allowPrivilegeEscalation: false
+
+ # -- Base URL on which JupyterHub listens
+ baseUrl: "/nb"
+
+ # -- Existing secret to use for private keys
+ existingSecret: "nublado-secret"
+
+ # -- Additional environment variables to set
+ # @default -- Gets `JUPYTERHUB_CRYPT_KEY` from `nublado-secret`
+ extraEnv:
+ JUPYTERHUB_CRYPT_KEY:
+ valueFrom:
+ secretKeyRef:
+ name: "nublado-secret"
+ key: "hub.config.CryptKeeper.keys"
+
+ # -- Additional volumes to make available to JupyterHub
+ # @default -- The `hub-config` `ConfigMap` and the Gafaelfawr token
+ extraVolumes:
+ - name: "hub-config"
+ configMap:
+ name: "hub-config"
+ - name: "hub-gafaelfawr-token"
+ secret:
+ secretName: "hub-gafaelfawr-token"
+
+ # -- Additional volume mounts for JupyterHub
+ # @default -- `hub-config` and the Gafaelfawr token
+ extraVolumeMounts:
+ - name: "hub-config"
+ mountPath: "/usr/local/etc/jupyterhub/jupyterhub_config.d"
+ - name: "hub-gafaelfawr-token"
+ mountPath: "/etc/gafaelfawr"
+
+ networkPolicy:
+ # -- Whether to enable the default `NetworkPolicy` (currently, the
+ # upstream one does not work correctly)
+ enabled: false
+
+ loadRoles:
+ server:
+ # -- Default scopes for the user's lab, overridden to allow the lab to
+ # delete itself (which we use for our added menu items)
+ scopes: ["self"]
+
+ prePuller:
+ continuous:
+ # -- Whether to run the JupyterHub continuous prepuller (the Nublado
+ # controller does its own prepulling)
+ enabled: false
+
+ hook:
+ # -- Whether to run the JupyterHub hook prepuller (the Nublado
+ # controller does its own prepulling)
+ enabled: false
+
+ singleuser:
+ cloudMetadata:
+ # -- Whether to configure iptables to block cloud metadata endpoints.
+ # This is unnecessary in our environments (they are blocked by cluster
+ # configuration) and thus is disabled to reduce complexity.
+ blockWithIptables: false
+
+ # -- Start command for labs
+ cmd: "/opt/lsst/software/jupyterlab/runlab.sh"
+
+ # -- Default URL prefix for lab endpoints
+ defaultUrl: "/lab"
+
+ proxy:
+ service:
+ # -- Only expose the proxy to the cluster, overriding the default of
+ # exposing the proxy directly to the Internet
+ type: ClusterIP
+
+ chp:
+ networkPolicy:
+ # -- Enable access to the proxy from other namespaces, since we put
+ # each user's lab environment in its own namespace
+ interNamespaceAccessLabels: accept
+
+ # This currently causes Minikube deployment in GH-actions to fail.
+ # We want it sometime but it's not critical; it will help with
+ # scale-down
+ # pdb:
+ # enabled: true
+ # minAvailable: 1
+
+ # Rather than using the JupyterHub-provided ingress, which requires us to
+ # repeat the global host name and manually configure authentication, we
+ # instead install our own GafaelfawrIngress.
+ ingress:
+ # -- Whether to enable the default ingress
+ enabled: false
+
+ cull:
+ # -- Enable the lab culler.
+ enabled: true
+
+ # -- Default idle timeout before the lab is automatically deleted in
+ # seconds
+ # @default -- 2592000 (30 days)
+ timeout: 2592000
+
+ # -- How frequently to check for idle labs in seconds
+ # @default -- 600 (10 minutes)
+ every: 600
+
+ # -- Whether to log the user out of JupyterHub when culling their lab
+ users: true
+
+ # -- Whether to remove named servers when culling their lab
+ removeNamedServers: true
+
+ # -- Maximum age of a lab regardless of activity
+ # @default -- 5184000 (60 days)
+ maxAge: 5184000
+
+ scheduling:
+ userScheduler:
+ # -- Whether the user scheduler should be enabled
+ enabled: false
+
+ userPlaceholder:
+ # -- Whether to spawn placeholder pods representing fake users to force
+ # autoscaling in advance of running out of resources
+ enabled: false
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+ # -- Base URL for the environment
+ # @default -- Set by Argo CD
+ baseUrl: ""
+
+ # -- Host name for ingress
+ # @default -- Set by Argo CD
+ host: ""
+
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
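The `numReleases`, `numWeeklies`, and `numDailies` settings above tell the controller how many images of each category to prepull. A minimal sketch of that selection logic, assuming sciplat-lab-style tag names (`r23_0_1`, `w_2023_27`, `d_2023_07_04`); the controller's real tag parser is considerably more careful:

```python
import re

# Assumed tag shapes for sciplat-lab images; the real controller has a
# far more sophisticated parser.
_PATTERNS = {
    "release": re.compile(r"r\d+_\d+_\d+"),
    "weekly": re.compile(r"w_\d{4}_\d{2}"),
    "daily": re.compile(r"d_\d{4}_\d{2}_\d{2}"),
}


def select_prepull_tags(
    tags: list[str], num_releases: int, num_weeklies: int, num_dailies: int
) -> list[str]:
    """Pick the newest tags of each category, one list per knob above."""

    def newest(kind: str, count: int) -> list[str]:
        # Zero-padded date components make lexicographic order match
        # chronological order for weeklies and dailies.
        matching = sorted(t for t in tags if _PATTERNS[kind].fullmatch(t))
        return matching[-count:] if count > 0 else []

    return (
        newest("release", num_releases)
        + newest("weekly", num_weeklies)
        + newest("daily", num_dailies)
    )


# For example, with numWeeklies: 2 the two most recent weeklies win:
print(select_prepull_tags(
    ["recommended", "w_2023_25", "w_2023_26", "w_2023_27", "d_2023_07_04"],
    num_releases=1, num_weeklies=2, num_dailies=3,
))
# -> ['w_2023_26', 'w_2023_27', 'd_2023_07_04']
```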
diff --git a/tests/data/input/applications/postgres/secrets.yaml b/tests/data/input/applications/postgres/secrets.yaml
new file mode 100644
index 0000000000..5e03d36d1a
--- /dev/null
+++ b/tests/data/input/applications/postgres/secrets.yaml
@@ -0,0 +1,45 @@
+exposurelog_password:
+ description: "Password for the exposurelog database."
+ if: exposurelog_db
+ copy:
+ application: exposurelog
+ key: exposurelog_password
+gafaelfawr_password:
+ description: "Password for the Gafaelfawr database."
+ if: gafaelfawr_db
+ copy:
+ application: gafaelfawr
+ key: database-password
+jupyterhub_password:
+ description: "Password for the Nublado v2 JupyterHub session database."
+ if: jupyterhub_db
+ copy:
+ application: nublado2
+ key: hub_db_password
+lovelog_password:
+ description: "Password for the lovelog database."
+ if: lovelog_db
+ generate:
+ type: password
+narrativelog_password:
+ description: "Password for the narrativelog database."
+ if: narrativelog_db
+ copy:
+ application: narrativelog
+ key: narrativelog_password
+nublado3_password:
+ description: "Password for the Nublado v3 JupyterHub session database."
+ if: nublado3_db
+ copy:
+ application: nublado
+ key: hub_db_password
+root_password:
+ description: "Administrator password for the whole PostgreSQL installation."
+ generate:
+ type: password
+timessquare_password:
+ description: "Password for the times-square database."
+ if: timessquare_db
+ copy:
+ application: times-square
+ key: TS_DATABASE_PASSWORD
diff --git a/tests/data/input/applications/postgres/values-idfdev.yaml b/tests/data/input/applications/postgres/values-idfdev.yaml
new file mode 100644
index 0000000000..20c336e86a
--- /dev/null
+++ b/tests/data/input/applications/postgres/values-idfdev.yaml
@@ -0,0 +1,3 @@
+nublado3_db:
+ user: "nublado3"
+ db: "nublado3"
diff --git a/tests/data/input/applications/postgres/values.yaml b/tests/data/input/applications/postgres/values.yaml
new file mode 100644
index 0000000000..ded4248a51
--- /dev/null
+++ b/tests/data/input/applications/postgres/values.yaml
@@ -0,0 +1,37 @@
+# Default values for postgres.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# -- Set to non-empty to enable debugging output
+debug: ""
+
+image:
+ # -- postgres image to use
+ repository: "lsstsqre/lsp-postgres"
+
+ # -- Pull policy for the postgres image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of postgres image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+# -- Volume size for postgres. It can generally be very small
+postgresVolumeSize: "1Gi"
+
+# -- Storage class for the postgres volume. Set to a value appropriate for
+# your deployment: "standard" at GKE ("premium-rwo" for SSD, though a managed
+# cloud database may serve better), "rook-ceph-block" on Rubin Observatory
+# Rancher, and probably "standard" elsewhere
+postgresStorageClass: "standard"
+
+# -- Volume name for postgres, if you use an existing volume that isn't
+# automatically created from the PVC by the storage driver.
+volumeName: ""
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
diff --git a/tests/data/input/applications/tap/secrets.yaml b/tests/data/input/applications/tap/secrets.yaml
new file mode 100644
index 0000000000..4280c602a3
--- /dev/null
+++ b/tests/data/input/applications/tap/secrets.yaml
@@ -0,0 +1,4 @@
+"google_creds.json":
+ description: >-
+ Google service account credentials used to write async job output to
+ Google Cloud Storage.
diff --git a/tests/data/input/applications/tap/values.yaml b/tests/data/input/applications/tap/values.yaml
new file mode 100644
index 0000000000..9132115129
--- /dev/null
+++ b/tests/data/input/applications/tap/values.yaml
@@ -0,0 +1,184 @@
+# Default values for cadc-tap.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+# -- Override the base name for resources
+nameOverride: ""
+
+# -- Override the full name for resources (includes the release name)
+fullnameOverride: "cadc-tap"
+
+# -- Number of pods to start
+replicaCount: 1
+
+image:
+ # -- tap image to use
+ repository: "ghcr.io/lsst-sqre/lsst-tap-service"
+
+ # -- Pull policy for the tap image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of tap image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+# Settings for the ingress rules.
+ingress:
+ # -- Additional annotations to use for endpoints that allow anonymous
+ # access, such as `/capabilities` and `/availability`
+ anonymousAnnotations: {}
+
+ # -- Additional annotations to use for endpoints that are authenticated,
+ # such as `/sync`, `/async`, and `/tables`
+ authenticatedAnnotations: {}
+
+# -- Resource limits and requests for the TAP frontend pod
+resources: {}
+
+# -- Annotations for the TAP frontend pod
+podAnnotations: {}
+
+# -- Node selector rules for the TAP frontend pod
+nodeSelector: {}
+
+# -- Tolerations for the TAP frontend pod
+tolerations: []
+
+# -- Affinity rules for the TAP frontend pod
+affinity: {}
+
+# -- Path to the Vault secret (`secret/k8s_operator//tap`, for example)
+# @default -- None, must be set
+vaultSecretsPath: ""
+
+config:
+ # -- Address to a MySQL database containing TAP schema data
+ tapSchemaAddress: "cadc-tap-schema-db:3306"
+
+ # -- Datalink payload URL
+ datalinkPayloadUrl: "https://github.com/lsst/sdm_schemas/releases/download/1.2.2/datalink-snippets.zip"
+
+ # -- Gafaelfawr hostname to get user information from a token
+ # @default -- Value of `ingress.host`
+ gafaelfawrHost: ""
+
+ # -- Name of GCS bucket in which to store results
+ # @default -- None, must be set
+ gcsBucket: ""
+
+ # -- Base URL for results stored in GCS bucket
+ # @default -- None, must be set
+ gcsBucketUrl: ""
+
+ # -- GCS bucket type (GCS or S3)
+ # @default -- GCS
+ gcsBucketType: "GCS"
+
+ # -- Java heap size, which sets the maximum size of the heap. If unset,
+ # Java would pick a maximum itself based on available memory and its own
+ # opaque heuristics.
+ jvmMaxHeapSize: 4G
+
+qserv:
+ # -- QServ hostname:port to connect to
+ # @default -- `"mock-qserv:3306"` (the mock QServ)
+ host: "mock-qserv:3306"
+
+ mock:
+ # -- Spin up a container to pretend to be QServ.
+ enabled: true
+
+ image:
+ # -- Mock QServ image to use
+ repository: "ghcr.io/lsst-sqre/lsst-tap-mock-qserv"
+
+ # -- Pull policy for the mock QServ image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of mock QServ image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+ # -- Resource limits and requests for the mock QServ pod
+ resources: {}
+
+ # -- Annotations for the mock QServ pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the mock QServ pod
+ nodeSelector: {}
+
+ # -- Tolerations for the mock QServ pod
+ tolerations: []
+
+ # -- Affinity rules for the mock QServ pod
+ affinity: {}
+
+tapSchema:
+ image:
+ # -- TAP schema image to use. This must be overridden by each environment
+ # with the TAP schema for that environment.
+ repository: "lsstsqre/tap-schema-mock"
+
+ # -- Pull policy for the TAP schema image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of TAP schema image
+ tag: "2.0.2"
+
+ # -- Resource limits and requests for the TAP schema database pod
+ resources: {}
+
+ # -- Annotations for the TAP schema database pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the TAP schema database pod
+ nodeSelector: {}
+
+ # -- Tolerations for the TAP schema database pod
+ tolerations: []
+
+ # -- Affinity rules for the TAP schema database pod
+ affinity: {}
+
+uws:
+ image:
+ # -- UWS database image to use
+ repository: "ghcr.io/lsst-sqre/lsst-tap-uws-db"
+
+ # -- Pull policy for the UWS database image
+ pullPolicy: "IfNotPresent"
+
+ # -- Tag of UWS database image to use
+ # @default -- The appVersion of the chart
+ tag: ""
+
+ # -- Resource limits and requests for the UWS database pod
+ resources: {}
+
+ # -- Annotations for the UWS database pod
+ podAnnotations: {}
+
+ # -- Node selection rules for the UWS database pod
+ nodeSelector: {}
+
+ # -- Tolerations for the UWS database pod
+ tolerations: []
+
+ # -- Affinity rules for the UWS database pod
+ affinity: {}
+
+# The following will be set by parameters injected by Argo CD and should not
+# be set in the individual environment values files.
+global:
+ # -- Base URL for the environment
+ # @default -- Set by Argo CD
+ baseUrl: ""
+
+ # -- Host name for ingress
+ # @default -- Set by Argo CD
+ host: ""
+
+ # -- Base path for Vault secrets
+ # @default -- Set by Argo CD
+ vaultSecretsPath: ""
diff --git a/tests/data/input/environments/values-idfdev.yaml b/tests/data/input/environments/values-idfdev.yaml
new file mode 100644
index 0000000000..52573351d0
--- /dev/null
+++ b/tests/data/input/environments/values-idfdev.yaml
@@ -0,0 +1,14 @@
+environment: idfdev
+fqdn: data-dev.lsst.cloud
+vaultUrl: https://vault.lsst.codes/
+vaultPathPrefix: secret/k8s_operator/data-dev.lsst.cloud
+butlerRepositoryIndex: "s3://butler-us-central1-repo-locations/data-dev-repos.yaml"
+
+gafaelfawr:
+ enabled: true
+mobu:
+ enabled: true
+nublado:
+ enabled: true
+postgres:
+ enabled: true
diff --git a/tests/data/input/environments/values.yaml b/tests/data/input/environments/values.yaml
new file mode 100644
index 0000000000..00e05a072e
--- /dev/null
+++ b/tests/data/input/environments/values.yaml
@@ -0,0 +1,28 @@
+# These five settings should be set in each environment values-*.yaml file.
+
+# -- Name of the environment
+# @default -- None, must be set
+environment: ""
+
+# -- Fully-qualified domain name where the environment is running
+# @default -- None, must be set
+fqdn: ""
+
+# -- URL of Vault server for this environment
+# @default -- None, must be set
+vaultUrl: ""
+
+# -- Prefix for Vault secrets for this environment
+# @default -- None, must be set
+vaultPathPrefix: ""
+
+# -- Butler repository index to use for this environment
+# @default -- None, must be set
+butlerRepositoryIndex: ""
+
+gafaelfawr:
+ enabled: false
+nublado:
+ enabled: false
+postgres:
+ enabled: false
diff --git a/tests/data/input/vault/idfdev/argocd.json b/tests/data/input/vault/idfdev/argocd.json
new file mode 100644
index 0000000000..2c771b39cc
--- /dev/null
+++ b/tests/data/input/vault/idfdev/argocd.json
@@ -0,0 +1,6 @@
+{
+ "admin.password": "$2b$15$kRdR2PlZ.tV5tavXViDlv.QB7FYk0HH8IMTiGk9spSashzIqgmcJ.",
+ "admin.passwordMtime": "2021-10-08T21:31:31Z",
+ "admin.plaintext_password": "6f80a0863ae18da19d53e99b9ce67f82f54bc6164f368b6863471002992f0063",
+ "dex.clientSecret": "some-random-secret"
+}
diff --git a/tests/data/input/vault/idfdev/gafaelfawr.json b/tests/data/input/vault/idfdev/gafaelfawr.json
new file mode 100644
index 0000000000..4341934715
--- /dev/null
+++ b/tests/data/input/vault/idfdev/gafaelfawr.json
@@ -0,0 +1,8 @@
+{
+ "bootstrap-token": "gt-xESy7hFgaLI3t7Cg8TMy8Q.gev0rVktVpL6GGZs6kv5fg",
+ "cilogon": "y",
+ "cilogon-client-secret": "some-cilogon-password",
+ "database-password": "some-database-password",
+ "ldap-password": "some-ldap-password",
+ "redis-password": "3b17413a28b5be73cc963a98558cb50eadf337e78d422476810fd562d9538492"
+}
diff --git a/tests/data/input/vault/idfdev/mobu.json b/tests/data/input/vault/idfdev/mobu.json
new file mode 100644
index 0000000000..8f8391d71a
--- /dev/null
+++ b/tests/data/input/vault/idfdev/mobu.json
@@ -0,0 +1,4 @@
+{
+ "ALERT_HOOK": "https://hooks.slack.com/mobu-slack-hook",
+ "app-alert-webhook": "https://hooks.slack.com/app-slack-hook"
+}
diff --git a/tests/data/input/vault/idfdev/nublado.json b/tests/data/input/vault/idfdev/nublado.json
new file mode 100644
index 0000000000..4e9c9f89f6
--- /dev/null
+++ b/tests/data/input/vault/idfdev/nublado.json
@@ -0,0 +1,6 @@
+{
+ "cryptkeeper_key": "411820d5a6cc6c3bcd73c1fbd61f9c9eb06454272825345ba0cbe0304fef4168",
+ "crypto_key": "59d636b6428cde68710166611187371e58ea3ff7a9e07e75b2286fc08f7763c8",
+ "hub_db_password": "2e188c579b159d59f83e478203261c91fe2f5db3858111bc318f0f9d5dbe055e",
+ "slack_webhook": "https://hooks.slack.com/app-slack-hook"
+}
diff --git a/tests/data/input/vault/idfdev/postgres.json b/tests/data/input/vault/idfdev/postgres.json
new file mode 100644
index 0000000000..55729b763b
--- /dev/null
+++ b/tests/data/input/vault/idfdev/postgres.json
@@ -0,0 +1,4 @@
+{
+ "nublado3_password": "e1e4cde6276b8612837ca0a0ef74b16796004d91388480aee8843d9cc21079a6",
+ "root_password": "a7605f445f47bfabdb3a35f5a4eca85ee57a74ff3b266bdb6c11c69c4451e8d2f23574f13879bba2c4520f454f5034ece24f642278315931ca2dad1be384534a"
+}
diff --git a/tests/data/output/idfdev/secrets-audit b/tests/data/output/idfdev/secrets-audit
new file mode 100644
index 0000000000..6fdfe22865
--- /dev/null
+++ b/tests/data/output/idfdev/secrets-audit
@@ -0,0 +1,14 @@
+Missing secrets:
+• argocd server.secretkey
+• gafaelfawr session-secret
+• gafaelfawr signing-key
+• gafaelfawr slack-webhook
+• nublado aws-credentials.ini
+• nublado butler-gcs-idf-creds.json
+• nublado butler-hmac-idf-creds.json
+• nublado postgres-credentials.txt
+• nublado proxy_token
+Incorrect secrets:
+• postgres nublado3_password
+Unknown secrets in Vault:
+• gafaelfawr cilogon
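This expected report comes from comparing the secrets declared in the applications' secrets.yaml files against what the Vault fixtures actually contain. A rough sketch of the missing/unknown comparison (value-correctness checks and the bullet formatting are omitted; the function is illustrative, not the Phalanx CLI's code):

```python
def audit_secrets(
    expected: dict[str, set[str]], vault: dict[str, set[str]]
) -> tuple[list[str], list[str]]:
    """Return (missing, unknown) "application key" pairs.

    expected maps application name to the set of declared secret keys;
    vault maps application name to the keys actually stored there.
    """
    missing = [
        f"{app} {key}"
        for app, keys in sorted(expected.items())
        for key in sorted(keys - vault.get(app, set()))
    ]
    unknown = [
        f"{app} {key}"
        for app, keys in sorted(vault.items())
        for key in sorted(keys - expected.get(app, set()))
    ]
    return missing, unknown
```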
diff --git a/tests/data/output/idfdev/secrets-list b/tests/data/output/idfdev/secrets-list
new file mode 100644
index 0000000000..1acf5b3628
--- /dev/null
+++ b/tests/data/output/idfdev/secrets-list
@@ -0,0 +1,26 @@
+argocd admin.password
+argocd admin.passwordMtime
+argocd admin.plaintext_password
+argocd dex.clientSecret
+argocd server.secretkey
+gafaelfawr bootstrap-token
+gafaelfawr cilogon-client-secret
+gafaelfawr database-password
+gafaelfawr ldap-password
+gafaelfawr redis-password
+gafaelfawr session-secret
+gafaelfawr signing-key
+gafaelfawr slack-webhook
+mobu ALERT_HOOK
+mobu app-alert-webhook
+nublado aws-credentials.ini
+nublado butler-gcs-idf-creds.json
+nublado butler-hmac-idf-creds.json
+nublado cryptkeeper_key
+nublado crypto_key
+nublado hub_db_password
+nublado postgres-credentials.txt
+nublado proxy_token
+nublado slack_webhook
+postgres nublado3_password
+postgres root_password
diff --git a/tests/data/output/idfdev/static-secrets.yaml b/tests/data/output/idfdev/static-secrets.yaml
new file mode 100644
index 0000000000..b111f7740f
--- /dev/null
+++ b/tests/data/output/idfdev/static-secrets.yaml
@@ -0,0 +1,58 @@
+argocd:
+ dex.clientSecret:
+ description: >-
+ OAuth 2 or OpenID Connect client secret, used to authenticate to GitHub
+ or Google as part of the authentication flow. This secret can be changed
+ at any time.
+ value: null
+gafaelfawr:
+ cilogon-client-secret:
+ description: >-
+ Secret used to authenticate to CILogon as part of the OpenID Connect
+ login protocol to obtain an identity token for the user. This secret
+ can be changed at any time.
+ value: null
+ database-password:
+ description: >-
+ Password used to authenticate to the PostgreSQL database used to store
+ Gafaelfawr data. This password may be changed at any time.
+ value: null
+ ldap-password:
+ description: >-
+ Password to authenticate to the LDAP server via simple binds to retrieve
+ user and group information. This password can be changed at any time.
+ value: null
+mobu:
+ ALERT_HOOK:
+ description: >-
+ Slack web hook to which mobu should report failures and daily status.
+ value: null
+ app-alert-webhook:
+ description: >-
+ Slack web hook to which to post internal application alerts. This secret
+ is not used directly by mobu, but is copied from here to all of the
+ applications that report internal problems to Slack. It should normally
+ be separate from mobu's own web hook, since the separate identities
+ attached to the messages helps make the type of mesasge clearer, but
+ the same web hook as mobu's own alerts can be used in a pinch.
+ value: null
+nublado:
+ aws-credentials.ini:
+ description: >-
+ Google Cloud Storage credentials to the Butler data store, formatted
+ using AWS syntax for use with boto.
+ value: null
+ butler-gcs-idf-creds.json:
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the native
+ Google syntax, containing the private asymmetric key.
+ value: null
+ butler-hmac-idf-creds.json:
+ description: >-
+ Google Cloud Storage credentials to the Butler data store in the private
+ key syntax used for HMACs.
+ value: null
+ postgres-credentials.txt:
+ description: >-
+ PostgreSQL credentials in its pgpass format for the Butler database.
+ value: null
diff --git a/tests/docs/__init__.py b/tests/docs/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/support/__init__.py b/tests/support/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/support/data.py b/tests/support/data.py
new file mode 100644
index 0000000000..eb6c71b0cf
--- /dev/null
+++ b/tests/support/data.py
@@ -0,0 +1,67 @@
+"""Utilities for managing test data."""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+__all__ = [
+ "phalanx_test_path",
+ "read_output_data",
+ "read_output_yaml",
+]
+
+
+def phalanx_test_path() -> Path:
+ """Return path to Phalanx test data.
+
+ Returns
+ -------
+ Path
+ Path to test input data. The directory will contain test data in the
+ layout of a Phalanx repository to test information gathering and
+ analysis.
+ """
+ return Path(__file__).parent.parent / "data" / "input"
+
+
+def read_output_data(environment: str, filename: str) -> str:
+ """Read test output data and return it.
+
+ Parameters
+ ----------
+ environment
+ Name of the environment under :filename:`data/output` that the test
+ output is for.
+ filename
+ File containing the output data.
+
+ Returns
+ -------
+ str
+ Contents of the file.
+ """
+ base_path = Path(__file__).parent.parent / "data" / "output"
+ return (base_path / environment / filename).read_text()
+
+
+def read_output_yaml(environment: str, filename: str) -> dict[str, Any]:
+ """Read test output data as YAML and return the parsed format.
+
+ Parameters
+ ----------
+ environment
+ Name of the environment under :filename:`data/output` that the test
+ output is for.
+ filename
+ File containing the output data.
+
+ Returns
+ -------
+ dict
+ Parsed version of the YAML.
+ """
+ base_path = Path(__file__).parent.parent / "data" / "output"
+ with (base_path / environment / filename).open() as fh:
+ return yaml.safe_load(fh)
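A test would typically use these helpers to load the expected fixtures shown earlier. A hypothetical example, assuming the `tests.support` package above is importable from the test run:

```python
from tests.support.data import read_output_data, read_output_yaml


def test_expected_fixtures() -> None:
    """Hypothetical example of loading the expected output fixtures."""
    # Raw text comparison suits line-oriented reports.
    audit = read_output_data("idfdev", "secrets-audit")
    assert audit.startswith("Missing secrets:")

    # Parsed comparison suits structured YAML output.
    static = read_output_yaml("idfdev", "static-secrets.yaml")
    assert static["mobu"]["ALERT_HOOK"]["value"] is None
```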
diff --git a/tests/support/vault.py b/tests/support/vault.py
new file mode 100644
index 0000000000..d8648329c3
--- /dev/null
+++ b/tests/support/vault.py
@@ -0,0 +1,87 @@
+"""Mock Vault API for testing."""
+
+from __future__ import annotations
+
+import json
+from collections import defaultdict
+from collections.abc import Iterator
+from typing import Any
+from unittest.mock import patch
+
+import hvac
+
+from .data import phalanx_test_path
+
+__all__ = [
+ "MockVaultClient",
+ "patch_vault",
+]
+
+
+class MockVaultClient:
+ """Mock Vault client for testing."""
+
+ def __init__(self) -> None:
+ self.secrets = self
+ self.kv = self
+ self._data: defaultdict[str, dict[str, dict[str, str]]]
+ self._data = defaultdict(dict)
+ self._paths: dict[str, str] = {}
+
+ def load_test_data(self, path: str, environment: str) -> None:
+ """Load Vault test data for the given environment.
+
+ This method is not part of the Vault API. It is intended for use by
+ the test suite to set up a test.
+
+ Parameters
+ ----------
+ path
+ Path to the environment data in Vault.
+ environment
+ Name of the environment for which to load Vault test data.
+ """
+ _, app_path = path.split("/", 1)
+ self._paths[app_path] = environment
+ data_path = phalanx_test_path() / "vault" / environment
+ for app_data_path in data_path.iterdir():
+ application = app_data_path.stem
+ with app_data_path.open() as fh:
+ self._data[environment][application] = json.load(fh)
+
+ def read_secret(
+ self, path: str, raise_on_deleted_version: bool | None = None
+ ) -> dict[str, Any]:
+ """Read a secret from Vault.
+
+ Parameters
+ ----------
+ path
+ Vault path to the secret.
+ raise_on_deleted_version
+ Whether to raise an exception if the most recent version is
+ deleted (required to be `True`).
+
+ Returns
+ -------
+ dict
+ Reply matching the Vault client reply structure.
+ """
+ assert raise_on_deleted_version
+ base_path, application = path.rsplit("/", 1)
+ environment = self._paths[base_path]
+ values = self._data[environment][application]
+ return {"data": {"data": values}}
+
+
+def patch_vault() -> Iterator[MockVaultClient]:
+ """Replace the HVAC Vault client with a mock class.
+
+ Yields
+ ------
+ MockVaultClient
+ Mock HVAC Vault client.
+ """
+ mock_vault = MockVaultClient()
+ with patch.object(hvac, "Client", return_value=mock_vault):
+ yield mock_vault
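Since `patch_vault` is a generator, it plugs directly into a pytest fixture via `yield from`. A minimal sketch of how a test might wire it up, using the idfdev `vaultPathPrefix` from the environment fixture above:

```python
from collections.abc import Iterator

import pytest

from tests.support.vault import MockVaultClient, patch_vault


@pytest.fixture
def mock_vault() -> Iterator[MockVaultClient]:
    """Yield a mock Vault client in place of hvac.Client."""
    yield from patch_vault()


def test_read_secret(mock_vault: MockVaultClient) -> None:
    mock_vault.load_test_data(
        "secret/k8s_operator/data-dev.lsst.cloud", "idfdev"
    )
    result = mock_vault.read_secret(
        "k8s_operator/data-dev.lsst.cloud/mobu",
        raise_on_deleted_version=True,
    )
    assert result["data"]["data"]["ALERT_HOOK"].startswith("https://")
```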
diff --git a/tox.ini b/tox.ini
index 6650653286..f34398c283 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,13 +4,19 @@ isolated_build = True
[testenv]
description = Run pytest against {envname}.
-extras =
- dev
+deps =
+ -r{toxinidir}/requirements/main.txt
+ -r{toxinidir}/requirements/dev.txt
-[testenv:py]
-description = Run pytest
+[testenv:docs]
+description = Build documentation (HTML) with Sphinx.
commands =
- coverage run -m pytest {posargs}
+ sphinx-build --keep-going -n -W -T -b html -d {envtmpdir}/doctrees docs docs/_build/html
+
+[testenv:docs-linkcheck]
+description = Check links in the documentation.
+commands =
+ sphinx-build --keep-going -n -W -T -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck
[testenv:coverage-report]
description = Compile coverage from each test run.
@@ -19,14 +25,8 @@ deps = coverage[toml]>=5.0.2
depends =
py
commands =
- coverage combine
coverage report
-[testenv:typing]
-description = Run mypy.
-commands =
- mypy src/phalanx tests
-
[testenv:lint]
description = Lint codebase by running pre-commit (Black, isort, Flake8).
skip_install = true
@@ -34,12 +34,19 @@ deps =
pre-commit
commands = pre-commit run --all-files
-[testenv:docs]
-description = Build documentation (HTML) with Sphinx.
+[testenv:neophile-update]
+description = Run neophile to update dependencies
+skip_install = true
+deps =
+ neophile
+commands = neophile update {posargs}
+
+[testenv:py]
+description = Run pytest
commands =
- sphinx-build --keep-going -n -W -T -b html -d {envtmpdir}/doctrees docs docs/_build/html
+ pytest -vv --cov=phalanx --cov-branch --cov-report= {posargs}
-[testenv:docs-linkcheck]
-description = Check links in the documentation.
+[testenv:typing]
+description = Run mypy.
commands =
- sphinx-build --keep-going -n -W -T -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck
+ mypy src/phalanx tests