Commit eb97619

Merge branch 'lsst-sqre:main' into main

gpfrancis authored Oct 3, 2024
2 parents 923ed93 + 387a58f commit eb97619
Showing 272 changed files with 7,049 additions and 1,788 deletions.
35 changes: 0 additions & 35 deletions .github/workflows/dependencies.yaml

This file was deleted.

4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -14,7 +14,7 @@ repos:
- -c=.yamllint.yml

- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.29.2
hooks:
- id: check-jsonschema
files: ^applications/.*/secrets(-[^./-]+)?\.yaml
@@ -46,7 +46,7 @@ repos:
- --template-files=../helm-docs.md.gotmpl

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.7
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
16 changes: 8 additions & 8 deletions Makefile
@@ -41,20 +41,20 @@ update-deps:
pip install --upgrade pip uv
uv pip install --upgrade pre-commit
pre-commit autoupdate
uv pip compile --upgrade --universal --generate-hashes \
	--output-file requirements/main.txt pyproject.toml
uv pip compile --upgrade --universal --generate-hashes \
	--output-file requirements/dev.txt requirements/dev.in
uv pip compile --upgrade --universal --generate-hashes \
	--output-file requirements/tox.txt requirements/tox.in

# Useful for testing against a Git version of Safir.
.PHONY: update-deps-no-hashes
update-deps-no-hashes:
pip install --upgrade uv
uv pip compile --upgrade --universal \
	--output-file requirements/main.txt pyproject.toml
uv pip compile --upgrade --universal \
	--output-file requirements/dev.txt requirements/dev.in
uv pip compile --upgrade --universal \
	--output-file requirements/tox.txt requirements/tox.in
9 changes: 6 additions & 3 deletions applications/alert-stream-broker/README.md
@@ -72,12 +72,17 @@ Alert transmission to community brokers
| alert-stream-broker.kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. |
| alert-stream-broker.kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
| alert-stream-broker.kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. |
| alert-stream-broker.kafkaController.enabled | bool | `false` | Enable Kafka Controller |
| alert-stream-broker.kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller |
| alert-stream-broker.kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers |
| alert-stream-broker.kafkaController.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes |
| alert-stream-broker.kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. |
| alert-stream-broker.kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging |
| alert-stream-broker.kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. |
| alert-stream-broker.kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor |
| alert-stream-broker.kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging |
| alert-stream-broker.kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor |
| alert-stream-broker.kraft | bool | `true` | Enable KRaft mode (ZooKeeper-less Kafka). |
| alert-stream-broker.maxBytesRetained | string | `"100000000000"` | Maximum number of bytes retained in the replay topic, per partition, per replica. The default is 100 GB; keep it below the backing disk size so the topic cannot fill storage. |
| alert-stream-broker.maxMillisecondsRetained | string | `"5259492000"` | Maximum time to retain alerts in the replay topic, in milliseconds. The chart default is 5259492000 (about 61 days); Kafka's own default would be 7 days (604800000). |
| alert-stream-broker.nameOverride | string | `""` | |
@@ -95,10 +100,8 @@ Alert transmission to community brokers
| alert-stream-broker.users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. |
| alert-stream-broker.users[0].username | string | `"rubin-testing"` | The username for the user that should be created. |
| alert-stream-broker.vaultSecretsPath | string | `""` | Path to the secret resource in Vault |
| alert-stream-broker.zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. |
| alert-stream-broker.zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. |
| alert-stream-broker.zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
| alert-stream-schema-registry.clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. |
| alert-stream-schema-registry.compatibilityLevel | string | `"None"` | Schema compatibility level applied to the registry; `"None"` disables compatibility checks between schema versions. |
| alert-stream-schema-registry.hostname | string | `"usdf-alert-schemas-dev.slac.stanford.edu"` | Hostname for an ingress which sends traffic to the Schema Registry. |
| alert-stream-schema-registry.name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. |
| alert-stream-schema-registry.port | int | `8081` | Port where the registry is listening. NOTE: Not actually configurable in strimzi-registry-operator, so this basically cannot be changed. |
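Taken together, the new `kraft` and `kafkaController.*` rows document the chart's move from ZooKeeper to KRaft. A hypothetical per-environment override enabling the controller pool might look like the following sketch (key names come from the table above; the size and storage class are illustrative):

```yaml
# Hypothetical values override; key names from the README table above.
alert-stream-broker:
  kraft: true
  kafkaController:
    enabled: true
    storage:
      size: 20Gi                  # matches the chart default
      storageClassName: standard  # assumed; the chart default is ""
```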
@@ -29,12 +29,17 @@ Kafka broker cluster for distributing alerts
| kafka.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Kafka brokers. |
| kafka.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
| kafka.version | string | `"3.4.0"` | Version of Kafka to deploy. |
| kafkaController.enabled | bool | `false` | Enable Kafka Controller |
| kafkaController.resources | object | See `values.yaml` | Kubernetes requests and limits for the Kafka Controller |
| kafkaController.storage.size | string | `"20Gi"` | Size of the backing storage disk for each of the Kafka controllers |
| kafkaController.storage.storageClassName | string | `""` | Name of a StorageClass to use when requesting persistent volumes |
| kafkaExporter | object | `{"enableSaramaLogging":false,"enabled":false,"groupRegex":".*","logLevel":"warning","topicRegex":".*"}` | Kafka JMX Exporter for more detailed diagnostic metrics. |
| kafkaExporter.enableSaramaLogging | bool | `false` | Enable Sarama logging |
| kafkaExporter.enabled | bool | `false` | Enable Kafka exporter. |
| kafkaExporter.groupRegex | string | `".*"` | Consumer groups to monitor |
| kafkaExporter.logLevel | string | `"warning"` | Log level for Sarama logging |
| kafkaExporter.topicRegex | string | `".*"` | Kafka topics to monitor |
| kraft | bool | `true` | Enable KRaft mode (ZooKeeper-less Kafka). |
| maxBytesRetained | string | `"100000000000"` | Maximum number of bytes retained in the replay topic, per partition, per replica. The default is 100 GB; keep it below the backing disk size so the topic cannot fill storage. |
| maxMillisecondsRetained | string | `"5259492000"` | Maximum time to retain alerts in the replay topic, in milliseconds. The chart default is 5259492000 (about 61 days); Kafka's own default would be 7 days (604800000). |
| nameOverride | string | `""` | |
@@ -52,6 +57,3 @@ Kafka broker cluster for distributing alerts
| users[0].readonlyTopics | list | `["alert-stream","alerts-simulated","alert-stream-test"]` | A list of topics that the user should get read-only access to. |
| users[0].username | string | `"rubin-testing"` | The username for the user that should be created. |
| vaultSecretsPath | string | `""` | Path to the secret resource in Vault |
| zookeeper.replicas | int | `3` | Number of Zookeeper replicas to run. |
| zookeeper.storage.size | string | `"1000Gi"` | Size of the backing storage disk for each of the Zookeeper instances. |
| zookeeper.storage.storageClassName | string | `"standard"` | Name of a StorageClass to use when requesting persistent volumes. |
@@ -26,6 +26,7 @@ spec:
cleanup.policy: "delete"
retention.ms: {{ .Values.maxMillisecondsRetained }} # chart default is ~61 days
retention.bytes: {{ .Values.maxBytesRetained }}
compression.type: {{ .Values.topicCompression }}
# The default timestamp is the creation time of the alert.
# To get the ingestion rate, we need this to be the log
# append time, and the header will contain the producer
@@ -45,6 +46,7 @@ spec:
cleanup.policy: "delete"
retention.ms: {{ .Values.maxMillisecondsRetained }} # chart default is ~61 days
retention.bytes: {{ .Values.maxBytesRetained }}
compression.type: {{ .Values.topicCompression }}
# The default timestamp is the creation time of the alert.
# To get the ingestion rate, we need this to be the log
# append time, and the header will contain the producer
@@ -64,11 +66,31 @@ spec:
cleanup.policy: "delete"
retention.ms: {{ .Values.maxMillisecondsRetained }} # chart default is ~61 days
retention.bytes: {{ .Values.maxBytesRetained }}
compression.type: {{ .Values.topicCompression }}
# The default timestamp is the creation time of the alert.
# To get the ingestion rate, we need this to be the log
# append time, and the header will contain the producer
# timestamp instead
message.timestamp.type: 'LogAppendTime'
partitions: {{ .Values.devTopicPartitions }}
replicas: {{ .Values.devTopicReplicas }}
---
apiVersion: "kafka.strimzi.io/{{ .Values.strimziAPIVersion }}"
kind: KafkaTopic
metadata:
labels:
strimzi.io/cluster: "{{ .Values.clusterName }}"
name: "{{ .Values.latissTopicName }}"
spec:
config:
cleanup.policy: "delete"
retention.ms: {{ .Values.maxMillisecondsRetained }} # chart default is ~61 days
retention.bytes: {{ .Values.maxBytesRetained }}
compression.type: {{ .Values.topicCompression }}
# The default timestamp is the creation time of the alert.
# To get the ingestion rate, we need this to be the log
# append time, and the header will contain the producer
# timestamp instead
message.timestamp.type: 'LogAppendTime'
partitions: {{ .Values.latissTopicPartitions }}
replicas: {{ .Values.latissTopicReplicas }}
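Each topic template above reads its compression codec, and the new LATISS topic's name, partition count, and replica count, from chart values. A minimal sketch of the corresponding `values.yaml` entries follows (key names taken from the templates; the concrete values are illustrative, and `compression.type` must be one of Kafka's codecs: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, or `producer`):

```yaml
# Illustrative values consumed by the KafkaTopic templates above.
topicCompression: lz4            # applied as compression.type on every topic
latissTopicName: latiss-alerts   # assumed name; not shown in this diff
latissTopicPartitions: 45        # illustrative
latissTopicReplicas: 3           # illustrative
```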
@@ -1,7 +1,60 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
name: controller
labels:
strimzi.io/cluster: {{ .Values.cluster.name }}
spec:
replicas: {{ .Values.kafka.replicas }}
roles:
- controller
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: {{ .Values.kafkaController.storage.size }}
class: {{ .Values.kafkaController.storage.storageClassName }}
deleteClaim: false
{{- with .Values.kafkaController.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
name: kafka
labels:
strimzi.io/cluster: {{ .Values.cluster.name }}
annotations:
strimzi.io/next-node-ids: "[0-99]"
spec:
replicas: {{ .Values.kafka.replicas }}
roles:
- broker
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
size: {{ .Values.kafka.storage.size }}
{{- if .Values.kafka.storage.storageClassName }}
class: {{ .Values.kafka.storage.storageClassName }}
{{- end}}
deleteClaim: false
{{- with .Values.kafka.resources }}
resources:
{{- toYaml . | nindent 6 }}
{{- end }}
---
apiVersion: kafka.strimzi.io/{{ .Values.strimziAPIVersion }}
kind: Kafka
metadata:
name: {{ .Values.cluster.name }}
annotations:
strimzi.io/kraft: enabled
strimzi.io/node-pools: enabled
spec:
{{- if .Values.kafkaExporter.enabled }}
kafkaExporter:
@@ -85,14 +138,15 @@ spec:

{{- if .Values.kafka.externalListener.brokers }}
brokers:
  {{- range $broker := .Values.kafka.externalListener.brokers }}
  - broker: {{ $broker.broker }}
    loadBalancerIP: {{ $broker.ip }}
    advertisedHost: {{ $broker.host }}
    advertisedPort: 9094
    annotations: {{ toYaml $broker.annotations | nindent 16 }}
  {{- end }}
{{- end }}

{{- if and (.Values.kafka.externalListener.tls.enabled) (.Values.kafka.externalListener.bootstrap.host) }}
brokerCertChainAndKey:
secretName: {{ .Values.cluster.name }}-external-tls
@@ -114,8 +168,6 @@ spec:
transaction.state.log.replication.factor: 3
transaction.state.log.min.isr: 2
message.max.bytes: 4194304 # 4 megabytes. For testing purposes only.
log.message.format.version: {{ .Values.kafka.logMessageFormatVersion }}
inter.broker.protocol.version: {{ .Values.kafka.interBrokerProtocolVersion }}
ssl.client.auth: required
{{- range $key, $value := .Values.kafka.config }}
{{ $key }}: {{ $value }}
@@ -133,53 +185,6 @@ spec:
class: {{ .Values.kafka.storage.storageClassName }}
deleteClaim: false

template:
pod:
{{- if .Values.kafka.nodePool.tolerations }}
tolerations:
{{- range $tol := .Values.kafka.nodePool.tolerations }}
- key: {{ $tol.key }}
operator: "Equal"
value: {{ $tol.value }}
effect: {{ $tol.effect }}
{{- end }}
{{- end }}

{{- if .Values.kafka.nodePool.affinities }}
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
{{- range $affinity := .Values.kafka.nodePool.affinities }}
- weight: 1
preference:
matchExpressions:
- key: {{ $affinity.key }}
operator: In
values: [{{ $affinity.value }}]
{{- end }}
{{- end }}

affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app.kubernetes.io/name"
operator: In
values:
- kafka
topologyKey: "kubernetes.io/hostname"

zookeeper:
replicas: {{ .Values.zookeeper.replicas }}
storage:
# Note that storage is configured per replica. If there are 3 replicas,
# each will get its own PersistentVolumeClaim for the configured size.
type: persistent-claim
size: {{ .Values.zookeeper.storage.size }}
class: {{ .Values.zookeeper.storage.storageClassName }}
deleteClaim: false

template:
pod:
{{- if .Values.kafka.nodePool.tolerations }}
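For orientation, with the defaults introduced later in this commit (`kafkaController.storage.size: 20Gi` and the resources block from `values.yaml`), and assuming `cluster.name: alert-broker`, `kafka.replicas: 3`, and a storage class of `standard`, the controller `KafkaNodePool` would render roughly as follows — a sketch, not the chart's literal output:

```yaml
# Approximate rendered controller node pool (illustrative values).
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaNodePool
metadata:
  name: controller
  labels:
    strimzi.io/cluster: alert-broker   # assumed cluster name
spec:
  replicas: 3                          # taken from kafka.replicas
  roles:
    - controller
  storage:
    type: jbod
    volumes:
      - id: 0
        type: persistent-claim
        size: 20Gi
        class: standard                # assumed; the chart default is ""
        deleteClaim: false
  resources:
    requests:
      memory: 32Gi
      cpu: "4"
    limits:
      memory: 64Gi
      cpu: "8"
```

With both node pools in place and `strimzi.io/kraft: enabled` set on the `Kafka` resource, Strimzi runs the cluster entirely without the ZooKeeper section deleted above.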
@@ -121,17 +121,6 @@ users:
# matches.
groups: ["rubin-testing"]


zookeeper:
# -- Number of Zookeeper replicas to run.
replicas: 3

storage:
# -- Size of the backing storage disk for each of the Zookeeper instances.
size: 1000Gi
# -- Name of a StorageClass to use when requesting persistent volumes.
storageClassName: standard

tls:
subject:
# -- Organization to use in the 'Subject' field of the broker's TLS certificate.
@@ -149,6 +138,29 @@ fullnameOverride: ""

nameOverride: ""

kraft: true

kafkaController:
# -- Enable Kafka Controller
enabled: false

storage:
# -- Size of the backing storage disk for each of the Kafka controllers
size: 20Gi

# -- Name of a StorageClass to use when requesting persistent volumes
storageClassName: ""

# -- Kubernetes requests and limits for the Kafka Controller
# @default -- See `values.yaml`
resources:
requests:
memory: 32Gi
cpu: "4"
limits:
memory: 64Gi
cpu: "8"

# -- Topic used to send test alerts.
testTopicName: alert-stream-test

@@ -7,6 +7,7 @@ Confluent Schema Registry for managing schema versions for the Alert Stream
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| clusterName | string | `"alert-broker"` | Strimzi "cluster name" of the broker to use as a backend. |
| compatibilityLevel | string | `"None"` | Schema compatibility level applied to the registry; `"None"` disables compatibility checks between schema versions. |
| hostname | string | `"usdf-alert-schemas-dev.slac.stanford.edu"` | Hostname for an ingress which sends traffic to the Schema Registry. |
| name | string | `"alert-schema-registry"` | Name used by the registry, and by its users. |
| port | int | `8081` | Port where the registry is listening. NOTE: Not actually configurable in strimzi-registry-operator, so this basically cannot be changed. |
@@ -9,4 +9,5 @@ metadata:
revision: "1"
spec:
strimzi-version: {{ .Values.strimziAPIVersion }}
listener: internal
compatibilityLevel: none
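Setting `compatibilityLevel: none` tells the operator to configure the registry's global compatibility level to `NONE`, meaning no compatibility checking is performed between schema versions. A sketch of the equivalent registry-side setting, for reference (the other levels defined by the Confluent Schema Registry are `BACKWARD`, `FORWARD`, `FULL`, and their `*_TRANSITIVE` variants):

```yaml
# Equivalent global Schema Registry configuration (illustrative).
compatibilityLevel: NONE   # any new schema version is accepted
```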