From 598e5ac84a53a5676c29825592b6fc7dc78c9ea4 Mon Sep 17 00:00:00 2001 From: bakdata-bot Date: Thu, 26 Oct 2023 11:52:42 +0000 Subject: [PATCH] Deployed 52ec86f to dev with MkDocs 1.5.3 and mike 1.1.2 --- dev/404.html | 30 +- dev/developer/auto-generation/index.html | 34 +- .../{style => formatting}/index.html | 104 +- dev/index.html | 30 +- .../components-hierarchy/index.html | 52 +- dev/resources/examples/defaults/index.html | 30 +- dev/resources/examples/pipeline/index.html | 30 +- ...aults_pipeline_component_dependencies.yaml | 5 +- .../dependencies/kpops_structure.yaml | 14 +- .../pipeline_component_dependencies.yaml | 16 +- .../pipeline-components/headers/helm-app.yaml | 3 + .../pipeline-components/helm-app.yaml | 63 + .../pipeline-components/kafka-app.yaml | 2 +- .../pipeline-components/kubernetes-app.yaml | 11 - .../pipeline-components/pipeline.yaml | 80 +- .../pipeline-components/pipeline/index.html | 952 ++++++---- .../pipeline-components/producer-app.yaml | 2 +- .../sections/app-helm-app.yaml | 6 + .../sections/app-kafka-app.yaml | 2 +- .../sections/app-producer-app.yaml | 2 +- .../sections/app-streams-app.yaml | 2 +- ...tes-app.yaml => repo_config-helm-app.yaml} | 0 .../pipeline-components/streams-app.yaml | 2 +- .../pipeline-defaults/defaults-helm-app.yaml | 21 + .../pipeline-defaults/defaults-kafka-app.yaml | 2 +- .../defaults-kubernetes-app.yaml | 13 +- .../defaults-producer-app.yaml | 2 +- .../defaults-streams-app.yaml | 2 +- dev/resources/pipeline-defaults/defaults.yaml | 40 +- .../pipeline-defaults/defaults/index.html | 524 ++--- .../headers/defaults-helm-app.yaml | 5 + .../headers/defaults-kubernetes-app.yaml | 2 +- .../variables/cli_env_vars/index.html | 30 +- dev/resources/variables/config_env_vars.env | 14 +- .../variables/config_env_vars/index.html | 48 +- dev/schema/config.json | 166 +- dev/schema/pipeline.json | 166 +- dev/search/search_index.json | 2 +- dev/sitemap.xml | 69 +- dev/sitemap.xml.gz | Bin 554 -> 563 bytes .../components/kafka-app/index.html | 36 +- .../components/kafka-connector/index.html | 30 +- .../kafka-sink-connector/index.html | 30 +- .../kafka-source-connector/index.html | 30 +- .../components/kubernetes-app/index.html | 60 +- .../components/overview/index.html | 52 +- .../components/producer-app/index.html | 32 +- .../components/streams-app/index.html | 32 +- dev/user/core-concepts/config/index.html | 30 +- dev/user/core-concepts/defaults/index.html | 60 +- .../environment_variables/index.html | 62 +- .../variables/substitution/index.html | 30 +- .../examples/atm-fraud-pipeline/index.html | 30 +- .../getting-started/quick-start/index.html | 30 +- dev/user/getting-started/setup/index.html | 30 +- dev/user/getting-started/teardown/index.html | 30 +- dev/user/migration-guide/v1-v2/index.html | 60 +- dev/user/migration-guide/v2-v3/index.html | 1680 +++++++++++++++++ .../ci-integration/github-actions/index.html | 43 +- dev/user/references/cli-commands/index.html | 36 +- .../references/editor-integration/index.html | 30 +- dev/user/what-is-kpops/index.html | 30 +- 62 files changed, 3854 insertions(+), 1207 deletions(-) rename dev/developer/{style => formatting}/index.html (93%) create mode 100644 dev/resources/pipeline-components/headers/helm-app.yaml create mode 100644 dev/resources/pipeline-components/helm-app.yaml create mode 100644 dev/resources/pipeline-components/sections/app-helm-app.yaml rename dev/resources/pipeline-components/sections/{repo_config-kubernetes-app.yaml => repo_config-helm-app.yaml} (100%) create mode 100644 
dev/resources/pipeline-defaults/defaults-helm-app.yaml create mode 100644 dev/resources/pipeline-defaults/headers/defaults-helm-app.yaml create mode 100644 dev/user/migration-guide/v2-v3/index.html diff --git a/dev/404.html b/dev/404.html index 2812e1602..b26d9b877 100644 --- a/dev/404.html +++ b/dev/404.html @@ -157,7 +157,7 @@
- KPOps + GitHub
@@ -274,7 +274,7 @@
- KPOps + GitHub
@@ -1063,6 +1063,8 @@ + + @@ -1113,6 +1115,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1315,11 +1337,11 @@
  • - + - Style + Formatting diff --git a/dev/developer/auto-generation/index.html b/dev/developer/auto-generation/index.html index 1d66c53b7..814bc7fa1 100644 --- a/dev/developer/auto-generation/index.html +++ b/dev/developer/auto-generation/index.html @@ -14,7 +14,7 @@ - + @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1087,6 +1087,8 @@ + + @@ -1137,6 +1139,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1448,11 +1470,11 @@
  • - + - Style + Formatting @@ -1588,7 +1610,7 @@

    PipelineConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it.

  • +
  • config_env_vars.env -- Almost all pipeline config environment variables in a dotenv file. The script checks for each field in KpopsConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it.
• config_env_vars.md -- Almost all pipeline config environment variables in a table.
  • variable_substitution.yaml -- A copy of ./tests/pipeline/resources/component-type-substitution/pipeline.yaml used as an example of substitution.
  • diff --git a/dev/developer/style/index.html b/dev/developer/formatting/index.html similarity index 93% rename from dev/developer/style/index.html rename to dev/developer/formatting/index.html index df9c7830b..c5837c166 100644 --- a/dev/developer/style/index.html +++ b/dev/developer/formatting/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ - Style - KPOps + Formatting - KPOps @@ -78,7 +78,7 @@
    - + Skip to content @@ -116,7 +116,7 @@
    - Style + Formatting
    @@ -177,7 +177,7 @@
    - KPOps + GitHub
    @@ -296,7 +296,7 @@
    - KPOps + GitHub
    @@ -1085,6 +1085,8 @@ + + @@ -1135,6 +1137,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1352,7 +1374,7 @@ - Style + Formatting @@ -1363,7 +1385,7 @@ - Style + Formatting @@ -1384,33 +1406,6 @@ @@ -1328,11 +1350,11 @@
  • - + - Style + Formatting diff --git a/dev/resources/variables/config_env_vars.env b/dev/resources/variables/config_env_vars.env index 308fb6334..00bef6a4c 100644 --- a/dev/resources/variables/config_env_vars.env +++ b/dev/resources/variables/config_env_vars.env @@ -9,18 +9,18 @@ # Suffix your environment files with this value (e.g. # defaults_development.yaml for environment=development). KPOPS_ENVIRONMENT # No default value, required -# brokers +# kafka_brokers # The comma separated Kafka brokers address. KPOPS_KAFKA_BROKERS # No default value, required -# schema_registry_url +# url # Address of the Schema Registry. -KPOPS_SCHEMA_REGISTRY_URL # No default value, not required -# kafka_rest_host +KPOPS_SCHEMA_REGISTRY_URL=http://localhost:8081 +# url # Address of the Kafka REST Proxy. -KPOPS_REST_PROXY_HOST # No default value, not required -# kafka_connect_host +KPOPS_KAFKA_REST_URL=http://localhost:8082 +# url # Address of Kafka Connect. -KPOPS_CONNECT_HOST # No default value, not required +KPOPS_KAFKA_CONNECT_URL=http://localhost:8083 # timeout # The timeout in seconds that specifies when actions like deletion or # deploy timeout. diff --git a/dev/resources/variables/config_env_vars/index.html b/dev/resources/variables/config_env_vars/index.html index 2aae8b2e8..5952e7891 100644 --- a/dev/resources/variables/config_env_vars/index.html +++ b/dev/resources/variables/config_env_vars/index.html @@ -170,7 +170,7 @@
    - KPOps + GitHub
    @@ -287,7 +287,7 @@
    - KPOps + GitHub
    @@ -1076,6 +1076,8 @@ + + @@ -1126,6 +1128,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1328,11 +1350,11 @@
  • - + - Style + Formatting @@ -1409,28 +1431,28 @@

    Config env vars

    True The comma separated Kafka brokers address. -brokers +kafka_brokers KPOPS_SCHEMA_REGISTRY_URL - +http://localhost:8081 False Address of the Schema Registry. -schema_registry_url +url -KPOPS_REST_PROXY_HOST - +KPOPS_KAFKA_REST_URL +http://localhost:8082 False Address of the Kafka REST Proxy. -kafka_rest_host +url -KPOPS_CONNECT_HOST - +KPOPS_KAFKA_CONNECT_URL +http://localhost:8083 False Address of Kafka Connect. -kafka_connect_host +url KPOPS_TIMEOUT diff --git a/dev/schema/config.json b/dev/schema/config.json index a2f18eb6b..391a0f2b5 100644 --- a/dev/schema/config.json +++ b/dev/schema/config.json @@ -1,5 +1,5 @@ { - "$ref": "#/definitions/PipelineConfig", + "$ref": "#/definitions/KpopsConfig", "definitions": { "HelmConfig": { "description": "Global Helm configuration.", @@ -41,25 +41,57 @@ "title": "HelmDiffConfig", "type": "object" }, - "PipelineConfig": { + "KafkaConnectConfig": { "additionalProperties": false, - "description": "Pipeline configuration unrelated to the components.", + "description": "Configuration for Kafka Connect.", "properties": { - "brokers": { - "description": "The comma separated Kafka brokers address.", - "env": "KPOPS_KAFKA_BROKERS", + "url": { + "default": "http://localhost:8083", + "description": "Address of Kafka Connect.", + "env": "KPOPS_KAFKA_CONNECT_URL", "env_names": [ - "kpops_kafka_brokers" + "kpops_kafka_connect_url" ], - "example": "broker1:9092,broker2:9092,broker3:9092", - "title": "Brokers", + "format": "uri", + "maxLength": 65536, + "minLength": 1, + "title": "Url", "type": "string" - }, + } + }, + "title": "KafkaConnectConfig", + "type": "object" + }, + "KafkaRestConfig": { + "additionalProperties": false, + "description": "Configuration for Kafka REST Proxy.", + "properties": { + "url": { + "default": "http://localhost:8082", + "description": "Address of the Kafka REST Proxy.", + "env": "KPOPS_KAFKA_REST_URL", + "env_names": [ + "kpops_kafka_rest_url" + ], + "format": "uri", + "maxLength": 65536, + "minLength": 1, + "title": "Url", + "type": "string" + } + }, + "title": "KafkaRestConfig", + "type": "object" + }, + "KpopsConfig": { + "additionalProperties": false, + "description": "Pipeline configuration unrelated to the components.", + "properties": { "create_namespace": { "default": false, "description": "Flag for `helm upgrade --install`. Create the release namespace if not present.", "env_names": [ - "create_namespace" + "kpops_create_namespace" ], "title": "Create Namespace", "type": "boolean" @@ -68,7 +100,7 @@ "default": "defaults", "description": "The name of the defaults file and the prefix of the defaults environment file.", "env_names": [ - "defaults_filename_prefix" + "kpops_defaults_filename_prefix" ], "title": "Defaults Filename Prefix", "type": "string" @@ -77,7 +109,7 @@ "default": ".", "description": "The path to the folder containing the defaults.yaml file and the environment defaults files. 
Paths can either be absolute or relative to `config.yaml`", "env_names": [ - "defaults_path" + "kpops_defaults_path" ], "example": "defaults", "format": "path", @@ -107,7 +139,7 @@ }, "description": "Global flags for Helm.", "env_names": [ - "helm_config" + "kpops_helm_config" ], "title": "Helm Config" }, @@ -122,29 +154,49 @@ }, "description": "Configure Helm Diff.", "env_names": [ - "helm_diff_config" + "kpops_helm_diff_config" ], "title": "Helm Diff Config" }, - "kafka_connect_host": { - "description": "Address of Kafka Connect.", - "env": "KPOPS_CONNECT_HOST", + "kafka_brokers": { + "description": "The comma separated Kafka brokers address.", + "env": "KPOPS_KAFKA_BROKERS", "env_names": [ - "kpops_connect_host" + "kpops_kafka_brokers" ], - "example": "http://localhost:8083", - "title": "Kafka Connect Host", + "example": "broker1:9092,broker2:9092,broker3:9092", + "title": "Kafka Brokers", "type": "string" }, - "kafka_rest_host": { - "description": "Address of the Kafka REST Proxy.", - "env": "KPOPS_REST_PROXY_HOST", + "kafka_connect": { + "allOf": [ + { + "$ref": "#/definitions/KafkaConnectConfig" + } + ], + "default": { + "url": "http://localhost:8083" + }, + "description": "Configuration for Kafka Connect.", "env_names": [ - "kpops_rest_proxy_host" + "kpops_kafka_connect" ], - "example": "http://localhost:8082", - "title": "Kafka Rest Host", - "type": "string" + "title": "Kafka Connect" + }, + "kafka_rest": { + "allOf": [ + { + "$ref": "#/definitions/KafkaRestConfig" + } + ], + "default": { + "url": "http://localhost:8082" + }, + "description": "Configuration for Kafka REST Proxy.", + "env_names": [ + "kpops_kafka_rest" + ], + "title": "Kafka Rest" }, "retain_clean_jobs": { "default": false, @@ -156,15 +208,21 @@ "title": "Retain Clean Jobs", "type": "boolean" }, - "schema_registry_url": { - "description": "Address of the Schema Registry.", - "env": "KPOPS_SCHEMA_REGISTRY_URL", + "schema_registry": { + "allOf": [ + { + "$ref": "#/definitions/SchemaRegistryConfig" + } + ], + "default": { + "enabled": false, + "url": "http://localhost:8081" + }, + "description": "Configuration for Schema Registry.", "env_names": [ - "kpops_schema_registry_url" + "kpops_schema_registry" ], - "example": "http://localhost:8081", - "title": "Schema Registry Url", - "type": "string" + "title": "Schema Registry" }, "timeout": { "default": 300, @@ -188,21 +246,51 @@ }, "description": "Configure the topic name variables you can use in the pipeline definition.", "env_names": [ - "topic_name_config" + "kpops_topic_name_config" ], "title": "Topic Name Config" } }, "required": [ "environment", - "brokers" + "kafka_brokers" ], - "title": "PipelineConfig", + "title": "KpopsConfig", + "type": "object" + }, + "SchemaRegistryConfig": { + "additionalProperties": false, + "description": "Configuration for Schema Registry.", + "properties": { + "enabled": { + "default": false, + "description": "Whether the Schema Registry handler should be initialized.", + "env_names": [ + "enabled" + ], + "title": "Enabled", + "type": "boolean" + }, + "url": { + "default": "http://localhost:8081", + "description": "Address of the Schema Registry.", + "env": "KPOPS_SCHEMA_REGISTRY_URL", + "env_names": [ + "kpops_schema_registry_url" + ], + "format": "uri", + "maxLength": 65536, + "minLength": 1, + "title": "Url", + "type": "string" + } + }, + "title": "SchemaRegistryConfig", "type": "object" }, "TopicNameConfig": { "additionalProperties": false, - "description": "Configures topic names.", + "description": "Configure the topic name 
variables you can use in the pipeline definition.", "properties": { "default_error_topic_name": { "default": "${pipeline_name}-${component_name}-error", diff --git a/dev/schema/pipeline.json b/dev/schema/pipeline.json index 7e77b0ddd..2fe9aeeac 100644 --- a/dev/schema/pipeline.json +++ b/dev/schema/pipeline.json @@ -47,6 +47,84 @@ "title": "FromTopic", "type": "object" }, + "HelmApp": { + "description": "Kubernetes app managed through Helm with an associated Helm chart.", + "properties": { + "app": { + "allOf": [ + { + "$ref": "#/definitions/KubernetesAppConfig" + } + ], + "description": "Application-specific settings", + "title": "App" + }, + "from": { + "allOf": [ + { + "$ref": "#/definitions/FromSection" + } + ], + "description": "Topic(s) and/or components from which the component will read input", + "title": "From" + }, + "name": { + "description": "Component name", + "title": "Name", + "type": "string" + }, + "namespace": { + "description": "Namespace in which the component shall be deployed", + "title": "Namespace", + "type": "string" + }, + "prefix": { + "default": "${pipeline_name}-", + "description": "Pipeline prefix that will prefix every component name. If you wish to not have any prefix you can specify an empty string.", + "title": "Prefix", + "type": "string" + }, + "repo_config": { + "allOf": [ + { + "$ref": "#/definitions/HelmRepoConfig" + } + ], + "description": "Configuration of the Helm chart repo to be used for deploying the component", + "title": "Repo Config" + }, + "to": { + "allOf": [ + { + "$ref": "#/definitions/ToSection" + } + ], + "description": "Topic(s) into which the component will write output", + "title": "To" + }, + "type": { + "default": "helm-app", + "description": "Kubernetes app managed through Helm with an associated Helm chart.", + "enum": [ + "helm-app" + ], + "title": "Component type", + "type": "string" + }, + "version": { + "description": "Helm chart version", + "title": "Version", + "type": "string" + } + }, + "required": [ + "name", + "namespace", + "app" + ], + "title": "HelmApp", + "type": "object" + }, "HelmRepoConfig": { "description": "Helm repository configuration.", "properties": { @@ -305,86 +383,8 @@ "title": "KafkaSourceConnector", "type": "object" }, - "KubernetesApp": { - "description": "Base class for all Kubernetes apps.\nAll built-in components are Kubernetes apps, except for the Kafka connectors.", - "properties": { - "app": { - "allOf": [ - { - "$ref": "#/definitions/KubernetesAppConfig" - } - ], - "description": "Application-specific settings", - "title": "App" - }, - "from": { - "allOf": [ - { - "$ref": "#/definitions/FromSection" - } - ], - "description": "Topic(s) and/or components from which the component will read input", - "title": "From" - }, - "name": { - "description": "Component name", - "title": "Name", - "type": "string" - }, - "namespace": { - "description": "Namespace in which the component shall be deployed", - "title": "Namespace", - "type": "string" - }, - "prefix": { - "default": "${pipeline_name}-", - "description": "Pipeline prefix that will prefix every component name. 
If you wish to not have any prefix you can specify an empty string.", - "title": "Prefix", - "type": "string" - }, - "repo_config": { - "allOf": [ - { - "$ref": "#/definitions/HelmRepoConfig" - } - ], - "description": "Configuration of the Helm chart repo to be used for deploying the component", - "title": "Repo Config" - }, - "to": { - "allOf": [ - { - "$ref": "#/definitions/ToSection" - } - ], - "description": "Topic(s) into which the component will write output", - "title": "To" - }, - "type": { - "default": "kubernetes-app", - "description": "Base class for all Kubernetes apps.\nAll built-in components are Kubernetes apps, except for the Kafka connectors.", - "enum": [ - "kubernetes-app" - ], - "title": "Component type", - "type": "string" - }, - "version": { - "description": "Helm chart version", - "title": "Version", - "type": "string" - } - }, - "required": [ - "name", - "namespace", - "app" - ], - "title": "KubernetesApp", - "type": "object" - }, "KubernetesAppConfig": { - "description": "Settings specific to Kubernetes Apps.", + "description": "Settings specific to Kubernetes apps.", "properties": {}, "title": "KubernetesAppConfig", "type": "object" @@ -940,9 +940,9 @@ "items": { "discriminator": { "mapping": { + "helm-app": "#/definitions/HelmApp", "kafka-sink-connector": "#/definitions/KafkaSinkConnector", "kafka-source-connector": "#/definitions/KafkaSourceConnector", - "kubernetes-app": "#/definitions/KubernetesApp", "producer-app": "#/definitions/ProducerApp", "streams-app": "#/definitions/StreamsApp" }, @@ -950,13 +950,13 @@ }, "oneOf": [ { - "$ref": "#/definitions/KafkaSinkConnector" + "$ref": "#/definitions/HelmApp" }, { - "$ref": "#/definitions/KafkaSourceConnector" + "$ref": "#/definitions/KafkaSinkConnector" }, { - "$ref": "#/definitions/KubernetesApp" + "$ref": "#/definitions/KafkaSourceConnector" }, { "$ref": "#/definitions/ProducerApp" diff --git a/dev/search/search_index.json b/dev/search/search_index.json index d99016d5f..5aa96cf41 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config": {"lang": ["en"], "separator": "[\\s\\-]+", "pipeline": ["stopWordFilter"]}, "docs": [{"location": "developer/auto-generation/", "title": "Auto generation", "text": "

    Auto generation happens mostly with pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto generate code for the documentation.

    "}, {"location": "developer/auto-generation/#generation-scripts-and-their-respective-files", "title": "Generation scripts and their respective files", "text": ""}, {"location": "developer/auto-generation/#documentation", "title": "Documentation", "text": ""}, {"location": "developer/auto-generation/#variables", "title": "Variables", "text": "
    • cli_env_vars.env -- All CLI environment variables in a dotenv file.
    • cli_env_vars.md -- All CLI environment variables in a table.
    • config_env_vars.env -- Almost all pipeline config environment variables in a dotenv file. The script checks for each field in PipelineConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it.
    • config_env_vars.md -- Almost all pipeline config environment variables in a table.
    • variable_substitution.yaml -- A copy of ./tests/pipeline/resources/component-type-substitution/pipeline.yaml used as an example of substitution.
    "}, {"location": "developer/auto-generation/#cli-commands", "title": "CLI commands", "text": "

    Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.

    "}, {"location": "developer/auto-generation/#pipeline-and-defaults-example-definitions", "title": "Pipeline and defaults example definitions", "text": "

    Generates example pipeline.yaml and defaults.yaml files for each individual component, stores them, and also concatenates them into one big (complete) pipeline definition and one big (complete) defaults definition.

    User input

    • headers/*\\.yaml -- The top of each example. Includes a description comment, type and name. The headers for pipeline.yaml reside in the pipeline-components dir and the defaults.yaml headers reside in the pipeline-defaults dir. The names of the files must be equal to the respective component type.
    • sections/*\\.yaml -- Each YAML file contains a single section (component attribute) definition. The intention is to keep the minimal set of definitions there from which any component definition can be built. The names of the files must be equal to the respective component type and the attribute name. The sections are used for both defaults.yaml and pipeline.yaml generation and reside in the pipeline-components dir.

    Generated

    • pipeline-components/dependencies/* -- Cached information about KPOps components
    • pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the pipeline.yaml generation.
    • defaults_pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the defaults.yaml generation.
    • kpops_structure.yaml -- Specifies the inheritance hierarchy of the components and what sections exist in each component.
    • pipeline-components/*\\.yaml -- All single-component pipeline definitions and one big (complete) pipeline.yaml that contains all of them.
    • pipeline-defaults/*\\.yaml -- All single-component defaults definitions and one big (complete) defaults.yaml that contains all of them.
    "}, {"location": "developer/auto-generation/#editor-integration", "title": "Editor integration", "text": ""}, {"location": "developer/auto-generation/#schemas", "title": "Schemas", "text": "
    • config.json
    • pipeline.json
    "}, {"location": "developer/style/", "title": "Style", "text": ""}, {"location": "developer/style/#python", "title": "Python", "text": "

    To ensure a consistent Python code style, we use Ruff for both linting and formatting.

    "}, {"location": "developer/style/#configuration", "title": "Configuration", "text": "

    Our configuration can be found in KPOps' top-level pyproject.toml.

    "}, {"location": "developer/style/#editor-integration", "title": "Editor integration", "text": "

    Below are listed the existing Ruff plugins/extensions for some of the most popular Python IDEs. If you cannot find your editor of choice, or you want something more custom, ruff-lsp enables Ruff to be used in any editor that supports the LSP.

    • VSCode
    • PyCharm
    "}, {"location": "developer/style/#markdown", "title": "Markdown", "text": "

    To ensure a consistent Markdown style, we use dprint to check and reformat it.

    dprint fmt\n

    Use the official documentation to set up dprint. The configuration can be found here.

    "}, {"location": "user/what-is-kpops/", "title": "What is KPOps?", "text": "

    With a couple of easy commands in the shell and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline1 to a Kubernetes cluster, but also reset, clean or destroy it!

    "}, {"location": "user/what-is-kpops/#key-features", "title": "Key features", "text": "
    • Deploy Kafka apps to Kubernetes: KPOps allows you to deploy consecutive Kafka Streams applications and producers using an easy-to-read and -write pipeline definition.
    • Manage Kafka Connectors: KPOps connects with your Kafka Connect cluster and deploys, validates, and deletes your connectors.
    • Configure multiple pipelines and steps: KPOps has various abstractions that simplify configuring multiple pipelines and steps within pipelines by sharing common configuration between different components, such as producers or streaming applications.
    • Handle your topics and schemas: KPOps not only creates and deletes your topics but also registers and deletes your schemas.
    • Clean termination of Kafka components: KPOps removes your pipeline components (e.g., Kafka Streams applications) from the Kubernetes cluster and cleans up the component-related states (e.g., removing/resetting the offsets of Kafka consumer groups).
    • Preview your pipeline changes: With the KPOps dry-run, you can ensure your pipeline definition is set up correctly. This helps to minimize downtime and prevent potential errors or issues that could impact your production environment.
    "}, {"location": "user/what-is-kpops/#example", "title": "Example", "text": "An overview of Word-count pipeline shown in Streams Explorer Word-count pipeline.yaml
    - type: producer-app\n  name: data-producer\n  app:\n    image: bakdata/kpops-demo-sentence-producer\n    imageTag: \"1.0.0\"\n\n- type: streams-app\n  name: word-counter\n  to:\n    topics:\n      ${output_topic_name}:\n        type: output\n        configs:\n          cleanup.policy: compact\n  app:\n    replicaCount: 1\n    image: bakdata/kpops-demo-word-count-app\n    imageTag: \"1.0.0\"\n\n- type: kafka-sink-connector\n  name: redis-sink-connector\n  app:\n    connector.class: com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector\n    redis.hosts: redis-headless:6379\n    redis.database: 0\n    tasks.max: 1\n    key.converter: org.apache.kafka.connect.storage.StringConverter\n    value.converter: org.apache.kafka.connect.storage.StringConverter\n
    1. A Kafka pipeline can consist of consecutive streaming applications, producers, and connectors.

    "}, {"location": "user/core-concepts/config/", "title": "Configuration", "text": "

    KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.

    Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.

    To learn about any of the available settings, take a look at the example below.

    config.yaml
    # CONFIGURATION\n#\n# The path to the folder containing the defaults.yaml file and the environment\n# defaults files.\ndefaults_path: .\n# The environment you want to generate and deploy the pipeline to. Suffix your\n# environment files with this value (e.g. defaults_development.yaml and\n# pipeline_development.yaml for environment=development).\n# REQUIRED\nenvironment: development\n# The Kafka brokers address.\n# REQUIRED\nbrokers: \"http://broker1:9092,http://broker2:9092\"\n# The name of the defaults file and the prefix of the defaults environment file.\ndefaults_filename_prefix: defaults\n# Configures topic names.\ntopic_name_config: \n  # Configures the value for the variable ${output_topic_name}\n  default_output_topic_name: ${pipeline_name}-${component_name}\n  # Configures the value for the variable ${error_topic_name}\n  default_error_topic_name: ${pipeline_name}-${component_name}-error\n# Address of the Schema Registry\nschema_registry_url: \"http://localhost:8081\"\n# Address of the Kafka REST Proxy.\nkafka_rest_host: \"http://localhost:8082\"\n# Address of Kafka Connect.\nkafka_connect_host: \"http://localhost:8083\"\n# The timeout in seconds that specifies when actions like deletion or deploy\n# timeout.\ntimeout: 300\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n  # Set the name of the kubeconfig context. (--kube-context)\n  context: name\n  # Run Helm in Debug mode.\n  debug: false\n# Configure Helm Diff.\nhelm_diff_config: \n  # Set of keys that should not be checked.\n  ignore: \n    - name\n    - imageTag\n# Whether to retain clean up jobs in the cluster or uninstall them after\n# completion.\nretain_clean_jobs: false\n

    Environment-specific pipeline definitions

    Similarly to defaults, it is possible to have an unlimited number of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.
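    As a hedged sketch, assuming the same override semantics as for defaults, a pipeline deployed with environment=development could then be split across two files:

    # pipeline.yaml -- base definition used for every environment
    - type: streams-app
      name: word-counter
      app:
        replicaCount: 1

    # pipeline_development.yaml -- restates only what differs in development
    - type: streams-app
      name: word-counter
      app:
        replicaCount: 3 # hypothetical override for development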

    "}, {"location": "user/core-concepts/defaults/", "title": "Defaults", "text": "

    KPOps has an efficient way of dealing with repeated settings, which manifests as defaults.yaml. This file gives the user the power to set defaults for any and all components, removing the need to repeat the same settings in pipeline.yaml.

    See real-world examples for defaults.

    "}, {"location": "user/core-concepts/defaults/#features", "title": "Features", "text": ""}, {"location": "user/core-concepts/defaults/#inheritance", "title": "Inheritance", "text": "

    An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.

    It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.
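    As a short illustration of inheritance (a sketch, not taken from a real pipeline), settings placed under kafka-app in defaults.yaml also apply to streams-app and producer-app, because both inherit from it:

    # defaults.yaml (sketch)
    kafka-app: # inherited by streams-app and producer-app
      app:
        streams:
          brokers: ${brokers}
          schemaRegistryUrl: ${schema_registry_url}
      version: "2.12.0"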

    "}, {"location": "user/core-concepts/defaults/#configuration", "title": "Configuration", "text": "

    It is possible to set specific defaults for each environment by adding files called defaults_{environment}.yaml to the defaults folder at defaults_path. The defaults are loaded based on the currently set environment.

    It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.
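    A minimal, hedged example: if the base defaults pin a Helm chart version, defaults_development.yaml only needs to restate the key that differs.

    # defaults.yaml
    kafka-app:
      version: "2.12.0"

    # defaults_development.yaml -- overrides only this one setting
    kafka-app:
      version: "2.13.0" # hypothetical version used only in development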

    Tip

    defaults is the default value of defaults_filename_prefix. Together with defaults_path and environment it can be changed in config.yaml.
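    A hedged config.yaml fragment showing these three settings together; with the values below, KPOps would look for ./defaults/defaults.yaml and, for the development environment, ./defaults/defaults_development.yaml.

    # config.yaml (sketch)
    defaults_path: ./defaults
    environment: development
    defaults_filename_prefix: defaults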

    "}, {"location": "user/core-concepts/defaults/#components", "title": "Components", "text": "

    The defaults codeblocks in this section contain the full set of settings that are specific to the component. If a setting already exists in a parent config, it will not be included in the child's.

    "}, {"location": "user/core-concepts/defaults/#kubernetesapp", "title": "KubernetesApp", "text": "defaults.yaml
    # Base Kubernetes App\n#\n# Parent of: KafkaApp\n# Child of: PipelineComponent\nkubernetes-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/defaults/#kafkaapp", "title": "KafkaApp", "text": "defaults.yaml
    # Base component for Kafka-based components.\n#\n# Parent of: ProducerApp, StreamsApp\n# Child of: KubernetesApp\nkafka-app:\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/defaults/#streamsapp", "title": "StreamsApp", "text": "defaults.yaml
    # StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resouce.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n
    "}, {"location": "user/core-concepts/defaults/#producerapp", "title": "ProducerApp", "text": "defaults.yaml
    \n
    "}, {"location": "user/core-concepts/defaults/#kafkaconnector", "title": "KafkaConnector", "text": "defaults.yaml
    # Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Helm repository configuration for resetter\n  repo_config:\n    repository_name: my-repo # required\n    url: https://bakdata.github.io/kafka-connect-resetter/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.6\" # Helm chart version\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/defaults/#kafkasourceconnector", "title": "KafkaSourceConnector", "text": "defaults.yaml
    # Kafka source connector\n#\n# Child of: KafkaConnector\nkafka-source-connector:\n  # The source connector has no `from` section\n  # from:\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/defaults/#kafkasinkconnector", "title": "KafkaSinkConnector", "text": "defaults.yaml
    # Kafka sink connector\n#\n# Child of: KafkaConnector\nkafka-sink-connector:\n  # No settings differ from `kafka-connector`\n
    "}, {"location": "user/core-concepts/components/kafka-app/", "title": "KafkaApp", "text": "

    Subclass of KubernetesApp.

    "}, {"location": "user/core-concepts/components/kafka-app/#usage", "title": "Usage", "text": "
    • Defines a streams-bootstrap component
    • Should not be used in pipeline.yaml as the component can be defined as either a StreamsApp or a ProducerApp
    • Often used in defaults.yaml
    "}, {"location": "user/core-concepts/components/kafka-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base component for Kafka-based components.\n# Producer or streaming apps should inherit from this class.\n- type: kafka-app # required\n  name: kafka-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/kafka-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/kafka-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-connector/", "title": "KafkaConnector", "text": "

    KafkaConnector is a component that deploys Kafka Connectors. Since a connector must be either a sink or a source, it is not recommended to use KafkaConnector directly for deployment in pipeline.yaml. Instead, KafkaConnector should be used in defaults.yaml to set defaults for all connectors in the pipeline, as they can share some common settings.
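    A hedged defaults.yaml sketch of this pattern: the settings that all connectors share live under kafka-connector and are inherited by both concrete connector types, while subclass-specific keys stay with the subclass.

    # defaults.yaml (sketch)
    kafka-connector: # shared by kafka-sink-connector and kafka-source-connector
      namespace: namespace
      resetter_values:
        imageTag: "1.2.3"

    kafka-source-connector:
      offset_topic: offset_topic # setting specific to source connectors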

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/", "title": "KafkaSinkConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#usage", "title": "Usage", "text": "

    Lets other systems pull data from Apache Kafka.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka sink connector\n- type: kafka-sink-connector\n  name: kafka-sink-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Helm repository configuration for resetter\n  repo_config:\n    repository_name: my-repo # required\n    url: https://bakdata.github.io/kafka-connect-resetter/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.6\" # Helm chart version\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#deploy", "title": "deploy", "text": "
    • Add the sink connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema Registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#destroy", "title": "destroy", "text": "

    The associated sink connector is removed from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#reset", "title": "reset", "text": "

    Reset the consumer group offsets using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#clean", "title": "clean", "text": "
    • Delete associated consumer group
    • Delete configured error topics
    "}, {"location": "user/core-concepts/components/kafka-source-connector/", "title": "KafkaSourceConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#usage", "title": "Usage", "text": "

    Manages source connectors in your Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka source connector\n- type: kafka-source-connector # required\n  name: kafka-source-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  # The source connector has no `from` section\n  # from:\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Helm repository configuration for resetter\n  repo_config:\n    repository_name: my-repo # required\n    url: https://bakdata.github.io/kafka-connect-resetter/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.6\" # Helm chart version\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#deploy", "title": "deploy", "text": "
    • Add the source connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#destroy", "title": "destroy", "text": "

    Remove the source connector from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#reset", "title": "reset", "text": "

    Delete state associated with the connector using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#clean", "title": "clean", "text": "
    • Delete all associated output topics
    • Delete all associated schemas in the Schema Registry
    • Delete state associated with the connector
    "}, {"location": "user/core-concepts/components/kubernetes-app/", "title": "KubernetesApp", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#usage", "title": "Usage", "text": "

    Can be used to deploy any app in Kubernetes using Helm, for example, a REST service that serves Kafka data.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base Kubernetes App\n- type: kubernetes-app\n  name: kubernetes-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/kubernetes-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/overview/", "title": "Overview", "text": "

    This section explains the different components of KPOps, their usage and configuration in the pipeline definition pipeline.yaml.

    flowchart BT\n    KubernetesApp --> PipelineComponent\n    KafkaConnector --> PipelineComponent\n    KafkaApp --> KubernetesApp\n    StreamsApp --> KafkaApp\n    ProducerApp --> KafkaApp\n    KafkaSourceConnector --> KafkaConnector\n    KafkaSinkConnector --> KafkaConnector\n    \n    click KubernetesApp \"../kubernetes-app\"\n    click KafkaApp \"../kafka-app\"\n    click StreamsApp \"../streams-app\"\n    click ProducerApp \"../producer-app\"\n    click KafkaConnector \"../kafka-connector\"\n    click KafkaSourceConnector \"../kafka-source-connector\"\n    click KafkaSinkConnector \"../kafka-sink-connector\"

    KPOps component hierarchy

    "}, {"location": "user/core-concepts/components/producer-app/", "title": "ProducerApp", "text": "

    Subclass of KafkaApp.

    "}, {"location": "user/core-concepts/components/producer-app/#usage", "title": "Usage", "text": "

Configures a streams-bootstrap Kafka producer app.

    "}, {"location": "user/core-concepts/components/producer-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n  name: producer-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  # from: # While the producer-app does inherit from kafka-app, it does not need a\n  # `from` section, hence it does not support it.\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n  app: # required\n    streams: # required, producer-app-specific\n      brokers: ${brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n      outputTopic: output_topic\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n    nameOverride: override-with-this-name # kafka-app-specific\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/producer-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/producer-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/producer-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/producer-app/#reset", "title": "reset", "text": "

Do nothing; producers are stateless.

    "}, {"location": "user/core-concepts/components/producer-app/#clean", "title": "clean", "text": "
    • Delete the output topics of the Kafka producer
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/components/streams-app/", "title": "StreamsApp", "text": "

    Subclass of KafkaApp.

    "}, {"location": "user/core-concepts/components/streams-app/#usage", "title": "Usage", "text": "

Configures a streams-bootstrap Kafka Streams app.

    "}, {"location": "user/core-concepts/components/streams-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # StreamsApp component that configures a streams bootstrap app.\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\n- type: streams-app # required\n  name: streams-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to 
wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resouce.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/streams-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/streams-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-app/#reset", "title": "reset", "text": "
    • Delete the consumer group offsets
    • Delete Kafka Streams state
    "}, {"location": "user/core-concepts/components/streams-app/#clean", "title": "clean", "text": "

Similar to reset with two additional steps:

    • Delete the app's output topics
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/variables/environment_variables/", "title": "Environment variables", "text": "

    Environment variables can be set by using the export command in Linux or the set command in Windows.

    dotenv files

Support for .env files is on the roadmap, but not yet implemented in KPOps. One way to still use one is to export its contents manually with the following command: export $(xargs < .env). This works in bash, provided there are no spaces inside the values.

    "}, {"location": "user/core-concepts/variables/environment_variables/#config", "title": "Config", "text": "

    These variables are a lower priority alternative to the settings in config.yaml. Variables marked as required can instead be set in the pipeline config.

| Name | Default Value | Required | Description | Setting name |
| --- | --- | --- | --- | --- |
| KPOPS_ENVIRONMENT | | True | The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). | environment |
| KPOPS_KAFKA_BROKERS | | True | The comma separated Kafka brokers address. | brokers |
| KPOPS_SCHEMA_REGISTRY_URL | | False | Address of the Schema Registry. | schema_registry_url |
| KPOPS_REST_PROXY_HOST | | False | Address of the Kafka REST Proxy. | kafka_rest_host |
| KPOPS_CONNECT_HOST | | False | Address of Kafka Connect. | kafka_connect_host |
| KPOPS_TIMEOUT | 300 | False | The timeout in seconds that specifies when actions like deletion or deploy timeout. | timeout |
| KPOPS_RETAIN_CLEAN_JOBS | False | False | Whether to retain clean up jobs in the cluster or uninstall them after completion. | retain_clean_jobs |

config_env_vars.env Exhaustive list of all config-related environment variables
# Pipeline config environment variables\n#\n# The default setup is shown. These variables are a lower priority\n# alternative to the settings in `config.yaml`. Variables marked as\n# required can instead be set in the pipeline config.\n#\n# environment\n# The environment you want to generate and deploy the pipeline to.\n# Suffix your environment files with this value (e.g.\n# defaults_development.yaml for environment=development).\nKPOPS_ENVIRONMENT # No default value, required\n# brokers\n# The comma separated Kafka brokers address.\nKPOPS_KAFKA_BROKERS # No default value, required\n# schema_registry_url\n# Address of the Schema Registry.\nKPOPS_SCHEMA_REGISTRY_URL # No default value, not required\n# kafka_rest_host\n# Address of the Kafka REST Proxy.\nKPOPS_REST_PROXY_HOST # No default value, not required\n# kafka_connect_host\n# Address of Kafka Connect.\nKPOPS_CONNECT_HOST # No default value, not required\n# timeout\n# The timeout in seconds that specifies when actions like deletion or\n# deploy timeout.\nKPOPS_TIMEOUT=300\n# retain_clean_jobs\n# Whether to retain clean up jobs in the cluster or uninstall them\n# after completion.\nKPOPS_RETAIN_CLEAN_JOBS=False\n
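The same settings can also be declared directly in config.yaml using the setting names from the table above. A minimal sketch with illustrative values:

environment: development
brokers: "http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092"
schema_registry_url: "http://localhost:8081"
kafka_rest_host: "http://localhost:8082"
kafka_connect_host: "http://localhost:8083"
timeout: 300
retain_clean_jobs: false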
    "}, {"location": "user/core-concepts/variables/environment_variables/#cli", "title": "CLI", "text": "

    These variables are a lower priority alternative to the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.

| Name | Default Value | Required | Description |
| --- | --- | --- | --- |
| KPOPS_PIPELINE_BASE_DIR | . | False | Base directory to the pipelines (default is current working directory) |
| KPOPS_CONFIG_PATH | config.yaml | False | Path to the config.yaml file |
| KPOPS_DEFAULT_PATH | | False | Path to defaults folder |
| KPOPS_PIPELINE_PATH | | True | Path to YAML with pipeline definition |
| KPOPS_PIPELINE_STEPS | | False | Comma separated list of steps to apply the command on |

cli_env_vars.env Exhaustive list of all cli-related environment variables
    # CLI Environment variables\n#\n# The default setup is shown. These variables are a lower priority\n# alternative to the commands' flags. If a variable is set, the\n# corresponding flag does not have to be specified in commands.\n# Variables marked as required can instead be set as flags.\n#\n# Base directory to the pipelines (default is current working\n# directory)\nKPOPS_PIPELINE_BASE_DIR=.\n# Path to the config.yaml file\nKPOPS_CONFIG_PATH=config.yaml\n# Path to defaults folder\nKPOPS_DEFAULT_PATH # No default value, not required\n# Path to YAML with pipeline definition\nKPOPS_PIPELINE_PATH # No default value, required\n# Comma separated list of steps to apply the command on\nKPOPS_PIPELINE_STEPS # No default value, not required\n
    "}, {"location": "user/core-concepts/variables/substitution/", "title": "Substitution", "text": "

    KPOps supports the usage of placeholders and environment variables in pipeline definition and defaults.

    "}, {"location": "user/core-concepts/variables/substitution/#component-specific-variables", "title": "Component-specific variables", "text": "

    These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.

All of them are prefixed with component_ and follow the form component_{attribute_name}. If the attribute itself contains attributes, they can be referred to like this: component_{attribute_name}_{subattribute_name}.

    Example
    - type: scheduled-producer\n  app:\n    labels:\n      app_type: \"${component_type}\"\n      app_name: \"${component_name}\"\n      app_schedule: \"${component_app_schedule}\"\n    commandLine:\n      FAKE_ARG: \"fake-arg-value\"\n    schedule: \"30 3/8 * * *\"\n- type: converter\n  app:\n    commandLine:\n      CONVERT_XML: true\n    resources:\n      limits:\n        memory: 2G\n      requests:\n        memory: 2G\n- type: filter\n  name: \"filter-app\"\n  app:\n    labels:\n      app_type: \"${component_type}\"\n      app_name: \"${component_name}\"\n      app_resources_requests_memory: \"${component_app_resources_requests_memory}\"\n      ${component_type}: \"${component_app_labels_app_name}-${component_app_labels_app_type}\"\n      test_placeholder_in_placeholder: \"${component_app_labels_${component_type}}\"\n    commandLine:\n      TYPE: \"nothing\"\n    resources:\n      requests:\n        memory: 3G\n    replicaCount: 4\n    autoscaling:\n      minReplicas: 4\n      maxReplicas: 4\n
    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-config-specific-variables", "title": "Pipeline-config-specific variables", "text": "

    These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.

    Info

• error_topic_name is an alias for topic_name_config_default_error_topic_name
• output_topic_name is an alias for topic_name_config_default_output_topic_name

Both aliases can be used in the pipeline definition and defaults, as shown in the sketch below.

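A minimal sketch of a to section using both aliases (the surrounding component definition and its other fields are assumed):

to:
  topics:
    ${output_topic_name}:
      type: output
    ${error_topic_name}:
      type: error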
    "}, {"location": "user/core-concepts/variables/substitution/#environment-variables", "title": "Environment variables", "text": "

Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation, following the form ${ENV_VAR_NAME}. This, of course, includes variables like the ones relevant to the KPOps CLI that are exported by the user.

    See all KPOps environment variables
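A minimal sketch, assuming the user has exported an environment variable named MY_IMAGE_TAG (the component and field names are illustrative):

- type: streams-app
  name: my-app
  app:
    imageTag: ${MY_IMAGE_TAG} # replaced with the value of the exported MY_IMAGE_TAG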

    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-name-variables", "title": "Pipeline name variables", "text": "

    These are special variables that refer to the name and path of a pipeline.

• ${pipeline_name}: Concatenation of the path to the directory that pipeline.yaml is defined in. For instance, for ./data/pipelines/v1/pipeline.yaml the value of the variable would be data-pipelines-v1.

• ${pipeline_name_<level>}: Similar to the previous variable, each <level> contains a part of the path to the pipeline.yaml file. In the previous example, ${pipeline_name_0} would be data, ${pipeline_name_1} would be pipelines, and ${pipeline_name_2} would be v1 (see the sketch below).

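A minimal sketch, assuming the pipeline is defined in ./data/pipelines/v1/pipeline.yaml (topic names and the role are illustrative):

to:
  topics:
    ${pipeline_name}-output-topic: # resolves to data-pipelines-v1-output-topic
      type: output
    ${pipeline_name_2}-extra-topic: # resolves to v1-extra-topic
      role: topic-role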
    "}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
    1. Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
    2. Chaining variables: It is possible to chain any number of variables, see the example above.
    3. Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references.
    "}, {"location": "user/examples/atm-fraud-pipeline/", "title": "ATM fraud detection pipeline", "text": "

ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blog post. The one used in this example is rebuilt from scratch using bakdata's streams-bootstrap library.

    "}, {"location": "user/examples/atm-fraud-pipeline/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a PostgreSQL database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/examples/atm-fraud-pipeline/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/examples/atm-fraud-pipeline/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postgresql", "title": "PostgreSQL", "text": "

Deploy PostgreSQL using the Bitnami Helm chart. First, add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

Install PostgreSQL with Helm:

    helm upgrade --install -f ./postgresql.yaml \\\n--namespace kpops \\\npostgresql bitnami/postgresql\n
    PostgreSQL Example Helm chart values (postgresql.yaml)
    auth:\n  database: app_db  \n  enablePostgresUser: true\n  password: AppPassword\n  postgresPassword: StrongPassword\n  username: app1\nprimary:\n  persistence:\n    enabled: false\n    existingClaim: postgresql-data-claim\nvolumePermissions:\n  enabled: true\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-detection-example-pipeline-setup", "title": "ATM fraud detection example pipeline setup", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#deploying-the-atm-fraud-detection-pipeline", "title": "Deploying the ATM fraud detection pipeline", "text": "
    1. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Deploy the pipeline

      poetry run kpops deploy ./examples/bakdata/atm-fraud-detection/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/atm-fraud-detection/config.yaml \\\n--execute\n

    Note

You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be deployed correctly.

    "}, {"location": "user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

After that, open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:

    An overview of ATM fraud pipeline shown in Streams Explorer

    Attention

Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/examples/atm-fraud-pipeline/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postrgresql", "title": "PostrgreSQL", "text": "

    PostgreSQL can be uninstalled by running the following command:

    helm --namespace kpops uninstall postgresql\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-pipeline", "title": "ATM fraud pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      poetry run kpops clean ./examples/bakdata/atm-fraud-detection/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/atm-fraud-detection/config.yaml \\\n--verbose \\\n--execute\n

    Note

You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/examples/atm-fraud-pipeline/#common-errors", "title": "Common errors", "text": "
• deploy fails:
  1. Read the error message.
  2. Try to correct the mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
  3. Run clean.
  4. Run deploy --dry-run to avoid having to clean again. If an error occurs, start over from step 1.
  5. If the dry-run is successful, run deploy.
• clean fails:
  1. Read the error message.
  2. Try to correct the indicated mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
  3. Run clean.
  4. If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/quick-start/", "title": "Quick start", "text": ""}, {"location": "user/getting-started/quick-start/#word-count", "title": "Word-count", "text": "

Word-count is a demo pipeline consisting of a producer that writes words to Kafka, a Kafka Streams app that counts the number of times each word occurs, and finally a Redis database into which the words are exported.

    "}, {"location": "user/getting-started/quick-start/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a Redis database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/getting-started/quick-start/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/getting-started/quick-start/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/getting-started/quick-start/#redis", "title": "Redis", "text": "

Deploy Redis using the Bitnami Helm chart. First, add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install Redis with Helm:

    helm upgrade --install -f ./values-redis.yaml \\\n--namespace kpops \\\nredis bitnami/redis\n
    Redis example Helm chart values (values-redis.yaml)
    architecture: standalone\nauth:\n  enabled: false\nmaster:\n  count: 1\n  configuration: \"databases 1\"\nimage:\n  tag: 7.0.8\n
    "}, {"location": "user/getting-started/quick-start/#word-count-example-pipeline-setup", "title": "Word-count example pipeline setup", "text": ""}, {"location": "user/getting-started/quick-start/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/getting-started/quick-start/#deploying-the-word-count-pipeline", "title": "Deploying the Word-count pipeline", "text": "
    1. Copy the configuration from the kpops-examples repository into kpops>examples>bakdata>word-count like so:

      kpops\n\u251c\u2500\u2500 examples\n|   \u251c\u2500\u2500 bakdata\n|   |   \u251c\u2500\u2500 word-count\n|   |   |   \u251c\u2500\u2500 config.yaml\n|   |   |   \u251c\u2500\u2500 defaults\n|   |   |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 defaults.yaml\n|   |   |   \u2514\u2500\u2500 pipeline.yaml\n|   |   |\n
    2. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    3. Deploy the pipeline

      kpops deploy ./examples/bakdata/word-count/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/word-count/config.yaml \\\n--execute\n

    Note

You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be deployed correctly.

    "}, {"location": "user/getting-started/quick-start/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser.

You should be able to see the pipeline shown in the image below:

    An overview of Word-count pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/getting-started/quick-start/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/quick-start/#redis_1", "title": "Redis", "text": "

    Redis can be uninstalled by running the following command:

    helm --namespace kpops uninstall redis\n
    "}, {"location": "user/getting-started/quick-start/#word-count-pipeline", "title": "Word-count pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean ./examples/bakdata/word-count/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/word-count/config.yaml \\\n--verbose \\\n--execute\n

    Note

You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/getting-started/quick-start/#common-errors", "title": "Common errors", "text": "
    • deploy fails:
      1. Read the error message.
      2. Try to correct the mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
      3. Run clean.
  4. Run deploy --dry-run to avoid having to clean again. If an error occurs, start over from step 1.
      5. If the dry-run is successful, run deploy.
    • clean fails:
      1. Read the error message.
      2. Try to correct the indicated mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
      3. Run clean.
      4. If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/setup/", "title": "Setup KPOps", "text": "

    In this part, you will set up KPOps. This includes:

    • optionally creating a local Kubernetes cluster
    • running Apache Kafka and Confluent's Schema Registry
    • installing KPOps
    "}, {"location": "user/getting-started/setup/#prerequisites", "title": "Prerequisites", "text": "
    • k3d (Version 5.4.6+) and Docker (Version >= v20.10.5) or an existing Kubernetes cluster (>= 1.21.0)
    • kubectl (Compatible with server version 1.21.0)
    • Helm (Version 3.8.0+)
    "}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "

    If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.

    1. You can install k3d with its installation script:

      wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/v5.4.6/install.sh | bash\n

      For other ways of installing k3d, you can have a look at their installation guide.

2. The Kafka deployment needs a modified Docker image, which is built and pushed to a Docker registry that holds it. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:

      k3d registry create kpops-registry.localhost --port 12345\n
    3. Now you can create a new cluster called kpops that uses the previously created Docker registry:

      k3d cluster create kpops --k3s-arg \"--no-deploy=traefik@server:*\" --registry-use k3d-kpops-registry.localhost:12345\n

    Note

    Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.

    You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.

    "}, {"location": "user/getting-started/setup/#deploy-kafka", "title": "Deploy Kafka", "text": "

    Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.

1. To allow connectivity to other systems, Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:

      1. Create a Dockerfile with the following content:

        FROM confluentinc/cp-kafka-connect:7.1.3\n\nRUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0\n
      2. Build and push the modified image to your private Docker registry:

        docker build . --tag localhost:12345/kafka-connect-jdbc:7.1.3 && \\\ndocker push localhost:12345/kafka-connect-jdbc:7.1.3\n

Detailed instructions on building, tagging, and pushing a Docker image can be found in the Docker docs.

    2. Add Confluent's Helm chart repository and update the index:

      helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/ &&  \nhelm repo update\n
    3. Install Kafka, Zookeeper, Confluent's Schema Registry, Kafka Rest Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example for the --values ./kafka.yaml file configuring the deployment accordingly. Deploy the services:

      helm upgrade \\\n    --install \\\n    --version 0.6.1 \\\n    --values ./kafka.yaml \\\n    --namespace kpops \\\n    --create-namespace \\\n    --wait \\\n    k8kafka confluentinc/cp-helm-charts\n
    Kafka Helm chart values (kafka.yaml)

    An example value configuration for Confluent's Helm chart. This configuration deploys a single Kafka Broker, a Schema Registry, Zookeeper, Kafka Rest Proxy, and Kafka Connect with minimal resources.

    cp-zookeeper:\n  enabled: true\n  servers: 1\n  imageTag: 7.1.3\n  heapOptions: \"-Xms124M -Xmx124M\"\n  overrideGroupId: k8kafka\n  fullnameOverride: \"k8kafka-cp-zookeeper\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.2G\n    limits:\n      cpu: 250m\n      memory: 0.2G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka:\n  enabled: true\n  brokers: 1\n  imageTag: 7.1.3\n  podManagementPolicy: Parallel\n  configurationOverrides:\n    \"auto.create.topics.enable\": false\n    \"offsets.topic.replication.factor\": 1\n    \"transaction.state.log.replication.factor\": 1\n    \"transaction.state.log.min.isr\": 1\n    \"confluent.metrics.reporter.topic.replicas\": 1\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.5G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n  persistence:\n    enabled: false\n\ncp-schema-registry:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-schema-registry\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.25G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-connect:\n  enabled: true\n  replicaCount: 1\n  image: k3d-kpops-registry.localhost:12345/kafka-connect-jdbc\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-kafka-connect\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 500m\n      memory: 0.25G\n    limits:\n      cpu: 500m\n      memory: 0.25G\n  configurationOverrides:\n    \"consumer.max.poll.records\": \"10\"\n    \"consumer.max.poll.interval.ms\": \"900000\"\n    \"config.storage.replication.factor\": \"1\"\n    \"offset.storage.replication.factor\": \"1\"\n    \"status.storage.replication.factor\": \"1\"\n  cp-schema-registry:\n    url: http://k8kafka-cp-schema-registry:8081\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-rest:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-rest\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-ksql-server:\n  enabled: false\ncp-control-center:\n  enabled: false\n
    "}, {"location": "user/getting-started/setup/#deploy-streams-explorer", "title": "Deploy Streams Explorer", "text": "

Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster, including the inspection of schemas and monitoring of metrics. First, add the Helm repository:

    helm repo add streams-explorer https://bakdata.github.io/streams-explorer && \\\nhelm repo update\n

    Below you can find an example for the --values ./streams-explorer.yaml file configuring the deployment accordingly. Now, deploy the service:

    helm upgrade \\\n    --install \\\n    --version 0.2.3 \\\n    --values ./streams-explorer.yaml \\\n    --namespace kpops \\\n    streams-explorer streams-explorer/streams-explorer\n
    Streams Explorer Helm chart values (streams-explorer.yaml)

An example value configuration for the Streams Explorer Helm chart.

    imageTag: \"v2.1.2\"\nconfig:\n   K8S__deployment__cluster: true\n   SCHEMAREGISTRY__url: http://k8kafka-cp-schema-registry.kpops.svc.cluster.local:8081\n   KAFKACONNECT__url: http://k8kafka-cp-kafka-connect.kpops.svc.cluster.local:8083\nresources:\n   requests:\n       cpu: 200m\n       memory: 300Mi\n   limits:\n       cpu: 200m\n       memory: 300Mi\n
    "}, {"location": "user/getting-started/setup/#check-the-status-of-your-deployments", "title": "Check the status of your deployments", "text": "

    Now we will check if all the pods are running in our namespace. You can list all pods in the namespace with this command:

    kubectl --namespace kpops get pods\n

    Then you should see the following output in your terminal:

    NAME                                          READY   STATUS    RESTARTS   AGE\nk8kafka-cp-kafka-connect-8fc7d544f-8pjnt      1/1     Running   0          15m\nk8kafka-cp-zookeeper-0                        1/1     Running   0          15m\nk8kafka-cp-kafka-0                            1/1     Running   0          15m\nk8kafka-cp-schema-registry-588f8c65db-jdwbq   1/1     Running   0          15m\nk8kafka-cp-rest-6bbfd7b645-nwkf8              1/1     Running   0          15m\nstreams-explorer-54db878c67-s8wbz             1/1     Running   0          15m\n

Pay attention to the STATUS column. The pods should have a status of Running.

    "}, {"location": "user/getting-started/setup/#install-kpops", "title": "Install KPOps", "text": "

    KPOps comes as a PyPI package. You can install it with pip:

    pip install kpops\n
    "}, {"location": "user/getting-started/teardown/", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/teardown/#kpops-teardown-commands", "title": "KPOps teardown commands", "text": "
    • destroy: Removes Kubernetes resources.
    • reset: Runs destroy, resets the states of Kafka Streams apps and resets offsets to zero.
    • clean: Runs reset and removes all Kafka resources.
    "}, {"location": "user/getting-started/teardown/#kpops-deployed-pipeline", "title": "KPOps-deployed pipeline", "text": "

    The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with helm (see section Infrastructure).

    1. Export environment variables.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
2. Navigate to the examples folder. Replace <name-of-the-example-directory> with the example you want to tear down, for example atm-fraud-detection.

    3. Remove the pipeline

      # Uncomment 1 line to either destroy, reset or clean.\n\n# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \\\n--config <name-of-the-example-directory>/config.yaml \\\n--execute\n
    "}, {"location": "user/getting-started/teardown/#infrastructure", "title": "Infrastructure", "text": "

    Delete namespace:

    kubectl delete namespace kpops\n

    Note

In case kpops destroy is not working, you can uninstall the pipeline services one by one; this is equivalent to running kpops destroy. If a clean uninstall (like the one kpops clean performs) is needed, you also need to delete the topics and schemas created by the deployment of the pipeline.

    "}, {"location": "user/getting-started/teardown/#local-cluster", "title": "Local cluster", "text": "

    Delete local cluster:

    k3d cluster delete kpops\n
    "}, {"location": "user/getting-started/teardown/#local-image-registry", "title": "Local image registry", "text": "

    Delete local registry:

    k3d registry delete k3d-kpops-registry.localhost\n
    "}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "

KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention, the type is the lowercase, kebab-cased name of the class.

    class MyCoolStreamApp(StreamsApp):\n-    type = \"my-cool-stream-app\"\n+    ...\n

Because of this new convention, producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.

    - producer:\n+ producer-app:\n    app:\n        streams:\n        outputTopic: output_topic\n        extraOutputTopics: \n            output_role1: output_topic1\n            output_role2: output_topic2\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-inputoutput-types", "title": "Refactor input/output types", "text": ""}, {"location": "user/migration-guide/v1-v2/#to-section", "title": "To section", "text": "

    In the to section these have changed:

    • The default type is output
    • If role is set, type is inferred to be extra
    • The type error needs to be defined explicitly
      to:\n    topics:\n      ${pipeline_name}-topic-1:\n-       type: extra\n        role: \"role-1\"\n        ...\n      ${pipeline_name}-topic-2:\n-       type: output \n        ...\n      ${pipeline_name}-topic-3:\n         type: error \n         ...\n
    "}, {"location": "user/migration-guide/v1-v2/#from-section", "title": "From section", "text": "

    In the from section these have changed:

    • The default type is input
    • input-pattern type is replaced by pattern
    • If role is set, type is inferred to be extra
• If role is set and type is explicitly set to pattern, it is inferred to be extra-pattern
      from:\n    topics:\n      ${pipeline_name}-input-topic:\n-       type: input\n        ...\n      ${pipeline_name}-extra-topic:\n-       type: extra\n        role: topic-role\n        ...\n      ${pipeline_name}-input-pattern-topic:\n-       type: input-pattern \n+       type: pattern\n        ...\n      ${pipeline_name}-extra-pattern-topic:\n-       type: extra-pattern \n+       type: pattern\n        role: some-role\n        ...\n
    "}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "

    All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example of a pipeline.yaml in the following. Notice that the app section here remains untouched.

    ...\ntype: streams-app\n  name: streams-app\n  namespace: namespace \n  app: \n    streams:\n      brokers: ${brokers} \n      schemaRegistryUrl: ${schema_registry_url}\n     autoscaling:\n      consumerGroup: consumer-group\n      lagThreshold: 0\n      enabled: false \n      pollingInterval: 30\n\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: error\n-       keySchema: key-schema\n+       key_schema: key-schema\n-       valueSchema: value-schema\n+       value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs:\n          cleanup.policy: compact\n    models:\n      model: model\n  prefix: ${pipeline_name}-\n- repoConfig:\n+ repo_config:\n-   repositoryName: bakdata-streams-bootstrap\n+   repository_name: bakdata-streams-bootstrap\n    url: https://bakdata.github.io/streams-bootstrap/\n-   repoAuthFlags: \n+   repo_auth_flags: \n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.4\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-handling-of-helm-flags", "title": "Refactor handling of Helm flags", "text": "

    If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, the abstract function get_helm_chart that returns the chart for deploying the app using Helm is now a Python property and renamed to helm_chart.

    class MyCoolApp(KubernetesApp):\n\n+   @property\n    @override\n-   def get_helm_chart(self) -> str:\n+   def helm_chart(self) -> str:\n        return \"./charts/charts-folder\"\n
    "}, {"location": "user/migration-guide/v1-v2/#plural-broker-field-in-pipeline-config", "title": "Plural broker field in pipeline config", "text": "

Since you can pass a comma-separated string of broker addresses, the broker field in KPOps is now plural. The pluralization has affected multiple areas:

    "}, {"location": "user/migration-guide/v1-v2/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- broker: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n kafka_connect_host: \"http://localhost:8083\"\n kafka_rest_host: \"http://localhost:8082\"\n schema_registry_url: \"http://localhost:8081\"\n
    "}, {"location": "user/migration-guide/v1-v2/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called brokers.

    ...\n  app:\n    streams:\n-     brokers: ${broker}\n+     brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#environment-variable", "title": "Environment variable", "text": "

If you previously set the environment variable KPOPS_KAFKA_BROKER, you now need to replace it with KPOPS_KAFKA_BROKERS.

    "}, {"location": "user/references/cli-commands/", "title": "CLI Usage", "text": "

    Usage:

    $ kpops [OPTIONS] COMMAND [ARGS]...\n

    Options:

    • -V, --version: Print KPOps version
    • --install-completion: Install completion for the current shell.
    • --show-completion: Show completion for the current shell, to copy it or customize the installation.
    • --help: Show this message and exit.

    Commands:

    • clean: Clean pipeline steps
    • deploy: Deploy pipeline steps
    • destroy: Destroy pipeline steps
• generate: Enriches pipeline steps with defaults.
    • reset: Reset pipeline steps
    • schema: Generate json schema.
    "}, {"location": "user/references/cli-commands/#kpops-clean", "title": "kpops clean", "text": "

    Clean pipeline steps

    Usage:

    $ kpops clean [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-deploy", "title": "kpops deploy", "text": "

    Deploy pipeline steps

    Usage:

    $ kpops deploy [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-destroy", "title": "kpops destroy", "text": "

    Destroy pipeline steps

    Usage:

    $ kpops destroy [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-generate", "title": "kpops generate", "text": "

Enriches pipeline steps with defaults. The output is used as input for the deploy/destroy/... commands.

    Usage:

    $ kpops generate [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --template / --no-template: Run Helm template [default: no-template]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-reset", "title": "kpops reset", "text": "

    Reset pipeline steps

    Usage:

    $ kpops reset [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-schema", "title": "kpops schema", "text": "

    Generate json schema.

    The schemas can be used to enable support for kpops files in a text editor.

    Usage:

    $ kpops schema [OPTIONS] SCOPE:{pipeline|config} [COMPONENTS_MODULE]\n

    Arguments:

    • SCOPE:{pipeline|config}: Scope of the generated schema
      pipeline: Schema of PipelineComponents. Includes the built-in kpops components by default. To include custom components, provide [COMPONENTS_MODULES].\n\nconfig: Schema of PipelineConfig.  [required]\n
      • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --include-stock-components / --no-include-stock-components: Include the built-in KPOps components. [default: include-stock-components]
    • --help: Show this message and exit.
    "}, {"location": "user/references/editor-integration/", "title": "Editor integration", "text": "

    KPOps provides JSON schemas that enable autocompletion and validation for some of the files that the user must work with.

    "}, {"location": "user/references/editor-integration/#supported-files", "title": "Supported files", "text": "
    • pipeline.yaml
    • config.yaml
    "}, {"location": "user/references/editor-integration/#usage", "title": "Usage", "text": "
    1. Install the yaml-language-server in your editor of choice. (requires LSP support)
    2. Configure the extension with the settings below.
    settings.json
    {\n    \"yaml.schemas\": {\n        \"https://bakdata.github.io/kpops/2.0/schema/config.json\": \"config.yaml\",\n        \"https://bakdata.github.io/kpops/2.0/schema/pipeline.json\": \"pipeline.yaml\"\n    }\n}\n

    Advanced usage

    It is possible to generate schemas with the kpops schema command. Useful when using custom components or when using a pre-release version of KPOps.

    "}, {"location": "user/references/ci-integration/github-actions/", "title": "GitHub Actions integration", "text": "

    We provide a GitHub composite action called kpops-runner that installs all the necessary dependencies and runs KPOps commands with the given parameters.

    "}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines pipeline-base-dir \u274c - string directory where relative pipeline variables are initialized from defaults \u274c - string defaults folder path config \u274c - string config.yaml file path components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
    steps:\n  # ...\n  # This step is useful for debugging reasons\n  - name: Generate Kafka pipeline\n    uses: bakdata/kpops/actions/kpops-runner@main\n    with:\n      command: generate\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops/actions/kpops-runner@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n  # ...\n

    It is possible to execute the KPOps runner with a dev version from TestPyPI.

    steps:\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops/actions/kpops-runner@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.5.dev20230707132709 -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/\n
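    For completeness, a minimal sketch of a full workflow file wrapping such a step is shown below; the workflow name, trigger, runner image, and checkout step are assumptions and not prescribed by the kpops-runner action.

    # .github/workflows/deploy-pipeline.yaml (hypothetical file name)
    name: Deploy Kafka pipeline
    on:
      push:
        branches: [main]
    jobs:
      kpops:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v4
          - name: Deploy Kafka pipeline
            uses: bakdata/kpops/actions/kpops-runner@main
            with:
              command: deploy --execute
              working-directory: home/my-kpops-root-dir
              pipeline: pipelines/my-pipeline-file.yaml
              kpops-version: 1.2.3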
    "}]} \ No newline at end of file +{"config": {"lang": ["en"], "separator": "[\\s\\-]+", "pipeline": ["stopWordFilter"]}, "docs": [{"location": "developer/auto-generation/", "title": "Auto generation", "text": "

    Auto generation happens mostly with pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto-generate code for the documentation.
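    As a rough sketch of the mechanism, a local pre-commit hook that runs one of these scripts could be declared like this; the hook id and script path below are hypothetical, the actual configuration lives in the linked .pre-commit-config.yaml:

    # .pre-commit-config.yaml (hypothetical excerpt)
    repos:
      - repo: local
        hooks:
          - id: gen-docs-env-vars          # hypothetical hook id
            name: Generate environment variable docs
            entry: python hooks/gen_docs/gen_docs_env_vars.py  # hypothetical script path
            language: system
            pass_filenames: false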

    "}, {"location": "developer/auto-generation/#generation-scripts-and-their-respective-files", "title": "Generation scripts and their respective files", "text": ""}, {"location": "developer/auto-generation/#documentation", "title": "Documentation", "text": ""}, {"location": "developer/auto-generation/#variables", "title": "Variables", "text": "
    • cli_env_vars.env -- All CLI environment variables in a dotenv file.
    • cli_env_vars.md -- All CLI environment variables in a table.
    • config_env_vars.env -- Almost all pipeline config environment variables in a dotenv file. The script checks for each field in KpopsConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it.
    • config_env_vars.md -- Almost all pipeline config environment variables in a table.
    • variable_substitution.yaml -- A copy of ./tests/pipeline/resources/component-type-substitution/pipeline.yaml used as an example of substitution.
    "}, {"location": "developer/auto-generation/#cli-commands", "title": "CLI commands", "text": "

    Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.

    "}, {"location": "developer/auto-generation/#pipeline-and-defaults-example-definitions", "title": "Pipeline and defaults example definitions", "text": "

    Generates example pipeline.yaml and defaults.yaml files for each individual component, stores them, and concatenates them into one complete pipeline definition and one complete pipeline defaults definition.

    User input

    • headers/*\\.yaml -- The top of each example. Includes a description comment, type and name (see the sketch after this list). The headers for pipeline.yaml reside in the pipeline-components dir and the defaults.yaml headers reside in the pipeline-defaults dir. The names of the files must match the respective component type.
    • sections/*\\.yaml -- Each YAML file contains a single section (component attribute) definition. The intention is to keep the minimal set of definitions there from which any component definition can be built. The names of the files must be equal to the respective component type and the attribute name. The sections are used for both defaults.yaml and pipeline.yaml generation and reside in the pipeline-components dir.
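    As an illustration, a header file for the streams-app component could look roughly like the following; this is a sketch based on the generated examples elsewhere in these docs, not the literal file contents:

    # headers/streams-app.yaml (sketch)
    # StreamsApp component that configures a streams bootstrap app.
    - type: streams-app # required
      name: streams-app # required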

    Generated

    • pipeline-components/dependencies/* -- Cached information about KPOps components
    • pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the pipeline.yaml generation.
    • defaults_pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the defaults.yaml generation.
    • kpops_structure.yaml -- Specifies the inheritance hierarchy of the components and what sections exist in each component.
    • pipeline-components/*\\.yaml -- All single-component pipeline definitions and one big (complete) pipeline.yaml that contains all of them.
    • pipeline-defaults/*\\.yaml -- All single-component defaults definitions and one big (complete) defaults.yaml that contains all of them.
    "}, {"location": "developer/auto-generation/#editor-integration", "title": "Editor integration", "text": ""}, {"location": "developer/auto-generation/#schemas", "title": "Schemas", "text": "
    • config.json
    • pipeline.json
    "}, {"location": "developer/formatting/", "title": "Formatting", "text": ""}, {"location": "developer/formatting/#markdown", "title": "Markdown", "text": "

    To ensure a consistent markdown style, we use dprint to check and reformat.

    dprint fmt\n

    Use the official documentation to set up dprint. The configuration can be found here.

    "}, {"location": "user/what-is-kpops/", "title": "What is KPOps?", "text": "

    With a couple of easy commands in the shell and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline1 to a Kubernetes cluster, but also reset, clean or destroy it!

    "}, {"location": "user/what-is-kpops/#key-features", "title": "Key features", "text": "
    • Deploy Kafka apps to Kubernetes: KPOps allows you to deploy consecutive Kafka Streams applications and producers using an easy-to-read and -write pipeline definition.
    • Manage Kafka Connectors: KPOps connects with your Kafka Connect cluster and deploys, validates, and deletes your connectors.
    • Configure multiple pipelines and steps: KPOps has various abstractions that simplify configuring multiple pipelines and steps within pipelines by sharing common configuration between different components, such as producers or streaming applications.
    • Handle your topics and schemas: KPOps not only creates and deletes your topics but also registers and deletes your schemas.
    • Clean termination of Kafka components: KPOps removes your pipeline components (e.g., Kafka Streams applications) from the Kubernetes cluster and cleans up the component-related states (e.g., removing/resetting offsets of Kafka consumer groups).
    • Preview your pipeline changes: With the KPOps dry-run, you can ensure your pipeline definition is set up correctly. This helps to minimize downtime and prevent potential errors or issues that could impact your production environment.
    "}, {"location": "user/what-is-kpops/#example", "title": "Example", "text": "An overview of Word-count pipeline shown in Streams Explorer Word-count pipeline.yaml
    - type: producer-app\n  name: data-producer\n  app:\n    image: bakdata/kpops-demo-sentence-producer\n    imageTag: \"1.0.0\"\n\n- type: streams-app\n  name: word-counter\n  to:\n    topics:\n      ${output_topic_name}:\n        type: output\n        configs:\n          cleanup.policy: compact\n  app:\n    replicaCount: 1\n    image: bakdata/kpops-demo-word-count-app\n    imageTag: \"1.0.0\"\n\n- type: kafka-sink-connector\n  name: redis-sink-connector\n  app:\n    connector.class: com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector\n    redis.hosts: redis-headless:6379\n    redis.database: 0\n    tasks.max: 1\n    key.converter: org.apache.kafka.connect.storage.StringConverter\n    value.converter: org.apache.kafka.connect.storage.StringConverter\n
    1. A Kafka pipeline can consist of consecutive streaming applications, producers, and connectors.\u00a0\u21a9

    "}, {"location": "user/core-concepts/config/", "title": "Configuration", "text": "

    KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.

    Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.

    To learn about any of the available settings, take a look at the example below.

    config.yaml
    # CONFIGURATION\n#\n# The path to the folder containing the defaults.yaml file and the environment\n# defaults files.\ndefaults_path: .\n# The environment you want to generate and deploy the pipeline to. Suffix your\n# environment files with this value (e.g. defaults_development.yaml and\n# pipeline_development.yaml for environment=development).\n# REQUIRED\nenvironment: development\n# The Kafka brokers address.\n# REQUIRED\nbrokers: \"http://broker1:9092,http://broker2:9092\"\n# The name of the defaults file and the prefix of the defaults environment file.\ndefaults_filename_prefix: defaults\n# Configures topic names.\ntopic_name_config: \n  # Configures the value for the variable ${output_topic_name}\n  default_output_topic_name: ${pipeline_name}-${component_name}\n  # Configures the value for the variable ${error_topic_name}\n  default_error_topic_name: ${pipeline_name}-${component_name}-error\n# Address of the Schema Registry\nschema_registry_url: \"http://localhost:8081\"\n# Address of the Kafka REST Proxy.\nkafka_rest_host: \"http://localhost:8082\"\n# Address of Kafka Connect.\nkafka_connect_host: \"http://localhost:8083\"\n# The timeout in seconds that specifies when actions like deletion or deploy\n# timeout.\ntimeout: 300\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n  # Set the name of the kubeconfig context. (--kube-context)\n  context: name\n  # Run Helm in Debug mode.\n  debug: false\n# Configure Helm Diff.\nhelm_diff_config: \n  # Set of keys that should not be checked.\n  ignore: \n    - name\n    - imageTag\n# Whether to retain clean up jobs in the cluster or uninstall the, after\n# completion.\nretain_clean_jobs: false\n

    Environment-specific pipeline definitions

    Similarly to defaults, it is possible to have any number of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.
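    For instance, assuming environment: development, a pipeline_development.yaml next to pipeline.yaml could override selected values of a component; the component and values below are hypothetical:

    # pipeline.yaml
    - type: streams-app
      name: word-counter
      app:
        imageTag: "1.0.0"

    # pipeline_development.yaml (overrides only what differs)
    - type: streams-app
      name: word-counter
      app:
        imageTag: "latest-dev"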

    "}, {"location": "user/core-concepts/defaults/", "title": "Defaults", "text": "

    KPOps has an efficient way of dealing with repeated settings: defaults.yaml. This file lets the user set defaults for any and all components, removing the need to repeat the same settings in pipeline.yaml.

    See real-world examples for defaults.

    "}, {"location": "user/core-concepts/defaults/#features", "title": "Features", "text": ""}, {"location": "user/core-concepts/defaults/#inheritance", "title": "Inheritance", "text": "

    An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.

    It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.
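    A small sketch of how inheritance plays out, using values taken from the component examples in these docs: settings placed under kafka-app apply to every component that inherits from it (streams-app and producer-app), while a more specific block only affects its own subtree.

    # defaults.yaml (sketch)
    kafka-app: # inherited by streams-app and producer-app
      app:
        streams:
          brokers: ${kafka_brokers}
          schemaRegistryUrl: ${schema_registry_url}
      version: "2.12.0"
    streams-app: # only applies to streams-app components
      app:
        autoscaling:
          enabled: false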

    "}, {"location": "user/core-concepts/defaults/#configuration", "title": "Configuration", "text": "

    It is possible to set specific defaults for each environment by adding files called defaults_{environment}.yaml to the defaults folder at defaults_path. The defaults are loaded based on the currently set environment.

    It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.

    Tip

    defaults is the default value of defaults_filename_prefix. Together with defaults_path and environment, it can be changed in config.yaml.
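    A minimal sketch with hypothetical values: the base defaults.yaml sets a value, and defaults_development.yaml overrides only that value when environment is development.

    # defaults.yaml
    streams-app:
      app:
        replicaCount: 3

    # defaults_development.yaml (only the differing setting)
    streams-app:
      app:
        replicaCount: 1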

    "}, {"location": "user/core-concepts/defaults/#components", "title": "Components", "text": "

    The defaults codeblocks in this section contain the full set of settings that are specific to the component. If a setting already exists in a parent config, it will not be included in the child's.

    "}, {"location": "user/core-concepts/defaults/#kubernetesapp", "title": "KubernetesApp", "text": "defaults.yaml
    # Base Kubernetes App\n#\n# Parent of: HelmApp\n# Child of: PipelineComponent\nkubernetes-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/defaults/#kafkaapp", "title": "KafkaApp", "text": "defaults.yaml
    # Base component for Kafka-based components.\n#\n# Parent of: ProducerApp, StreamsApp\n# Child of: KubernetesApp\nkafka-app:\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${kafka_brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/defaults/#streamsapp", "title": "StreamsApp", "text": "defaults.yaml
    # StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${kafka_brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resouce.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n
    "}, {"location": "user/core-concepts/defaults/#producerapp", "title": "ProducerApp", "text": "defaults.yaml
    \n
    "}, {"location": "user/core-concepts/defaults/#kafkaconnector", "title": "KafkaConnector", "text": "defaults.yaml
    # Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Helm repository configuration for resetter\n  repo_config:\n    repository_name: my-repo # required\n    url: https://bakdata.github.io/kafka-connect-resetter/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.6\" # Helm chart version\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/defaults/#kafkasourceconnector", "title": "KafkaSourceConnector", "text": "defaults.yaml
    # Kafka source connector\n#\n# Child of: KafkaConnector\nkafka-source-connector:\n  # The source connector has no `from` section\n  # from:\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/defaults/#kafkasinkconnector", "title": "KafkaSinkConnector", "text": "defaults.yaml
    # Kafka sink connector\n#\n# Child of: KafkaConnector\nkafka-sink-connector:\n  # No settings differ from `kafka-connector`\n
    "}, {"location": "user/core-concepts/components/kafka-app/", "title": "KafkaApp", "text": "

    Subclass of HelmApp.

    "}, {"location": "user/core-concepts/components/kafka-app/#usage", "title": "Usage", "text": "
    • Defines a streams-bootstrap component
    • Should not be used in pipeline.yaml as the component can be defined as either a StreamsApp or a ProducerApp
    • Often used in defaults.yaml
    "}, {"location": "user/core-concepts/components/kafka-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base component for Kafka-based components.\n# Producer or streaming apps should inherit from this class.\n- type: kafka-app # required\n  name: kafka-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${kafka_brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/kafka-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-app/#deploy", "title": "deploy", "text": "

    In addition to HelmApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/kafka-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-connector/", "title": "KafkaConnector", "text": "

    KafkaConnector is a component that deploys Kafka Connectors. Since a connector is always either a sink or a source connector, it is not recommended to use KafkaConnector directly in pipeline.yaml. Instead, KafkaConnector should be used in defaults.yaml to set defaults for all connectors in the pipeline, as they can share some common settings.
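    For example, a defaults.yaml could declare shared settings once under kafka-connector so that every sink and source connector in the pipeline picks them up; the namespace and converter values below are hypothetical:

    # defaults.yaml (sketch)
    kafka-connector:
      namespace: kafka-connect # hypothetical namespace
      app:
        key.converter: org.apache.kafka.connect.storage.StringConverter
        value.converter: org.apache.kafka.connect.storage.StringConverter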

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/", "title": "KafkaSinkConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#usage", "title": "Usage", "text": "

    Lets other systems pull data from Apache Kafka.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka sink connector\n- type: kafka-sink-connector\n  name: kafka-sink-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Helm repository configuration for resetter\n  repo_config:\n    repository_name: my-repo # required\n    url: https://bakdata.github.io/kafka-connect-resetter/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.6\" # Helm chart version\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#deploy", "title": "deploy", "text": "
    • Add the sink connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema Registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#destroy", "title": "destroy", "text": "

    The associated sink connector is removed from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#reset", "title": "reset", "text": "

    Reset the consumer group offsets using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#clean", "title": "clean", "text": "
    • Delete associated consumer group
    • Delete configured error topics
    "}, {"location": "user/core-concepts/components/kafka-source-connector/", "title": "KafkaSourceConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#usage", "title": "Usage", "text": "

    Manages source connectors in your Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka source connector\n- type: kafka-source-connector # required\n  name: kafka-source-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  # The source connector has no `from` section\n  # from:\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Helm repository configuration for resetter\n  repo_config:\n    repository_name: my-repo # required\n    url: https://bakdata.github.io/kafka-connect-resetter/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.6\" # Helm chart version\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#deploy", "title": "deploy", "text": "
    • Add the source connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#destroy", "title": "destroy", "text": "

    Remove the source connector from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#reset", "title": "reset", "text": "

    Delete state associated with the connector using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#clean", "title": "clean", "text": "
    • Delete all associated output topics
    • Delete all associated schemas in the Schema Registry
    • Delete state associated with the connector
    "}, {"location": "user/core-concepts/components/kubernetes-app/", "title": "KubernetesApp", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#usage", "title": "Usage", "text": "

    Can be used to create components for any Kubernetes app.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base Kubernetes App\n- type: kubernetes-app\n  name: kubernetes-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/components/kubernetes-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#deploy", "title": "deploy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#destroy", "title": "destroy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/overview/", "title": "Overview", "text": "

    This section explains the different components of KPOps, their usage and configuration in the pipeline definition pipeline.yaml.

    flowchart BT\n    KubernetesApp --> PipelineComponent\n    HelmApp --> KubernetesApp\n    KafkaApp --> HelmApp\n    StreamsApp --> KafkaApp\n    ProducerApp --> KafkaApp\n    KafkaConnector --> PipelineComponent\n    KafkaSourceConnector --> KafkaConnector\n    KafkaSinkConnector --> KafkaConnector\n\n    click KubernetesApp \"/kpops/user/core-concepts/components/kubernetes-app\"\n    click HelmApp \"/kpops/user/core-concepts/components/helm-app\"\n    click KafkaApp \"/kpops/user/core-concepts/components/kafka-app\"\n    click StreamsApp \"/kpops/user/core-concepts/components/streams-app\"\n    click ProducerApp \"/kpops/user/core-concepts/components/producer-app\"\n    click KafkaConnector \"/kpops/user/core-concepts/components/kafka-connector\"\n    click KafkaSourceConnector \"/kpops/user/core-concepts/components/kafka-source-connector\"\n    click KafkaSinkConnector \"/kpops/user/core-concepts/components/kafka-sink-connector\"

    KPOps component hierarchy

    "}, {"location": "user/core-concepts/components/producer-app/", "title": "ProducerApp", "text": "

    Subclass of KafkaApp.

    "}, {"location": "user/core-concepts/components/producer-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka producer app

    "}, {"location": "user/core-concepts/components/producer-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n  name: producer-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  # from: # While the producer-app does inherit from kafka-app, it does not need a\n  # `from` section, hence it does not support it.\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n  app: # required\n    streams: # required, producer-app-specific\n      brokers: ${kafka_brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n      outputTopic: output_topic\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n    nameOverride: override-with-this-name # kafka-app-specific\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/producer-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/producer-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/producer-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/producer-app/#reset", "title": "reset", "text": "

    Do nothing, producers are stateless.

    "}, {"location": "user/core-concepts/components/producer-app/#clean", "title": "clean", "text": "
    • Delete the output topics of the Kafka producer
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/components/streams-app/", "title": "StreamsApp", "text": "

    Subclass of KafkaApp.

    "}, {"location": "user/core-concepts/components/streams-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka Streams app

    "}, {"location": "user/core-concepts/components/streams-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # StreamsApp component that configures a streams bootstrap app.\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\n- type: streams-app # required\n  name: streams-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline_name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline_name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline_name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline_name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: output # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline_name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline_name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${kafka_brokers} # required\n      schemaRegistryUrl: ${schema_registry_url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The 
period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resouce.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/streams-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/streams-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-app/#reset", "title": "reset", "text": "
    • Delete the consumer group offsets
    • Delete Kafka Streams state
    "}, {"location": "user/core-concepts/components/streams-app/#clean", "title": "clean", "text": "

    Similar to reset, with two additional steps:

    • Delete the app's output topics
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/variables/environment_variables/", "title": "Environment variables", "text": "

    Environment variables can be set by using the export command in Linux or the set command in Windows.

    dotenv files

    Support for .env files is on the roadmap, but not implemented in KPOps yet. One possible way to still use one is to export the contents manually with the following command: export $(xargs < .env). This works in bash, provided there are no spaces inside the values.

    "}, {"location": "user/core-concepts/variables/environment_variables/#config", "title": "Config", "text": "

    These variables are a lower priority alternative to the settings in config.yaml. Variables marked as required can instead be set in the pipeline config.

    | Name | Default Value | Required | Description | Setting name |
    | --- | --- | --- | --- | --- |
    | KPOPS_ENVIRONMENT | | True | The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). | environment |
    | KPOPS_KAFKA_BROKERS | | True | The comma separated Kafka brokers address. | kafka_brokers |
    | KPOPS_SCHEMA_REGISTRY_URL | http://localhost:8081 | False | Address of the Schema Registry. | url |
    | KPOPS_KAFKA_REST_URL | http://localhost:8082 | False | Address of the Kafka REST Proxy. | url |
    | KPOPS_KAFKA_CONNECT_URL | http://localhost:8083 | False | Address of Kafka Connect. | url |
    | KPOPS_TIMEOUT | 300 | False | The timeout in seconds that specifies when actions like deletion or deploy timeout. | timeout |
    | KPOPS_RETAIN_CLEAN_JOBS | False | False | Whether to retain clean up jobs in the cluster or uninstall them after completion. | retain_clean_jobs |
    config_env_vars.env Exhaustive list of all config-related environment variables
    # Pipeline config environment variables\n#\n# The default setup is shown. These variables are a lower priority\n# alternative to the settings in `config.yaml`. Variables marked as\n# required can instead be set in the pipeline config.\n#\n# environment\n# The environment you want to generate and deploy the pipeline to.\n# Suffix your environment files with this value (e.g.\n# defaults_development.yaml for environment=development).\nKPOPS_ENVIRONMENT # No default value, required\n# kafka_brokers\n# The comma separated Kafka brokers address.\nKPOPS_KAFKA_BROKERS # No default value, required\n# url\n# Address of the Schema Registry.\nKPOPS_SCHEMA_REGISTRY_URL=http://localhost:8081\n# url\n# Address of the Kafka REST Proxy.\nKPOPS_KAFKA_REST_URL=http://localhost:8082\n# url\n# Address of Kafka Connect.\nKPOPS_KAFKA_CONNECT_URL=http://localhost:8083\n# timeout\n# The timeout in seconds that specifies when actions like deletion or\n# deploy timeout.\nKPOPS_TIMEOUT=300\n# retain_clean_jobs\n# Whether to retain clean up jobs in the cluster or uninstall the,\n# after completion.\nKPOPS_RETAIN_CLEAN_JOBS=False\n
    "}, {"location": "user/core-concepts/variables/environment_variables/#cli", "title": "CLI", "text": "

    These variables are a lower priority alternative to the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.

    | Name | Default Value | Required | Description |
    | --- | --- | --- | --- |
    | KPOPS_PIPELINE_BASE_DIR | . | False | Base directory to the pipelines (default is current working directory) |
    | KPOPS_CONFIG_PATH | config.yaml | False | Path to the config.yaml file |
    | KPOPS_DEFAULT_PATH | | False | Path to defaults folder |
    | KPOPS_PIPELINE_PATH | | True | Path to YAML with pipeline definition |
    | KPOPS_PIPELINE_STEPS | | False | Comma separated list of steps to apply the command on |
    cli_env_vars.env Exhaustive list of all cli-related environment variables
    # CLI Environment variables\n#\n# The default setup is shown. These variables are a lower priority\n# alternative to the commands' flags. If a variable is set, the\n# corresponding flag does not have to be specified in commands.\n# Variables marked as required can instead be set as flags.\n#\n# Base directory to the pipelines (default is current working\n# directory)\nKPOPS_PIPELINE_BASE_DIR=.\n# Path to the config.yaml file\nKPOPS_CONFIG_PATH=config.yaml\n# Path to defaults folder\nKPOPS_DEFAULT_PATH # No default value, not required\n# Path to YAML with pipeline definition\nKPOPS_PIPELINE_PATH # No default value, required\n# Comma separated list of steps to apply the command on\nKPOPS_PIPELINE_STEPS # No default value, not required\n
    "}, {"location": "user/core-concepts/variables/substitution/", "title": "Substitution", "text": "

    KPOps supports the usage of placeholders and environment variables in pipeline definition and defaults.

    "}, {"location": "user/core-concepts/variables/substitution/#component-specific-variables", "title": "Component-specific variables", "text": "

    These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.

    All of them are prefixed with component_ and take the following form: component_{attribute_name}. If the attribute itself contains attributes, they can be referred to like this: component_{attribute_name}_{subattribute_name}.

    Example
    - type: scheduled-producer\n  app:\n    labels:\n      app_type: \"${component_type}\"\n      app_name: \"${component_name}\"\n      app_schedule: \"${component_app_schedule}\"\n    commandLine:\n      FAKE_ARG: \"fake-arg-value\"\n    schedule: \"30 3/8 * * *\"\n- type: converter\n  app:\n    commandLine:\n      CONVERT_XML: true\n    resources:\n      limits:\n        memory: 2G\n      requests:\n        memory: 2G\n- type: filter\n  name: \"filter-app\"\n  app:\n    labels:\n      app_type: \"${component_type}\"\n      app_name: \"${component_name}\"\n      app_resources_requests_memory: \"${component_app_resources_requests_memory}\"\n      ${component_type}: \"${component_app_labels_app_name}-${component_app_labels_app_type}\"\n      test_placeholder_in_placeholder: \"${component_app_labels_${component_type}}\"\n    commandLine:\n      TYPE: \"nothing\"\n    resources:\n      requests:\n        memory: 3G\n    replicaCount: 4\n    autoscaling:\n      minReplicas: 4\n      maxReplicas: 4\n
    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-config-specific-variables", "title": "Pipeline-config-specific variables", "text": "

    These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.

    Info

    error_topic_name is an alias for topic_name_config_default_error_topic_name; output_topic_name is an alias for topic_name_config_default_output_topic_name
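
    As a minimal sketch (the component and topic layout are hypothetical), these aliases could be used to name topics in a component's to section:

    - type: streams-app\n  to:\n    topics:\n      ${output_topic_name}:\n        type: output\n      ${error_topic_name}:\n        type: error\n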

    "}, {"location": "user/core-concepts/variables/substitution/#environment-variables", "title": "Environment variables", "text": "

    Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation, following the form ${ENV_VAR_NAME}. This, of course, includes variables exported by the user, such as the ones relevant to the KPOps CLI.

    See all KPOps environment variables
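
    As a sketch, assuming the user has exported an environment variable called MY_IMAGE_TAG before running KPOps, it could be referenced in a component definition like this:

    - type: streams-app\n  app:\n    imageTag: \"${MY_IMAGE_TAG}\"\n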

    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-name-variables", "title": "Pipeline name variables", "text": "

    These are special variables that refer to the name and path of a pipeline.

    • ${pipeline_name}: Concatenation of the path to the parent directory in which pipeline.yaml is defined. For instance, for ./data/pipelines/v1/pipeline.yaml the value of the variable would be data-pipelines-v1.

    • ${pipeline_name_<level>}: Similar to the previous variable, each <level> contains a part of the path to the pipeline.yaml file. Considering the previous example, ${pipeline_name_0} would be data, ${pipeline_name_1} would be pipelines, and ${pipeline_name_2} would be v1 (see the sketch below).
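
    For example, given ./data/pipelines/v1/pipeline.yaml, the pipeline name variables could be used to prefix topic names (a sketch with a hypothetical component and topic):

    - type: producer-app\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: output\n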

    "}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
    1. Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
    2. Chaining variables: It is possible to chain any number of variables, see the example above.
    3. Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references, as sketched below.
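
    For instance, standard YAML anchors and aliases can reference a value defined in one component from another within the same pipeline.yaml (a sketch; all names are hypothetical):

    - type: producer-app\n  name: my-producer\n  app:\n    commandLine:\n      TARGET_TOPIC: &output-topic \"my-pipeline-topic\"\n- type: streams-app\n  name: my-consumer\n  app:\n    commandLine:\n      SOURCE_TOPIC: *output-topic\n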
    "}, {"location": "user/examples/atm-fraud-pipeline/", "title": "ATM fraud detection pipeline", "text": "

    ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blog post. The one used in this example is re-built from scratch using bakdata's streams-bootstrap library.

    "}, {"location": "user/examples/atm-fraud-pipeline/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a PostgreSQL database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/examples/atm-fraud-pipeline/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/examples/atm-fraud-pipeline/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postgresql", "title": "PostgreSQL", "text": "

    Deploy PostgreSQL using the Bitnami Helm chart: Add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install PostgreSQL with Helm:

    helm upgrade --install -f ./postgresql.yaml \\\n--namespace kpops \\\npostgresql bitnami/postgresql\n
    PostgreSQL Example Helm chart values (postgresql.yaml)
    auth:\n  database: app_db  \n  enablePostgresUser: true\n  password: AppPassword\n  postgresPassword: StrongPassword\n  username: app1\nprimary:\n  persistence:\n    enabled: false\n    existingClaim: postgresql-data-claim\nvolumePermissions:\n  enabled: true\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-detection-example-pipeline-setup", "title": "ATM fraud detection example pipeline setup", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#deploying-the-atm-fraud-detection-pipeline", "title": "Deploying the ATM fraud detection pipeline", "text": "
    1. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Deploy the pipeline

      poetry run kpops deploy ./examples/bakdata/atm-fraud-detection/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/atm-fraud-detection/config.yaml \\\n--execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be deployed correctly.

    "}, {"location": "user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that, open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:

    An overview of ATM fraud pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/examples/atm-fraud-pipeline/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postrgresql", "title": "PostrgreSQL", "text": "

    PostgreSQL can be uninstalled by running the following command:

    helm --namespace kpops uninstall postgresql\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-pipeline", "title": "ATM fraud pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      poetry run kpops clean ./examples/bakdata/atm-fraud-detection/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/atm-fraud-detection/config.yaml \\\n--verbose \\\n--execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/examples/atm-fraud-pipeline/#common-errors", "title": "Common errors", "text": "
    • deploy fails:
    • Read the error message.
    • Try to correct the mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
    • Run clean.
    • Run deploy --dry-run to avoid having to clean again. If an error occurs, start over from step 1.
    • If the dry-run is successful, run deploy.
    • clean fails:
    • Read the error message.
    • Try to correct the indicated mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
    • Run clean.
    • If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/quick-start/", "title": "Quick start", "text": ""}, {"location": "user/getting-started/quick-start/#word-count", "title": "Word-count", "text": "

    Word-count is a demo pipeline which consists of a producer producing words to Kafka, a Kafka Streams app counting the number of times each word occurs, and finally a Redis database into which the words are exported.

    "}, {"location": "user/getting-started/quick-start/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a Redis database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/getting-started/quick-start/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/getting-started/quick-start/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/getting-started/quick-start/#redis", "title": "Redis", "text": "

    Deploy Redis using the Bitnami Helm chart: Add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install Redis with Helm:

    helm upgrade --install -f ./values-redis.yaml \\\n--namespace kpops \\\nredis bitnami/redis\n
    Redis example Helm chart values (values-redis.yaml)
    architecture: standalone\nauth:\n  enabled: false\nmaster:\n  count: 1\n  configuration: \"databases 1\"\nimage:\n  tag: 7.0.8\n
    "}, {"location": "user/getting-started/quick-start/#word-count-example-pipeline-setup", "title": "Word-count example pipeline setup", "text": ""}, {"location": "user/getting-started/quick-start/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/getting-started/quick-start/#deploying-the-word-count-pipeline", "title": "Deploying the Word-count pipeline", "text": "
    1. Copy the configuration from the kpops-examples repository into kpops>examples>bakdata>word-count like so:

      kpops\n\u251c\u2500\u2500 examples\n|   \u251c\u2500\u2500 bakdata\n|   |   \u251c\u2500\u2500 word-count\n|   |   |   \u251c\u2500\u2500 config.yaml\n|   |   |   \u251c\u2500\u2500 defaults\n|   |   |   \u2502\u00a0\u00a0 \u2514\u2500\u2500 defaults.yaml\n|   |   |   \u2514\u2500\u2500 pipeline.yaml\n|   |   |\n
    2. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    3. Deploy the pipeline

      kpops deploy ./examples/bakdata/word-count/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/word-count/config.yaml \\\n--execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be deployed correctly.

    "}, {"location": "user/getting-started/quick-start/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that, open http://localhost:8080 in your browser.

    You should be able to see the pipeline shown in the image below:

    An overview of Word-count pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/getting-started/quick-start/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/quick-start/#redis_1", "title": "Redis", "text": "

    Redis can be uninstalled by running the following command:

    helm --namespace kpops uninstall redis\n
    "}, {"location": "user/getting-started/quick-start/#word-count-pipeline", "title": "Word-count pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean ./examples/bakdata/word-count/pipeline.yaml \\\n--pipeline-base-dir ./examples \\\n--config ./examples/bakdata/word-count/config.yaml \\\n--verbose \\\n--execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/getting-started/quick-start/#common-errors", "title": "Common errors", "text": "
    • deploy fails:
      1. Read the error message.
      2. Try to correct the mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
      3. Run clean.
      4. Run deploy --dry-run to avoid having to clean again. If an error occurs, start over from step 1.
      5. If the dry-run is successful, run deploy.
    • clean fails:
      1. Read the error message.
      2. Try to correct the indicated mistakes if there were any. Likely the configuration is not correct or the port-forwarding is not working as intended.
      3. Run clean.
      4. If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/setup/", "title": "Setup KPOps", "text": "

    In this part, you will set up KPOps. This includes:

    • optionally creating a local Kubernetes cluster
    • running Apache Kafka and Confluent's Schema Registry
    • installing KPOps
    "}, {"location": "user/getting-started/setup/#prerequisites", "title": "Prerequisites", "text": "
    • k3d (Version 5.4.6+) and Docker (Version >= v20.10.5) or an existing Kubernetes cluster (>= 1.21.0)
    • kubectl (Compatible with server version 1.21.0)
    • Helm (Version 3.8.0+)
    "}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "

    If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.

    1. You can install k3d with its installation script:

      wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/v5.4.6/install.sh | bash\n

      For other ways of installing k3d, you can have a look at their installation guide.

    2. The Kafka deployment needs a modified Docker image; the image is built and pushed to a Docker registry that holds it. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:

      k3d registry create kpops-registry.localhost --port 12345\n
    3. Now you can create a new cluster called kpops that uses the previously created Docker registry:

      k3d cluster create kpops --k3s-arg \"--no-deploy=traefik@server:*\" --registry-use k3d-kpops-registry.localhost:12345\n

    Note

    Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.

    You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.
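
    For example, run:

    kubectl get pods --namespace kube-system\n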

    "}, {"location": "user/getting-started/setup/#deploy-kafka", "title": "Deploy Kafka", "text": "

    Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.

    1. To allow connectivity to other systems, Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:

      1. Create a Dockerfile with the following content:

        FROM confluentinc/cp-kafka-connect:7.1.3\n\nRUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0\n
      2. Build and push the modified image to your private Docker registry:

        docker build . --tag localhost:12345/kafka-connect-jdbc:7.1.3 && \\\ndocker push localhost:12345/kafka-connect-jdbc:7.1.3\n

      Detailed instructions on building, tagging and pushing a Docker image can be found in the Docker docs.

    2. Add Confluent's Helm chart repository and update the index:

      helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/ &&  \nhelm repo update\n
    3. Install Kafka, Zookeeper, Confluent's Schema Registry, Kafka Rest Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example for the --values ./kafka.yaml file configuring the deployment accordingly. Deploy the services:

      helm upgrade \\\n    --install \\\n    --version 0.6.1 \\\n    --values ./kafka.yaml \\\n    --namespace kpops \\\n    --create-namespace \\\n    --wait \\\n    k8kafka confluentinc/cp-helm-charts\n
    Kafka Helm chart values (kafka.yaml)

    An example value configuration for Confluent's Helm chart. This configuration deploys a single Kafka Broker, a Schema Registry, Zookeeper, Kafka Rest Proxy, and Kafka Connect with minimal resources.

    cp-zookeeper:\n  enabled: true\n  servers: 1\n  imageTag: 7.1.3\n  heapOptions: \"-Xms124M -Xmx124M\"\n  overrideGroupId: k8kafka\n  fullnameOverride: \"k8kafka-cp-zookeeper\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.2G\n    limits:\n      cpu: 250m\n      memory: 0.2G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka:\n  enabled: true\n  brokers: 1\n  imageTag: 7.1.3\n  podManagementPolicy: Parallel\n  configurationOverrides:\n    \"auto.create.topics.enable\": false\n    \"offsets.topic.replication.factor\": 1\n    \"transaction.state.log.replication.factor\": 1\n    \"transaction.state.log.min.isr\": 1\n    \"confluent.metrics.reporter.topic.replicas\": 1\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.5G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n  persistence:\n    enabled: false\n\ncp-schema-registry:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-schema-registry\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.25G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-connect:\n  enabled: true\n  replicaCount: 1\n  image: k3d-kpops-registry.localhost:12345/kafka-connect-jdbc\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-kafka-connect\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 500m\n      memory: 0.25G\n    limits:\n      cpu: 500m\n      memory: 0.25G\n  configurationOverrides:\n    \"consumer.max.poll.records\": \"10\"\n    \"consumer.max.poll.interval.ms\": \"900000\"\n    \"config.storage.replication.factor\": \"1\"\n    \"offset.storage.replication.factor\": \"1\"\n    \"status.storage.replication.factor\": \"1\"\n  cp-schema-registry:\n    url: http://k8kafka-cp-schema-registry:8081\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-rest:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-rest\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-ksql-server:\n  enabled: false\ncp-control-center:\n  enabled: false\n
    "}, {"location": "user/getting-started/setup/#deploy-streams-explorer", "title": "Deploy Streams Explorer", "text": "

    Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster, including the inspection of schemas and monitoring of metrics. First, add the Helm repository:

    helm repo add streams-explorer https://bakdata.github.io/streams-explorer && \\\nhelm repo update\n

    Below you can find an example for the --values ./streams-explorer.yaml file configuring the deployment accordingly. Now, deploy the service:

    helm upgrade \\\n    --install \\\n    --version 0.2.3 \\\n    --values ./streams-explorer.yaml \\\n    --namespace kpops \\\n    streams-explorer streams-explorer/streams-explorer\n
    Streams Explorer Helm chart values (streams-explorer.yaml)

    An example value configuration for the Streams Explorer Helm chart.

    imageTag: \"v2.1.2\"\nconfig:\n   K8S__deployment__cluster: true\n   SCHEMAREGISTRY__url: http://k8kafka-cp-schema-registry.kpops.svc.cluster.local:8081\n   KAFKACONNECT__url: http://k8kafka-cp-kafka-connect.kpops.svc.cluster.local:8083\nresources:\n   requests:\n       cpu: 200m\n       memory: 300Mi\n   limits:\n       cpu: 200m\n       memory: 300Mi\n
    "}, {"location": "user/getting-started/setup/#check-the-status-of-your-deployments", "title": "Check the status of your deployments", "text": "

    Now we will check if all the pods are running in our namespace. You can list all pods in the namespace with this command:

    kubectl --namespace kpops get pods\n

    Then you should see the following output in your terminal:

    NAME                                          READY   STATUS    RESTARTS   AGE\nk8kafka-cp-kafka-connect-8fc7d544f-8pjnt      1/1     Running   0          15m\nk8kafka-cp-zookeeper-0                        1/1     Running   0          15m\nk8kafka-cp-kafka-0                            1/1     Running   0          15m\nk8kafka-cp-schema-registry-588f8c65db-jdwbq   1/1     Running   0          15m\nk8kafka-cp-rest-6bbfd7b645-nwkf8              1/1     Running   0          15m\nstreams-explorer-54db878c67-s8wbz             1/1     Running   0          15m\n

    Pay attention to the STATUS column. The pods should have a status of Running.

    "}, {"location": "user/getting-started/setup/#install-kpops", "title": "Install KPOps", "text": "

    KPOps comes as a PyPI package. You can install it with pip:

    pip install kpops\n
    "}, {"location": "user/getting-started/teardown/", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/teardown/#kpops-teardown-commands", "title": "KPOps teardown commands", "text": "
    • destroy: Removes Kubernetes resources.
    • reset: Runs destroy, resets the states of Kafka Streams apps and resets offsets to zero.
    • clean: Runs reset and removes all Kafka resources.
    "}, {"location": "user/getting-started/teardown/#kpops-deployed-pipeline", "title": "KPOps-deployed pipeline", "text": "

    The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with Helm (see section Infrastructure).

    1. Export environment variables.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Navigate to the examples folder. Replace the <name-of-the-example-directory> with the example you want to tear down, for example the atm-fraud-detection.

    3. Remove the pipeline

      # Uncomment 1 line to either destroy, reset or clean.\n\n# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \\\n--config <name-of-the-example-directory>/config.yaml \\\n--execute\n
    "}, {"location": "user/getting-started/teardown/#infrastructure", "title": "Infrastructure", "text": "

    Delete namespace:

    kubectl delete namespace kpops\n

    Note

    In case kpops destroy is not working, one can uninstall the pipeline services one by one. This is equivalent to running kpops destroy. If a clean uninstall (like the one kpops clean does) is needed, the topics and schemas created by the deployment of the pipeline also have to be deleted, as sketched below.
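
    As a sketch of such a manual cleanup (the pod, topic, and subject names are placeholders that depend on your deployment, and the Schema Registry is assumed to be reachable at http://localhost:8081, e.g. via port-forwarding), topics can be deleted with the Kafka CLI inside the broker pod and schemas via the Schema Registry REST API:

    kubectl exec --namespace kpops k8kafka-cp-kafka-0 -- kafka-topics \\\n    --bootstrap-server localhost:9092 --delete --topic <topic-name>\ncurl -X DELETE http://localhost:8081/subjects/<topic-name>-value\n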

    "}, {"location": "user/getting-started/teardown/#local-cluster", "title": "Local cluster", "text": "

    Delete local cluster:

    k3d cluster delete kpops\n
    "}, {"location": "user/getting-started/teardown/#local-image-registry", "title": "Local image registry", "text": "

    Delete local registry:

    k3d registry delete k3d-kpops-registry.localhost\n
    "}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "

    KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention, the type is the lowercase, kebab-cased name of the class.

    class MyCoolStreamApp(StreamsApp):\n-    type = \"my-cool-stream-app\"\n+    ...\n

    Because of this new convention, producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.

    - producer:\n+ producer-app:\n    app:\n        streams:\n        outputTopic: output_topic\n        extraOutputTopics:\n            output_role1: output_topic1\n            output_role2: output_topic2\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-inputoutput-types", "title": "Refactor input/output types", "text": ""}, {"location": "user/migration-guide/v1-v2/#to-section", "title": "To section", "text": "

    In the to section these have changed:

    • The default type is output
    • If role is set, type is inferred to be extra
    • The type error needs to be defined explicitly
      to:\n    topics:\n      ${pipeline_name}-topic-1:\n-       type: extra\n        role: \"role-1\"\n        ...\n      ${pipeline_name}-topic-2:\n-       type: output\n        ...\n      ${pipeline_name}-topic-3:\n         type: error\n         ...\n
    "}, {"location": "user/migration-guide/v1-v2/#from-section", "title": "From section", "text": "

    In the from section these have changed:

    • The default type is input
    • input-pattern type is replaced by pattern
    • If role is set, type is inferred to be extra
    • If role is set and type is explicitly set to pattern, this is inferred to be the extra-pattern type
      from:\n    topics:\n      ${pipeline_name}-input-topic:\n-       type: input\n        ...\n      ${pipeline_name}-extra-topic:\n-       type: extra\n        role: topic-role\n        ...\n      ${pipeline_name}-input-pattern-topic:\n-       type: input-pattern\n+       type: pattern\n        ...\n      ${pipeline_name}-extra-pattern-topic:\n-       type: extra-pattern\n+       type: pattern\n        role: some-role\n        ...\n
    "}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "

    All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example of a pipeline.yaml below. Notice that the app section here remains untouched.

    ...\ntype: streams-app\n  name: streams-app\n  namespace: namespace\n  app:\n    streams:\n      brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n     autoscaling:\n      consumerGroup: consumer-group\n      lagThreshold: 0\n      enabled: false\n      pollingInterval: 30\n\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: error\n-       keySchema: key-schema\n+       key_schema: key-schema\n-       valueSchema: value-schema\n+       value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs:\n          cleanup.policy: compact\n    models:\n      model: model\n  prefix: ${pipeline_name}-\n- repoConfig:\n+ repo_config:\n-   repositoryName: bakdata-streams-bootstrap\n+   repository_name: bakdata-streams-bootstrap\n    url: https://bakdata.github.io/streams-bootstrap/\n-   repoAuthFlags:\n+   repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.4\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-handling-of-helm-flags", "title": "Refactor handling of Helm flags", "text": "

    If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, the abstract function get_helm_chart that returns the chart for deploying the app using Helm is now a Python property and has been renamed to helm_chart.

    class MyCoolApp(KubernetesApp):\n\n+   @property\n    @override\n-   def get_helm_chart(self) -> str:\n+   def helm_chart(self) -> str:\n        return \"./charts/charts-folder\"\n
    "}, {"location": "user/migration-guide/v1-v2/#plural-broker-field-in-pipeline-config", "title": "Plural broker field in pipeline config", "text": "

    Since you can pass a comma-separated string of broker addresses, the broker field in KPOps is now plural. The pluralization has affected multiple areas:

    "}, {"location": "user/migration-guide/v1-v2/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- broker: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  kafka_connect_host: \"http://localhost:8083\"\n  kafka_rest_host: \"http://localhost:8082\"\n  schema_registry_url: \"http://localhost:8081\"\n
    "}, {"location": "user/migration-guide/v1-v2/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called brokers.

    ...\n  app:\n    streams:\n-     brokers: ${broker}\n+     brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#environment-variable", "title": "Environment variable", "text": "

    If you previously set the environment variable KPOPS_KAFKA_BROKER, you now need to replace it with KPOPS_KAFKA_BROKERS.

    "}, {"location": "user/migration-guide/v2-v3/", "title": "Migrate from V2 to V3", "text": ""}, {"location": "user/migration-guide/v2-v3/#make-kafka-rest-proxy-kafka-connect-hosts-default-and-improve-schema-registry-config", "title": "Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config", "text": "

    The breaking changes target the config.yaml file:

    • The schema_registry_url is replaced with schema_registry.url (default http://localhost:8081) and schema_registry.enabled (default false).

    • kafka_rest_host is renamed to kafka_rest.url (default http://localhost:8082).

    • kafka_connect_host is replaced with kafka_connect.url (default http://localhost:8083).

    • brokers is renamed to kafka_brokers.

    The environment variable names of these config fields changed accordingly. Please refer to the environment variables documentation page to see the newest changes.
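
    As a sketch based on the new defaults (refer to the environment variables page for the authoritative list), the renamed variables map as follows:

    # KPOPS_REST_PROXY_HOST is now\nKPOPS_KAFKA_REST_URL=http://localhost:8082\n# KPOPS_CONNECT_HOST is now\nKPOPS_KAFKA_CONNECT_URL=http://localhost:8083\n# KPOPS_SCHEMA_REGISTRY_URL now has a default value\nKPOPS_SCHEMA_REGISTRY_URL=http://localhost:8081\n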

    "}, {"location": "user/migration-guide/v2-v3/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called kafka_brokers.

    ...\n  app:\n    streams:\n-     brokers: ${brokers}\n+     brokers: ${kafka_brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v2-v3/#move-github-action-to-repsitory-root", "title": "Move GitHub action to repsitory root", "text": "

    The location of the GitHub action has changed, and it's now available directly as bakdata/kpops.

    You'll need to change it in your GitHub CI workflows.

    steps:\n  - name: kpops deploy\n-   uses: bakdata/kpops/actions/kpops-runner@main\n+   uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      # ...\n
    "}, {"location": "user/references/cli-commands/", "title": "CLI Usage", "text": "

    Usage:

    $ kpops [OPTIONS] COMMAND [ARGS]...\n

    Options:

    • -V, --version: Print KPOps version
    • --install-completion: Install completion for the current shell.
    • --show-completion: Show completion for the current shell, to copy it or customize the installation.
    • --help: Show this message and exit.

    Commands:

    • clean: Clean pipeline steps
    • deploy: Deploy pipeline steps
    • destroy: Destroy pipeline steps
    • generate: Enriches pipeline steps with defaults.
    • reset: Reset pipeline steps
    • schema: Generate json schema.
    "}, {"location": "user/references/cli-commands/#kpops-clean", "title": "kpops clean", "text": "

    Clean pipeline steps

    Usage:

    $ kpops clean [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-deploy", "title": "kpops deploy", "text": "

    Deploy pipeline steps

    Usage:

    $ kpops deploy [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-destroy", "title": "kpops destroy", "text": "

    Destroy pipeline steps

    Usage:

    $ kpops destroy [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-generate", "title": "kpops generate", "text": "

    Enriches pipeline steps with defaults. The output is used as input for the deploy/destroy/... commands.

    Usage:

    $ kpops generate [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --template / --no-template: Render component templates, e.g. Kubernetes manifests. [default: no-template]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-reset", "title": "kpops reset", "text": "

    Reset pipeline steps

    Usage:

    $ kpops reset [OPTIONS] PIPELINE_PATH [COMPONENTS_MODULE]\n

    Arguments:

    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --pipeline-base-dir DIRECTORY: Base directory to the pipelines (default is current working directory) [env var: KPOPS_PIPELINE_BASE_DIR; default: .]
    • --defaults DIRECTORY: Path to defaults folder [env var: KPOPS_DEFAULT_PATH]
    • --config FILE: Path to the config.yaml file [env var: KPOPS_CONFIG_PATH; default: config.yaml]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-schema", "title": "kpops schema", "text": "

    Generate json schema.

    The schemas can be used to enable support for kpops files in a text editor.

    Usage:

    $ kpops schema [OPTIONS] SCOPE:{pipeline|config} [COMPONENTS_MODULE]\n

    Arguments:

    • SCOPE:{pipeline|config}: Scope of the generated schema
      pipeline: Schema of PipelineComponents. Includes the built-in kpops components by default. To include custom components, provide [COMPONENTS_MODULES].\n\nconfig: Schema of KpopsConfig.  [required]\n
      • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components

    Options:

    • --include-stock-components / --no-include-stock-components: Include the built-in KPOps components. [default: include-stock-components]
    • --help: Show this message and exit.
    "}, {"location": "user/references/editor-integration/", "title": "Editor integration", "text": "

    KPOps provides JSON schemas that enable autocompletion and validation for some of the files that the user must work with.

    "}, {"location": "user/references/editor-integration/#supported-files", "title": "Supported files", "text": "
    • pipeline.yaml
    • config.yaml
    "}, {"location": "user/references/editor-integration/#usage", "title": "Usage", "text": "
    1. Install the yaml-language-server in your editor of choice. (requires LSP support)
    2. Configure the extension with the settings below.
    settings.json
    {\n    \"yaml.schemas\": {\n        \"https://bakdata.github.io/kpops/2.0/schema/config.json\": \"config.yaml\",\n        \"https://bakdata.github.io/kpops/2.0/schema/pipeline.json\": \"pipeline.yaml\"\n    }\n}\n

    Advanced usage

    It is possible to generate schemas with the kpops schema command. This is useful when using custom components or a pre-release version of KPOps.
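
    For example, assuming the generated schema is written to stdout, it could be redirected into files that the yaml-language-server can reference locally:

    kpops schema pipeline > pipeline.json\nkpops schema config > config.json\n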

    "}, {"location": "user/references/ci-integration/github-actions/", "title": "GitHub Actions integration", "text": "

    We provide a GitHub composite action bakdata/kpops that installs and executes KPOps commands with the given parameters.

    "}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines pipeline-base-dir \u274c - string directory where relative pipeline variables are initialized from defaults \u274c - string defaults folder path config \u274c - string config.yaml file path components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
    steps:\n  # ...\n  # This step is useful for debugging reasons\n  - name: Generate Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: generate\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n  # ...\n

    It is possible to use a pre-release KPOps version from TestPyPI.

    steps:\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.5.dev20230707132709 -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/\n
    "}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml index a052720e3..638461580 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -2,157 +2,162 @@ https://bakdata.github.io/kpops/dev/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/developer/auto-generation/ - 2023-10-25 + 2023-10-26 daily - https://bakdata.github.io/kpops/dev/developer/style/ - 2023-10-25 + https://bakdata.github.io/kpops/dev/developer/formatting/ + 2023-10-26 daily https://bakdata.github.io/kpops/dev/resources/architecture/components-hierarchy/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/resources/examples/defaults/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/resources/examples/pipeline/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/resources/pipeline-components/pipeline/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/resources/pipeline-defaults/defaults/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/resources/variables/cli_env_vars/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/resources/variables/config_env_vars/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/what-is-kpops/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/config/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/defaults/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kafka-app/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kafka-connector/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kafka-sink-connector/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kafka-source-connector/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kubernetes-app/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/overview/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/producer-app/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/streams-app/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/variables/environment_variables/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/core-concepts/variables/substitution/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/examples/atm-fraud-pipeline/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/getting-started/quick-start/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/getting-started/setup/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/getting-started/teardown/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/migration-guide/v1-v2/ - 2023-10-25 + 2023-10-26 + daily + + + https://bakdata.github.io/kpops/dev/user/migration-guide/v2-v3/ + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/references/cli-commands/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/references/editor-integration/ - 2023-10-25 + 2023-10-26 daily https://bakdata.github.io/kpops/dev/user/references/ci-integration/github-actions/ - 2023-10-25 + 2023-10-26 daily \ No 
newline at end of file diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index 6855d56861eb40e111f27cabe7172e224af450e6..f1c8b91b569e8e5754735815fa2128ca84cad45c 100644 GIT binary patch literal 563 zcmV-30?hp%iwFn`Q#xe=|8r?{Wo=<_E_iKh0Nt3)j@uv*$M5?Tk?)u!D^+ckcyIdz z?Kzsj*haR2g~8r^`(k@HN!6Zus4{EGvKc<~`ws(RI6S=${bs}vjkCvMS8j_9TIIB{ z&9V6T<5_$yo~nmK3VlS{G8)1Gd@NmPWK?Y~MEP!%+wxmcJ#6wb z53v3%&!Km!nn#Iwm@{-52+GD#o6eE90W#GTlkC+44miqQN1~#9Z%W9k>1(bG)x4fm^b^W1rjET)qMZF%7uBYT7{lI{LK!V z$MXEEzb~6)2XzMmyyq_>*Vue`aDl&+Xl(bX^ekKKCsCBn7%UP-?#ssI)NnL7an|62 z(@A0AJ{OS!!oV}6t_P=lVlu(mAxk=4t$g*wbdF>w-GAP{$hf3$K!d0QBrSL(K?{9@ z%WZ{-1c6Y?mt<78mKf4Uq~sZL6GG5#;t=kGY1S(_(Ilf$jyo~#IdbpC_{e}eV2uH- zTJ!t)s$~GHBQJ8)CLchBv4jnW3*A3FZ+>H6y9iKo=|4=x^2vK!^$R&Y0Oeg6001y7 B7xn-E literal 554 zcmV+_0@eK=iwFoN*f?bZ|8r?{Wo=<_E_iKh0Nq%@j@uv*z3*2uH(_Ccyzb7cMlKO`965-;8=wd|j7MtMtqh zEPt10-#b&zvv8W`1YHFJMMKiYQ$bF5-g(v)j(V?W&-;PEGPtVTZFZm3cB6Kmlgpd! z&ds!78>|Dqloe2a-s?Ndf=FK>{VTZiV(&Z#4N;tG&|&~dfy@Oq%elwL1{;uF3M z`HT<
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1188,6 +1188,8 @@ + + @@ -1238,6 +1240,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1440,11 +1462,11 @@
  • - + - Style + Formatting @@ -1561,7 +1583,7 @@

    KafkaApp

    -

    Subclass of KubernetesApp.

    +

    Subclass of HelmApp.

    Usage

    @@ -1440,11 +1462,11 @@
  • - + - Style + Formatting diff --git a/dev/user/core-concepts/components/kafka-source-connector/index.html b/dev/user/core-concepts/components/kafka-source-connector/index.html index 2bb7a7e66..ddfdeee20 100644 --- a/dev/user/core-concepts/components/kafka-source-connector/index.html +++ b/dev/user/core-concepts/components/kafka-source-connector/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1188,6 +1188,8 @@ + + @@ -1238,6 +1240,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1440,11 +1462,11 @@
  • - + - Style + Formatting diff --git a/dev/user/core-concepts/components/kubernetes-app/index.html b/dev/user/core-concepts/components/kubernetes-app/index.html index 16d087a7a..0a97d1800 100644 --- a/dev/user/core-concepts/components/kubernetes-app/index.html +++ b/dev/user/core-concepts/components/kubernetes-app/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1188,6 +1188,8 @@ + + @@ -1238,6 +1240,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1440,11 +1462,11 @@
  • - + - Style + Formatting @@ -1562,7 +1584,7 @@

    KubernetesApp

    Usage

    -

    Can be used to deploy any app in Kubernetes using Helm, for example, a REST service that serves Kafka data.

    +

    Can be used to create components for any Kubernetes app.

    Configuration

    @@ -1619,18 +1641,7 @@

    Configuration49 50 51 -52 -53 -54 -55 -56 -57 -58 -59 -60 -61 -62 -63
    # Base Kubernetes App
    +52
    # Base Kubernetes App
     - type: kubernetes-app
       name: kubernetes-app # required
       # Pipeline prefix that will prefix every component name. If you wish to not
    @@ -1682,26 +1693,15 @@ 

    Configuration image: exampleImage # Example debug: false # Example commandLine: {} # Example - # Helm repository configuration (optional) - # If not set the helm repo add will not be called. Useful when using local Helm charts - repo_config: - repository_name: bakdata-streams-bootstrap # required - url: https://bakdata.github.io/streams-bootstrap/ # required - repo_auth_flags: - username: user - password: pass - ca_file: /home/user/path/to/ca-file - insecure_skip_tls_verify: false - version: "1.0.0" # Helm chart version

    Operations

    deploy

    -

    Deploy using Helm.

    +

    Do nothing.

    destroy

    -

    Uninstall Helm release.

    +

    Do nothing.

    reset

    Do nothing.

    clean

    diff --git a/dev/user/core-concepts/components/overview/index.html b/dev/user/core-concepts/components/overview/index.html index 8936a16a9..75941b9cb 100644 --- a/dev/user/core-concepts/components/overview/index.html +++ b/dev/user/core-concepts/components/overview/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1103,6 +1103,8 @@ + + @@ -1153,6 +1155,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1355,11 +1377,11 @@
  • - + - Style + Formatting @@ -1417,20 +1439,22 @@

    Overviewpipeline.yaml.

    flowchart BT
         KubernetesApp --> PipelineComponent
    -    KafkaConnector --> PipelineComponent
    -    KafkaApp --> KubernetesApp
    +    HelmApp --> KubernetesApp
    +    KafkaApp --> HelmApp
         StreamsApp --> KafkaApp
         ProducerApp --> KafkaApp
    +    KafkaConnector --> PipelineComponent
         KafkaSourceConnector --> KafkaConnector
         KafkaSinkConnector --> KafkaConnector
    -    
    -    click KubernetesApp "../kubernetes-app"
    -    click KafkaApp "../kafka-app"
    -    click StreamsApp "../streams-app"
    -    click ProducerApp "../producer-app"
    -    click KafkaConnector "../kafka-connector"
    -    click KafkaSourceConnector "../kafka-source-connector"
    -    click KafkaSinkConnector "../kafka-sink-connector"
    + + click KubernetesApp "/kpops/user/core-concepts/components/kubernetes-app" + click HelmApp "/kpops/user/core-concepts/components/helm-app" + click KafkaApp "/kpops/user/core-concepts/components/kafka-app" + click StreamsApp "/kpops/user/core-concepts/components/streams-app" + click ProducerApp "/kpops/user/core-concepts/components/producer-app" + click KafkaConnector "/kpops/user/core-concepts/components/kafka-connector" + click KafkaSourceConnector "/kpops/user/core-concepts/components/kafka-source-connector" + click KafkaSinkConnector "/kpops/user/core-concepts/components/kafka-sink-connector"

    KPOps component hierarchy

    diff --git a/dev/user/core-concepts/components/producer-app/index.html b/dev/user/core-concepts/components/producer-app/index.html index 6616d2aab..257d1691f 100644 --- a/dev/user/core-concepts/components/producer-app/index.html +++ b/dev/user/core-concepts/components/producer-app/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1188,6 +1188,8 @@ + + @@ -1238,6 +1240,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1440,11 +1462,11 @@
  • - + - Style + Formatting @@ -1654,7 +1676,7 @@

    Configuration # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app app: # required streams: # required, producer-app-specific - brokers: ${brokers} # required + brokers: ${kafka_brokers} # required schemaRegistryUrl: ${schema_registry_url} outputTopic: output_topic extraOutputTopics: diff --git a/dev/user/core-concepts/components/streams-app/index.html b/dev/user/core-concepts/components/streams-app/index.html index dca7adad4..038989406 100644 --- a/dev/user/core-concepts/components/streams-app/index.html +++ b/dev/user/core-concepts/components/streams-app/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1188,6 +1188,8 @@ + + @@ -1238,6 +1240,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1440,11 +1462,11 @@
  • - + - Style + Formatting @@ -1737,7 +1759,7 @@

    Configuration app: # required # Streams Bootstrap streams section streams: # required, streams-app-specific - brokers: ${brokers} # required + brokers: ${kafka_brokers} # required schemaRegistryUrl: ${schema_registry_url} inputTopics: - topic1 diff --git a/dev/user/core-concepts/config/index.html b/dev/user/core-concepts/config/index.html index 28f37266e..68101814a 100644 --- a/dev/user/core-concepts/config/index.html +++ b/dev/user/core-concepts/config/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1101,6 +1101,8 @@ + + @@ -1151,6 +1153,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1353,11 +1375,11 @@
  • - + - Style + Formatting diff --git a/dev/user/core-concepts/defaults/index.html b/dev/user/core-concepts/defaults/index.html index 96caeeea9..844d36701 100644 --- a/dev/user/core-concepts/defaults/index.html +++ b/dev/user/core-concepts/defaults/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1220,6 +1220,8 @@ + + @@ -1270,6 +1272,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1472,11 +1494,11 @@
  • - + - Style + Formatting @@ -1707,20 +1729,9 @@

    KubernetesApp 51 52 53 -54 -55 -56 -57 -58 -59 -60 -61 -62 -63 -64 -65
    # Base Kubernetes App
    +54
    # Base Kubernetes App
     #
    -# Parent of: KafkaApp
    +# Parent of: HelmApp
     # Child of: PipelineComponent
     kubernetes-app:
       # Pipeline prefix that will prefix every component name. If you wish to not
    @@ -1772,17 +1783,6 @@ 

    KubernetesApp image: exampleImage # Example debug: false # Example commandLine: {} # Example - # Helm repository configuration (optional) - # If not set the helm repo add will not be called. Useful when using local Helm charts - repo_config: - repository_name: bakdata-streams-bootstrap # required - url: https://bakdata.github.io/streams-bootstrap/ # required - repo_auth_flags: - username: user - password: pass - ca_file: /home/user/path/to/ca-file - insecure_skip_tls_verify: false - version: "1.0.0" # Helm chart version

    @@ -1814,7 +1814,7 @@

    KafkaApp # add the key-value pairs they need. app: # required streams: # required - brokers: ${brokers} # required + brokers: ${kafka_brokers} # required schemaRegistryUrl: ${schema_registry_url} nameOverride: override-with-this-name # kafka-app-specific imageTag: "1.0.0" # Example values that are shared between streams-app and producer-app @@ -1899,7 +1899,7 @@

    StreamsApp app: # required # Streams Bootstrap streams section streams: # required, streams-app-specific - brokers: ${brokers} # required + brokers: ${kafka_brokers} # required schemaRegistryUrl: ${schema_registry_url} inputTopics: - topic1 diff --git a/dev/user/core-concepts/variables/environment_variables/index.html b/dev/user/core-concepts/variables/environment_variables/index.html index eff20c362..8ae5e6fe7 100644 --- a/dev/user/core-concepts/variables/environment_variables/index.html +++ b/dev/user/core-concepts/variables/environment_variables/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1147,6 +1147,8 @@ + + @@ -1197,6 +1199,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1399,11 +1421,11 @@
  • - + - Style + Formatting @@ -1516,28 +1538,28 @@

    ConfigConfig# Suffix your environment files with this value (e.g. # defaults_development.yaml for environment=development). KPOPS_ENVIRONMENT # No default value, required -# brokers +# kafka_brokers # The comma separated Kafka brokers address. KPOPS_KAFKA_BROKERS # No default value, required -# schema_registry_url +# url # Address of the Schema Registry. -KPOPS_SCHEMA_REGISTRY_URL # No default value, not required -# kafka_rest_host +KPOPS_SCHEMA_REGISTRY_URL=http://localhost:8081 +# url # Address of the Kafka REST Proxy. -KPOPS_REST_PROXY_HOST # No default value, not required -# kafka_connect_host +KPOPS_KAFKA_REST_URL=http://localhost:8082 +# url # Address of Kafka Connect. -KPOPS_CONNECT_HOST # No default value, not required +KPOPS_KAFKA_CONNECT_URL=http://localhost:8083 # timeout # The timeout in seconds that specifies when actions like deletion or # deploy timeout. diff --git a/dev/user/core-concepts/variables/substitution/index.html b/dev/user/core-concepts/variables/substitution/index.html index 59f061613..0a3fec0b8 100644 --- a/dev/user/core-concepts/variables/substitution/index.html +++ b/dev/user/core-concepts/variables/substitution/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1168,6 +1168,8 @@ + + @@ -1218,6 +1220,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1420,11 +1442,11 @@
  • - + - Style + Formatting diff --git a/dev/user/examples/atm-fraud-pipeline/index.html b/dev/user/examples/atm-fraud-pipeline/index.html index b3e816138..c6cc3c7ab 100644 --- a/dev/user/examples/atm-fraud-pipeline/index.html +++ b/dev/user/examples/atm-fraud-pipeline/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1233,6 +1233,8 @@ + + @@ -1283,6 +1285,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1485,11 +1507,11 @@
  • - + - Style + Formatting diff --git a/dev/user/getting-started/quick-start/index.html b/dev/user/getting-started/quick-start/index.html index ce6ee717b..d2a4625c0 100644 --- a/dev/user/getting-started/quick-start/index.html +++ b/dev/user/getting-started/quick-start/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1240,6 +1240,8 @@ + + @@ -1290,6 +1292,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1492,11 +1514,11 @@
  • - + - Style + Formatting diff --git a/dev/user/getting-started/setup/index.html b/dev/user/getting-started/setup/index.html index f1e4f7f36..33abfe672 100644 --- a/dev/user/getting-started/setup/index.html +++ b/dev/user/getting-started/setup/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1173,6 +1173,8 @@ + + @@ -1223,6 +1225,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1425,11 +1447,11 @@
  • - + - Style + Formatting diff --git a/dev/user/getting-started/teardown/index.html b/dev/user/getting-started/teardown/index.html index e9a09ea80..8a57f0df9 100644 --- a/dev/user/getting-started/teardown/index.html +++ b/dev/user/getting-started/teardown/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1166,6 +1166,8 @@ + + @@ -1216,6 +1218,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1418,11 +1440,11 @@
  • - + - Style + Formatting diff --git a/dev/user/migration-guide/v1-v2/index.html b/dev/user/migration-guide/v1-v2/index.html index 332e59c9f..4cd6fa910 100644 --- a/dev/user/migration-guide/v1-v2/index.html +++ b/dev/user/migration-guide/v1-v2/index.html @@ -14,7 +14,7 @@ - + @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1093,6 +1093,8 @@ + + @@ -1265,6 +1267,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1467,11 +1489,11 @@
  • - + - Style + Formatting @@ -1636,7 +1658,7 @@

    app: streams: outputTopic: output_topic - extraOutputTopics: + extraOutputTopics: output_role1: output_topic1 output_role2: output_topic2 @@ -1666,10 +1688,10 @@

    To section role: "role-1" ... ${pipeline_name}-topic-2: -- type: output +- type: output ... ${pipeline_name}-topic-3: - type: error + type: error ...

    From section

    @@ -1707,11 +1729,11 @@

    From section role: topic-role ... ${pipeline_name}-input-pattern-topic: -- type: input-pattern +- type: input-pattern + type: pattern ... ${pipeline_name}-extra-pattern-topic: -- type: extra-pattern +- type: extra-pattern + type: pattern role: some-role ... @@ -1762,15 +1784,15 @@

    42
    ...
     type: streams-app
       name: streams-app
    -  namespace: namespace 
    -  app: 
    +  namespace: namespace
    +  app:
         streams:
    -      brokers: ${brokers} 
    +      brokers: ${brokers}
           schemaRegistryUrl: ${schema_registry_url}
          autoscaling:
           consumerGroup: consumer-group
           lagThreshold: 0
    -      enabled: false 
    +      enabled: false
           pollingInterval: 30
     
       to:
    @@ -1793,8 +1815,8 @@ 

    - repositoryName: bakdata-streams-bootstrap + repository_name: bakdata-streams-bootstrap url: https://bakdata.github.io/streams-bootstrap/ -- repoAuthFlags: -+ repo_auth_flags: +- repoAuthFlags: ++ repo_auth_flags: username: user password: pass ca_file: /home/user/path/to/ca-file @@ -1829,9 +1851,9 @@

    config.yaml6

      environment: development
     - broker: "http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092"
     + brokers: "http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092"
    - kafka_connect_host: "http://localhost:8083"
    - kafka_rest_host: "http://localhost:8082"
    - schema_registry_url: "http://localhost:8081"
    +  kafka_connect_host: "http://localhost:8083"
    +  kafka_rest_host: "http://localhost:8082"
    +  schema_registry_url: "http://localhost:8081"
     

    pipeline.yaml and default.yaml

    The variable is now called brokers.

    diff --git a/dev/user/migration-guide/v2-v3/index.html b/dev/user/migration-guide/v2-v3/index.html new file mode 100644 index 000000000..55294f8ba --- /dev/null +++ b/dev/user/migration-guide/v2-v3/index.html @@ -0,0 +1,1680 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Migrate from v2 to v3 - KPOps + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Migrate from v2 to v3

    +

    Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config

    +

    The breaking changes target the config.yaml file:

    +
      +
    • +

      The schema_registry_url is replaced with schema_registry.url (default http://localhost:8081) and schema_registry.enabled (default false).

      +
    • +
    • +

      kafka_rest_host is renamed to kafka_rest.url (default http://localhost:8082).

      +
    • +
    • +

      kafka_connect_host is replaced with kafka_connect.url (default http://localhost:8083).

      +
    • +
    • +

      brokers is renamed to kafka_brokers.

      +
    • +
    +

    The environment variable names of these config fields have changed accordingly. Please refer to the environment variables documentation page for the new names and defaults.

    +

    config.yaml

    +
      environment: development
    +- brokers: "http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092"
    +- kafka_rest_host: "http://my-custom-rest.url:8082"
    +- kafka_connect_host: "http://my-custom-connect.url:8083"
    +- schema_registry_url: "http://my-custom-sr.url:8081"
    ++ kafka_brokers: "http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092"
    ++ kafka_rest:
    ++   url: "http://my-custom-rest.url:8082"
    ++ kafka_connect:
    ++   url: "http://my-custom-connect.url:8083"
    ++ schema_registry:
    ++   enabled: true
    ++   url: "http://my-custom-sr.url:8081"
    +
    +

    pipeline.yaml and default.yaml

    +

    The variable is now called kafka_brokers.

    +
    ...
    +  app:
    +    streams:
    +-     brokers: ${brokers}
    ++     brokers: ${kafka_brokers}
    +      schemaRegistryUrl: ${schema_registry_url}
    +    nameOverride: override-with-this-name
    +    imageTag: "1.0.0"
    +...
    +
    +
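    After the substitution variable is renamed, the streams section of a component reads roughly as follows (a sketch; nameOverride and imageTag are illustrative values carried over from the example above):
    app:
      streams:
        brokers: ${kafka_brokers}
        schemaRegistryUrl: ${schema_registry_url}
      nameOverride: override-with-this-name
      imageTag: "1.0.0"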

    Move GitHub action to repository root

    +

    The location of the GitHub action has changed, and it's now available directly as bakdata/kpops.

    +

    You'll need to change it in your GitHub CI workflows.

    +
    steps:
    +  - name: kpops deploy
    +-   uses: bakdata/kpops/actions/kpops-runner@main
    ++   uses: bakdata/kpops@main
    +    with:
    +      command: deploy --execute
    +      # ...
    +
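    Put together, a migrated workflow step might look like this sketch (working-directory and kpops-version reuse the values from the usage example further down; adjust them to your setup):
    steps:
      - name: Deploy Kafka pipeline
        uses: bakdata/kpops@main
        with:
          command: deploy --execute
          working-directory: home/my-kpops-root-dir
          kpops-version: 1.2.3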
    + + + + + + + + + + \ No newline at end of file diff --git a/dev/user/references/ci-integration/github-actions/index.html b/dev/user/references/ci-integration/github-actions/index.html index 17a5da69a..12d5ec057 100644 --- a/dev/user/references/ci-integration/github-actions/index.html +++ b/dev/user/references/ci-integration/github-actions/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1091,6 +1091,8 @@ + + @@ -1141,6 +1143,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1399,11 +1421,11 @@
  • - + - Style + Formatting @@ -1479,9 +1501,7 @@

    GitHub Actions integration

    -

    We provided a GitHub composite action called -kpops-runner -that installs all the necessary dependencies and runs KPOps commands with the given parameters.

    +

    We provide a GitHub composite action, bakdata/kpops, that installs and executes KPOps commands with the given parameters.

    Input Parameters

    @@ -1603,7 +1623,7 @@

    Usage& # ... # This step is useful for debugging reasons - name: Generate Kafka pipeline - uses: bakdata/kpops/actions/kpops-runner@main + uses: bakdata/kpops@main with: command: generate working-directory: home/my-kpops-root-dir @@ -1611,7 +1631,7 @@

    Usage& kpops-version: 1.2.3 - name: Deploy Kafka pipeline - uses: bakdata/kpops/actions/kpops-runner@main + uses: bakdata/kpops@main with: command: deploy --execute working-directory: home/my-kpops-root-dir @@ -1619,8 +1639,7 @@

    Usage& kpops-version: 1.2.3 # ...

    -

    It is possible to execute the KPOps runner on -a dev version from the test.pypi.

    +

    It is possible to use a pre-release KPOps version from TestPyPI.

    @@ -1630,7 +1649,7 @@ 

    Usage& 7 8

    steps:
       - name: Deploy Kafka pipeline
    -    uses: bakdata/kpops/actions/kpops-runner@main
    +    uses: bakdata/kpops@main
         with:
           command: deploy --execute
           working-directory: home/my-kpops-root-dir
    diff --git a/dev/user/references/cli-commands/index.html b/dev/user/references/cli-commands/index.html
    index 015f7d066..1bc0d1362 100644
    --- a/dev/user/references/cli-commands/index.html
    +++ b/dev/user/references/cli-commands/index.html
    @@ -11,7 +11,7 @@
             
           
           
    -        
    +        
           
           
             
    @@ -179,7 +179,7 @@
         
       
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1091,6 +1091,8 @@ + + @@ -1141,6 +1143,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1425,11 +1447,11 @@
  • - + - Style + Formatting @@ -1630,7 +1652,7 @@

    kpops generatekpops schema2 3

  • pipeline: Schema of PipelineComponents. Includes the built-in kpops components by default. To include custom components, provide [COMPONENTS_MODULES].
     
    -config: Schema of PipelineConfig.  [required]
    +config: Schema of KpopsConfig.  [required]
     
    • [COMPONENTS_MODULE]: Custom Python module containing your project-specific components
    • diff --git a/dev/user/references/editor-integration/index.html b/dev/user/references/editor-integration/index.html index f1dc9500d..72447cee3 100644 --- a/dev/user/references/editor-integration/index.html +++ b/dev/user/references/editor-integration/index.html @@ -179,7 +179,7 @@
      - KPOps + GitHub
      @@ -298,7 +298,7 @@
      - KPOps + GitHub
      @@ -1091,6 +1091,8 @@ + + @@ -1141,6 +1143,26 @@ + + + + + +
    • + + + + + Migrate from v2 to v3 + + + + +
    • + + + +
    @@ -1397,11 +1419,11 @@
  • - + - Style + Formatting diff --git a/dev/user/what-is-kpops/index.html b/dev/user/what-is-kpops/index.html index 948ec6076..b5e88e13c 100644 --- a/dev/user/what-is-kpops/index.html +++ b/dev/user/what-is-kpops/index.html @@ -179,7 +179,7 @@
    - KPOps + GitHub
    @@ -298,7 +298,7 @@
    - KPOps + GitHub
    @@ -1143,6 +1143,8 @@ + + @@ -1193,6 +1195,26 @@ + + + + + +
  • + + + + + Migrate from v2 to v3 + + + + +
  • + + + + @@ -1395,11 +1417,11 @@
  • - + - Style + Formatting