From 12a8c35ca70dfdac925ecca17098e6c3511d9d6f Mon Sep 17 00:00:00 2001 From: bakdata-bot Date: Thu, 6 Jun 2024 15:32:15 +0000 Subject: [PATCH] Deployed c19f4ea to main with MkDocs 1.5.3 and mike 1.1.2 --- main/404.html | 22 + main/developer/auto-generation/index.html | 22 + main/developer/contributing/index.html | 22 + main/developer/getting-started/index.html | 22 + main/index.html | 22 + .../components-hierarchy/index.html | 22 + main/resources/examples/defaults/index.html | 22 + main/resources/examples/pipeline/index.html | 22 + .../pipeline-components/pipeline/index.html | 22 + main/resources/pipeline-config/config.yaml | 2 - .../pipeline-defaults/defaults/index.html | 22 + main/resources/variables/cli_env_vars.env | 5 +- .../variables/cli_env_vars/index.html | 26 +- main/resources/variables/config_env_vars.env | 4 - .../variables/config_env_vars/index.html | 29 +- .../variables/variable_substitution.yaml | 2 - main/schema/config.json | 6 - main/search/search_index.json | 2 +- main/sitemap.xml | 81 +- main/sitemap.xml.gz | Bin 601 -> 607 bytes main/user/changelog/index.html | 295 ++- .../components/helm-app/index.html | 22 + .../components/kafka-app/index.html | 22 + .../components/kafka-connector/index.html | 22 + .../kafka-sink-connector/index.html | 22 + .../kafka-source-connector/index.html | 22 + .../components/kubernetes-app/index.html | 22 + .../components/overview/index.html | 22 + .../components/producer-app/index.html | 22 + .../components/streams-app/index.html | 22 + .../components/streams-bootstrap/index.html | 22 + main/user/core-concepts/config/index.html | 114 +- main/user/core-concepts/defaults/index.html | 30 +- .../environment_variables/index.html | 149 +- .../variables/substitution/index.html | 86 +- .../examples/atm-fraud-pipeline/index.html | 22 + .../getting-started/quick-start/index.html | 22 + main/user/getting-started/setup/index.html | 22 + main/user/getting-started/teardown/index.html | 22 + main/user/migration-guide/v1-v2/index.html 
| 22 + main/user/migration-guide/v2-v3/index.html | 22 + main/user/migration-guide/v3-v4/index.html | 22 + main/user/migration-guide/v4-v5/index.html | 24 +- main/user/migration-guide/v5-v6/index.html | 1774 +++++++++++++++++ .../ci-integration/github-actions/index.html | 22 + main/user/references/cli-commands/index.html | 50 +- .../references/editor-integration/index.html | 22 + main/user/what-is-kpops/index.html | 22 + 48 files changed, 3003 insertions(+), 336 deletions(-) create mode 100644 main/user/migration-guide/v5-v6/index.html diff --git a/main/404.html b/main/404.html index 48384e2fc..dcd327c08 100644 --- a/main/404.html +++ b/main/404.html @@ -1135,6 +1135,8 @@ + + @@ -1245,6 +1247,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/developer/auto-generation/index.html b/main/developer/auto-generation/index.html index 759214db7..b0285f193 100644 --- a/main/developer/auto-generation/index.html +++ b/main/developer/auto-generation/index.html @@ -1157,6 +1157,8 @@ + + @@ -1267,6 +1269,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/developer/contributing/index.html b/main/developer/contributing/index.html index f7617613d..6a0f3cd7b 100644 --- a/main/developer/contributing/index.html +++ b/main/developer/contributing/index.html @@ -1159,6 +1159,8 @@ + + @@ -1269,6 +1271,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/developer/getting-started/index.html b/main/developer/getting-started/index.html index e7b13f3c6..8b0ab7ee6 100644 --- a/main/developer/getting-started/index.html +++ b/main/developer/getting-started/index.html @@ -1159,6 +1159,8 @@ + + @@ -1269,6 +1271,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/index.html b/main/index.html index c4624156e..9768b8cbd 100644 --- a/main/index.html +++ b/main/index.html @@ -1213,6 +1213,8 @@

    KPOps Documentation

    + + @@ -1323,6 +1325,26 @@

    KPOps Documentation

    + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/resources/architecture/components-hierarchy/index.html b/main/resources/architecture/components-hierarchy/index.html index 11c3d34af..02deec18d 100644 --- a/main/resources/architecture/components-hierarchy/index.html +++ b/main/resources/architecture/components-hierarchy/index.html @@ -1148,6 +1148,8 @@ + + @@ -1258,6 +1260,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/resources/examples/defaults/index.html b/main/resources/examples/defaults/index.html index cecf29732..6b052f05b 100644 --- a/main/resources/examples/defaults/index.html +++ b/main/resources/examples/defaults/index.html @@ -1153,6 +1153,8 @@ + + @@ -1263,6 +1265,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/resources/examples/pipeline/index.html b/main/resources/examples/pipeline/index.html index 21ead4e4f..a10060b16 100644 --- a/main/resources/examples/pipeline/index.html +++ b/main/resources/examples/pipeline/index.html @@ -1153,6 +1153,8 @@ + + @@ -1263,6 +1265,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/resources/pipeline-components/pipeline/index.html b/main/resources/pipeline-components/pipeline/index.html index 82bedb59d..d399f518b 100644 --- a/main/resources/pipeline-components/pipeline/index.html +++ b/main/resources/pipeline-components/pipeline/index.html @@ -1153,6 +1153,8 @@ + + @@ -1263,6 +1265,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/resources/pipeline-config/config.yaml b/main/resources/pipeline-config/config.yaml index ead12b785..862a49ac0 100644 --- a/main/resources/pipeline-config/config.yaml +++ b/main/resources/pipeline-config/config.yaml @@ -7,8 +7,6 @@ pipeline_base_dir: . # The Kafka brokers address. # REQUIRED kafka_brokers: "http://broker1:9092,http://broker2:9092" -# The name of the defaults file and the prefix of the defaults environment file. -defaults_filename_prefix: defaults # Configure the topic name variables you can use in the pipeline definition. topic_name_config: # Configures the value for the variable ${output_topic_name} diff --git a/main/resources/pipeline-defaults/defaults/index.html b/main/resources/pipeline-defaults/defaults/index.html index 81afe0190..6414f647a 100644 --- a/main/resources/pipeline-defaults/defaults/index.html +++ b/main/resources/pipeline-defaults/defaults/index.html @@ -1153,6 +1153,8 @@ + + @@ -1263,6 +1265,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + diff --git a/main/resources/variables/cli_env_vars.env b/main/resources/variables/cli_env_vars.env index c51e2ee85..21436ded7 100644 --- a/main/resources/variables/cli_env_vars.env +++ b/main/resources/variables/cli_env_vars.env @@ -14,7 +14,8 @@ KPOPS_DOTENV_PATH # No default value, not required # Suffix your environment files with this value (e.g. # defaults_development.yaml for environment=development). KPOPS_ENVIRONMENT # No default value, not required -# Path to YAML with pipeline definition -KPOPS_PIPELINE_PATH # No default value, required +# Paths to dir containing 'pipeline.yaml' or files named +# 'pipeline.yaml'. +KPOPS_PIPELINE_PATHS # No default value, required # Comma separated list of steps to apply the command on KPOPS_PIPELINE_STEPS # No default value, not required diff --git a/main/resources/variables/cli_env_vars/index.html b/main/resources/variables/cli_env_vars/index.html index 3d77c2d50..f1b7352cf 100644 --- a/main/resources/variables/cli_env_vars/index.html +++ b/main/resources/variables/cli_env_vars/index.html @@ -1148,6 +1148,8 @@ + + @@ -1258,6 +1260,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + @@ -1620,10 +1642,10 @@

    Cli env vars

    The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). -KPOPS_PIPELINE_PATH +KPOPS_PIPELINE_PATHS True -Path to YAML with pipeline definition +Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. KPOPS_PIPELINE_STEPS diff --git a/main/resources/variables/config_env_vars.env b/main/resources/variables/config_env_vars.env index 12195fe9d..c4b4050e8 100644 --- a/main/resources/variables/config_env_vars.env +++ b/main/resources/variables/config_env_vars.env @@ -14,10 +14,6 @@ KPOPS_PIPELINE_BASE_DIR=. # kafka_brokers # The comma separated Kafka brokers address. KPOPS_KAFKA_BROKERS # No default value, required -# defaults_filename_prefix -# The name of the defaults file and the prefix of the defaults -# environment file. -KPOPS_DEFAULTS_FILENAME_PREFIX=defaults # topic_name_config.default_output_topic_name # Configures the value for the variable ${output_topic_name} KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name} diff --git a/main/resources/variables/config_env_vars/index.html b/main/resources/variables/config_env_vars/index.html index c17228b18..e42a83df5 100644 --- a/main/resources/variables/config_env_vars/index.html +++ b/main/resources/variables/config_env_vars/index.html @@ -1148,6 +1148,8 @@ + + @@ -1258,6 +1260,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + @@ -1624,13 +1646,6 @@

    Config env vars

    kafka_brokers -KPOPS_DEFAULTS_FILENAME_PREFIX -defaults -False -The name of the defaults file and the prefix of the defaults environment file. -defaults_filename_prefix - - KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME ${pipeline.name}-${component.name} False diff --git a/main/resources/variables/variable_substitution.yaml b/main/resources/variables/variable_substitution.yaml index 4bdf51685..8a4cf60ea 100644 --- a/main/resources/variables/variable_substitution.yaml +++ b/main/resources/variables/variable_substitution.yaml @@ -4,8 +4,6 @@ app_type: "${component.type}" app_name: "${component.name}" app_schedule: "${component.app.schedule}" - helm_release_name: ${component.helm_release_name} - helm_name_override: ${component.helm_name_override} commandLine: FAKE_ARG: "fake-arg-value" schedule: "30 3/8 * * *" diff --git a/main/schema/config.json b/main/schema/config.json index f2a070842..47aab93b1 100644 --- a/main/schema/config.json +++ b/main/schema/config.json @@ -196,12 +196,6 @@ "title": "Create Namespace", "type": "boolean" }, - "defaults_filename_prefix": { - "default": "defaults", - "description": "The name of the defaults file and the prefix of the defaults environment file.", - "title": "Defaults Filename Prefix", - "type": "string" - }, "helm_config": { "allOf": [ { diff --git a/main/search/search_index.json b/main/search/search_index.json index 19abe6f9a..6095a0713 100644 --- a/main/search/search_index.json +++ b/main/search/search_index.json @@ -1 +1 @@ -{"config": {"lang": ["en"], "separator": "[\\s\\-]+", "pipeline": ["stopWordFilter"]}, "docs": [{"location": "developer/auto-generation/", "title": "Auto generation", "text": "

    Auto generation happens mostly with pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto generate code for the documentation.

    "}, {"location": "developer/auto-generation/#generation-scripts-and-their-respective-files", "title": "Generation scripts and their respective files", "text": ""}, {"location": "developer/auto-generation/#documentation", "title": "Documentation", "text": ""}, {"location": "developer/auto-generation/#variables", "title": "Variables", "text": ""}, {"location": "developer/auto-generation/#cli-commands", "title": "CLI commands", "text": "

    Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.

    "}, {"location": "developer/auto-generation/#pipeline-and-defaults-example-definitions", "title": "Pipeline and defaults example definitions", "text": "

    Generates example pipeline.yaml and defaults.yaml for each individual component, stores them and also concatenates them into 1 big pipeline definition and 1 big pipeline defaults definition.

    User input

    Generated

    "}, {"location": "developer/auto-generation/#editor-integration", "title": "Editor integration", "text": ""}, {"location": "developer/auto-generation/#schemas", "title": "Schemas", "text": ""}, {"location": "developer/contributing/", "title": "How to contribute", "text": "

    Welcome! We are glad to have you visit our contributing guide!

    If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

    "}, {"location": "developer/contributing/#git", "title": "git", "text": "

    We are using git submodules to import the KPOps examples repository. You need to fetch the repository locally on your machine. To do so use this command:

    git submodule init\ngit submodule update --recursive\n

    This will fetch the resources under the examples folder.

    "}, {"location": "developer/contributing/#style", "title": "Style", "text": "

    We advise that you stick to our pre-commit hooks for code linting, formatting, and auto-generation of documentation. After you install them using poetry run pre-commit install they're triggered automatically during git commit. Additionally, you can manually invoke them with poetry run pre-commit run -a. In order for dprint to work, you have to manually install it locally. It will work in the CI, so it is also possible to manually carry out formatting changes flagged by dprint in the CI and skip installing it locally.

    "}, {"location": "developer/contributing/#python", "title": "Python", "text": "

    To ensure a consistent Python code style, we use Ruff for both linting and formatting. The official docs contain a guide on editor integration.

    Our configuration can be found in KPOps' top-level pyproject.toml.

    "}, {"location": "developer/contributing/#markdown", "title": "Markdown", "text": "

    To ensure a consistent markdown style, we use dprint's Markdown code formatter. Our configuration can be found here.

    "}, {"location": "developer/contributing/#css", "title": "CSS", "text": "

    To ensure a consistent CSS style, we use the malva dprint's plugin. Our configuration can be found here.

    "}, {"location": "developer/contributing/#toml", "title": "TOML", "text": "

    To ensure a consistent TOML style, we use dprint's TOML code formatter. Our configuration can be found here.

    "}, {"location": "developer/getting-started/", "title": "Getting started", "text": "

    Welcome! We are glad to have you visit our developer guide! If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

    Find more about our code-style or insights into KPOps' code base here in our developer guide.

    Work in progress

    The developer guide is still under construction. If you have a question left unanswered here, feel free to ask it by opening an issue.

    "}, {"location": "user/changelog/", "title": "Changelog", "text": ""}, {"location": "user/changelog/#511-release-date-2024-05-22", "title": "5.1.1 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#fixes", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#510-release-date-2024-05-22", "title": "5.1.0 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#miscellaneous", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#501-release-date-2024-05-15", "title": "5.0.1 - Release Date: [2024-05-15]", "text": ""}, {"location": "user/changelog/#fixes_1", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#500-release-date-2024-05-02", "title": "5.0.0 - Release Date: [2024-05-02]", "text": ""}, {"location": "user/changelog/#breaking-changes", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#miscellaneous_1", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#421-release-date-2024-04-25", "title": "4.2.1 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#features", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#420-release-date-2024-04-25", "title": "4.2.0 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#refactor", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_2", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#412-release-date-2024-03-11", "title": "4.1.2 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#documentation", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#411-release-date-2024-03-11", "title": "4.1.1 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#fixes_2", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": 
"user/changelog/#refactor_1", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_3", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#410-release-date-2024-03-07", "title": "4.1.0 - Release Date: [2024-03-07]", "text": ""}, {"location": "user/changelog/#documentation_1", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_4", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#402-release-date-2024-03-04", "title": "4.0.2 - Release Date: [2024-03-04]", "text": ""}, {"location": "user/changelog/#documentation_2", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_5", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#401-release-date-2024-02-29", "title": "4.0.1 - Release Date: [2024-02-29]", "text": ""}, {"location": "user/changelog/#fixes_3", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#400-release-date-2024-02-27", "title": "4.0.0 - Release Date: [2024-02-27]", "text": ""}, {"location": "user/changelog/#breaking-changes_1", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#features_1", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#324-release-date-2024-02-26", "title": "3.2.4 - Release Date: [2024-02-26]", "text": ""}, {"location": "user/changelog/#fixes_4", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_2", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_3", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#323-release-date-2024-02-19", "title": "3.2.3 - Release Date: [2024-02-19]", "text": ""}, {"location": "user/changelog/#fixes_5", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": 
"user/changelog/#322-release-date-2024-02-12", "title": "3.2.2 - Release Date: [2024-02-12]", "text": ""}, {"location": "user/changelog/#fixes_6", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#321-release-date-2024-02-08", "title": "3.2.1 - Release Date: [2024-02-08]", "text": ""}, {"location": "user/changelog/#fixes_7", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_3", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#320-release-date-2024-02-01", "title": "3.2.0 - Release Date: [2024-02-01]", "text": ""}, {"location": "user/changelog/#features_2", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#refactor_4", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_4", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#310-release-date-2024-01-30", "title": "3.1.0 - Release Date: [2024-01-30]", "text": ""}, {"location": "user/changelog/#features_3", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#fixes_8", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_5", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_6", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#302-release-date-2024-01-23", "title": "3.0.2 - Release Date: [2024-01-23]", "text": ""}, {"location": "user/changelog/#fixes_9", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_5", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_7", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#301-release-date-2024-01-19", "title": "3.0.1 - Release Date: [2024-01-19]", "text": ""}, {"location": "user/changelog/#fixes_10", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": 
"user/changelog/#documentation_6", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_8", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#300-release-date-2024-01-17", "title": "3.0.0 - Release Date: [2024-01-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_2", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#features_4", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#fixes_11", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#dependencies", "title": "\ud83e\uddea Dependencies", "text": ""}, {"location": "user/changelog/#refactor_6", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_7", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_9", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#2011-release-date-2023-10-24", "title": "2.0.11 - Release Date: [2023-10-24]", "text": ""}, {"location": "user/changelog/#fixes_12", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_8", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#2010-release-date-2023-10-12", "title": "2.0.10 - Release Date: [2023-10-12]", "text": ""}, {"location": "user/changelog/#miscellaneous_10", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#209-release-date-2023-09-19", "title": "2.0.9 - Release Date: [2023-09-19]", "text": ""}, {"location": "user/changelog/#fixes_13", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_9", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#208-release-date-2023-09-06", "title": "2.0.8 - Release Date: [2023-09-06]", "text": ""}, {"location": "user/changelog/#fixes_14", "title": "\ud83d\udc1b 
Fixes", "text": ""}, {"location": "user/changelog/#refactor_7", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#207-release-date-2023-08-31", "title": "2.0.7 - Release Date: [2023-08-31]", "text": ""}, {"location": "user/changelog/#fixes_15", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_10", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#206-release-date-2023-08-30", "title": "2.0.6 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#refactor_8", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#205-release-date-2023-08-30", "title": "2.0.5 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#fixes_16", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#204-release-date-2023-08-29", "title": "2.0.4 - Release Date: [2023-08-29]", "text": ""}, {"location": "user/changelog/#fixes_17", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_11", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_11", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#203-release-date-2023-08-24", "title": "2.0.3 - Release Date: [2023-08-24]", "text": ""}, {"location": "user/changelog/#fixes_18", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#miscellaneous_12", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#202-release-date-2023-08-23", "title": "2.0.2 - Release Date: [2023-08-23]", "text": ""}, {"location": "user/changelog/#documentation_12", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#201-release-date-2023-08-22", "title": "2.0.1 - Release Date: [2023-08-22]", "text": ""}, {"location": "user/changelog/#fixes_19", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": 
"user/changelog/#200-release-date-2023-08-17", "title": "2.0.0 - Release Date: [2023-08-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_3", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#features_5", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#refactor_9", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_13", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#172-release-date-2023-08-16", "title": "1.7.2 - Release Date: [2023-08-16]", "text": ""}, {"location": "user/changelog/#refactor_10", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_14", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#171-release-date-2023-08-15", "title": "1.7.1 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#documentation_15", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#170-release-date-2023-08-15", "title": "1.7.0 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#features_6", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#160-release-date-2023-08-10", "title": "1.6.0 - Release Date: [2023-08-10]", "text": ""}, {"location": "user/changelog/#refactor_11", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#150-release-date-2023-08-10", "title": "1.5.0 - Release Date: [2023-08-10]", "text": ""}, {"location": "user/changelog/#features_7", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#refactor_12", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_13", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#140-release-date-2023-08-02", "title": "1.4.0 - Release Date: [2023-08-02]", "text": ""}, {"location": 
"user/changelog/#fixes_20", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_13", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_14", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#132-release-date-2023-07-13", "title": "1.3.2 - Release Date: [2023-07-13]", "text": ""}, {"location": "user/changelog/#fixes_21", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#131-release-date-2023-07-11", "title": "1.3.1 - Release Date: [2023-07-11]", "text": ""}, {"location": "user/changelog/#refactor_14", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_15", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#130-release-date-2023-07-07", "title": "1.3.0 - Release Date: [2023-07-07]", "text": ""}, {"location": "user/changelog/#refactor_15", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_16", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#breaking-changes_4", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#miscellaneous_16", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#124-release-date-2023-06-27", "title": "1.2.4 - Release Date: [2023-06-27]", "text": ""}, {"location": "user/changelog/#miscellaneous_17", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#123-release-date-2023-06-22", "title": "1.2.3 - Release Date: [2023-06-22]", "text": ""}, {"location": "user/changelog/#fixes_22", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_16", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_18", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#122-release-date-2023-06-21", "title": "1.2.2 
- Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#miscellaneous_19", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#121-release-date-2023-06-21", "title": "1.2.1 - Release Date: [2023-06-21]", "text": "Uncategorized "}, {"location": "user/changelog/#120-release-date-2023-06-21", "title": "1.2.0 - Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#features_8", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#fixes_23", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_17", "title": "\ud83c\udfed Refactor", "text": " Uncategorized "}, {"location": "user/what-is-kpops/", "title": "What is KPOps?", "text": "

    With a couple of easy commands in the shell, and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline1 to a Kubernetes cluster, but also reset, clean or destroy it!

    "}, {"location": "user/what-is-kpops/#key-features", "title": "Key features", "text": ""}, {"location": "user/what-is-kpops/#example", "title": "Example", "text": "An overview of Word-count pipeline shown in Streams Explorer Word-count pipeline.yaml
    - type: producer-app\n  name: data-producer\n  app:\n    image: bakdata/kpops-demo-sentence-producer\n\n- type: streams-app\n  name: word-counter\n  to:\n    topics:\n      ${output_topic_name}:\n        type: output\n        configs:\n          cleanup.policy: compact\n  app:\n    image: bakdata/kpops-demo-word-count-app\n    replicaCount: 1\n\n- type: kafka-sink-connector\n  name: redis-sink-connector\n  app:\n    connector.class: com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector\n    redis.hosts: redis-headless:6379\n    redis.database: 0\n    tasks.max: 1\n    key.converter: org.apache.kafka.connect.storage.StringConverter\n    value.converter: org.apache.kafka.connect.storage.StringConverter\n
    1. A Kafka pipeline can consist of consecutive streaming applications, producers, and connectors.\u00a0\u21a9

    "}, {"location": "user/core-concepts/config/", "title": "Configuration", "text": "

    KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.

    Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.

    To learn about any of the available settings, take a look at the example below.

    config.yaml
    # CONFIGURATION\n#\n# Custom Python module defining project-specific KPOps components\ncomponents_module: null\n# Base directory to the pipelines (default is current working directory)\npipeline_base_dir: .\n# The Kafka brokers address.\n# REQUIRED\nkafka_brokers: \"http://broker1:9092,http://broker2:9092\"\n# The name of the defaults file and the prefix of the defaults environment file.\ndefaults_filename_prefix: defaults\n# Configure the topic name variables you can use in the pipeline definition.\ntopic_name_config: \n  # Configures the value for the variable ${output_topic_name}\n  default_output_topic_name: ${pipeline.name}-${component.name}\n  # Configures the value for the variable ${error_topic_name}\n  default_error_topic_name: ${pipeline.name}-${component.name}-error\n# Configuration for Schema Registry.\nschema_registry:\n  # Whether the Schema Registry handler should be initialized.\n  enabled: false\n  # Address of the Schema Registry.\n  url: \"http://localhost:8081\"\n# Configuration for the Kafka REST Proxy.\nkafka_rest:\n  # Address of the Kafka REST Proxy.\n  url: \"http://localhost:8082\"\n# Configuration for Kafka Connect.\nkafka_connect:\n  # Address of Kafka Connect.\n  url: \"http://localhost:8083\"\n# The timeout in seconds that specifies when actions like deletion or deploy\n# timeout.\ntimeout: 300\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n  # Name of kubeconfig context (`--kube-context`)\n  context: name\n  # Run Helm in Debug mode.\n  debug: false\n  # Kubernetes API version used for Capabilities.APIVersions\n  api_version: null\n# Configure Helm Diff.\nhelm_diff_config: \n  # Set of keys that should not be checked.\n  ignore:\n    - name\n    - imageTag\n# Whether to retain clean up jobs in the cluster or uninstall the, after\n# completion.\nretain_clean_jobs: false\n

    Environment-specific pipeline definitions

    Similarly to defaults, it is possible to have an unlimited amount of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.

    "}, {"location": "user/core-concepts/defaults/", "title": "Defaults", "text": "

    KPOps has a very efficient way of dealing with repeating settings which manifests as defaults.yaml. This file provides the user with the power to set defaults for any and all components, thus omitting the need to repeat the same settings in pipeline.yaml.

    See real-world examples for defaults.

    "}, {"location": "user/core-concepts/defaults/#features", "title": "Features", "text": ""}, {"location": "user/core-concepts/defaults/#inheritance", "title": "Inheritance", "text": "

    An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.

    It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.

    "}, {"location": "user/core-concepts/defaults/#configuration", "title": "Configuration", "text": "

    KPOps allows using multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These will be picked up by KPOps and get merged into a single pipeline.yaml file. KPOps starts from reading the default files from where the pipeline path is defined and picks up every defaults file on its way to where the pipeline_base_dir is defined.

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.

    defaults merge priority

    Imagine the following folder structure, where the pipeline_base_dir is configured to pipelines:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    KPOps picks up the defaults in the following order (high to low priority):

    Tip

    defaults is the default value of defaults_filename_prefix.

    "}, {"location": "user/core-concepts/defaults/#components", "title": "Components", "text": "

    The defaults codeblocks in this section contain the full set of settings that are specific to the component. If a setting already exists in a parent config, it will not be included in the child's.

    "}, {"location": "user/core-concepts/defaults/#kubernetesapp", "title": "KubernetesApp", "text": "defaults.yaml
    # Base Kubernetes App\n#\n# Parent of: HelmApp\n# Child of: PipelineComponent\nkubernetes-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values 
given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/defaults/#kafkaapp", "title": "KafkaApp", "text": "defaults.yaml
    # Base component for Kafka-based components.\n#\n# Parent of: ProducerApp, StreamsApp\n# Child of: KubernetesApp\nkafka-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is 
initiated with the values given here\n      model: model\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
    "}, {"location": "user/core-concepts/defaults/#streamsapp", "title": "StreamsApp", "text": "defaults.yaml
    # StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n  # No arbitrary keys are allowed under `app` here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. 
https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resource.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n
    "}, {"location": "user/core-concepts/defaults/#producerapp", "title": "ProducerApp", "text": "defaults.yaml
    \n
    "}, {"location": "user/core-concepts/defaults/#kafkaconnector", "title": "KafkaConnector", "text": "defaults.yaml
    # Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is 
initiated with the values given here\n      model: model\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/defaults/#kafkasourceconnector", "title": "KafkaSourceConnector", "text": "defaults.yaml
    # Kafka source connector\n#\n# Child of: KafkaConnector\nkafka-source-connector:\n  # The source connector has no `from` section\n  # from:\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/defaults/#kafkasinkconnector", "title": "KafkaSinkConnector", "text": "defaults.yaml
    # Kafka sink connector\n#\n# Child of: KafkaConnector\nkafka-sink-connector:\n  # No settings differ from `kafka-connector`\n
    "}, {"location": "user/core-concepts/components/helm-app/", "title": "HelmApp", "text": ""}, {"location": "user/core-concepts/components/helm-app/#usage", "title": "Usage", "text": "

    Can be used to deploy any app in Kubernetes using Helm, for example, a REST service that serves Kafka data.

    "}, {"location": "user/core-concepts/components/helm-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kubernetes app managed through Helm with an associated Helm chart\n- type: helm-app\n  name: helm-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated 
with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/helm-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/helm-app/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/helm-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/helm-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/helm-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-app/", "title": "KafkaApp", "text": "

    Subclass of HelmApp.

    "}, {"location": "user/core-concepts/components/kafka-app/#usage", "title": "Usage", "text": ""}, {"location": "user/core-concepts/components/kafka-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base component for Kafka-based components.\n# Producer or streaming apps should inherit from this class.\n- type: kafka-app # required\n  name: kafka-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          
cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
    "}, {"location": "user/core-concepts/components/kafka-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-app/#deploy", "title": "deploy", "text": "

    In addition to HelmApp's deploy:

    "}, {"location": "user/core-concepts/components/kafka-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/kafka-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-connector/", "title": "KafkaConnector", "text": "

    KafkaConnector is a component that deploys Kafka Connectors. Since a connector cannot be different from sink or source it is not recommended to use KafkaConnector for deployment in pipeline.yaml. Instead, KafkaConnector should be used in defaults.yaml to set defaults for all connectors in the pipeline as they can share some common settings.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/", "title": "KafkaSinkConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#usage", "title": "Usage", "text": "

    Lets other systems pull data from Apache Kafka.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka sink connector\n- type: kafka-sink-connector\n  name: kafka-sink-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values 
given here\n      model: model\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#deploy", "title": "deploy", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#destroy", "title": "destroy", "text": "

    The associated sink connector is removed from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#reset", "title": "reset", "text": "

    Reset the consumer group offsets using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#clean", "title": "clean", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/", "title": "KafkaSourceConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#usage", "title": "Usage", "text": "

    Manages source connectors in your Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka source connector\n- type: kafka-source-connector # required\n  name: kafka-source-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # The source connector has no `from` section\n  # from:\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#deploy", "title": "deploy", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#destroy", "title": "destroy", "text": "

    Remove the source connector from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#reset", "title": "reset", "text": "

    Delete state associated with the connector using bakdata's source resetter.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#clean", "title": "clean", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/", "title": "KubernetesApp", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#usage", "title": "Usage", "text": "

    Can be used to create components for any Kubernetes app.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base Kubernetes App\n- type: kubernetes-app\n  name: kubernetes-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      
model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/components/kubernetes-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#deploy", "title": "deploy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#destroy", "title": "destroy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/overview/", "title": "Overview", "text": "

    This section explains the different components of KPOps, their usage and configuration in the pipeline definition pipeline.yaml.

    flowchart BT\n    KubernetesApp --> PipelineComponent\n    KafkaApp --> PipelineComponent\n    HelmApp --> KubernetesApp\n    StreamsBootstrap --> HelmApp\n    StreamsApp --> KafkaApp\n    StreamsApp --> StreamsBootstrap\n    ProducerApp --> KafkaApp\n    ProducerApp --> StreamsBootstrap\n    KafkaConnector --> PipelineComponent\n    KafkaSourceConnector --> KafkaConnector\n    KafkaSinkConnector --> KafkaConnector\n\n    click KubernetesApp \"./../kubernetes-app\"\n    click HelmApp \"./../helm-app\"\n    click KafkaApp \"./../kafka-app\"\n    click StreamsBootstrap \"./../streams-bootstrap\"\n    click StreamsApp \"./../streams-app\"\n    click ProducerApp \"./../producer-app\"\n    click KafkaConnector \"./../kafka-connector\"\n    click KafkaSourceConnector \"./../kafka-source-connector\"\n    click KafkaSinkConnector \"./../kafka-sink-connector\"

    KPOps component hierarchy

    "}, {"location": "user/core-concepts/components/producer-app/", "title": "ProducerApp", "text": "

    Subclass of KafkaApp and StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/producer-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka producer app

    "}, {"location": "user/core-concepts/components/producer-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n  name: producer-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # from: # While the producer-app does inherit from kafka-app, it does not need a\n  # `from` section, hence it does not support it.\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n  app: # required\n    streams: # required, producer-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      outputTopic: output_topic\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n    nameOverride: override-with-this-name # kafka-app-specific\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. 
Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/producer-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/producer-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    "}, {"location": "user/core-concepts/components/producer-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/producer-app/#reset", "title": "reset", "text": "

    Do nothing, producers are stateless.

    "}, {"location": "user/core-concepts/components/producer-app/#clean", "title": "clean", "text": ""}, {"location": "user/core-concepts/components/streams-app/", "title": "StreamsApp", "text": "

    Subclass of KafkaApp and StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/streams-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka Streams app

    "}, {"location": "user/core-concepts/components/streams-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # StreamsApp component that configures a streams bootstrap app.\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\n- type: streams-app # required\n  name: streams-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # 
https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # No arbitrary keys are allowed under `app` here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. 
https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resource.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/streams-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    "}, {"location": "user/core-concepts/components/streams-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-app/#reset", "title": "reset", "text": ""}, {"location": "user/core-concepts/components/streams-app/#clean", "title": "clean", "text": "

    Similar to reset with two additional steps:

    "}, {"location": "user/core-concepts/components/streams-bootstrap/", "title": "StreamsApp", "text": "

    Subclass of HelmApp.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#usage", "title": "Usage", "text": "

    Configures a Helm app with streams-bootstrap Helm charts.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-bootstrap/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/variables/environment_variables/", "title": "Environment variables", "text": "

    Environment variables can be set by using the export command in Linux or the set command in Windows.

    dotenv files

    KPOps currently supports .env files only for variables related to the config. Full support for .env files is on the roadmap. One of the possible ways to use one and export the contents manually is with the following command: export $(xargs < .env). This would work in bash, provided there are no spaces inside the values.

    "}, {"location": "user/core-concepts/variables/environment_variables/#config", "title": "Config", "text": "

    These variables take precedence over the settings in config.yaml. Variables marked as required can instead be set in the global config.

    Name Default Value Required Description Setting name KPOPS_COMPONENTS_MODULE False Custom Python module defining project-specific KPOps components components_module KPOPS_PIPELINE_BASE_DIR . False Base directory to the pipelines (default is current working directory) pipeline_base_dir KPOPS_KAFKA_BROKERS True The comma separated Kafka brokers address. kafka_brokers KPOPS_DEFAULTS_FILENAME_PREFIX defaults False The name of the defaults file and the prefix of the defaults environment file. defaults_filename_prefix KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME ${pipeline.name}-${component.name} False Configures the value for the variable ${output_topic_name} topic_name_config.default_output_topic_name KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME ${pipeline.name}-${component.name}-error False Configures the value for the variable ${error_topic_name} topic_name_config.default_error_topic_name KPOPS_SCHEMA_REGISTRY__ENABLED False False Whether the Schema Registry handler should be initialized. schema_registry.enabled KPOPS_SCHEMA_REGISTRY__URL http://localhost:8081/ False Address of the Schema Registry. schema_registry.url KPOPS_SCHEMA_REGISTRY__TIMEOUT 30 False Operation timeout in seconds. schema_registry.timeout KPOPS_KAFKA_REST__URL http://localhost:8082/ False Address of the Kafka REST Proxy. kafka_rest.url KPOPS_KAFKA_REST__TIMEOUT 30 False Operation timeout in seconds. kafka_rest.timeout KPOPS_KAFKA_CONNECT__URL http://localhost:8083/ False Address of Kafka Connect. kafka_connect.url KPOPS_KAFKA_CONNECT__TIMEOUT 30 False Operation timeout in seconds. kafka_connect.timeout KPOPS_CREATE_NAMESPACE False False Flag for helm upgrade --install. Create the release namespace if not present. 
create_namespace KPOPS_HELM_CONFIG__CONTEXT False Name of kubeconfig context (--kube-context) helm_config.context KPOPS_HELM_CONFIG__DEBUG False False Run Helm in Debug mode helm_config.debug KPOPS_HELM_CONFIG__API_VERSION False Kubernetes API version used for Capabilities.APIVersions helm_config.api_version KPOPS_HELM_DIFF_CONFIG__IGNORE True Set of keys that should not be checked. helm_diff_config.ignore KPOPS_RETAIN_CLEAN_JOBS False False Whether to retain clean up jobs in the cluster or uninstall them after completion. retain_clean_jobs config_env_vars.env Exhaustive list of all config-related environment variables
    # Global config environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# settings in `config.yaml`. Variables marked as required can instead\n# be set in the global config.\n#\n# components_module\n# Custom Python module defining project-specific KPOps components\nKPOPS_COMPONENTS_MODULE # No default value, not required\n# pipeline_base_dir\n# Base directory to the pipelines (default is current working\n# directory)\nKPOPS_PIPELINE_BASE_DIR=.\n# kafka_brokers\n# The comma separated Kafka brokers address.\nKPOPS_KAFKA_BROKERS # No default value, required\n# defaults_filename_prefix\n# The name of the defaults file and the prefix of the defaults\n# environment file.\nKPOPS_DEFAULTS_FILENAME_PREFIX=defaults\n# topic_name_config.default_output_topic_name\n# Configures the value for the variable ${output_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}\n# topic_name_config.default_error_topic_name\n# Configures the value for the variable ${error_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error\n# schema_registry.enabled\n# Whether the Schema Registry handler should be initialized.\nKPOPS_SCHEMA_REGISTRY__ENABLED=False\n# schema_registry.url\n# Address of the Schema Registry.\nKPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/\n# schema_registry.timeout\n# Operation timeout in seconds.\nKPOPS_SCHEMA_REGISTRY__TIMEOUT=30\n# kafka_rest.url\n# Address of the Kafka REST Proxy.\nKPOPS_KAFKA_REST__URL=http://localhost:8082/\n# kafka_rest.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_REST__TIMEOUT=30\n# kafka_connect.url\n# Address of Kafka Connect.\nKPOPS_KAFKA_CONNECT__URL=http://localhost:8083/\n# kafka_connect.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_CONNECT__TIMEOUT=30\n# create_namespace\n# Flag for `helm upgrade --install`. 
Create the release namespace if\n# not present.\nKPOPS_CREATE_NAMESPACE=False\n# helm_config.context\n# Name of kubeconfig context (`--kube-context`)\nKPOPS_HELM_CONFIG__CONTEXT # No default value, not required\n# helm_config.debug\n# Run Helm in Debug mode\nKPOPS_HELM_CONFIG__DEBUG=False\n# helm_config.api_version\n# Kubernetes API version used for `Capabilities.APIVersions`\nKPOPS_HELM_CONFIG__API_VERSION # No default value, not required\n# helm_diff_config.ignore\n# Set of keys that should not be checked.\nKPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required\n# retain_clean_jobs\n# Whether to retain clean up jobs in the cluster or uninstall them\n# after completion.\nKPOPS_RETAIN_CLEAN_JOBS=False\n
    "}, {"location": "user/core-concepts/variables/environment_variables/#cli", "title": "CLI", "text": "

    These variables take precedence over the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.

    Name Default Value Required Description KPOPS_CONFIG_PATH . False Path to the dir containing config.yaml files KPOPS_DOTENV_PATH False Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. KPOPS_ENVIRONMENT False The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). KPOPS_PIPELINE_PATH True Path to YAML with pipeline definition KPOPS_PIPELINE_STEPS False Comma separated list of steps to apply the command on cli_env_vars.env Exhaustive list of all cli-related environment variables
    # CLI Environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# commands' flags. If a variable is set, the corresponding flag does\n# not have to be specified in commands. Variables marked as required\n# can instead be set as flags.\n#\n# Path to the dir containing config.yaml files\nKPOPS_CONFIG_PATH=.\n# Path to dotenv file. Multiple files can be provided. The files will\n# be loaded in order, with each file overriding the previous one.\nKPOPS_DOTENV_PATH # No default value, not required\n# The environment you want to generate and deploy the pipeline to.\n# Suffix your environment files with this value (e.g.\n# defaults_development.yaml for environment=development).\nKPOPS_ENVIRONMENT # No default value, not required\n# Path to YAML with pipeline definition\nKPOPS_PIPELINE_PATH # No default value, required\n# Comma separated list of steps to apply the command on\nKPOPS_PIPELINE_STEPS # No default value, not required\n
    "}, {"location": "user/core-concepts/variables/substitution/", "title": "Substitution", "text": "

    KPOps supports the usage of placeholders and environment variables in pipeline definition and defaults.

    "}, {"location": "user/core-concepts/variables/substitution/#component-specific-variables", "title": "Component-specific variables", "text": "

    These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.

    All of them are prefixed with component. and follow the following form: component.{attribute_name}. If the attribute itself contains attributes, they can be referred to like this: component.{attribute_name}.{subattribute_name}.

    Example
    - type: scheduled-producer\n  app:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_schedule: \"${component.app.schedule}\"\n      helm_release_name: ${component.helm_release_name}\n      helm_name_override: ${component.helm_name_override}\n    commandLine:\n      FAKE_ARG: \"fake-arg-value\"\n    schedule: \"30 3/8 * * *\"\n- type: converter\n  app:\n    commandLine:\n      CONVERT_XML: true\n    resources:\n      limits:\n        memory: 2G\n      requests:\n        memory: 2G\n- type: filter\n  name: \"filter-app\"\n  app:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_resources_requests_memory: \"${component.app.resources.requests.memory}\"\n      ${component.type}: \"${component.app.labels.app_name}-${component.app.labels.app_type}\"\n      test_placeholder_in_placeholder: \"${component.app.labels.${component.type}}\"\n    commandLine:\n      TYPE: \"nothing\"\n    resources:\n      requests:\n        memory: 3G\n    replicaCount: 4\n    autoscaling:\n      minReplicas: 4\n      maxReplicas: 4\n
    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-config-specific-variables", "title": "Pipeline-config-specific variables", "text": "

    These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.

    All such variables are prefixed with config. and are of the same form as the component-specific variables.

    Info

    error_topic_name is an alias for config.topic_name_config.default_error_topic_name output_topic_name is an alias for config.topic_name_config.default_output_topic_name

    "}, {"location": "user/core-concepts/variables/substitution/#environment-variables", "title": "Environment variables", "text": "

    Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation following the form ${ENV_VAR_NAME}. This, of course, includes variables like the ones relevant to the KPOps cli that are exported by the user.

    See all KPOps environment variables

    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-name-variables", "title": "Pipeline name variables", "text": "

    These are special variables that refer to the name and path of a pipeline.

    "}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
    1. Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
    2. Chaining variables: It is possible to chain any number of variables, see the example above.
    3. Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references.
    "}, {"location": "user/examples/atm-fraud-pipeline/", "title": "ATM fraud detection pipeline", "text": "

    ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blogpost. The one used in this example is re-built from scratch using bakdata's streams-bootstrap library.

    "}, {"location": "user/examples/atm-fraud-pipeline/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/examples/atm-fraud-pipeline/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postgresql", "title": "PostgreSQL", "text": "

    Deploy PostgreSQL using the Bitnami Helm chart: Add the helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install the PostgreSQL with helm:

    helm upgrade --install -f ./postgresql.yaml \\\n--namespace kpops \\\npostgresql bitnami/postgresql\n
    PostgreSQL Example Helm chart values (postgresql.yaml)
    auth:\n  database: app_db\n  enablePostgresUser: true\n  password: AppPassword\n  postgresPassword: StrongPassword\n  username: app1\nprimary:\n  persistence:\n    enabled: false\n    existingClaim: postgresql-data-claim\nvolumePermissions:\n  enabled: true\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-detection-example-pipeline-setup", "title": "ATM fraud detection example pipeline setup", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#deploying-the-atm-fraud-detection-pipeline", "title": "Deploying the ATM fraud detection pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy atm-fraud/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs if your pipeline will be deployed correctly.

    "}, {"location": "user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:

    An overview of ATM fraud pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/examples/atm-fraud-pipeline/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postrgresql", "title": "PostrgreSQL", "text": "

    PostgreSQL can be uninstalled by running the following command:

    helm --namespace kpops uninstall postgresql\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-pipeline", "title": "ATM fraud pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean atm-fraud/pipeline.yaml --verbose  --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs if your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/examples/atm-fraud-pipeline/#common-errors", "title": "Common errors", "text": ""}, {"location": "user/getting-started/quick-start/", "title": "Quick start", "text": ""}, {"location": "user/getting-started/quick-start/#word-count", "title": "Word-count", "text": "

    Word-count is a demo pipeline consisting of a producer producing words to Kafka, a Kafka streams app counting the number of times each word occurs, and finally a Redis database into which the words are exported.

    "}, {"location": "user/getting-started/quick-start/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": ""}, {"location": "user/getting-started/quick-start/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/getting-started/quick-start/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/getting-started/quick-start/#redis", "title": "Redis", "text": "

    Deploy Redis using the Bitnami Helm chart: Add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install Redis with Helm:

    helm upgrade --install -f ./values-redis.yaml \\\n--namespace kpops \\\nredis bitnami/redis\n
    Redis example Helm chart values (values-redis.yaml)
    architecture: standalone\nauth:\n  enabled: false\nmaster:\n  count: 1\n  configuration: \"databases 1\"\nimage:\n  tag: 7.0.8\n
    "}, {"location": "user/getting-started/quick-start/#word-count-example-pipeline-setup", "title": "Word-count example pipeline setup", "text": ""}, {"location": "user/getting-started/quick-start/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/getting-started/quick-start/#deploying-the-word-count-pipeline", "title": "Deploying the Word-count pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy word-count/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs if your pipeline will be deployed correctly.

    "}, {"location": "user/getting-started/quick-start/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser.

    You should be able to see the pipeline shown in the image below:

    An overview of Word-count pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/getting-started/quick-start/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/quick-start/#redis_1", "title": "Redis", "text": "

    Redis can be uninstalled by running the following command:

    helm --namespace kpops uninstall redis\n
    "}, {"location": "user/getting-started/quick-start/#word-count-pipeline", "title": "Word-count pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean word-count/pipeline.yaml --verbose --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs if your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/getting-started/quick-start/#common-errors", "title": "Common errors", "text": ""}, {"location": "user/getting-started/setup/", "title": "Setup KPOps", "text": "

    In this part, you will set up KPOps. This includes:

    "}, {"location": "user/getting-started/setup/#prerequisites", "title": "Prerequisites", "text": ""}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "

    If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.

    1. You can install k3d with its installation script:

      wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/v5.4.6/install.sh | bash\n

      For other ways of installing k3d, you can have a look at their installation guide.

    2. The Kafka deployment needs a modified Docker image. In that case the image is built and pushed to a Docker registry that holds it. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:

      k3d registry create kpops-registry.localhost --port 12345\n
    3. Now you can create a new cluster called kpops that uses the previously created Docker registry:

      k3d cluster create kpops --k3s-arg \"--no-deploy=traefik@server:*\" --registry-use k3d-kpops-registry.localhost:12345\n

    Note

    Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.

    You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.

    "}, {"location": "user/getting-started/setup/#deploy-kafka", "title": "Deploy Kafka", "text": "

    Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.

    1. To allow connectivity to other systems Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:

      1. Create a Dockerfile with the following content:

        FROM confluentinc/cp-kafka-connect:7.1.3\n\nRUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0\n
      2. Build and push the modified image to your private Docker registry:

        docker build . --tag localhost:12345/kafka-connect-jdbc:7.1.3 && \\\ndocker push localhost:12345/kafka-connect-jdbc:7.1.3\n

      Detailed instructions on building, tagging and pushing a docker image can be found in Docker docs.

    2. Add Confluent's Helm chart repository and update the index:

      helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/ &&  \nhelm repo update\n
    3. Install Kafka, Zookeeper, Confluent's Schema Registry, Kafka Rest Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example for the --values ./kafka.yaml file configuring the deployment accordingly. Deploy the services:

      helm upgrade \\\n    --install \\\n    --version 0.6.1 \\\n    --values ./kafka.yaml \\\n    --namespace kpops \\\n    --create-namespace \\\n    --wait \\\n    k8kafka confluentinc/cp-helm-charts\n
    Kafka Helm chart values (kafka.yaml)

    An example value configuration for Confluent's Helm chart. This configuration deploys a single Kafka Broker, a Schema Registry, Zookeeper, Kafka Rest Proxy, and Kafka Connect with minimal resources.

    cp-zookeeper:\n  enabled: true\n  servers: 1\n  imageTag: 7.1.3\n  heapOptions: \"-Xms124M -Xmx124M\"\n  overrideGroupId: k8kafka\n  fullnameOverride: \"k8kafka-cp-zookeeper\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.2G\n    limits:\n      cpu: 250m\n      memory: 0.2G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka:\n  enabled: true\n  brokers: 1\n  imageTag: 7.1.3\n  podManagementPolicy: Parallel\n  configurationOverrides:\n    \"auto.create.topics.enable\": false\n    \"offsets.topic.replication.factor\": 1\n    \"transaction.state.log.replication.factor\": 1\n    \"transaction.state.log.min.isr\": 1\n    \"confluent.metrics.reporter.topic.replicas\": 1\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.5G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n  persistence:\n    enabled: false\n\ncp-schema-registry:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-schema-registry\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.25G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-connect:\n  enabled: true\n  replicaCount: 1\n  image: k3d-kpops-registry.localhost:12345/kafka-connect-jdbc\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-kafka-connect\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 500m\n      memory: 0.25G\n    limits:\n      cpu: 500m\n      memory: 0.25G\n  configurationOverrides:\n    \"consumer.max.poll.records\": \"10\"\n    \"consumer.max.poll.interval.ms\": \"900000\"\n    \"config.storage.replication.factor\": \"1\"\n    \"offset.storage.replication.factor\": \"1\"\n    
\"status.storage.replication.factor\": \"1\"\n  cp-schema-registry:\n    url: http://k8kafka-cp-schema-registry:8081\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-rest:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-rest\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-ksql-server:\n  enabled: false\ncp-control-center:\n  enabled: false\n
    "}, {"location": "user/getting-started/setup/#deploy-streams-explorer", "title": "Deploy Streams Explorer", "text": "

    Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster including the inspection of schemas and monitoring of metrics. First, add the Helm repository:

    helm repo add streams-explorer https://bakdata.github.io/streams-explorer && \\\nhelm repo update\n

    Below you can find an example for the --values ./streams-explorer.yaml file configuring the deployment accordingly. Now, deploy the service:

    helm upgrade \\\n    --install \\\n    --version 0.2.3 \\\n    --values ./streams-explorer.yaml \\\n    --namespace kpops \\\n    streams-explorer streams-explorer/streams-explorer\n
    Streams Explorer Helm chart values (streams-explorer.yaml)

    An example value configuration for Streams Explorer Helm chart.

    imageTag: \"v2.1.2\"\nconfig:\n   K8S__deployment__cluster: true\n   SCHEMAREGISTRY__url: http://k8kafka-cp-schema-registry.kpops.svc.cluster.local:8081\n   KAFKACONNECT__url: http://k8kafka-cp-kafka-connect.kpops.svc.cluster.local:8083\nresources:\n   requests:\n       cpu: 200m\n       memory: 300Mi\n   limits:\n       cpu: 200m\n       memory: 300Mi\n
    "}, {"location": "user/getting-started/setup/#check-the-status-of-your-deployments", "title": "Check the status of your deployments", "text": "

    Now we will check if all the pods are running in our namespace. You can list all pods in the namespace with this command:

    kubectl --namespace kpops get pods\n

    Then you should see the following output in your terminal:

    NAME                                          READY   STATUS    RESTARTS   AGE\nk8kafka-cp-kafka-connect-8fc7d544f-8pjnt      1/1     Running   0          15m\nk8kafka-cp-zookeeper-0                        1/1     Running   0          15m\nk8kafka-cp-kafka-0                            1/1     Running   0          15m\nk8kafka-cp-schema-registry-588f8c65db-jdwbq   1/1     Running   0          15m\nk8kafka-cp-rest-6bbfd7b645-nwkf8              1/1     Running   0          15m\nstreams-explorer-54db878c67-s8wbz             1/1     Running   0          15m\n

    Pay attention to the STATUS row. The pods should have a status of Running.

    "}, {"location": "user/getting-started/setup/#install-kpops", "title": "Install KPOps", "text": "

    KPOps comes as a PyPI package. You can install it with pip:

    pip install kpops\n
    "}, {"location": "user/getting-started/teardown/", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/teardown/#kpops-teardown-commands", "title": "KPOps teardown commands", "text": ""}, {"location": "user/getting-started/teardown/#kpops-deployed-pipeline", "title": "KPOps-deployed pipeline", "text": "

    The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with helm (see section Infrastructure).

    1. Export environment variables.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Navigate to the examples folder. Replace the <name-of-the-example-directory> with the example you want to tear down. For example the atm-fraud-detection.

    3. Remove the pipeline

      # Uncomment 1 line to either destroy, reset or clean.\n\n# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \\\n--config <name-of-the-example-directory>/config.yaml \\\n--execute\n
    "}, {"location": "user/getting-started/teardown/#infrastructure", "title": "Infrastructure", "text": "

    Delete namespace:

    kubectl delete namespace kpops\n

    Note

    In case kpops destroy is not working one can uninstall the pipeline services one by one. This is equivalent to running kpops destroy. In case a clean uninstall (like the one kpops clean does) is needed, one needs to also delete the topics and schemas created by deployment of the pipeline.

    "}, {"location": "user/getting-started/teardown/#local-cluster", "title": "Local cluster", "text": "

    Delete local cluster:

    k3d cluster delete kpops\n
    "}, {"location": "user/getting-started/teardown/#local-image-registry", "title": "Local image registry", "text": "

    Delete local registry:

    k3d registry delete k3d-kpops-registry.localhost\n
    "}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "

    KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention the type would be the lowercase, kebab-cased name of the class.

    class MyCoolStreamApp(StreamsApp):\n-    type = \"my-cool-stream-app\"\n+    ...\n

    Because of this new convention producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.

    - producer:\n+ producer-app:\n    app:\n        streams:\n        outputTopic: output_topic\n        extraOutputTopics:\n            output_role1: output_topic1\n            output_role2: output_topic2\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-inputoutput-types", "title": "Refactor input/output types", "text": ""}, {"location": "user/migration-guide/v1-v2/#to-section", "title": "To section", "text": "

    In the to section these have changed:

      to:\n    topics:\n      ${pipeline_name}-topic-1:\n-       type: extra\n        role: \"role-1\"\n        ...\n      ${pipeline_name}-topic-2:\n-       type: output\n        ...\n      ${pipeline_name}-topic-3:\n         type: error\n         ...\n
    "}, {"location": "user/migration-guide/v1-v2/#from-section", "title": "From section", "text": "

    In the from section these have changed:

      from:\n    topics:\n      ${pipeline_name}-input-topic:\n-       type: input\n        ...\n      ${pipeline_name}-extra-topic:\n-       type: extra\n        role: topic-role\n        ...\n      ${pipeline_name}-input-pattern-topic:\n-       type: input-pattern\n+       type: pattern\n        ...\n      ${pipeline_name}-extra-pattern-topic:\n-       type: extra-pattern\n+       type: pattern\n        role: some-role\n        ...\n
    "}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "

    All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example of a pipeline.yaml in the following. Notice that the app section here remains untouched.

    ...\ntype: streams-app\n  name: streams-app\n  namespace: namespace\n  app:\n    streams:\n      brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n     autoscaling:\n      consumerGroup: consumer-group\n      lagThreshold: 0\n      enabled: false\n      pollingInterval: 30\n\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: error\n-       keySchema: key-schema\n+       key_schema: key-schema\n-       valueSchema: value-schema\n+       value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs:\n          cleanup.policy: compact\n    models:\n      model: model\n  prefix: ${pipeline_name}-\n- repoConfig:\n+ repo_config:\n-   repositoryName: bakdata-streams-bootstrap\n+   repository_name: bakdata-streams-bootstrap\n    url: https://bakdata.github.io/streams-bootstrap/\n-   repoAuthFlags:\n+   repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.4\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-handling-of-helm-flags", "title": "Refactor handling of Helm flags", "text": "

    If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, the abstract function get_helm_chart that returns the chart for deploying the app using Helm is now a Python property and renamed to helm_chart.

    class MyCoolApp(KubernetesApp):\n\n+   @property\n    @override\n-   def get_helm_chart(self) -> str:\n+   def helm_chart(self) -> str:\n        return \"./charts/charts-folder\"\n
    "}, {"location": "user/migration-guide/v1-v2/#plural-broker-field-in-pipeline-config", "title": "Plural broker field in pipeline config", "text": "

    Since you can pass a comma-separated string of broker addresses, the broker field in KPOps is now plural. The pluralization has affected multiple areas:

    "}, {"location": "user/migration-guide/v1-v2/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- broker: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  kafka_connect_host: \"http://localhost:8083\"\n  kafka_rest_host: \"http://localhost:8082\"\n  schema_registry_url: \"http://localhost:8081\"\n
    "}, {"location": "user/migration-guide/v1-v2/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called brokers.

    ...\n  app:\n    streams:\n-     brokers: ${broker}\n+     brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#environment-variable", "title": "Environment variable", "text": "

    Previously, if you set the environment variable KPOPS_KAFKA_BROKER, you need to replace that now with KPOPS_KAFKA_BROKERS.

    "}, {"location": "user/migration-guide/v2-v3/", "title": "Migrate from V2 to V3", "text": "

    Jump to the summary

    "}, {"location": "user/migration-guide/v2-v3/#use-hash-and-trim-long-helm-release-names-instead-of-only-trimming", "title": "Use hash and trim long Helm release names instead of only trimming", "text": "

    KPOps handles long (more than 53 characters) Helm release names differently. Helm will not find your (long) old release names anymore. Therefore, it is recommended that you destroy your pipeline once with KPOps v2 to remove old Helm release names. After a clean destroy, re-deploy your pipeline with KPOps v3.

    For example if you have a component with the Helm release name example-component-name-too-long-fake-fakefakefakefakefake. The new release name will shorten the original name to 53 characters and then replace the last 6 characters of the trimmed name with the first 5 characters of the result of SHA-1(helm_release_name).

    example-component-name-too-long-fake-fakefakef-0a7fc ----> 53 chars\n---------------------------------------------- -----\n  ^Shortened helm_release_name                 ^first 5 characters of SHA1(helm_release_name)\n
    "}, {"location": "user/migration-guide/v2-v3/#create-helmapp-component", "title": "Create HelmApp component", "text": "

    All Helm-specific parts of the built-in KubernetesApp have been extracted to a new child component that is more appropriately named HelmApp. It has to be renamed in your existing pipeline definitions and custom components module.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml", "title": "pipeline.yaml", "text": "
    -- type: kubernetes-app\n+- type: helm-app\n   name: foo\n
    "}, {"location": "user/migration-guide/v2-v3/#custom_modulepy", "title": "custom_module.py", "text": "
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n      ...\n
    "}, {"location": "user/migration-guide/v2-v3/#create-streamsbootstrap-component-refactor-cleanup-jobs-as-individual-helmapp", "title": "Create StreamsBootstrap component & refactor cleanup jobs as individual HelmApp", "text": "

    Previously the default KafkaApp component configured the streams-bootstrap Helm Charts. Now, this component is no longer tied to Helm (or Kubernetes). Instead, there is a new StreamsBootstrap component that configures the Helm Chart repository for the components that use it, e.g. StreamsApp and ProducerApp. If you are using non-default values for the Helm Chart repository or version, it has to be updated as shown below.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml", "title": "defaults.yaml", "text": "
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-kafka-connector-resetter-as-individual-helmapp", "title": "Refactor Kafka Connector resetter as individual HelmApp", "text": "

    Internally, the Kafka Connector resetter is now its own standard HelmApp, removing a lot of the shared code. It is configured using the resetter_namespace (formerly namespace) and resetter_values attributes.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml_1", "title": "defaults.yaml", "text": "
      kafka-connector:\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n
    "}, {"location": "user/migration-guide/v2-v3/#make-kafka-rest-proxy-kafka-connect-hosts-default-and-improve-schema-registry-config", "title": "Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config", "text": "

    The breaking changes target the config.yaml file:

    The environment variable names of these config fields changed respectively. Please refer to the environment variables documentation page to see the newest changes.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called kafka_brokers.

    ...\n  app:\n    streams:\n-     brokers: ${brokers}\n+     brokers: ${kafka_brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v2-v3/#define-custom-components-module-pipeline-base-dir-globally", "title": "Define custom components module & pipeline base dir globally", "text": "

    Warning

    The previous CLI parameters have been removed.

    The options for a custom components_module and pipeline_base_dir are now global settings, defined in config.yaml.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_1", "title": "config.yaml", "text": "
      kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  environment: development\n+ components_module: components\n+ pipeline_base_dir: pipelines\n
    "}, {"location": "user/migration-guide/v2-v3/#move-github-action-to-repsitory-root", "title": "Move GitHub action to repository root", "text": "

    The location of the GitHub action has changed, and it's now available directly as bakdata/kpops.

    You'll need to change it in your GitHub CI workflows.

    steps:\n  - name: kpops deploy\n-   uses: bakdata/kpops/actions/kpops-runner@main\n+   uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      # ...\n
    "}, {"location": "user/migration-guide/v2-v3/#allow-overriding-config-files", "title": "Allow overriding config files", "text": "

    Specifying the environment is no longer mandatory. If not defined, only the global files will be used.

    environment is no longer specified in config.yaml. Instead, it can be either set via the CLI flag --environment or with the environment variable KPOPS_ENVIRONMENT.

    The --config flag in the CLI now points to the directory that contains config*.yaml files. The files to be used are resolved based on the provided (or not) environment.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_2", "title": "config.yaml", "text": "
    - environment: development\n  kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#change-substitution-variables-separator-to", "title": "Change substitution variables separator to .", "text": "

    The delimiter in the substitution variables is changed to ..

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml_1", "title": "pipeline.yaml and default.yaml", "text": "
    steps:\n  - type: scheduled-producer\n    app:\n      labels:\n-       app_type: \"${component_type}\"\n-       app_name: \"${component_name}\"\n-       app_schedule: \"${component_app_schedule}\"\n+       app_type: \"${component.type}\"\n+       app_name: \"${component.name}\"\n+       app_schedule: \"${component.app.schedule}\"\n
    "}, {"location": "user/migration-guide/v2-v3/#configyaml_3", "title": "config.yaml", "text": "
    topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n- default_output_topic_name: \"${pipeline_name}-${component_name}-topic\"\n+ default_error_topic_name: \"${pipeline_name}-${component.name}-dead-letter-topic\"\n+ default_output_topic_name: \"${pipeline_name}-${component.name}-topic\"\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-generate-template-for-python-api-usage", "title": "Refactor generate template for Python API usage", "text": "

    The template method of every pipeline component has been renamed to manifest as it is no longer strictly tied to Helm template. Instead, it can be used to render the final resources of a component, such as Kubernetes manifests.

    There is also a new kpops manifest command replacing the existing kpops generate --template flag.

    If you're using this functionality in your custom components, it needs to be updated.

      from kpops.components.base_components.models.resource import Resource\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n
    "}, {"location": "user/migration-guide/v2-v3/#namespace-substitution-vars", "title": "Namespace substitution vars", "text": "

    The global configuration variables are now namespaced under the config key, such as ${config.kafka_brokers}, ${config.schema_registry.url}. Same with pipeline variables, e.g. ${pipeline_name} \u2192 ${pipeline.name}. This would make it more uniform with the existing ${component.<key>} variables.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml_1", "title": "pipeline.yaml", "text": "
      name: kafka-app\n- prefix: ${pipeline_name}-\n+ prefix: ${pipeline.name}-\n  app:\n    streams:\n-     brokers: ${kafka_brokers}\n-     schemaRegistryUrl: ${schema_registry.url}\n+     brokers: ${config.kafka_brokers}\n+     schemaRegistryUrl: ${config.schema_registry.url}\n
    "}, {"location": "user/migration-guide/v2-v3/#summary", "title": "Summary", "text": "

    Warning

    Helm will not find your (long) old release names anymore.

    defaults.yaml
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    pipeline.yaml
    - - type: kubernetes-app\n+ - type: helm-app\n  ...\n  - type: kafka-app\n    app:\n-     brokers: ${brokers}\n+     brokers: ${config.kafka_brokers}\n      labels:\n-       app_schedule: \"${component_app_schedule}\"\n+       app_schedule: \"${component.app.schedule}\"\n  ...\n  - type: kafka-connector\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n  ...\n
    config.yaml
    - environment: development\n\n+ components_module: components\n\n+ pipeline_base_dir: pipelines\n\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n\n  topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n+ default_error_topic_name: \"${pipeline.name}-${component.name}-dead-letter-topic\"\n  ...\n
    custom_module.py
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n+ from kpops.components.base_components.models.resource import Resource\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n  ...\n
    github_ci_workflow.yaml
      steps:\n    - name: ...\n-     uses: bakdata/kpops/actions/kpops-runner@main\n+     uses: bakdata/kpops@main\n  ...\n
    "}, {"location": "user/migration-guide/v3-v4/", "title": "Migrate from V3 to V4", "text": ""}, {"location": "user/migration-guide/v3-v4/#distribute-defaults-across-multiple-files", "title": "Distribute defaults across multiple files", "text": "

    Warning

    The --defaults flag is removed

    It is possible now to use multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These will be picked up by KPOps and get merged into a single pipeline.yaml file. KPOps starts from reading the default files from where the pipeline path is defined and picks up every defaults file on its way to where the pipeline_base_dir is defined.

    For example, imagine the following folder structure:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    The pipeline_base_dir is configured to pipelines. Now if we generate this pipeline with the following command:

    kpops generate \\\n      --environment dev\n      ./pipelines/distributed-defaults/pipeline-deep/pipeline.yaml\n

    The defaults would be picked in the following order (high to low priority):

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    "}, {"location": "user/migration-guide/v4-v5/", "title": "Migrate from V4 to V5", "text": ""}, {"location": "user/migration-guide/v4-v5/#allow-custom-timeout-for-external-services", "title": "Allow custom timeout for external services", "text": "

    The global timeout setting has been removed. Instead, an individual timeout can be set for each external service. The default is 30 seconds.

    "}, {"location": "user/migration-guide/v4-v5/#configyaml", "title": "config.yaml", "text": "
    - timeout: 300\n\n  kafka_rest:\n    url: \"http://my-custom-rest.url:8082\"\n+   timeout: 30\n  kafka_connect:\n    url: \"http://my-custom-connect.url:8083\"\n+   timeout: 30\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n+   timeout: 30\n
    "}, {"location": "user/references/cli-commands/", "title": "CLI Usage", "text": "

    Usage:

    $ kpops [OPTIONS] COMMAND [ARGS]...\n

    Options:

    Commands:

    "}, {"location": "user/references/cli-commands/#kpops-clean", "title": "kpops clean", "text": "

    Clean pipeline steps

    Usage:

    $ kpops clean [OPTIONS] PIPELINE_PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-deploy", "title": "kpops deploy", "text": "

    Deploy pipeline steps

    Usage:

    $ kpops deploy [OPTIONS] PIPELINE_PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-destroy", "title": "kpops destroy", "text": "

    Destroy pipeline steps

    Usage:

    $ kpops destroy [OPTIONS] PIPELINE_PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-generate", "title": "kpops generate", "text": "

    Enrich pipeline steps with defaults. The enriched pipeline is used for all KPOps operations (deploy, destroy, ...).

    Usage:

    $ kpops generate [OPTIONS] PIPELINE_PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-init", "title": "kpops init", "text": "

    Initialize a new KPOps project.

    Usage:

    $ kpops init [OPTIONS] PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-manifest", "title": "kpops manifest", "text": "

    In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.

    Usage:

    $ kpops manifest [OPTIONS] PIPELINE_PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-reset", "title": "kpops reset", "text": "

    Reset pipeline steps

    Usage:

    $ kpops reset [OPTIONS] PIPELINE_PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-schema", "title": "kpops schema", "text": "

    Generate JSON schema.

    The schemas can be used to enable support for KPOps files in a text editor.

    Usage:

    $ kpops schema [OPTIONS] SCOPE:{pipeline|defaults|config}\n

    Arguments:

    Options:

    "}, {"location": "user/references/editor-integration/", "title": "Editor integration", "text": ""}, {"location": "user/references/editor-integration/#native", "title": "Native", "text": "

    We are working towards first-class editor support by providing plugins that work out of the box.

    "}, {"location": "user/references/editor-integration/#manual-for-unsupported-editors-with-lsp", "title": "Manual (for unsupported editors with LSP)", "text": "
    1. Install the yaml-language-server in your editor of choice. (requires LSP support)
    2. Configure the extension with the settings below.
    settings.json
    {\n    \"yaml.schemas\": {\n        \"https://bakdata.github.io/kpops/4.0/schema/pipeline.json\": [\n            \"pipeline.yaml\",\n            \"pipeline_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/defaults.json\": [\n            \"defaults.yaml\",\n            \"defaults_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/config.json\": [\n            \"config.yaml\",\n            \"config_*.yaml\"\n        ]\n    }\n}\n

    Advanced usage

    It is possible to generate schemas with the kpops schema command. Useful for including custom components or when using a pre-release version of KPOps.

    "}, {"location": "user/references/editor-integration/#concepts", "title": "Concepts", "text": "

    KPOps provides JSON schemas that enable autocompletion and validation for all YAML files that the user must work with.

    "}, {"location": "user/references/editor-integration/#supported-files", "title": "Supported files", "text": ""}, {"location": "user/references/ci-integration/github-actions/", "title": "GitHub Actions integration", "text": "

    We provided a GitHub composite action bakdata/kpops that installs and executes KPOps commands with the given parameters.

    "}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines config \u274c - string Directory containing the config*.yaml file(s) environment \u274c - string Environment to run KPOps in components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS parallel \u274c \"false\" string Whether to run pipelines in parallel python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
    steps:\n  # ...\n  # This step is useful for debugging reasons\n  - name: Generate Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: generate\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n\n  # It is possible to use a pre-release KPOps version from TestPyPI https://test.pypi.org/project/kpops/#history\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.5.dev20230707132709\n  # ...\n
    "}]} \ No newline at end of file +{"config": {"lang": ["en"], "separator": "[\\s\\-]+", "pipeline": ["stopWordFilter"]}, "docs": [{"location": "developer/auto-generation/", "title": "Auto generation", "text": "

    Auto generation happens mostly with pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto generate code for the documentation.

    "}, {"location": "developer/auto-generation/#generation-scripts-and-their-respective-files", "title": "Generation scripts and their respective files", "text": ""}, {"location": "developer/auto-generation/#documentation", "title": "Documentation", "text": ""}, {"location": "developer/auto-generation/#variables", "title": "Variables", "text": ""}, {"location": "developer/auto-generation/#cli-commands", "title": "CLI commands", "text": "

    Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.

    "}, {"location": "developer/auto-generation/#pipeline-and-defaults-example-definitions", "title": "Pipeline and defaults example definitions", "text": "

    Generates example pipeline.yaml and defaults.yaml for each individual component, stores them and also concatenates them into 1 big pipeline definition and 1 big pipeline defaults definition.

    User input

    Generated

    "}, {"location": "developer/auto-generation/#editor-integration", "title": "Editor integration", "text": ""}, {"location": "developer/auto-generation/#schemas", "title": "Schemas", "text": ""}, {"location": "developer/contributing/", "title": "How to contribute", "text": "

    Welcome! We are glad to have you visit our contributing guide!

    If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

    "}, {"location": "developer/contributing/#git", "title": "git", "text": "

    We are using git submodules to import the KPOps examples repository. You need to fetch the repository locally on your machine. To do so use this command:

    git submodule init\ngit submodule update --recursive\n

    This will fetch the resources under the examples folder.

    "}, {"location": "developer/contributing/#style", "title": "Style", "text": "

    We advise that you stick to our pre-commit hooks for code linting, formatting, and auto-generation of documentation. After you install them using poetry run pre-commit install they're triggered automatically during git commit. Additionally, you can manually invoke them with poetry run pre-commit run -a. In order for dprint to work, you have to manually install it locally. It will work in the CI, so it is also possible to manually carry out formatting changes flagged by dprint in the CI and skip installing it locally.

    "}, {"location": "developer/contributing/#python", "title": "Python", "text": "

    To ensure a consistent Python code style, we use Ruff for both linting and formatting. The official docs contain a guide on editor integration.

    Our configuration can be found in KPOps' top-level pyproject.toml.

    "}, {"location": "developer/contributing/#markdown", "title": "Markdown", "text": "

    To ensure a consistent markdown style, we use dprint's Markdown code formatter. Our configuration can be found here.

    "}, {"location": "developer/contributing/#css", "title": "CSS", "text": "

    To ensure a consistent CSS style, we use the malva dprint's plugin. Our configuration can be found here.

    "}, {"location": "developer/contributing/#toml", "title": "TOML", "text": "

    To ensure a consistent TOML style, we use dprint's TOML code formatter. Our configuration can be found here.

    "}, {"location": "developer/getting-started/", "title": "Getting started", "text": "

    Welcome! We are glad to have you visit our developer guide! If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

    Find more about our code-style or insights into KPOps' code base here in our developer guide.

    Work in progress

    The developer guide is still under construction. If you have a question left unanswered here, feel free to ask it by opening an issue.

    "}, {"location": "user/changelog/", "title": "Changelog", "text": ""}, {"location": "user/changelog/#600-release-date-2024-06-06", "title": "6.0.0 - Release Date: [2024-06-06]", "text": ""}, {"location": "user/changelog/#breaking-changes", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#features", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#refactor", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#511-release-date-2024-05-22", "title": "5.1.1 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#fixes", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#510-release-date-2024-05-22", "title": "5.1.0 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#miscellaneous", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#501-release-date-2024-05-15", "title": "5.0.1 - Release Date: [2024-05-15]", "text": ""}, {"location": "user/changelog/#fixes_1", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#500-release-date-2024-05-02", "title": "5.0.0 - Release Date: [2024-05-02]", "text": ""}, {"location": "user/changelog/#breaking-changes_1", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#miscellaneous_1", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#421-release-date-2024-04-25", "title": "4.2.1 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#features_1", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#420-release-date-2024-04-25", "title": "4.2.0 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#refactor_1", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_2", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": 
"user/changelog/#412-release-date-2024-03-11", "title": "4.1.2 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#documentation", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#411-release-date-2024-03-11", "title": "4.1.1 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#fixes_2", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_2", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_3", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#410-release-date-2024-03-07", "title": "4.1.0 - Release Date: [2024-03-07]", "text": ""}, {"location": "user/changelog/#documentation_1", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_4", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#402-release-date-2024-03-04", "title": "4.0.2 - Release Date: [2024-03-04]", "text": ""}, {"location": "user/changelog/#documentation_2", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_5", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#401-release-date-2024-02-29", "title": "4.0.1 - Release Date: [2024-02-29]", "text": ""}, {"location": "user/changelog/#fixes_3", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#400-release-date-2024-02-27", "title": "4.0.0 - Release Date: [2024-02-27]", "text": ""}, {"location": "user/changelog/#breaking-changes_2", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#features_2", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#324-release-date-2024-02-26", "title": "3.2.4 - Release Date: [2024-02-26]", "text": ""}, {"location": "user/changelog/#fixes_4", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": 
"user/changelog/#refactor_3", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_3", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#323-release-date-2024-02-19", "title": "3.2.3 - Release Date: [2024-02-19]", "text": ""}, {"location": "user/changelog/#fixes_5", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#322-release-date-2024-02-12", "title": "3.2.2 - Release Date: [2024-02-12]", "text": ""}, {"location": "user/changelog/#fixes_6", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#321-release-date-2024-02-08", "title": "3.2.1 - Release Date: [2024-02-08]", "text": ""}, {"location": "user/changelog/#fixes_7", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_4", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#320-release-date-2024-02-01", "title": "3.2.0 - Release Date: [2024-02-01]", "text": ""}, {"location": "user/changelog/#features_3", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#refactor_5", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_4", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#310-release-date-2024-01-30", "title": "3.1.0 - Release Date: [2024-01-30]", "text": ""}, {"location": "user/changelog/#features_4", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#fixes_8", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_6", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_6", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#302-release-date-2024-01-23", "title": "3.0.2 - Release Date: [2024-01-23]", "text": ""}, {"location": "user/changelog/#fixes_9", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": 
"user/changelog/#documentation_5", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_7", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#301-release-date-2024-01-19", "title": "3.0.1 - Release Date: [2024-01-19]", "text": ""}, {"location": "user/changelog/#fixes_10", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_6", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_8", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#300-release-date-2024-01-17", "title": "3.0.0 - Release Date: [2024-01-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_3", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#features_5", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#fixes_11", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#dependencies", "title": "\ud83e\uddea Dependencies", "text": ""}, {"location": "user/changelog/#refactor_7", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_7", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_9", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#2011-release-date-2023-10-24", "title": "2.0.11 - Release Date: [2023-10-24]", "text": ""}, {"location": "user/changelog/#fixes_12", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_8", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#2010-release-date-2023-10-12", "title": "2.0.10 - Release Date: [2023-10-12]", "text": ""}, {"location": "user/changelog/#miscellaneous_10", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#209-release-date-2023-09-19", "title": "2.0.9 - 
Release Date: [2023-09-19]", "text": ""}, {"location": "user/changelog/#fixes_13", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_9", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#208-release-date-2023-09-06", "title": "2.0.8 - Release Date: [2023-09-06]", "text": ""}, {"location": "user/changelog/#fixes_14", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_8", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#207-release-date-2023-08-31", "title": "2.0.7 - Release Date: [2023-08-31]", "text": ""}, {"location": "user/changelog/#fixes_15", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_10", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#206-release-date-2023-08-30", "title": "2.0.6 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#refactor_9", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#205-release-date-2023-08-30", "title": "2.0.5 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#fixes_16", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#204-release-date-2023-08-29", "title": "2.0.4 - Release Date: [2023-08-29]", "text": ""}, {"location": "user/changelog/#fixes_17", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#documentation_11", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#miscellaneous_11", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#203-release-date-2023-08-24", "title": "2.0.3 - Release Date: [2023-08-24]", "text": ""}, {"location": "user/changelog/#fixes_18", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#miscellaneous_12", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": 
"user/changelog/#202-release-date-2023-08-23", "title": "2.0.2 - Release Date: [2023-08-23]", "text": ""}, {"location": "user/changelog/#documentation_12", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#201-release-date-2023-08-22", "title": "2.0.1 - Release Date: [2023-08-22]", "text": ""}, {"location": "user/changelog/#fixes_19", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#200-release-date-2023-08-17", "title": "2.0.0 - Release Date: [2023-08-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_4", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#features_6", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#refactor_10", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_13", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#172-release-date-2023-08-16", "title": "1.7.2 - Release Date: [2023-08-16]", "text": ""}, {"location": "user/changelog/#refactor_11", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_14", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#171-release-date-2023-08-15", "title": "1.7.1 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#documentation_15", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#170-release-date-2023-08-15", "title": "1.7.0 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#features_7", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#160-release-date-2023-08-10", "title": "1.6.0 - Release Date: [2023-08-10]", "text": ""}, {"location": "user/changelog/#refactor_12", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#150-release-date-2023-08-10", "title": "1.5.0 - Release Date: [2023-08-10]", "text": 
""}, {"location": "user/changelog/#features_8", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#refactor_13", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_13", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#140-release-date-2023-08-02", "title": "1.4.0 - Release Date: [2023-08-02]", "text": ""}, {"location": "user/changelog/#fixes_20", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_14", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_14", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#132-release-date-2023-07-13", "title": "1.3.2 - Release Date: [2023-07-13]", "text": ""}, {"location": "user/changelog/#fixes_21", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#131-release-date-2023-07-11", "title": "1.3.1 - Release Date: [2023-07-11]", "text": ""}, {"location": "user/changelog/#refactor_15", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_15", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#130-release-date-2023-07-07", "title": "1.3.0 - Release Date: [2023-07-07]", "text": ""}, {"location": "user/changelog/#refactor_16", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#documentation_16", "title": "\ud83d\udcdd Documentation", "text": ""}, {"location": "user/changelog/#breaking-changes_5", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": ""}, {"location": "user/changelog/#miscellaneous_16", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#124-release-date-2023-06-27", "title": "1.2.4 - Release Date: [2023-06-27]", "text": ""}, {"location": "user/changelog/#miscellaneous_17", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": 
"user/changelog/#123-release-date-2023-06-22", "title": "1.2.3 - Release Date: [2023-06-22]", "text": ""}, {"location": "user/changelog/#fixes_22", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_17", "title": "\ud83c\udfed Refactor", "text": ""}, {"location": "user/changelog/#miscellaneous_18", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#122-release-date-2023-06-21", "title": "1.2.2 - Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#miscellaneous_19", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#121-release-date-2023-06-21", "title": "1.2.1 - Release Date: [2023-06-21]", "text": "Uncategorized "}, {"location": "user/changelog/#120-release-date-2023-06-21", "title": "1.2.0 - Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#features_9", "title": "\ud83d\ude80 Features", "text": ""}, {"location": "user/changelog/#fixes_23", "title": "\ud83d\udc1b Fixes", "text": ""}, {"location": "user/changelog/#refactor_18", "title": "\ud83c\udfed Refactor", "text": " Uncategorized "}, {"location": "user/what-is-kpops/", "title": "What is KPOps?", "text": "

    With a couple of easy commands in the shell, and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline1 to a Kubernetes cluster, but also reset, clean or destroy it!

    "}, {"location": "user/what-is-kpops/#key-features", "title": "Key features", "text": ""}, {"location": "user/what-is-kpops/#example", "title": "Example", "text": "An overview of Word-count pipeline shown in Streams Explorer Word-count pipeline.yaml
    - type: producer-app\n  name: data-producer\n  app:\n    image: bakdata/kpops-demo-sentence-producer\n\n- type: streams-app\n  name: word-counter\n  to:\n    topics:\n      ${output_topic_name}:\n        type: output\n        configs:\n          cleanup.policy: compact\n  app:\n    image: bakdata/kpops-demo-word-count-app\n    replicaCount: 1\n\n- type: kafka-sink-connector\n  name: redis-sink-connector\n  app:\n    connector.class: com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector\n    redis.hosts: redis-headless:6379\n    redis.database: 0\n    tasks.max: 1\n    key.converter: org.apache.kafka.connect.storage.StringConverter\n    value.converter: org.apache.kafka.connect.storage.StringConverter\n
    1. A Kafka pipeline can consist of consecutive streaming applications, producers, and connectors.\u00a0\u21a9

    "}, {"location": "user/core-concepts/config/", "title": "Configuration", "text": "

    KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.

    Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.

    To learn about any of the available settings, take a look at the example below.

    config.yaml
    # CONFIGURATION\n#\n# Custom Python module defining project-specific KPOps components\ncomponents_module: null\n# Base directory to the pipelines (default is current working directory)\npipeline_base_dir: .\n# The Kafka brokers address.\n# REQUIRED\nkafka_brokers: \"http://broker1:9092,http://broker2:9092\"\n# Configure the topic name variables you can use in the pipeline definition.\ntopic_name_config: \n  # Configures the value for the variable ${output_topic_name}\n  default_output_topic_name: ${pipeline.name}-${component.name}\n  # Configures the value for the variable ${error_topic_name}\n  default_error_topic_name: ${pipeline.name}-${component.name}-error\n# Configuration for Schema Registry.\nschema_registry:\n  # Whether the Schema Registry handler should be initialized.\n  enabled: false\n  # Address of the Schema Registry.\n  url: \"http://localhost:8081\"\n# Configuration for the Kafka REST Proxy.\nkafka_rest:\n  # Address of the Kafka REST Proxy.\n  url: \"http://localhost:8082\"\n# Configuration for Kafka Connect.\nkafka_connect:\n  # Address of Kafka Connect.\n  url: \"http://localhost:8083\"\n# The timeout in seconds that specifies when actions like deletion or deploy\n# timeout.\ntimeout: 300\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n  # Name of kubeconfig context (`--kube-context`)\n  context: name\n  # Run Helm in Debug mode.\n  debug: false\n  # Kubernetes API version used for Capabilities.APIVersions\n  api_version: null\n# Configure Helm Diff.\nhelm_diff_config: \n  # Set of keys that should not be checked.\n  ignore:\n    - name\n    - imageTag\n# Whether to retain clean up jobs in the cluster or uninstall the, after\n# completion.\nretain_clean_jobs: false\n

    Environment-specific pipeline definitions

    Similarly to defaults, it is possible to have an unlimited amount of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.

    "}, {"location": "user/core-concepts/defaults/", "title": "Defaults", "text": "

    KPOps has a very efficient way of dealing with repeating settings which manifests as defaults.yaml. This file provides the user with the power to set defaults for any and all components, thus omitting the need to repeat the same settings in pipeline.yaml.

    See real-world examples for defaults.

    "}, {"location": "user/core-concepts/defaults/#features", "title": "Features", "text": ""}, {"location": "user/core-concepts/defaults/#inheritance", "title": "Inheritance", "text": "

    An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.

    It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.

    "}, {"location": "user/core-concepts/defaults/#configuration", "title": "Configuration", "text": "

    KPOps allows using multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These will be picked up by KPOps and get merged into a single pipeline.yaml file. KPOps starts from reading the default files from where the pipeline path is defined and picks up every defaults file on its way to where the pipeline_base_dir is defined.

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.

    defaults merge priority

    Imagine the following folder structure, where the pipeline_base_dir is configured to pipelines:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    KPOps picks up the defaults in the following order (high to low priority):

    "}, {"location": "user/core-concepts/defaults/#components", "title": "Components", "text": "

    The defaults codeblocks in this section contain the full set of settings that are specific to the component. If a setting already exists in a parent config, it will not be included in the child's.

    "}, {"location": "user/core-concepts/defaults/#kubernetesapp", "title": "KubernetesApp", "text": "defaults.yaml
    # Base Kubernetes App\n#\n# Parent of: HelmApp\n# Child of: PipelineComponent\nkubernetes-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values 
given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/defaults/#kafkaapp", "title": "KafkaApp", "text": "defaults.yaml
    # Base component for Kafka-based components.\n#\n# Parent of: ProducerApp, StreamsApp\n# Child of: KubernetesApp\nkafka-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is 
initiated with the values given here\n      model: model\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
    "}, {"location": "user/core-concepts/defaults/#streamsapp", "title": "StreamsApp", "text": "defaults.yaml
    # StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. 
https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resouce.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n
    "}, {"location": "user/core-concepts/defaults/#producerapp", "title": "ProducerApp", "text": "defaults.yaml
    \n
    "}, {"location": "user/core-concepts/defaults/#kafkaconnector", "title": "KafkaConnector", "text": "defaults.yaml
    # Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is 
initiated with the values given here\n      model: model\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/defaults/#kafkasourceconnector", "title": "KafkaSourceConnector", "text": "defaults.yaml
    # Kafka source connector\n#\n# Child of: KafkaConnector\nkafka-source-connector:\n  # The source connector has no `from` section\n  # from:\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/defaults/#kafkasinkconnector", "title": "KafkaSinkConnector", "text": "defaults.yaml
    # Kafka sink connector\n#\n# Child of: KafkaConnector\nkafka-sink-connector:\n  # No settings differ from `kafka-connector`\n
    "}, {"location": "user/core-concepts/components/helm-app/", "title": "HelmApp", "text": ""}, {"location": "user/core-concepts/components/helm-app/#usage", "title": "Usage", "text": "

    Can be used to deploy any app in Kubernetes using Helm, for example, a REST service that serves Kafka data.

    "}, {"location": "user/core-concepts/components/helm-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kubernetes app managed through Helm with an associated Helm chart\n- type: helm-app\n  name: helm-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated 
with the values given here\n      model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/helm-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/helm-app/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/helm-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/helm-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/helm-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-app/", "title": "KafkaApp", "text": "

    Subclass of HelmApp.

    "}, {"location": "user/core-concepts/components/kafka-app/#usage", "title": "Usage", "text": ""}, {"location": "user/core-concepts/components/kafka-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base component for Kafka-based components.\n# Producer or streaming apps should inherit from this class.\n- type: kafka-app # required\n  name: kafka-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          
cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # `app` can contain application-specific settings, hence  the user is free to\n  # add the key-value pairs they need.\n  app: # required\n    streams: # required\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n    nameOverride: override-with-this-name # kafka-app-specific\n    imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
    "}, {"location": "user/core-concepts/components/kafka-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-app/#deploy", "title": "deploy", "text": "

    In addition to HelmApp's deploy:

    "}, {"location": "user/core-concepts/components/kafka-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/kafka-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-connector/", "title": "KafkaConnector", "text": "

    KafkaConnector is a component that deploys Kafka Connectors. Since a connector cannot be different from sink or source it is not recommended to use KafkaConnector for deployment in pipeline.yaml. Instead, KafkaConnector should be used in defaults.yaml to set defaults for all connectors in the pipeline as they can share some common settings.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/", "title": "KafkaSinkConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#usage", "title": "Usage", "text": "

    Lets other systems pull data from Apache Kafka.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka sink connector\n- type: kafka-sink-connector\n  name: kafka-sink-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values 
given here\n      model: model\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#deploy", "title": "deploy", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#destroy", "title": "destroy", "text": "

    The associated sink connector is removed from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#reset", "title": "reset", "text": "

    Reset the consumer group offsets using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#clean", "title": "clean", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/", "title": "KafkaSourceConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#usage", "title": "Usage", "text": "

    Manages source connectors in your Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka source connector\n- type: kafka-source-connector # required\n  name: kafka-source-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # The source connector has no `from` section\n  # from:\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example. Extensive documentation on\n  # connectors: https://kafka.apache.org/documentation/#connectconfigs\n  app: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#deploy", "title": "deploy", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#destroy", "title": "destroy", "text": "

    Remove the source connector from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#reset", "title": "reset", "text": "

    Delete state associated with the connector using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#clean", "title": "clean", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/", "title": "KubernetesApp", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#usage", "title": "Usage", "text": "

    Can be used to create components for any Kubernetes app.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base Kubernetes App\n- type: kubernetes-app\n  name: kubernetes-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      
model: model\n  namespace: namespace # required\n  # `app` contains application-specific settings, hence it does not have a rigid\n  # structure. The fields below are just an example.\n  app: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/components/kubernetes-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#deploy", "title": "deploy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#destroy", "title": "destroy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/overview/", "title": "Overview", "text": "

    This section explains the different components of KPOps, their usage and configuration in the pipeline definition pipeline.yaml.

    flowchart BT\n    KubernetesApp --> PipelineComponent\n    KafkaApp --> PipelineComponent\n    HelmApp --> KubernetesApp\n    StreamsBootstrap --> HelmApp\n    StreamsApp --> KafkaApp\n    StreamsApp --> StreamsBootstrap\n    ProducerApp --> KafkaApp\n    ProducerApp --> StreamsBootstrap\n    KafkaConnector --> PipelineComponent\n    KafkaSourceConnector --> KafkaConnector\n    KafkaSinkConnector --> KafkaConnector\n\n    click KubernetesApp \"./../kubernetes-app\"\n    click HelmApp \"./../helm-app\"\n    click KafkaApp \"./../kafka-app\"\n    click StreamsBootstrap \"./../streams-bootstrap\"\n    click StreamsApp \"./../streams-app\"\n    click ProducerApp \"./../producer-app\"\n    click KafkaConnector \"./../kafka-connector\"\n    click KafkaSourceConnector \"./../kafka-source-connector\"\n    click KafkaSinkConnector \"./../kafka-sink-connector\"

    KPOps component hierarchy

    "}, {"location": "user/core-concepts/components/producer-app/", "title": "ProducerApp", "text": "

    Subclass of KafkaApp and StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/producer-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka producer app

    "}, {"location": "user/core-concepts/components/producer-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n  name: producer-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # from: # While the producer-app does inherit from kafka-app, it does not need a\n  # `from` section, hence it does not support it.\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n  app: # required\n    streams: # required, producer-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      outputTopic: output_topic\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n    nameOverride: override-with-this-name # kafka-app-specific\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. 
Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/producer-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/producer-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    "}, {"location": "user/core-concepts/components/producer-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/producer-app/#reset", "title": "reset", "text": "

    Do nothing, producers are stateless.

    "}, {"location": "user/core-concepts/components/producer-app/#clean", "title": "clean", "text": ""}, {"location": "user/core-concepts/components/streams-app/", "title": "StreamsApp", "text": "

    Subclass of KafkaApp and StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/streams-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka Streams app

    "}, {"location": "user/core-concepts/components/streams-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # StreamsApp component that configures a streams bootstrap app.\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\n- type: streams-app # required\n  name: streams-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # 
https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  app: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. 
https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resource.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/streams-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    "}, {"location": "user/core-concepts/components/streams-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-app/#reset", "title": "reset", "text": ""}, {"location": "user/core-concepts/components/streams-app/#clean", "title": "clean", "text": "

    Similar to reset with two additional steps:

    "}, {"location": "user/core-concepts/components/streams-bootstrap/", "title": "StreamsApp", "text": "

    Subclass of HelmApp.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#usage", "title": "Usage", "text": "

    Configures a Helm app with streams-bootstrap Helm charts.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-bootstrap/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/variables/environment_variables/", "title": "Environment variables", "text": "

    Environment variables can be set by using the export command in Linux or the set command in Windows.

    dotenv files

    KPOps currently supports .env files only for variables related to the config. Full support for .env files is on the roadmap. One of the possible ways to use one and export the contents manually is with the following command: export $(xargs < .env). This would work in bash, provided there are no spaces inside the values.

    "}, {"location": "user/core-concepts/variables/environment_variables/#config", "title": "Config", "text": "

    These variables take precedence over the settings in config.yaml. Variables marked as required can instead be set in the global config.

    Name Default Value Required Description Setting name KPOPS_COMPONENTS_MODULE False Custom Python module defining project-specific KPOps components components_module KPOPS_PIPELINE_BASE_DIR . False Base directory to the pipelines (default is current working directory) pipeline_base_dir KPOPS_KAFKA_BROKERS True The comma separated Kafka brokers address. kafka_brokers KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME ${pipeline.name}-${component.name} False Configures the value for the variable ${output_topic_name} topic_name_config.default_output_topic_name KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME ${pipeline.name}-${component.name}-error False Configures the value for the variable ${error_topic_name} topic_name_config.default_error_topic_name KPOPS_SCHEMA_REGISTRY__ENABLED False False Whether the Schema Registry handler should be initialized. schema_registry.enabled KPOPS_SCHEMA_REGISTRY__URL http://localhost:8081/ False Address of the Schema Registry. schema_registry.url KPOPS_SCHEMA_REGISTRY__TIMEOUT 30 False Operation timeout in seconds. schema_registry.timeout KPOPS_KAFKA_REST__URL http://localhost:8082/ False Address of the Kafka REST Proxy. kafka_rest.url KPOPS_KAFKA_REST__TIMEOUT 30 False Operation timeout in seconds. kafka_rest.timeout KPOPS_KAFKA_CONNECT__URL http://localhost:8083/ False Address of Kafka Connect. kafka_connect.url KPOPS_KAFKA_CONNECT__TIMEOUT 30 False Operation timeout in seconds. kafka_connect.timeout KPOPS_CREATE_NAMESPACE False False Flag for helm upgrade --install. Create the release namespace if not present. create_namespace KPOPS_HELM_CONFIG__CONTEXT False Name of kubeconfig context (--kube-context) helm_config.context KPOPS_HELM_CONFIG__DEBUG False False Run Helm in Debug mode helm_config.debug KPOPS_HELM_CONFIG__API_VERSION False Kubernetes API version used for Capabilities.APIVersions helm_config.api_version KPOPS_HELM_DIFF_CONFIG__IGNORE True Set of keys that should not be checked. 
helm_diff_config.ignore KPOPS_RETAIN_CLEAN_JOBS False False Whether to retain clean up jobs in the cluster or uninstall them after completion. retain_clean_jobs config_env_vars.env Exhaustive list of all config-related environment variables
    # Global config environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# settings in `config.yaml`. Variables marked as required can instead\n# be set in the global config.\n#\n# components_module\n# Custom Python module defining project-specific KPOps components\nKPOPS_COMPONENTS_MODULE # No default value, not required\n# pipeline_base_dir\n# Base directory to the pipelines (default is current working\n# directory)\nKPOPS_PIPELINE_BASE_DIR=.\n# kafka_brokers\n# The comma separated Kafka brokers address.\nKPOPS_KAFKA_BROKERS # No default value, required\n# topic_name_config.default_output_topic_name\n# Configures the value for the variable ${output_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}\n# topic_name_config.default_error_topic_name\n# Configures the value for the variable ${error_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error\n# schema_registry.enabled\n# Whether the Schema Registry handler should be initialized.\nKPOPS_SCHEMA_REGISTRY__ENABLED=False\n# schema_registry.url\n# Address of the Schema Registry.\nKPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/\n# schema_registry.timeout\n# Operation timeout in seconds.\nKPOPS_SCHEMA_REGISTRY__TIMEOUT=30\n# kafka_rest.url\n# Address of the Kafka REST Proxy.\nKPOPS_KAFKA_REST__URL=http://localhost:8082/\n# kafka_rest.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_REST__TIMEOUT=30\n# kafka_connect.url\n# Address of Kafka Connect.\nKPOPS_KAFKA_CONNECT__URL=http://localhost:8083/\n# kafka_connect.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_CONNECT__TIMEOUT=30\n# create_namespace\n# Flag for `helm upgrade --install`. 
Create the release namespace if\n# not present.\nKPOPS_CREATE_NAMESPACE=False\n# helm_config.context\n# Name of kubeconfig context (`--kube-context`)\nKPOPS_HELM_CONFIG__CONTEXT # No default value, not required\n# helm_config.debug\n# Run Helm in Debug mode\nKPOPS_HELM_CONFIG__DEBUG=False\n# helm_config.api_version\n# Kubernetes API version used for `Capabilities.APIVersions`\nKPOPS_HELM_CONFIG__API_VERSION # No default value, not required\n# helm_diff_config.ignore\n# Set of keys that should not be checked.\nKPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required\n# retain_clean_jobs\n# Whether to retain clean up jobs in the cluster or uninstall them\n# after completion.\nKPOPS_RETAIN_CLEAN_JOBS=False\n
    "}, {"location": "user/core-concepts/variables/environment_variables/#cli", "title": "CLI", "text": "

    These variables take precedence over the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.

    Name Default Value Required Description KPOPS_CONFIG_PATH . False Path to the dir containing config.yaml files KPOPS_DOTENV_PATH False Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. KPOPS_ENVIRONMENT False The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). KPOPS_PIPELINE_PATHS True Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. KPOPS_PIPELINE_STEPS False Comma separated list of steps to apply the command on cli_env_vars.env Exhaustive list of all cli-related environment variables
    # CLI Environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# commands' flags. If a variable is set, the corresponding flag does\n# not have to be specified in commands. Variables marked as required\n# can instead be set as flags.\n#\n# Path to the dir containing config.yaml files\nKPOPS_CONFIG_PATH=.\n# Path to dotenv file. Multiple files can be provided. The files will\n# be loaded in order, with each file overriding the previous one.\nKPOPS_DOTENV_PATH # No default value, not required\n# The environment you want to generate and deploy the pipeline to.\n# Suffix your environment files with this value (e.g.\n# defaults_development.yaml for environment=development).\nKPOPS_ENVIRONMENT # No default value, not required\n# Paths to dir containing 'pipeline.yaml' or files named\n# 'pipeline.yaml'.\nKPOPS_PIPELINE_PATHS # No default value, required\n# Comma separated list of steps to apply the command on\nKPOPS_PIPELINE_STEPS # No default value, not required\n
    "}, {"location": "user/core-concepts/variables/substitution/", "title": "Substitution", "text": "

    KPOps supports the usage of placeholders and environment variables in pipeline definition and defaults.

    "}, {"location": "user/core-concepts/variables/substitution/#component-specific-variables", "title": "Component-specific variables", "text": "

    These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.

    All of them are prefixed with component. and take the following form: component.{attribute_name}. If the attribute itself contains attributes, they can be referred to like this: component.{attribute_name}.{subattribute_name}.

    Example
    - type: scheduled-producer\n  app:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_schedule: \"${component.app.schedule}\"\n    commandLine:\n      FAKE_ARG: \"fake-arg-value\"\n    schedule: \"30 3/8 * * *\"\n- type: converter\n  app:\n    commandLine:\n      CONVERT_XML: true\n    resources:\n      limits:\n        memory: 2G\n      requests:\n        memory: 2G\n- type: filter\n  name: \"filter-app\"\n  app:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_resources_requests_memory: \"${component.app.resources.requests.memory}\"\n      ${component.type}: \"${component.app.labels.app_name}-${component.app.labels.app_type}\"\n      test_placeholder_in_placeholder: \"${component.app.labels.${component.type}}\"\n    commandLine:\n      TYPE: \"nothing\"\n    resources:\n      requests:\n        memory: 3G\n    replicaCount: 4\n    autoscaling:\n      minReplicas: 4\n      maxReplicas: 4\n
    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-config-specific-variables", "title": "Pipeline-config-specific variables", "text": "

    These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.

    All such variables are prefixed with config. and are of the same form as the component-specific variables.

    Info

    error_topic_name is an alias for config.topic_name_config.default_error_topic_name output_topic_name is an alias for config.topic_name_config.default_output_topic_name

    "}, {"location": "user/core-concepts/variables/substitution/#environment-variables", "title": "Environment variables", "text": "

    Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation following the form ${ENV_VAR_NAME}. This, of course, includes variables like the ones relevant to the KPOps cli that are exported by the user.

    See all KPOps environment variables

    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-name-variables", "title": "Pipeline name variables", "text": "

    These are special variables that refer to the name and path of a pipeline.

    "}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
    1. Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
    2. Chaining variables: It is possible to chain any number of variables, see the example above.
    3. Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references.
    "}, {"location": "user/examples/atm-fraud-pipeline/", "title": "ATM fraud detection pipeline", "text": "

    ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blogpost. The one used in this example is re-built from scratch using bakdata's streams-bootstrap library.

    "}, {"location": "user/examples/atm-fraud-pipeline/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/examples/atm-fraud-pipeline/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postgresql", "title": "PostgreSQL", "text": "

    Deploy PostgreSQL using the Bitnami Helm chart: Add the helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install the PostgreSQL with helm:

    helm upgrade --install -f ./postgresql.yaml \\\n--namespace kpops \\\npostgresql bitnami/postgresql\n
    PostgreSQL Example Helm chart values (postgresql.yaml)
    auth:\n  database: app_db\n  enablePostgresUser: true\n  password: AppPassword\n  postgresPassword: StrongPassword\n  username: app1\nprimary:\n  persistence:\n    enabled: false\n    existingClaim: postgresql-data-claim\nvolumePermissions:\n  enabled: true\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-detection-example-pipeline-setup", "title": "ATM fraud detection example pipeline setup", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#deploying-the-atm-fraud-detection-pipeline", "title": "Deploying the ATM fraud detection pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy atm-fraud/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see if your pipeline will be deployed correctly.

    "}, {"location": "user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:

    An overview of ATM fraud pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/examples/atm-fraud-pipeline/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postrgresql", "title": "PostgreSQL", "text": "

    PostgreSQL can be uninstalled by running the following command:

    helm --namespace kpops uninstall postgresql\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-pipeline", "title": "ATM fraud pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean atm-fraud/pipeline.yaml --verbose  --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see if your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/examples/atm-fraud-pipeline/#common-errors", "title": "Common errors", "text": ""}, {"location": "user/getting-started/quick-start/", "title": "Quick start", "text": ""}, {"location": "user/getting-started/quick-start/#word-count", "title": "Word-count", "text": "

    Word-count is a demo pipeline consisting of a producer producing words to Kafka, a Kafka streams app counting the number of times each word occurs, and finally a Redis database into which the words are exported.

    "}, {"location": "user/getting-started/quick-start/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": ""}, {"location": "user/getting-started/quick-start/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/getting-started/quick-start/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/getting-started/quick-start/#redis", "title": "Redis", "text": "

    Deploy Redis using the Bitnami Helm chart: Add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install Redis with Helm:

    helm upgrade --install -f ./values-redis.yaml \\\n--namespace kpops \\\nredis bitnami/redis\n
    Redis example Helm chart values (values-redis.yaml)
    architecture: standalone\nauth:\n  enabled: false\nmaster:\n  count: 1\n  configuration: \"databases 1\"\nimage:\n  tag: 7.0.8\n
    "}, {"location": "user/getting-started/quick-start/#word-count-example-pipeline-setup", "title": "Word-count example pipeline setup", "text": ""}, {"location": "user/getting-started/quick-start/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/getting-started/quick-start/#deploying-the-word-count-pipeline", "title": "Deploying the Word-count pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy word-count/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see if your pipeline will be deployed correctly.

    "}, {"location": "user/getting-started/quick-start/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser.

    You should be able to see the pipeline shown in the image below:

    An overview of Word-count pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/getting-started/quick-start/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/quick-start/#redis_1", "title": "Redis", "text": "

    Redis can be uninstalled by running the following command:

    helm --namespace kpops uninstall redis\n
    "}, {"location": "user/getting-started/quick-start/#word-count-pipeline", "title": "Word-count pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean word-count/pipeline.yaml --verbose --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see if your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/getting-started/quick-start/#common-errors", "title": "Common errors", "text": ""}, {"location": "user/getting-started/setup/", "title": "Setup KPOps", "text": "

    In this part, you will set up KPOps. This includes:

    "}, {"location": "user/getting-started/setup/#prerequisites", "title": "Prerequisites", "text": ""}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "

    If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.

    1. You can install k3d with its installation script:

      wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/v5.4.6/install.sh | bash\n

      For other ways of installing k3d, you can have a look at their installation guide.

    2. The Kafka deployment needs a modified Docker image. In that case the image is built and pushed to a Docker registry that holds it. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:

      k3d registry create kpops-registry.localhost --port 12345\n
    3. Now you can create a new cluster called kpops that uses the previously created Docker registry:

      k3d cluster create kpops --k3s-arg \"--no-deploy=traefik@server:*\" --registry-use k3d-kpops-registry.localhost:12345\n

    Note

    Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.

    You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.

    "}, {"location": "user/getting-started/setup/#deploy-kafka", "title": "Deploy Kafka", "text": "

    Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.

    1. To allow connectivity to other systems Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:

      1. Create a Dockerfile with the following content:

        FROM confluentinc/cp-kafka-connect:7.1.3\n\nRUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0\n
      2. Build and push the modified image to your private Docker registry:

        docker build . --tag localhost:12345/kafka-connect-jdbc:7.1.3 && \\\ndocker push localhost:12345/kafka-connect-jdbc:7.1.3\n

      Detailed instructions on building, tagging and pushing a docker image can be found in Docker docs.

    2. Add Confluent's Helm chart repository and update the index:

      helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/ && \\\nhelm repo update\n
    3. Install Kafka, Zookeeper, Confluent's Schema Registry, Kafka Rest Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example for the --values ./kafka.yaml file configuring the deployment accordingly. Deploy the services:

      helm upgrade \\\n    --install \\\n    --version 0.6.1 \\\n    --values ./kafka.yaml \\\n    --namespace kpops \\\n    --create-namespace \\\n    --wait \\\n    k8kafka confluentinc/cp-helm-charts\n
    Kafka Helm chart values (kafka.yaml)

    An example value configuration for Confluent's Helm chart. This configuration deploys a single Kafka Broker, a Schema Registry, Zookeeper, Kafka Rest Proxy, and Kafka Connect with minimal resources.

    cp-zookeeper:\n  enabled: true\n  servers: 1\n  imageTag: 7.1.3\n  heapOptions: \"-Xms124M -Xmx124M\"\n  overrideGroupId: k8kafka\n  fullnameOverride: \"k8kafka-cp-zookeeper\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.2G\n    limits:\n      cpu: 250m\n      memory: 0.2G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka:\n  enabled: true\n  brokers: 1\n  imageTag: 7.1.3\n  podManagementPolicy: Parallel\n  configurationOverrides:\n    \"auto.create.topics.enable\": false\n    \"offsets.topic.replication.factor\": 1\n    \"transaction.state.log.replication.factor\": 1\n    \"transaction.state.log.min.isr\": 1\n    \"confluent.metrics.reporter.topic.replicas\": 1\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.5G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n  persistence:\n    enabled: false\n\ncp-schema-registry:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-schema-registry\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.25G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-connect:\n  enabled: true\n  replicaCount: 1\n  image: k3d-kpops-registry.localhost:12345/kafka-connect-jdbc\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-kafka-connect\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 500m\n      memory: 0.25G\n    limits:\n      cpu: 500m\n      memory: 0.25G\n  configurationOverrides:\n    \"consumer.max.poll.records\": \"10\"\n    \"consumer.max.poll.interval.ms\": \"900000\"\n    \"config.storage.replication.factor\": \"1\"\n    \"offset.storage.replication.factor\": \"1\"\n    
\"status.storage.replication.factor\": \"1\"\n  cp-schema-registry:\n    url: http://k8kafka-cp-schema-registry:8081\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-rest:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-rest\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-ksql-server:\n  enabled: false\ncp-control-center:\n  enabled: false\n
    "}, {"location": "user/getting-started/setup/#deploy-streams-explorer", "title": "Deploy Streams Explorer", "text": "

    Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster including the inspection of schemas and monitoring of metrics. First, add the Helm repository:

    helm repo add streams-explorer https://bakdata.github.io/streams-explorer && \\\nhelm repo update\n

    Below you can find an example for the --values ./streams-explorer.yaml file configuring the deployment accordingly. Now, deploy the service:

    helm upgrade \\\n    --install \\\n    --version 0.2.3 \\\n    --values ./streams-explorer.yaml \\\n    --namespace kpops \\\n    streams-explorer streams-explorer/streams-explorer\n
    Streams Explorer Helm chart values (streams-explorer.yaml)

    An example value configuration for Streams Explorer Helm chart.

    imageTag: \"v2.1.2\"\nconfig:\n   K8S__deployment__cluster: true\n   SCHEMAREGISTRY__url: http://k8kafka-cp-schema-registry.kpops.svc.cluster.local:8081\n   KAFKACONNECT__url: http://k8kafka-cp-kafka-connect.kpops.svc.cluster.local:8083\nresources:\n   requests:\n       cpu: 200m\n       memory: 300Mi\n   limits:\n       cpu: 200m\n       memory: 300Mi\n
    "}, {"location": "user/getting-started/setup/#check-the-status-of-your-deployments", "title": "Check the status of your deployments", "text": "

    Now we will check if all the pods are running in our namespace. You can list all pods in the namespace with this command:

    kubectl --namespace kpops get pods\n

    Then you should see the following output in your terminal:

    NAME                                          READY   STATUS    RESTARTS   AGE\nk8kafka-cp-kafka-connect-8fc7d544f-8pjnt      1/1     Running   0          15m\nk8kafka-cp-zookeeper-0                        1/1     Running   0          15m\nk8kafka-cp-kafka-0                            1/1     Running   0          15m\nk8kafka-cp-schema-registry-588f8c65db-jdwbq   1/1     Running   0          15m\nk8kafka-cp-rest-6bbfd7b645-nwkf8              1/1     Running   0          15m\nstreams-explorer-54db878c67-s8wbz             1/1     Running   0          15m\n

    Pay attention to the STATUS row. The pods should have a status of Running.

    "}, {"location": "user/getting-started/setup/#install-kpops", "title": "Install KPOps", "text": "

    KPOps comes as a PyPI package. You can install it with pip:

    pip install kpops\n
    "}, {"location": "user/getting-started/teardown/", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/teardown/#kpops-teardown-commands", "title": "KPOps teardown commands", "text": ""}, {"location": "user/getting-started/teardown/#kpops-deployed-pipeline", "title": "KPOps-deployed pipeline", "text": "

    The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with helm (see section Infrastructure).

    1. Export environment variables.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Navigate to the examples folder. Replace the <name-of-the-example-directory> with the example you want to tear down. For example the atm-fraud-detection.

    3. Remove the pipeline

      # Uncomment 1 line to either destroy, reset or clean.\n\n# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \\\n--config <name-of-the-example-directory>/config.yaml \\\n--execute\n
    "}, {"location": "user/getting-started/teardown/#infrastructure", "title": "Infrastructure", "text": "

    Delete namespace:

    kubectl delete namespace kpops\n

    Note

    In case kpops destroy is not working one can uninstall the pipeline services one by one. This is equivalent to running kpops destroy. In case a clean uninstall (like the one kpops clean does) is needed, one needs to also delete the topics and schemas created by deployment of the pipeline.

    "}, {"location": "user/getting-started/teardown/#local-cluster", "title": "Local cluster", "text": "

    Delete local cluster:

    k3d cluster delete kpops\n
    "}, {"location": "user/getting-started/teardown/#local-image-registry", "title": "Local image registry", "text": "

    Delete local registry:

    k3d registry delete k3d-kpops-registry.localhost\n
    "}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "

    KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention the type would be the lowercase, kebab-cased name of the class.

    class MyCoolStreamApp(StreamsApp):\n-    type = \"my-cool-stream-app\"\n+    ...\n

    Because of this new convention producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.

    - producer:\n+ producer-app:\n    app:\n        streams:\n        outputTopic: output_topic\n        extraOutputTopics:\n            output_role1: output_topic1\n            output_role2: output_topic2\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-inputoutput-types", "title": "Refactor input/output types", "text": ""}, {"location": "user/migration-guide/v1-v2/#to-section", "title": "To section", "text": "

    In the to section these have changed:

      to:\n    topics:\n      ${pipeline_name}-topic-1:\n-       type: extra\n        role: \"role-1\"\n        ...\n      ${pipeline_name}-topic-2:\n-       type: output\n        ...\n      ${pipeline_name}-topic-3:\n         type: error\n         ...\n
    "}, {"location": "user/migration-guide/v1-v2/#from-section", "title": "From section", "text": "

    In the from section these have changed:

      from:\n    topics:\n      ${pipeline_name}-input-topic:\n-       type: input\n        ...\n      ${pipeline_name}-extra-topic:\n-       type: extra\n        role: topic-role\n        ...\n      ${pipeline_name}-input-pattern-topic:\n-       type: input-pattern\n+       type: pattern\n        ...\n      ${pipeline_name}-extra-pattern-topic:\n-       type: extra-pattern\n+       type: pattern\n        role: some-role\n        ...\n
    "}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "

    All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example of a pipeline.yaml in the following. Notice that the app section here remains untouched.

    ...\ntype: streams-app\n  name: streams-app\n  namespace: namespace\n  app:\n    streams:\n      brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n     autoscaling:\n      consumerGroup: consumer-group\n      lagThreshold: 0\n      enabled: false\n      pollingInterval: 30\n\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: error\n-       keySchema: key-schema\n+       key_schema: key-schema\n-       valueSchema: value-schema\n+       value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs:\n          cleanup.policy: compact\n    models:\n      model: model\n  prefix: ${pipeline_name}-\n- repoConfig:\n+ repo_config:\n-   repositoryName: bakdata-streams-bootstrap\n+   repository_name: bakdata-streams-bootstrap\n    url: https://bakdata.github.io/streams-bootstrap/\n-   repoAuthFlags:\n+   repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.4\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-handling-of-helm-flags", "title": "Refactor handling of Helm flags", "text": "

    If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, the abstract function get_helm_chart that returns the chart for deploying the app using Helm is now a Python property and renamed to helm_chart.

    class MyCoolApp(KubernetesApp):\n\n+   @property\n    @override\n-   def get_helm_chart(self) -> str:\n+   def helm_chart(self) -> str:\n        return \"./charts/charts-folder\"\n
    "}, {"location": "user/migration-guide/v1-v2/#plural-broker-field-in-pipeline-config", "title": "Plural broker field in pipeline config", "text": "

    Since you can pass a comma-separated string of broker addresses, the broker field in KPOps is now plural. The pluralization has affected multiple areas:

    "}, {"location": "user/migration-guide/v1-v2/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- broker: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  kafka_connect_host: \"http://localhost:8083\"\n  kafka_rest_host: \"http://localhost:8082\"\n  schema_registry_url: \"http://localhost:8081\"\n
    "}, {"location": "user/migration-guide/v1-v2/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called brokers.

    ...\n  app:\n    streams:\n-     brokers: ${broker}\n+     brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#environment-variable", "title": "Environment variable", "text": "

    If you previously set the environment variable KPOPS_KAFKA_BROKER, you now need to replace it with KPOPS_KAFKA_BROKERS.

    "}, {"location": "user/migration-guide/v2-v3/", "title": "Migrate from V2 to V3", "text": "

    Jump to the summary

    "}, {"location": "user/migration-guide/v2-v3/#use-hash-and-trim-long-helm-release-names-instead-of-only-trimming", "title": "Use hash and trim long Helm release names instead of only trimming", "text": "

    KPOps handles long (more than 53 characters) Helm releases names differently. Helm will not find your (long) old release names anymore. Therefore, it is recommended that you should once destroy your pipeline with KPOps v2 to remove old Helm release names. After a clean destroy, re-deploy your pipeline with the KPOps v3.

    For example if you have a component with the Helm release name example-component-name-too-long-fake-fakefakefakefakefake. The new release name will shorten the original name to 53 characters and then replace the last 6 characters of the trimmed name with the first 5 characters of the result of SHA-1(helm_release_name).

    example-component-name-too-long-fake-fakefakef-0a7fc ----> 53 chars\n---------------------------------------------- -----\n  ^Shortened helm_release_name                 ^first 5 characters of SHA1(helm_release_name)\n
    "}, {"location": "user/migration-guide/v2-v3/#create-helmapp-component", "title": "Create HelmApp component", "text": "

    All Helm-specific parts of the built-in KubernetesApp have been extracted to a new child component that is more appropriately named HelmApp. It has to be renamed in your existing pipeline definitions and custom components module.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml", "title": "pipeline.yaml", "text": "
    -- type: kubernetes-app\n+- type: helm-app\n   name: foo\n
    "}, {"location": "user/migration-guide/v2-v3/#custom_modulepy", "title": "custom_module.py", "text": "
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n      ...\n
    "}, {"location": "user/migration-guide/v2-v3/#create-streamsbootstrap-component-refactor-cleanup-jobs-as-individual-helmapp", "title": "Create StreamsBootstrap component & refactor cleanup jobs as individual HelmApp", "text": "

    Previously the default KafkaApp component configured the streams-bootstrap Helm Charts. Now, this component is no longer tied to Helm (or Kubernetes). Instead, there is a new StreamsBootstrap component that configures the Helm Chart repository for the components that use it, e.g. StreamsApp and ProducerApp. If you are using non-default values for the Helm Chart repository or version, it has to be updated as shown below.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml", "title": "defaults.yaml", "text": "
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-kafka-connector-resetter-as-individual-helmapp", "title": "Refactor Kafka Connector resetter as individual HelmApp", "text": "

    Internally, the Kafka Connector resetter is now its own standard HelmApp, removing a lot of the shared code. It is configured using the resetter_namespace (formerly namespace) and resetter_values attributes.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml_1", "title": "defaults.yaml", "text": "
      kafka-connector:\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n
    "}, {"location": "user/migration-guide/v2-v3/#make-kafka-rest-proxy-kafka-connect-hosts-default-and-improve-schema-registry-config", "title": "Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config", "text": "

    The breaking changes target the config.yaml file:

    The environment variable names of these config fields changed respectively. Please refer to the environment variables documentation page to see the newest changes.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called kafka_brokers.

    ...\n  app:\n    streams:\n-     brokers: ${brokers}\n+     brokers: ${kafka_brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v2-v3/#define-custom-components-module-pipeline-base-dir-globally", "title": "Define custom components module & pipeline base dir globally", "text": "

    Warning

    The previous CLI parameters have been removed.

    The options for a custom components_module and pipeline_base_dir are now global settings, defined in config.yaml.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_1", "title": "config.yaml", "text": "
      kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  environment: development\n+ components_module: components\n+ pipeline_base_dir: pipelines\n
    "}, {"location": "user/migration-guide/v2-v3/#move-github-action-to-repsitory-root", "title": "Move GitHub action to repository root", "text": "

    The location of the GitHub action has changed, and it's now available directly as bakdata/kpops.

    You'll need to change it in your GitHub CI workflows.

    steps:\n  - name: kpops deploy\n-   uses: bakdata/kpops/actions/kpops-runner@main\n+   uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      # ...\n
    "}, {"location": "user/migration-guide/v2-v3/#allow-overriding-config-files", "title": "Allow overriding config files", "text": "

    Specifying the environment is no longer mandatory. If not defined, only the global files will be used.

    environment is no longer specified in config.yaml. Instead, it can be either set via the CLI flag --environment or with the environment variable KPOPS_ENVIRONMENT.

    The --config flag in the CLI now points to the directory that contains config*.yaml files. The files to be used are resolved based on the provided (or not) environment.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_2", "title": "config.yaml", "text": "
    - environment: development\n  kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#change-substitution-variables-separator-to", "title": "Change substitution variables separator to .", "text": "

    The delimiter in the substitution variables is changed to ..

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml_1", "title": "pipeline.yaml and default.yaml", "text": "
    steps:\n  - type: scheduled-producer\n    app:\n      labels:\n-       app_type: \"${component_type}\"\n-       app_name: \"${component_name}\"\n-       app_schedule: \"${component_app_schedule}\"\n+       app_type: \"${component.type}\"\n+       app_name: \"${component.name}\"\n+       app_schedule: \"${component.app.schedule}\"\n
    "}, {"location": "user/migration-guide/v2-v3/#configyaml_3", "title": "config.yaml", "text": "
    topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n- default_output_topic_name: \"${pipeline_name}-${component_name}-topic\"\n+ default_error_topic_name: \"${pipeline_name}-${component.name}-dead-letter-topic\"\n+ default_output_topic_name: \"${pipeline_name}-${component.name}-topic\"\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-generate-template-for-python-api-usage", "title": "Refactor generate template for Python API usage", "text": "

    The template method of every pipeline component has been renamed to manifest as it is no longer strictly tied to Helm template. Instead, it can be used to render the final resources of a component, such as Kubernetes manifests.

    There is also a new kpops manifest command replacing the existing kpops generate --template flag.

    If you're using this functionality in your custom components, it needs to be updated.

      from kpops.components.base_components.models.resource import Resource\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n
    "}, {"location": "user/migration-guide/v2-v3/#namespace-substitution-vars", "title": "Namespace substitution vars", "text": "

    The global configuration variables are now namespaced under the config key, such as ${config.kafka_brokers}, ${config.schema_registry.url}. Same with pipeline variables, e.g. ${pipeline_name} \u2192 ${pipeline.name}. This would make it more uniform with the existing ${component.<key>} variables.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml_1", "title": "pipeline.yaml", "text": "
      name: kafka-app\n- prefix: ${pipeline_name}-\n+ prefix: ${pipeline.name}-\n  app:\n    streams:\n-     brokers: ${kafka_brokers}\n-     schemaRegistryUrl: ${schema_registry.url}\n+     brokers: ${config.kafka_brokers}\n+     schemaRegistryUrl: ${config.schema_registry.url}\n
    "}, {"location": "user/migration-guide/v2-v3/#summary", "title": "Summary", "text": "

    Warning

    Helm will not find your (long) old release names anymore.

    defaults.yaml
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    pipeline.yaml
    - - type: kubernetes-app\n+ - type: helm-app\n  ...\n  - type: kafka-app\n    app:\n-     brokers: ${brokers}\n+     brokers: ${config.kafka_brokers}\n      labels:\n-       app_schedule: \"${component_app_schedule}\"\n+       app_schedule: \"${component.app.schedule}\"\n  ...\n  - type: kafka-connector\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n  ...\n
    config.yaml
    - environment: development\n\n+ components_module: components\n\n+ pipeline_base_dir: pipelines\n\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n\n  topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n+ default_error_topic_name: \"${pipeline.name}-${component.name}-dead-letter-topic\"\n  ...\n
    custom_module.py
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n+ from kpops.components.base_components.models.resource import Resource\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n  ...\n
    github_ci_workflow.yaml
      steps:\n    - name: ...\n-     uses: bakdata/kpops/actions/kpops-runner@main\n+     uses: bakdata/kpops@main\n  ...\n
    "}, {"location": "user/migration-guide/v3-v4/", "title": "Migrate from V3 to V4", "text": ""}, {"location": "user/migration-guide/v3-v4/#distribute-defaults-across-multiple-files", "title": "Distribute defaults across multiple files", "text": "

    Warning

    The --defaults flag is removed

    It is possible now to use multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These will be picked up by KPOps and get merged into a single pipeline.yaml file. KPOps starts from reading the default files from where the pipeline path is defined and picks up every defaults file on its way to where the pipeline_base_dir is defined.

    For example, imagine the following folder structure:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    The pipeline_base_dir is configured to pipelines. Now if we generate this pipeline with the following command:

    kpops generate \\\n      --environment dev \\\n      ./pipelines/distributed-defaults/pipeline-deep/pipeline.yaml\n

    The defaults would be picked in the following order (high to low priority):

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    "}, {"location": "user/migration-guide/v4-v5/", "title": "Migrate from V4 to V5", "text": ""}, {"location": "user/migration-guide/v4-v5/#allow-custom-timeout-for-external-services", "title": "Allow custom timeout for external services", "text": "

    The global timeout setting has been removed. Instead, an individual timeout can be set for each external service. The default is 30 seconds.

    "}, {"location": "user/migration-guide/v4-v5/#configyaml", "title": "config.yaml", "text": "
    - timeout: 300\n\n  kafka_rest:\n    url: \"http://my-custom-rest.url:8082\"\n+   timeout: 30\n  kafka_connect:\n    url: \"http://my-custom-connect.url:8083\"\n+   timeout: 30\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n+   timeout: 30\n
    "}, {"location": "user/migration-guide/v5-v6/", "title": "Migrate from V5 to V6", "text": ""}, {"location": "user/migration-guide/v5-v6/#deploy-multiple-pipelines", "title": "Deploy multiple pipelines", "text": "

    KPOps can now deploy multiple pipelines in a single command. It is possible to pass one or many pipeline.yaml files or pass a directory with many pipeline.yaml files within it.

    The environment variable KPOPS_PIPELINE_PATH is changed to KPOPS_PIPELINE_PATHS.

    Read more:

    "}, {"location": "user/migration-guide/v5-v6/#separate-kpops-api-from-the-cli", "title": "Separate KPOps API from the CLI", "text": "

    KPOps Python API is now stable and separated from the CLI! \ud83c\udf89

    "}, {"location": "user/references/cli-commands/", "title": "CLI Usage", "text": "

    Usage:

    $ kpops [OPTIONS] COMMAND [ARGS]...\n

    Options:

    Commands:

    "}, {"location": "user/references/cli-commands/#kpops-clean", "title": "kpops clean", "text": "

    Clean pipeline steps

    Usage:

    $ kpops clean [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-deploy", "title": "kpops deploy", "text": "

    Deploy pipeline steps

    Usage:

    $ kpops deploy [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-destroy", "title": "kpops destroy", "text": "

    Destroy pipeline steps

    Usage:

    $ kpops destroy [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-generate", "title": "kpops generate", "text": "

    Enrich pipeline steps with defaults. The enriched pipeline is used for all KPOps operations (deploy, destroy, ...).

    Usage:

    $ kpops generate [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-init", "title": "kpops init", "text": "

    Initialize a new KPOps project.

    Usage:

    $ kpops init [OPTIONS] PATH\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-manifest", "title": "kpops manifest", "text": "

    In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.

    Usage:

    $ kpops manifest [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-reset", "title": "kpops reset", "text": "

    Reset pipeline steps

    Usage:

    $ kpops reset [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    Options:

    "}, {"location": "user/references/cli-commands/#kpops-schema", "title": "kpops schema", "text": "

    Generate JSON schema.

    The schemas can be used to enable support for KPOps files in a text editor.

    Usage:

    $ kpops schema [OPTIONS] SCOPE:{pipeline|defaults|config}\n

    Arguments:

    Options:

    "}, {"location": "user/references/editor-integration/", "title": "Editor integration", "text": ""}, {"location": "user/references/editor-integration/#native", "title": "Native", "text": "

    We are working towards first-class editor support by providing plugins that work out of the box.

    "}, {"location": "user/references/editor-integration/#manual-for-unsupported-editors-with-lsp", "title": "Manual (for unsupported editors with LSP)", "text": "
    1. Install the yaml-language-server in your editor of choice. (requires LSP support)
    2. Configure the extension with the settings below.
    settings.json
    {\n    \"yaml.schemas\": {\n        \"https://bakdata.github.io/kpops/4.0/schema/pipeline.json\": [\n            \"pipeline.yaml\",\n            \"pipeline_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/defaults.json\": [\n            \"defaults.yaml\",\n            \"defaults_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/config.json\": [\n            \"config.yaml\",\n            \"config_*.yaml\"\n        ]\n    }\n}\n

    Advanced usage

    It is possible to generate schemas with the kpops schema command. Useful for including custom components or when using a pre-release version of KPOps.

    "}, {"location": "user/references/editor-integration/#concepts", "title": "Concepts", "text": "

    KPOps provides JSON schemas that enable autocompletion and validation for all YAML files that the user must work with.

    "}, {"location": "user/references/editor-integration/#supported-files", "title": "Supported files", "text": ""}, {"location": "user/references/ci-integration/github-actions/", "title": "GitHub Actions integration", "text": "

    We provided a GitHub composite action bakdata/kpops that installs and executes KPOps commands with the given parameters.

    "}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines config \u274c - string Directory containing the config*.yaml file(s) environment \u274c - string Environment to run KPOps in components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS parallel \u274c \"false\" string Whether to run pipelines in parallel python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
    steps:\n  # ...\n  # This step is useful for debugging reasons\n  - name: Generate Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: generate\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n\n  # It is possible to use a pre-release KPOps version from TestPyPI https://test.pypi.org/project/kpops/#history\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.5.dev20230707132709\n  # ...\n
    "}]} \ No newline at end of file diff --git a/main/sitemap.xml b/main/sitemap.xml index 3e94ebd1e..8ef762062 100644 --- a/main/sitemap.xml +++ b/main/sitemap.xml @@ -2,192 +2,197 @@ https://bakdata.github.io/kpops/main/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/developer/auto-generation/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/developer/contributing/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/developer/getting-started/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/resources/architecture/components-hierarchy/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/resources/examples/defaults/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/resources/examples/pipeline/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/resources/pipeline-components/pipeline/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/resources/pipeline-defaults/defaults/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/resources/variables/cli_env_vars/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/resources/variables/config_env_vars/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/changelog/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/what-is-kpops/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/config/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/defaults/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/helm-app/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/kafka-app/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/kafka-connector/ - 2024-05-22 + 2024-06-06 daily 
https://bakdata.github.io/kpops/main/user/core-concepts/components/kafka-sink-connector/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/kafka-source-connector/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/kubernetes-app/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/overview/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/producer-app/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/streams-app/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/components/streams-bootstrap/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/variables/environment_variables/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/core-concepts/variables/substitution/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/examples/atm-fraud-pipeline/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/getting-started/quick-start/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/getting-started/setup/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/getting-started/teardown/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/migration-guide/v1-v2/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/migration-guide/v2-v3/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/migration-guide/v3-v4/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/migration-guide/v4-v5/ - 2024-05-22 + 2024-06-06 + daily + + + https://bakdata.github.io/kpops/main/user/migration-guide/v5-v6/ + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/references/cli-commands/ - 
2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/references/editor-integration/ - 2024-05-22 + 2024-06-06 daily https://bakdata.github.io/kpops/main/user/references/ci-integration/github-actions/ - 2024-05-22 + 2024-06-06 daily \ No newline at end of file diff --git a/main/sitemap.xml.gz b/main/sitemap.xml.gz index 26c0b7f71e5c1abd95081a68955d1a93abd8335f..d63f1acd60c4095077fa64b2d0a5118e1812d390 100644 GIT binary patch literal 607 zcmV-l0-*gLiwFpQ)?sD>|8r?{Wo=<_E_iKh0Nt6*Zrd;n$M1cLAa_M+nikuTq_;i6 zb`EHXu?gFfDp5}R_LJi_$v#dy3}XxfmS|b{{V7_K>CN+N-^~V$iM-p+?-$GY44v_o zT)mxt|MpdVnm_OEHaT_)c~%;`w4FE1;fvPC<8hIQv4@ZrKGr&!rc{!CH|pi$^L%$V zE63QuR5~4-&YRuXODf$|&{|+v)Wl7$7UXpsd`P+na(bi3rq@qG;@;ccYPnjgp@euK4XMwu6jlQx>Nczn5*K`&K?0mqeA+uLCI*h=@m14M!cO0q8Tn1bW zbd5ans>Bc(?SIZ4G5H(~CJoUv<<&5pBNos1!8>%ER6`|f%Dp!+O7Io>(3OJ41LTgA zJn}#RJ8~$7`~z5BB1f=@bKr=JpaCL5HHMJsXpe5#7mG-I?+#Q;QIONPT%9`Zd@TX? z*Z?a^YO+i4u8-sF;tU4<2oHJOLMD_ydi*@GH`w(GLJ%#Yg+mKBqN3b!W#{JO4QNT^ z+S?LzTH9_#Qm!yMWK7~;9Y0_k2#=yggy?NH7{!l9;)u|vo6%8u&!r3k)m%2=W%o_d zyS^yfU#x-!rCcQ@&H~b4CmrX60C2Ak5wcZ6zN%07zvjYTGMRR&NyJacIY?g0Ix@AQJpR4Qr9$Yc)KI1b_iZa0m>b(jVY9t2ixkghbVknAC+%yxhf?bc3BKlTKcA^F{Ay}32(JG0r; zCa<;+&0Ghu#XaHE&O_*5w)TT%MzUw7zozpzVxKaSg(Atc7?D7#E7fqj6oo=lq)xaR z=oV$-H7r1a{y*m?J~If_M`14ms1jX(jKDTG`f9x;xO)uIwV#eHbCC@4H_SEqsd)M|h| zb|5COne7_9n`l~HqQfk2;h~RP$%67nkDnLz4*S7C&Z)tPxxg$fu)< zkRL)jr>RX_f=4@En{lHOd}GENmEbpKyj2N)YsNbb;Dmc5jBZZTO*zv6qGx63=tKRS nF(C@JD*i0}=KJsy{M^TYn`-}HGETR;r|f
      +
    • + + 6.0.0 - Release Date: [2024-06-06] + + + + +
    • +
    • 5.1.1 - Release Date: [2024-05-22] @@ -557,7 +591,7 @@
      • - + 🏗️ Breaking changes @@ -584,7 +618,7 @@
        • - + 🚀 Features @@ -604,7 +638,7 @@
          • - + 🏭 Refactor @@ -658,7 +692,7 @@
          • - + 🏭 Refactor @@ -759,14 +793,14 @@
            • - + 🏗️ Breaking changes
            • - + 🚀 Features @@ -793,7 +827,7 @@
            • - + 🏭 Refactor @@ -867,7 +901,7 @@
            • - + 🏭 Refactor @@ -887,14 +921,14 @@
              • - + 🚀 Features
              • - + 🏭 Refactor @@ -921,7 +955,7 @@
                • - + 🚀 Features @@ -935,7 +969,7 @@
                • - + 🏭 Refactor @@ -1030,14 +1064,14 @@
                  • - + 🏗️ Breaking changes
                  • - + 🚀 Features @@ -1058,7 +1092,7 @@
                  • - + 🏭 Refactor @@ -1173,7 +1207,7 @@
                  • - + 🏭 Refactor @@ -1220,7 +1254,7 @@
                    • - + 🏭 Refactor @@ -1361,21 +1395,21 @@
                      • - + 🏗️ Breaking changes
                      • - + 🚀 Features
                      • - + 🏭 Refactor @@ -1402,7 +1436,7 @@
                        • - + 🏭 Refactor @@ -1449,7 +1483,7 @@
                          • - + 🚀 Features @@ -1469,7 +1503,7 @@
                            • - + 🏭 Refactor @@ -1489,14 +1523,14 @@
                              • - + 🚀 Features
                              • - + 🏭 Refactor @@ -1530,7 +1564,7 @@
                              • - + 🏭 Refactor @@ -1577,7 +1611,7 @@
                                • - + 🏭 Refactor @@ -1604,7 +1638,7 @@
                                  • - + 🏭 Refactor @@ -1618,7 +1652,7 @@
                                  • - + 🏗️ Breaking changes @@ -1672,7 +1706,7 @@
                                  • - + 🏭 Refactor @@ -1726,7 +1760,7 @@ @@ -2896,6 +2952,40 @@
                                      +
                                    • + + 6.0.0 - Release Date: [2024-06-06] + + + + +
                                    • +
                                    • 5.1.1 - Release Date: [2024-05-22] @@ -2965,7 +3055,7 @@
                                      • - + 🏗️ Breaking changes @@ -2992,7 +3082,7 @@
                                        • - + 🚀 Features @@ -3012,7 +3102,7 @@
                                          • - + 🏭 Refactor @@ -3066,7 +3156,7 @@
                                          • - + 🏭 Refactor @@ -3167,14 +3257,14 @@
                                            • - + 🏗️ Breaking changes
                                            • - + 🚀 Features @@ -3201,7 +3291,7 @@
                                            • - + 🏭 Refactor @@ -3275,7 +3365,7 @@
                                            • - + 🏭 Refactor @@ -3295,14 +3385,14 @@
                                              • - + 🚀 Features
                                              • - + 🏭 Refactor @@ -3329,7 +3419,7 @@
                                                • - + 🚀 Features @@ -3343,7 +3433,7 @@
                                                • - + 🏭 Refactor @@ -3438,14 +3528,14 @@
                                                  • - + 🏗️ Breaking changes
                                                  • - + 🚀 Features @@ -3466,7 +3556,7 @@
                                                  • - + 🏭 Refactor @@ -3581,7 +3671,7 @@
                                                  • - + 🏭 Refactor @@ -3628,7 +3718,7 @@
                                                    • - + 🏭 Refactor @@ -3769,21 +3859,21 @@
                                                      • - + 🏗️ Breaking changes
                                                      • - + 🚀 Features
                                                      • - + 🏭 Refactor @@ -3810,7 +3900,7 @@
                                                        • - + 🏭 Refactor @@ -3857,7 +3947,7 @@
                                                          • - + 🚀 Features @@ -3877,7 +3967,7 @@
                                                            • - + 🏭 Refactor @@ -3897,14 +3987,14 @@ -

                                                              🏭 Refactor

                                                              +

                                                              🏭 Refactor

                                                              • Refactor Kafka topics - #447

                                                                @@ -4337,16 +4440,16 @@

                                                                🐛 Fixes#452

                                                              -

                                                              🏭 Refactor

                                                              +

                                                              🏭 Refactor

                                                              • Simplify execution graph logic - #446

                                                              3.2.0 - Release Date: [2024-02-01]

                                                              -

                                                              🚀 Features

                                                              +

                                                              🚀 Features

                                                              • Refactor pipeline filter and add to public API - #405
                                                              -

                                                              🏭 Refactor

                                                              +

                                                              🏭 Refactor

                                                              • Refactor enrichment using Pydantic model validator - #444

                                                                @@ -4360,7 +4463,7 @@

                                                                📝 Documentation#443

                                                              3.1.0 - Release Date: [2024-01-30]

                                                              -

                                                              🚀 Features

                                                              +

                                                              🚀 Features

                                                              • Add support for pipeline steps parallelization - #312
                                                              @@ -4368,7 +4471,7 @@

                                                              🐛 Fixes#430

                                                            -

                                                            🏭 Refactor

                                                            +

                                                            🏭 Refactor

                                                            • Simplify loading of defaults - #435
                                                            @@ -4431,7 +4534,7 @@

                                                            🌀 Miscellaneous3.0.0 - Release Date: [2024-01-17]

                                                            -

                                                            🏗️ Breaking changes

                                                            +

                                                            🏗️ Breaking changes

                                                            • Move GitHub action to repository root - #356

                                                              @@ -4470,7 +4573,7 @@

                                                              🏗️ Breaking changes#418

                                                            -

                                                            🚀 Features

                                                            +

                                                            🚀 Features

                                                            • Allow overriding config files - #391

                                                              @@ -4498,7 +4601,7 @@

                                                              🧪 Dependencies#347

                                                            -

                                                            🏭 Refactor

                                                            +

                                                            🏭 Refactor

                                                            • Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config - #354

                                                              @@ -4628,7 +4731,7 @@

                                                              🐛 Fixes#353

                                                            -

                                                            🏭 Refactor

                                                            +

                                                            🏭 Refactor

                                                            • Refactor component prefix & name - #326

                                                              @@ -4647,7 +4750,7 @@

                                                              📝 Documentation#352

                                                            2.0.6 - Release Date: [2023-08-30]

                                                            -

                                                            🏭 Refactor

                                                            +

                                                            🏭 Refactor

                                                            • Simplify deployment with local Helm charts - #349
                                                            @@ -4702,7 +4805,7 @@

                                                            🐛 Fixes#334

                                                          2.0.0 - Release Date: [2023-08-17]

                                                          -

                                                          🏗️ Breaking changes

                                                          +

                                                          🏗️ Breaking changes

                                                          • Remove camel case conversion of internal models - #308

                                                            @@ -4717,7 +4820,7 @@

                                                            🏗️ Breaking changes#321

                                                          -

                                                          🚀 Features

                                                          +

                                                          🚀 Features

                                                          -

                                                          🏭 Refactor

                                                          +

                                                          🏭 Refactor

                                                          • Refactor input/output types - #232
                                                          @@ -4735,7 +4838,7 @@

                                                          📝 Documentation#273

                                                        1.7.2 - Release Date: [2023-08-16]

                                                        -

                                                        🏭 Refactor

                                                        +

                                                        🏭 Refactor

                                                        • Refactor Kafka Connect handler - #322
                                                        @@ -4759,21 +4862,21 @@

                                                        📝 Documentation1.7.0 - Release Date: [2023-08-15]

                                                        -

                                                        🚀 Features

                                                        +

                                                        🚀 Features

                                                        • Add flag to exclude pipeline steps - #300

                                                        1.6.0 - Release Date: [2023-08-10]

                                                        -

                                                        🏭 Refactor

                                                        +

                                                        🏭 Refactor

                                                        • Refactor handling of Helm flags - #319

                                                        1.5.0 - Release Date: [2023-08-10]

                                                        -

                                                        🚀 Features

                                                        +

                                                        🚀 Features

                                                        • Refactor Helm wrapper and add --set-file flag - #311
                                                        -

                                                        🏭 Refactor

                                                        +

                                                        🏭 Refactor

                                                        • Refactor Helm wrapper and add --set-file flag - #311

                                                          @@ -4794,7 +4897,7 @@

                                                          🐛 Fixes#292

                                                        -

                                                        🏭 Refactor

                                                        +

                                                        🏭 Refactor

                                                        • Order PipelineComponent fields - #290

                                                          @@ -4824,7 +4927,7 @@

                                                          🐛 Fixes#293

                                                        1.3.1 - Release Date: [2023-07-11]

                                                        -

                                                        🏭 Refactor

                                                        +

                                                        🏭 Refactor

                                                        • Remove workaround for pipeline steps - #276
                                                        @@ -4844,7 +4947,7 @@

                                                        🌀 Miscellaneous1.3.0 - Release Date: [2023-07-07]

                                                        -

                                                        🏭 Refactor

                                                        +

                                                        🏭 Refactor

                                                        • Plural broker field in pipeline config - #278
                                                        @@ -4852,7 +4955,7 @@

                                                        📝 Documentation#279

                                                      -

                                                      🏗️ Breaking changes

                                                      +

                                                      🏗️ Breaking changes

                                                      • Plural broker field in pipeline config - #278
                                                      @@ -4870,7 +4973,7 @@

                                                      🐛 Fixes#265

                                                    -

                                                    🏭 Refactor

                                                    +

                                                    🏭 Refactor

                                                    • Refactor custom component validation & hide field from kpops output - #265
                                                    @@ -4895,7 +4998,7 @@

                                                    #258

                                                  1.2.0 - Release Date: [2023-06-21]

                                                  -

                                                  🚀 Features

                                                  +

                                                  🚀 Features

                                                  • Add helm repo update <repo-name> for Helm >3.7 - #239
                                                  @@ -4917,7 +5020,7 @@

                                                  🐛 Fixes#256

                                                -

                                                🏭 Refactor

                                                +

                                                🏭 Refactor

                                                • Remove enable option from helm diff - #235

                                                  diff --git a/main/user/core-concepts/components/helm-app/index.html b/main/user/core-concepts/components/helm-app/index.html index 1bd40c16e..b954babcb 100644 --- a/main/user/core-concepts/components/helm-app/index.html +++ b/main/user/core-concepts/components/helm-app/index.html @@ -1260,6 +1260,8 @@ + + @@ -1370,6 +1372,26 @@ + + + + + +
                                                • + + + + + Migrate from v5 to v6 + + + + +
                                                • + + + +
                                                diff --git a/main/user/core-concepts/components/kafka-app/index.html b/main/user/core-concepts/components/kafka-app/index.html index 4940bf979..1e48d840e 100644 --- a/main/user/core-concepts/components/kafka-app/index.html +++ b/main/user/core-concepts/components/kafka-app/index.html @@ -1260,6 +1260,8 @@ + + @@ -1370,6 +1372,26 @@ + + + + + +
                                              • + + + + + Migrate from v5 to v6 + + + + +
                                              • + + + +
                                              diff --git a/main/user/core-concepts/components/kafka-connector/index.html b/main/user/core-concepts/components/kafka-connector/index.html index 7ab5a3ce0..e42f7b957 100644 --- a/main/user/core-concepts/components/kafka-connector/index.html +++ b/main/user/core-concepts/components/kafka-connector/index.html @@ -1175,6 +1175,8 @@ + + @@ -1285,6 +1287,26 @@ + + + + + +
                                            • + + + + + Migrate from v5 to v6 + + + + +
                                            • + + + +
                                            diff --git a/main/user/core-concepts/components/kafka-sink-connector/index.html b/main/user/core-concepts/components/kafka-sink-connector/index.html index ce18d06af..2968cc650 100644 --- a/main/user/core-concepts/components/kafka-sink-connector/index.html +++ b/main/user/core-concepts/components/kafka-sink-connector/index.html @@ -1260,6 +1260,8 @@ + + @@ -1370,6 +1372,26 @@ + + + + + +
                                          • + + + + + Migrate from v5 to v6 + + + + +
                                          • + + + +
                                          diff --git a/main/user/core-concepts/components/kafka-source-connector/index.html b/main/user/core-concepts/components/kafka-source-connector/index.html index 456652f27..2722d479e 100644 --- a/main/user/core-concepts/components/kafka-source-connector/index.html +++ b/main/user/core-concepts/components/kafka-source-connector/index.html @@ -1260,6 +1260,8 @@ + + @@ -1370,6 +1372,26 @@ + + + + + +
                                        • + + + + + Migrate from v5 to v6 + + + + +
                                        • + + + +
                                        diff --git a/main/user/core-concepts/components/kubernetes-app/index.html b/main/user/core-concepts/components/kubernetes-app/index.html index 01de0a1ab..614e33543 100644 --- a/main/user/core-concepts/components/kubernetes-app/index.html +++ b/main/user/core-concepts/components/kubernetes-app/index.html @@ -1260,6 +1260,8 @@ + + @@ -1370,6 +1372,26 @@ + + + + + +
                                      • + + + + + Migrate from v5 to v6 + + + + +
                                      • + + + +
                                      diff --git a/main/user/core-concepts/components/overview/index.html b/main/user/core-concepts/components/overview/index.html index aafbfa911..ca45086f1 100644 --- a/main/user/core-concepts/components/overview/index.html +++ b/main/user/core-concepts/components/overview/index.html @@ -1175,6 +1175,8 @@ + + @@ -1285,6 +1287,26 @@ + + + + + +
                                    • + + + + + Migrate from v5 to v6 + + + + +
                                    • + + + +
                                    diff --git a/main/user/core-concepts/components/producer-app/index.html b/main/user/core-concepts/components/producer-app/index.html index 51be36e2d..adbd0c9d9 100644 --- a/main/user/core-concepts/components/producer-app/index.html +++ b/main/user/core-concepts/components/producer-app/index.html @@ -1260,6 +1260,8 @@ + + @@ -1370,6 +1372,26 @@ + + + + + +
                                  • + + + + + Migrate from v5 to v6 + + + + +
                                  • + + + +
                                  diff --git a/main/user/core-concepts/components/streams-app/index.html b/main/user/core-concepts/components/streams-app/index.html index 975a0df66..d5266cea1 100644 --- a/main/user/core-concepts/components/streams-app/index.html +++ b/main/user/core-concepts/components/streams-app/index.html @@ -1260,6 +1260,8 @@ + + @@ -1370,6 +1372,26 @@ + + + + + +
                                • + + + + + Migrate from v5 to v6 + + + + +
                                • + + + +
                                diff --git a/main/user/core-concepts/components/streams-bootstrap/index.html b/main/user/core-concepts/components/streams-bootstrap/index.html index a375fb77e..d97224385 100644 --- a/main/user/core-concepts/components/streams-bootstrap/index.html +++ b/main/user/core-concepts/components/streams-bootstrap/index.html @@ -1253,6 +1253,8 @@ + + @@ -1363,6 +1365,26 @@ + + + + + +
                              • + + + + + Migrate from v5 to v6 + + + + +
                              • + + + +
                              diff --git a/main/user/core-concepts/config/index.html b/main/user/core-concepts/config/index.html index d4db93c06..c5d5c2edb 100644 --- a/main/user/core-concepts/config/index.html +++ b/main/user/core-concepts/config/index.html @@ -1173,6 +1173,8 @@ + + @@ -1283,6 +1285,26 @@ + + + + + +
                            • + + + + + Migrate from v5 to v6 + + + + +
                            • + + + +
                            @@ -1674,9 +1696,7 @@

                            Configuration49 50 51 -52 -53 -54
                            # CONFIGURATION
                            +52
                            # CONFIGURATION
                             #
                             # Custom Python module defining project-specific KPOps components
                             components_module: null
                            @@ -1685,51 +1705,49 @@ 

                            Configuration# The Kafka brokers address. # REQUIRED kafka_brokers: "http://broker1:9092,http://broker2:9092" -# The name of the defaults file and the prefix of the defaults environment file. -defaults_filename_prefix: defaults -# Configure the topic name variables you can use in the pipeline definition. -topic_name_config: - # Configures the value for the variable ${output_topic_name} - default_output_topic_name: ${pipeline.name}-${component.name} - # Configures the value for the variable ${error_topic_name} - default_error_topic_name: ${pipeline.name}-${component.name}-error -# Configuration for Schema Registry. -schema_registry: - # Whether the Schema Registry handler should be initialized. - enabled: false - # Address of the Schema Registry. - url: "http://localhost:8081" -# Configuration for the Kafka REST Proxy. -kafka_rest: - # Address of the Kafka REST Proxy. - url: "http://localhost:8082" -# Configuration for Kafka Connect. -kafka_connect: - # Address of Kafka Connect. - url: "http://localhost:8083" -# The timeout in seconds that specifies when actions like deletion or deploy -# timeout. -timeout: 300 -# Flag for `helm upgrade --install`. -# Create the release namespace if not present. -create_namespace: false -# Global flags for Helm. -helm_config: - # Name of kubeconfig context (`--kube-context`) - context: name - # Run Helm in Debug mode. - debug: false - # Kubernetes API version used for Capabilities.APIVersions - api_version: null -# Configure Helm Diff. -helm_diff_config: - # Set of keys that should not be checked. - ignore: - - name - - imageTag -# Whether to retain clean up jobs in the cluster or uninstall the, after -# completion. -retain_clean_jobs: false +# Configure the topic name variables you can use in the pipeline definition. 
+topic_name_config: + # Configures the value for the variable ${output_topic_name} + default_output_topic_name: ${pipeline.name}-${component.name} + # Configures the value for the variable ${error_topic_name} + default_error_topic_name: ${pipeline.name}-${component.name}-error +# Configuration for Schema Registry. +schema_registry: + # Whether the Schema Registry handler should be initialized. + enabled: false + # Address of the Schema Registry. + url: "http://localhost:8081" +# Configuration for the Kafka REST Proxy. +kafka_rest: + # Address of the Kafka REST Proxy. + url: "http://localhost:8082" +# Configuration for Kafka Connect. +kafka_connect: + # Address of Kafka Connect. + url: "http://localhost:8083" +# The timeout in seconds that specifies when actions like deletion or deploy +# timeout. +timeout: 300 +# Flag for `helm upgrade --install`. +# Create the release namespace if not present. +create_namespace: false +# Global flags for Helm. +helm_config: + # Name of kubeconfig context (`--kube-context`) + context: name + # Run Helm in Debug mode. + debug: false + # Kubernetes API version used for Capabilities.APIVersions + api_version: null +# Configure Helm Diff. +helm_diff_config: + # Set of keys that should not be checked. + ignore: + - name + - imageTag +# Whether to retain clean up jobs in the cluster or uninstall the, after +# completion. +retain_clean_jobs: false

                            diff --git a/main/user/core-concepts/defaults/index.html b/main/user/core-concepts/defaults/index.html index bc4cb3e89..52803dccc 100644 --- a/main/user/core-concepts/defaults/index.html +++ b/main/user/core-concepts/defaults/index.html @@ -1292,6 +1292,8 @@ + + @@ -1402,6 +1404,26 @@ + + + + + +
                          • + + + + + Migrate from v5 to v6 + + + + +
                          • + + + +

                          @@ -1871,14 +1893,6 @@

                          Configuration - - -
                          -

                          Tip

                          -

                          defaults is the default value of defaults_filename_prefix.

                          -
                          - -

                          Components

                          diff --git a/main/user/core-concepts/variables/environment_variables/index.html b/main/user/core-concepts/variables/environment_variables/index.html index 3ec4d0e82..5241cd5cc 100644 --- a/main/user/core-concepts/variables/environment_variables/index.html +++ b/main/user/core-concepts/variables/environment_variables/index.html @@ -1219,6 +1219,8 @@ + + @@ -1329,6 +1331,26 @@ + + + + + +
                        • + + + + + Migrate from v5 to v6 + + + + +
                        • + + + +

                        @@ -1728,13 +1750,6 @@

                        ConfigConfig60 61 62 -63 -64 -65 -66 -67
                        # Global config environment variables
                        +63
                        # Global config environment variables
                         #
                         # The default setup is shown. These variables take precedence over the
                         # settings in `config.yaml`. Variables marked as required can instead
                        @@ -1927,57 +1938,53 @@ 

                        Config# kafka_brokers # The comma separated Kafka brokers address. KPOPS_KAFKA_BROKERS # No default value, required -# defaults_filename_prefix -# The name of the defaults file and the prefix of the defaults -# environment file. -KPOPS_DEFAULTS_FILENAME_PREFIX=defaults -# topic_name_config.default_output_topic_name -# Configures the value for the variable ${output_topic_name} -KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name} -# topic_name_config.default_error_topic_name -# Configures the value for the variable ${error_topic_name} -KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error -# schema_registry.enabled -# Whether the Schema Registry handler should be initialized. -KPOPS_SCHEMA_REGISTRY__ENABLED=False -# schema_registry.url -# Address of the Schema Registry. -KPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/ -# schema_registry.timeout -# Operation timeout in seconds. -KPOPS_SCHEMA_REGISTRY__TIMEOUT=30 -# kafka_rest.url -# Address of the Kafka REST Proxy. -KPOPS_KAFKA_REST__URL=http://localhost:8082/ -# kafka_rest.timeout -# Operation timeout in seconds. -KPOPS_KAFKA_REST__TIMEOUT=30 -# kafka_connect.url -# Address of Kafka Connect. -KPOPS_KAFKA_CONNECT__URL=http://localhost:8083/ -# kafka_connect.timeout -# Operation timeout in seconds. -KPOPS_KAFKA_CONNECT__TIMEOUT=30 -# create_namespace -# Flag for `helm upgrade --install`. Create the release namespace if -# not present. -KPOPS_CREATE_NAMESPACE=False -# helm_config.context -# Name of kubeconfig context (`--kube-context`) -KPOPS_HELM_CONFIG__CONTEXT # No default value, not required -# helm_config.debug -# Run Helm in Debug mode -KPOPS_HELM_CONFIG__DEBUG=False -# helm_config.api_version -# Kubernetes API version used for `Capabilities.APIVersions` -KPOPS_HELM_CONFIG__API_VERSION # No default value, not required -# helm_diff_config.ignore -# Set of keys that should not be checked. 
-KPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required -# retain_clean_jobs -# Whether to retain clean up jobs in the cluster or uninstall the, -# after completion. -KPOPS_RETAIN_CLEAN_JOBS=False +# topic_name_config.default_output_topic_name +# Configures the value for the variable ${output_topic_name} +KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name} +# topic_name_config.default_error_topic_name +# Configures the value for the variable ${error_topic_name} +KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error +# schema_registry.enabled +# Whether the Schema Registry handler should be initialized. +KPOPS_SCHEMA_REGISTRY__ENABLED=False +# schema_registry.url +# Address of the Schema Registry. +KPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/ +# schema_registry.timeout +# Operation timeout in seconds. +KPOPS_SCHEMA_REGISTRY__TIMEOUT=30 +# kafka_rest.url +# Address of the Kafka REST Proxy. +KPOPS_KAFKA_REST__URL=http://localhost:8082/ +# kafka_rest.timeout +# Operation timeout in seconds. +KPOPS_KAFKA_REST__TIMEOUT=30 +# kafka_connect.url +# Address of Kafka Connect. +KPOPS_KAFKA_CONNECT__URL=http://localhost:8083/ +# kafka_connect.timeout +# Operation timeout in seconds. +KPOPS_KAFKA_CONNECT__TIMEOUT=30 +# create_namespace +# Flag for `helm upgrade --install`. Create the release namespace if +# not present. +KPOPS_CREATE_NAMESPACE=False +# helm_config.context +# Name of kubeconfig context (`--kube-context`) +KPOPS_HELM_CONFIG__CONTEXT # No default value, not required +# helm_config.debug +# Run Helm in Debug mode +KPOPS_HELM_CONFIG__DEBUG=False +# helm_config.api_version +# Kubernetes API version used for `Capabilities.APIVersions` +KPOPS_HELM_CONFIG__API_VERSION # No default value, not required +# helm_diff_config.ignore +# Set of keys that should not be checked. 
+KPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required +# retain_clean_jobs +# Whether to retain clean up jobs in the cluster or uninstall them +# after completion. +KPOPS_RETAIN_CLEAN_JOBS=False

                        @@ -2013,10 +2020,10 @@

                        CLI¶< The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). -KPOPS_PIPELINE_PATH +KPOPS_PIPELINE_PATHS True -Path to YAML with pipeline definition +Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. KPOPS_PIPELINE_STEPS @@ -2049,7 +2056,8 @@

                        CLI¶< 17 18 19 -20
                        # CLI Environment variables
                        +20
                        +21
                        # CLI Environment variables
                         #
                         # The default setup is shown. These variables take precedence over the
                         # commands' flags. If a variable is set, the corresponding flag does
                        @@ -2065,10 +2073,11 @@ 

                        CLI¶< # Suffix your environment files with this value (e.g. # defaults_development.yaml for environment=development). KPOPS_ENVIRONMENT # No default value, not required -# Path to YAML with pipeline definition -KPOPS_PIPELINE_PATH # No default value, required -# Comma separated list of steps to apply the command on -KPOPS_PIPELINE_STEPS # No default value, not required +# Paths to dir containing 'pipeline.yaml' or files named +# 'pipeline.yaml'. +KPOPS_PIPELINE_PATHS # No default value, required +# Comma separated list of steps to apply the command on +KPOPS_PIPELINE_STEPS # No default value, not required

                        diff --git a/main/user/core-concepts/variables/substitution/index.html b/main/user/core-concepts/variables/substitution/index.html index 593b923f4..01666a9e8 100644 --- a/main/user/core-concepts/variables/substitution/index.html +++ b/main/user/core-concepts/variables/substitution/index.html @@ -1240,6 +1240,8 @@ + + @@ -1350,6 +1352,26 @@ + + + + + +
                      • + + + + + Migrate from v5 to v6 + + + + +
                      • + + + +

                      @@ -1769,46 +1791,42 @@

                      Component-specific variables33 34 35 -36 -37 -38
                      - type: scheduled-producer
                      +36
                      - type: scheduled-producer
                         app:
                           labels:
                             app_type: "${component.type}"
                             app_name: "${component.name}"
                             app_schedule: "${component.app.schedule}"
                      -      helm_release_name: ${component.helm_release_name}
                      -      helm_name_override: ${component.helm_name_override}
                      -    commandLine:
                      -      FAKE_ARG: "fake-arg-value"
                      -    schedule: "30 3/8 * * *"
                      -- type: converter
                      -  app:
                      -    commandLine:
                      -      CONVERT_XML: true
                      -    resources:
                      -      limits:
                      +    commandLine:
                      +      FAKE_ARG: "fake-arg-value"
                      +    schedule: "30 3/8 * * *"
                      +- type: converter
                      +  app:
                      +    commandLine:
                      +      CONVERT_XML: true
                      +    resources:
                      +      limits:
                      +        memory: 2G
                      +      requests:
                               memory: 2G
                      -      requests:
                      -        memory: 2G
                      -- type: filter
                      -  name: "filter-app"
                      -  app:
                      -    labels:
                      -      app_type: "${component.type}"
                      -      app_name: "${component.name}"
                      -      app_resources_requests_memory: "${component.app.resources.requests.memory}"
                      -      ${component.type}: "${component.app.labels.app_name}-${component.app.labels.app_type}"
                      -      test_placeholder_in_placeholder: "${component.app.labels.${component.type}}"
                      -    commandLine:
                      -      TYPE: "nothing"
                      -    resources:
                      -      requests:
                      -        memory: 3G
                      -    replicaCount: 4
                      -    autoscaling:
                      -      minReplicas: 4
                      -      maxReplicas: 4
                      +- type: filter
                      +  name: "filter-app"
                      +  app:
                      +    labels:
                      +      app_type: "${component.type}"
                      +      app_name: "${component.name}"
                      +      app_resources_requests_memory: "${component.app.resources.requests.memory}"
                      +      ${component.type}: "${component.app.labels.app_name}-${component.app.labels.app_type}"
                      +      test_placeholder_in_placeholder: "${component.app.labels.${component.type}}"
                      +    commandLine:
                      +      TYPE: "nothing"
                      +    resources:
                      +      requests:
                      +        memory: 3G
                      +    replicaCount: 4
                      +    autoscaling:
                      +      minReplicas: 4
                      +      maxReplicas: 4
                       
                      diff --git a/main/user/examples/atm-fraud-pipeline/index.html b/main/user/examples/atm-fraud-pipeline/index.html index dea7d58e2..2c2fb55ef 100644 --- a/main/user/examples/atm-fraud-pipeline/index.html +++ b/main/user/examples/atm-fraud-pipeline/index.html @@ -1305,6 +1305,8 @@ + + @@ -1415,6 +1417,26 @@ + + + + + +
                    • + + + + + Migrate from v5 to v6 + + + + +
                    • + + + +

                    diff --git a/main/user/getting-started/quick-start/index.html b/main/user/getting-started/quick-start/index.html index 7cee4c647..cd97cc5a4 100644 --- a/main/user/getting-started/quick-start/index.html +++ b/main/user/getting-started/quick-start/index.html @@ -1312,6 +1312,8 @@ + + @@ -1422,6 +1424,26 @@ + + + + + +
                  • + + + + + Migrate from v5 to v6 + + + + +
                  • + + + +
                  diff --git a/main/user/getting-started/setup/index.html b/main/user/getting-started/setup/index.html index 1b0adbec3..de419ca6e 100644 --- a/main/user/getting-started/setup/index.html +++ b/main/user/getting-started/setup/index.html @@ -1245,6 +1245,8 @@ + + @@ -1355,6 +1357,26 @@ + + + + + +
                • + + + + + Migrate from v5 to v6 + + + + +
                • + + + +
                diff --git a/main/user/getting-started/teardown/index.html b/main/user/getting-started/teardown/index.html index 06857e0a2..4d2374b44 100644 --- a/main/user/getting-started/teardown/index.html +++ b/main/user/getting-started/teardown/index.html @@ -1238,6 +1238,8 @@ + + @@ -1348,6 +1350,26 @@ + + + + + +
              • + + + + + Migrate from v5 to v6 + + + + +
              • + + + +
              diff --git a/main/user/migration-guide/v1-v2/index.html b/main/user/migration-guide/v1-v2/index.html index 0072cf1ce..f7f452643 100644 --- a/main/user/migration-guide/v1-v2/index.html +++ b/main/user/migration-guide/v1-v2/index.html @@ -1164,6 +1164,8 @@ + + @@ -1397,6 +1399,26 @@ + + + + + +
            • + + + + + Migrate from v5 to v6 + + + + +
            • + + + +
            diff --git a/main/user/migration-guide/v2-v3/index.html b/main/user/migration-guide/v2-v3/index.html index d54bf91ff..1436784db 100644 --- a/main/user/migration-guide/v2-v3/index.html +++ b/main/user/migration-guide/v2-v3/index.html @@ -1164,6 +1164,8 @@ + + @@ -1524,6 +1526,26 @@ + + + + + +
          • + + + + + Migrate from v5 to v6 + + + + +
          • + + + +
          diff --git a/main/user/migration-guide/v3-v4/index.html b/main/user/migration-guide/v3-v4/index.html index e1ec90bcd..2c4ccc097 100644 --- a/main/user/migration-guide/v3-v4/index.html +++ b/main/user/migration-guide/v3-v4/index.html @@ -1164,6 +1164,8 @@ + + @@ -1322,6 +1324,26 @@ + + + + + +
        • + + + + + Migrate from v5 to v6 + + + + +
        • + + + +
        diff --git a/main/user/migration-guide/v4-v5/index.html b/main/user/migration-guide/v4-v5/index.html index dadd7f10f..7ae6f6ebb 100644 --- a/main/user/migration-guide/v4-v5/index.html +++ b/main/user/migration-guide/v4-v5/index.html @@ -14,7 +14,7 @@ - + @@ -1164,6 +1164,8 @@ + + @@ -1335,6 +1337,26 @@ + + + + + +
      • + + + + + Migrate from v5 to v6 + + + + +
      • + + + +
      diff --git a/main/user/migration-guide/v5-v6/index.html b/main/user/migration-guide/v5-v6/index.html new file mode 100644 index 000000000..c9d1ae657 --- /dev/null +++ b/main/user/migration-guide/v5-v6/index.html @@ -0,0 +1,1774 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Migrate from v5 to v6 - KPOps + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      + +
      + + + + + + +
      + + +
      + +
      + + + + + + + + + +
      +
      + + + +
      +
      +
      + + + + + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      +
      + + + +
      +
      + + + + + + + +

      Migrate from v5 to v6

      +

      Deploy multiple pipelines

      +

      KPOps can now deploy multiple pipelines in a single command. It is possible to pass one or many pipeline.yaml files or pass a directory with many pipeline.yaml files within it.

      +

      The environment variable KPOPS_PIPELINE_PATH is changed to KPOPS_PIPELINE_PATHS.

      +

      Read more:

      + +

      Separate KPOps API from the CLI

      +

      KPOps Python API is now stable and separated from the CLI! 🎉

      + + + + + + +
      +
      + + +
      + + + +
      + + + +
      +
      +
      +
      + + + + + + + + + + \ No newline at end of file diff --git a/main/user/references/ci-integration/github-actions/index.html b/main/user/references/ci-integration/github-actions/index.html index 63d707a5d..0926b47b4 100644 --- a/main/user/references/ci-integration/github-actions/index.html +++ b/main/user/references/ci-integration/github-actions/index.html @@ -1163,6 +1163,8 @@ + + @@ -1273,6 +1275,26 @@ + + + + + +
    • + + + + + Migrate from v5 to v6 + + + + +
    • + + + +
    diff --git a/main/user/references/cli-commands/index.html b/main/user/references/cli-commands/index.html index 571f2402b..38d9e66bb 100644 --- a/main/user/references/cli-commands/index.html +++ b/main/user/references/cli-commands/index.html @@ -11,7 +11,7 @@ - + @@ -1163,6 +1163,8 @@ + + @@ -1273,6 +1275,26 @@ + + + + + +
  • + + + + + Migrate from v5 to v6 + + + + +
  • + + + + @@ -1790,11 +1812,11 @@

    CLI Usagekpops clean

    Clean pipeline steps

    Usage:

    -
    $ kpops clean [OPTIONS] PIPELINE_PATH
    +
    $ kpops clean [OPTIONS] PIPELINE_PATHS...
     

    Arguments:

      -
    • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
    • +
    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

      @@ -1811,11 +1833,11 @@

      kpops cleankpops deploy

      Deploy pipeline steps

      Usage:

      -
      $ kpops deploy [OPTIONS] PIPELINE_PATH
      +
      $ kpops deploy [OPTIONS] PIPELINE_PATHS...
       

      Arguments:

        -
      • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
      • +
      • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

      Options:

        @@ -1832,11 +1854,11 @@

        kpops deploykpops destroy

        Destroy pipeline steps

        Usage:

        -
        $ kpops destroy [OPTIONS] PIPELINE_PATH
        +
        $ kpops destroy [OPTIONS] PIPELINE_PATHS...
         

        Arguments:

          -
        • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
        • +
        • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

        Options:

          @@ -1853,17 +1875,16 @@

          kpops destroykpops generate

          Enrich pipeline steps with defaults. The enriched pipeline is used for all KPOps operations (deploy, destroy, ...).

          Usage:

          -
          $ kpops generate [OPTIONS] PIPELINE_PATH
          +
          $ kpops generate [OPTIONS] PIPELINE_PATHS...
           

          Arguments:

            -
          • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
          • +
          • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

          Options:

          • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
          • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
          • -
          • --output / --no-output: Enable output printing [default: output]
          • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
          • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
          • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
          • @@ -1887,17 +1908,16 @@

            kpops initkpops manifest

            In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.

            Usage:

            -
            $ kpops manifest [OPTIONS] PIPELINE_PATH
            +
            $ kpops manifest [OPTIONS] PIPELINE_PATHS...
             

            Arguments:

              -
            • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
            • +
            • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

            Options:

            • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
            • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
            • -
            • --output / --no-output: Enable output printing [default: output]
            • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
            • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
            • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
            • @@ -1907,11 +1927,11 @@

              kpops manifestkpops reset

              Reset pipeline steps

              Usage:

              -
              $ kpops reset [OPTIONS] PIPELINE_PATH
              +
              $ kpops reset [OPTIONS] PIPELINE_PATHS...
               

              Arguments:

                -
              • PIPELINE_PATH: Path to YAML with pipeline definition [env var: KPOPS_PIPELINE_PATH;required]
              • +
              • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

              Options:

                diff --git a/main/user/references/editor-integration/index.html b/main/user/references/editor-integration/index.html index e4c6c0b68..9272bc746 100644 --- a/main/user/references/editor-integration/index.html +++ b/main/user/references/editor-integration/index.html @@ -1163,6 +1163,8 @@ + + @@ -1273,6 +1275,26 @@ + + + + + +
              • + + + + + Migrate from v5 to v6 + + + + +
              • + + + +
              diff --git a/main/user/what-is-kpops/index.html b/main/user/what-is-kpops/index.html index 4a19504e8..f614bad3b 100644 --- a/main/user/what-is-kpops/index.html +++ b/main/user/what-is-kpops/index.html @@ -1215,6 +1215,8 @@ + + @@ -1325,6 +1327,26 @@ + + + + + +
            • + + + + + Migrate from v5 to v6 + + + + +
            • + + + +