def generate(
    pipeline_path: Path,
    dotenv: list[Path] | None = None,
    config: Path = Path(),
    steps: set[str] | None = None,
    filter_type: FilterType = FilterType.INCLUDE,
    environment: str | None = None,
    verbose: bool = False,
) -> Pipeline:
    """Generate enriched pipeline representation.

    :param pipeline_path: Path to pipeline definition yaml file.
    :param dotenv: Paths to dotenv files.
    :param config: Path to the dir containing config.yaml files.
    :param steps: Set of steps (components) to apply the command on.
    :param filter_type: Whether `steps` should include/exclude the steps.
    :param environment: The environment to generate and deploy the pipeline to.
    :param verbose: Enable verbose printing.
    :return: Generated `Pipeline` object.
    """
    kpops_config = KpopsConfig.create(
        config,
        dotenv,
        environment,
        verbose,
    )
    pipeline = _create_pipeline(pipeline_path, kpops_config, environment)
    log.info(f"Picked up pipeline '{pipeline_path.parent.name}'")
    if steps:
        component_names = steps
        log.debug(
            f"KPOPS_PIPELINE_STEPS is defined with values: {component_names} and filter type of {filter_type.value}"
        )

        predicate = filter_type.create_default_step_names_filter_predicate(
            component_names
        )
        pipeline.filter(predicate)
        log.info(f"Filtered pipeline:\n{pipeline.step_names}")
    return pipeline
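For orientation, here is a minimal, hedged sketch of calling this API from Python. The kpops.api module path comes from the generated docs below ("Source code in kpops/api/__init__.py"); the FilterType import location and the project layout are assumptions:

```python
from pathlib import Path

from kpops.api import generate  # module per "Source code in kpops/api/__init__.py"
from kpops.api import FilterType  # assumed re-export; adjust to the actual location

# Hypothetical layout: ./pipelines/word-count/pipeline.yaml next to ./config.yaml
pipeline = generate(
    pipeline_path=Path("pipelines/word-count/pipeline.yaml"),
    config=Path("."),                # directory containing config.yaml
    steps={"word-counter"},          # component names to act on ...
    filter_type=FilterType.INCLUDE,  # ... and whether they are included or excluded
    environment="development",       # picks up the *_development.yaml variants
    verbose=True,
)
print(pipeline.step_names)  # component names remaining after filtering
```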
def init(
    path: Path,
    config_include_opt: bool = False,
):
    """Initiate a default empty project.

    :param path: Directory in which the project should be initiated.
    :param config_include_opt: Whether to include non-required settings
        in the generated config file.
    """
    if not path.exists():
        path.mkdir(parents=False)
    elif next(path.iterdir(), False):
        log.warning("Please provide a path to an empty directory.")
        return
    init_project(path, config_include_opt)

diff --git a/dev/resources/pipeline-config/config.yaml b/dev/resources/pipeline-config/config.yaml
index 862a49ac0..d8c5433b7 100644
--- a/dev/resources/pipeline-config/config.yaml
+++ b/dev/resources/pipeline-config/config.yaml
@@ -1,14 +1,12 @@
# CONFIGURATION
#
-# Custom Python module defining project-specific KPOps components
-components_module: null
# Base directory to the pipelines (default is current working directory)
pipeline_base_dir: .
# The Kafka brokers address.
# REQUIRED
kafka_brokers: "http://broker1:9092,http://broker2:9092"
# Configure the topic name variables you can use in the pipeline definition.
-topic_name_config:
+topic_name_config:
# Configures the value for the variable ${output_topic_name}
default_output_topic_name: ${pipeline.name}-${component.name}
# Configures the value for the variable ${error_topic_name}
@@ -27,9 +25,6 @@ kafka_rest:
kafka_connect:
# Address of Kafka Connect.
url: "http://localhost:8083"
-# The timeout in seconds that specifies when actions like deletion or deploy
-# timeout.
-timeout: 300
# Flag for `helm upgrade --install`.
# Create the release namespace if not present.
create_namespace: false
@@ -42,7 +37,7 @@ helm_config:
# Kubernetes API version used for Capabilities.APIVersions
api_version: null
# Configure Helm Diff.
-helm_diff_config:
+helm_diff_config:
# Set of keys that should not be checked.
ignore:
- name
Rendered API reference for init:

PARAMETER DESCRIPTION
path: Directory in which the project should be initiated. TYPE: Path
config_include_opt: Whether to include non-required settings in the generated config file.

Source code in kpops/api/__init__.py

def init(
    path: Path,
    config_include_opt: bool = False,
):
    """Initiate a default empty project.

    :param path: Directory in which the project should be initiated.
    :param config_include_opt: Whether to include non-required settings
        in the generated config file.
    """
    if not path.exists():
        path.mkdir(parents=False)
    elif next(path.iterdir(), False):
        log.warning("Please provide a path to an empty directory.")
        return
    init_project(path, config_include_opt)
def build_execution_graph(
    self,
    runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],
    /,
    reverse: bool = False,
) -> Awaitable[None]:
    async def run_parallel_tasks(
        coroutines: list[Coroutine[Any, Any, None]],
    ) -> None:
        tasks = []
        for coro in coroutines:
            tasks.append(asyncio.create_task(coro))
        await asyncio.gather(*tasks)

    async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:
        for pending_task in pending_tasks:
            await pending_task

    graph: nx.DiGraph = self._graph.copy()  # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx

    # We add an extra node to the graph, connecting all the leaf nodes to it
    # in that way we make this node the root of the graph, avoiding backtracking
    root_node = "root_node_bfs"
    graph.add_node(root_node)

    for node in graph:
        predecessors = list(graph.predecessors(node))
        if not predecessors:
            graph.add_edge(root_node, node)

    layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))

    sorted_tasks = []
    for layer in layers_graph[1:]:
        if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):
            sorted_tasks.append(run_parallel_tasks(parallel_tasks))

    if reverse:
        sorted_tasks.reverse()

    return run_graph_tasks(sorted_tasks)
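To illustrate the runner contract, a hedged usage sketch follows; it assumes build_execution_graph is exposed on the Pipeline object documented here, and the work done inside the runner is hypothetical:

```python
import asyncio

async def deploy_component(component) -> None:
    """Runner coroutine: called once per PipelineComponent."""
    print(f"deploying {component.name} ...")
    # a real runner would await the component's deploy/reset/clean logic here

async def main(pipeline) -> None:
    # Components in the same BFS layer run in parallel, layers run in order;
    # reverse=True walks the graph backwards (useful for teardown commands).
    await pipeline.build_execution_graph(deploy_component, reverse=False)

# asyncio.run(main(pipeline))  # with `pipeline` obtained e.g. from generate()
```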
Filter pipeline components using a custom predicate.

PARAMETER DESCRIPTION
predicate: Filter function, returns boolean value whether the component should be kept or removed. TYPE: ComponentFilterPredicate

Source code in kpops/pipeline.py

def filter(self, predicate: ComponentFilterPredicate) -> None:
    """Filter pipeline components using a custom predicate.

    :param predicate: Filter function,
        returns boolean value whether the component should be kept or removed
    """
    for component in self.components:
        # filter out components not matching the predicate
        if not predicate(component):
            self.remove(component.id)
Find pipeline components matching a custom predicate.

PARAMETER DESCRIPTION
predicate: Filter function, returns boolean value whether the component should be kept or removed. TYPE: ComponentFilterPredicate

RETURNS DESCRIPTION
Iterator[PipelineComponent]: Iterator of components matching the predicate

Source code in kpops/pipeline.py

def find(self, predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]:
    """Find pipeline components matching a custom predicate.

    :param predicate: Filter function,
        returns boolean value whether the component should be kept or removed
    :returns: Iterator of components matching the predicate
    """
    for component in self.components:
        if predicate(component):
            yield component
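A short, hedged sketch of a custom predicate used with filter and find; the pipeline object is assumed to come from generate(), and the type attribute checked here is the component type declared in the pipeline definition:

```python
def is_streams_app(component) -> bool:
    """Illustrative predicate: keep only components declared as streams-app."""
    return component.type == "streams-app"

# Yield matching components without modifying the pipeline:
for match in pipeline.find(is_streams_app):
    print(match.name)

# Or drop every non-matching component in place:
pipeline.filter(is_streams_app)
```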
Auto generation happens mostly with pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto generate code for the documentation.
cli_env_vars.env -- All CLI environment variables in a dotenv file.
cli_env_vars.md -- All CLI environment variables in a table.
config_env_vars.env -- Almost all pipeline config environment variables in a dotenv file. The script checks for each field in KpopsConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it (see the sketch after this list).
config_env_vars.md -- Almost all pipeline config environment variables in a table.
variable_substitution.yaml -- A copy of ./tests/pipeline/resources/component-type-substitution/pipeline.yaml used as an example of substitution.
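As a rough illustration of what such a generator script does (this is not the actual script), the sketch below walks the top-level fields of a pydantic model and emits dotenv lines; the KPOPS_ prefix, the stand-in model, and the name-to-env mapping are assumptions, and nested models are deliberately skipped, mirroring the limitation described above:

```python
from pydantic import BaseModel, Field

class TopicNameConfig(BaseModel):
    default_output_topic_name: str = "${pipeline.name}-${component.name}"

class DemoConfig(BaseModel):
    """Stand-in for KpopsConfig; the real model lives in the kpops package."""
    pipeline_base_dir: str = Field(default=".", description="Base directory to the pipelines")
    kafka_brokers: str = Field(description="The comma separated Kafka brokers address.")
    topic_name_config: TopicNameConfig = TopicNameConfig()

def dump_env_vars(model: type[BaseModel], prefix: str = "KPOPS_") -> list[str]:
    lines: list[str] = []
    for name, field in model.model_fields.items():
        # Nested models such as topic_name_config are skipped, mirroring the
        # limitation of the real script described above.
        if isinstance(field.annotation, type) and issubclass(field.annotation, BaseModel):
            continue
        env_name = prefix + name.upper()
        default = "" if field.is_required() else field.default
        lines.append(f"# {field.description or name}\n{env_name}={default}")
    return lines

print("\n".join(dump_env_vars(DemoConfig)))
```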
Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.
Section: Pipeline and defaults example definitions (developer/auto-generation/#pipeline-and-defaults-example-definitions)
Generates example pipeline.yaml and defaults.yaml for each individual component, stores them and also concatenates them into 1 big pipeline definition and 1 big pipeline defaults definition.
User input
headers/*\\.yaml -- The top of each example. Includes a description comment, type and name. The headers for pipeline.yaml reside in the pipeline-components dir and the defaults.yaml headers reside in the pipeline-defaults dir. The names of the files must be equal to the respective component type.
sections/*\\.yaml -- Each YAML file contains a single section (component attribute) definition. The intention is to keep the minimal set of definitions there from which any component definition can be built. The names of the files must be equal to the respective component type and the attribute name. The sections are used for both defaults.yaml and pipeline.yaml generation and reside in the pipeline-components dir.
Generated
pipeline-components/dependencies/* -- Cached information about KPOps components
pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the pipeline.yaml generation.
defaults_pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the defaults.yaml generation.
kpops_structure.yaml -- Specifies the inheritance hierarchy of the components and what sections exist in each component.
pipeline-components/*\\.yaml -- All single-component pipeline definitions and one big (complete) pipeline.yaml that contains all of them.
pipeline-defaults/*\.yaml -- All single-component defaults definitions and one big (complete) defaults.yaml that contains all of them.
Section: How to contribute (developer/contributing/)
Welcome! We are glad to have you visit our contributing guide!
If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.
We are using git submodules to import the KPOps examples repository. You need to fetch the repository locally on your machine. To do so use this command:
We advise that you stick to our pre-commit hooks for code linting, formatting, and auto-generation of documentation. After you install them using poetry run pre-commit install, they're triggered automatically during git commit. Additionally, you can invoke them manually with poetry run pre-commit run -a. For the dprint hook to run locally, you have to install dprint manually; it does run in the CI, so you can also skip the local installation and instead apply the formatting changes that dprint flags there.
Welcome! We are glad to have you visit our developer guide! If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.
Find more about our code-style or insights into KPOps' code base here in our developer guide.
Work in progress
The developer guide is still under construction. If you have a question left unanswered here, feel free to ask it by opening an issue.
Section: What is KPOps? (user/what-is-kpops/)
With a couple of easy commands in the shell, and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline1 to a Kubernetes cluster, but also reset, clean or destroy it!
Deploy Kafka apps to Kubernetes: KPOps allows you to deploy consecutive Kafka Streams applications and producers using an easy-to-read and -write pipeline definition.
Manage Kafka Connectors: KPOps connects with your Kafka Connect cluster and deploys, validates, and deletes your connectors.
Configure multiple pipelines and steps: KPOps has various abstractions that simplify configuring multiple pipelines and steps within pipelines by sharing common configuration between different components, such as producers or streaming applications.
Handle your topics and schemas: KPOps not only creates and deletes your topics but also registers and deletes your schemas.
Clean termination of Kafka components: KPOps removes your pipeline components (e.g., Kafka Streams applications) from the Kubernetes cluster and cleans up the component-related state (e.g., removing/resetting the offsets of Kafka consumer groups).
Preview your pipeline changes: With the KPOps dry-run, you can ensure your pipeline definition is set up correctly. This helps to minimize downtime and prevent potential errors or issues that could impact your production environment.
Section: Example (user/what-is-kpops/#example)
An overview of Word-count pipeline shown in Streams Explorer
Word-count pipeline.yaml
KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.
Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.
To learn about any of the available settings, take a look at the example below.
config.yaml
# CONFIGURATION\n#\n# Custom Python module defining project-specific KPOps components\ncomponents_module: null\n# Base directory to the pipelines (default is current working directory)\npipeline_base_dir: .\n# The Kafka brokers address.\n# REQUIRED\nkafka_brokers: \"http://broker1:9092,http://broker2:9092\"\n# Configure the topic name variables you can use in the pipeline definition.\ntopic_name_config: \n # Configures the value for the variable ${output_topic_name}\n default_output_topic_name: ${pipeline.name}-${component.name}\n # Configures the value for the variable ${error_topic_name}\n default_error_topic_name: ${pipeline.name}-${component.name}-error\n# Configuration for Schema Registry.\nschema_registry:\n # Whether the Schema Registry handler should be initialized.\n enabled: false\n # Address of the Schema Registry.\n url: \"http://localhost:8081\"\n# Configuration for the Kafka REST Proxy.\nkafka_rest:\n # Address of the Kafka REST Proxy.\n url: \"http://localhost:8082\"\n# Configuration for Kafka Connect.\nkafka_connect:\n # Address of Kafka Connect.\n url: \"http://localhost:8083\"\n# The timeout in seconds that specifies when actions like deletion or deploy\n# timeout.\ntimeout: 300\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n # Name of kubeconfig context (`--kube-context`)\n context: name\n # Run Helm in Debug mode.\n debug: false\n # Kubernetes API version used for Capabilities.APIVersions\n api_version: null\n# Configure Helm Diff.\nhelm_diff_config: \n # Set of keys that should not be checked.\n ignore:\n - name\n - imageTag\n# Whether to retain clean up jobs in the cluster or uninstall the, after\n# completion.\nretain_clean_jobs: false\n
Environment-specific pipeline definitions
Similarly to defaults, it is possible to have an unlimited number of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.
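As a rough illustration of the naming convention only (how KPOps actually combines these files is covered in the defaults discussion below), a sketch that collects a base definition together with its environment-specific counterpart:

```python
from pathlib import Path

def candidate_pipeline_files(pipeline_dir: Path, environment: str | None) -> list[Path]:
    """Return the base definition plus the environment-specific one, if it exists."""
    files = [pipeline_dir / "pipeline.yaml"]
    if environment:
        env_file = pipeline_dir / f"pipeline_{environment}.yaml"
        if env_file.exists():
            files.append(env_file)
    return files

# candidate_pipeline_files(Path("pipelines/word-count"), "development")
# -> [.../pipeline.yaml, .../pipeline_development.yaml]  (if the latter exists)
```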
KPOps has a very efficient way of dealing with repeating settings which manifests as defaults.yaml. This file provides the user with the power to set defaults for any and all components, thus omitting the need to repeat the same settings in pipeline.yaml.
An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.
It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.
KPOps allows using multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These will be picked up by KPOps and merged into a single pipeline.yaml file. KPOps starts reading the defaults files from the directory in which the pipeline is defined and picks up every defaults file on its way up to the directory configured as pipeline_base_dir.
The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.
It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.
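To make the lookup order concrete, here is a hedged sketch of collecting defaults.yaml files by walking from the pipeline's directory up to pipeline_base_dir; it is illustrative and not the actual KPOps implementation:

```python
from pathlib import Path

def collect_defaults_files(pipeline_dir: Path, base_dir: Path) -> list[Path]:
    """Gather every defaults.yaml between pipeline_base_dir and the pipeline directory.

    The returned list is ordered from the base directory down to the pipeline
    directory, so deeper files can override the higher-level defaults when merged.
    """
    found: list[Path] = []
    current = pipeline_dir.resolve()
    base = base_dir.resolve()
    while True:
        candidate = current / "defaults.yaml"
        if candidate.exists():
            found.append(candidate)
        if current == base or current == current.parent:
            break
        current = current.parent
    found.reverse()  # shallowest first, deepest (closest to pipeline.yaml) last
    return found

# collect_defaults_files(Path("pipelines/my-team/my-pipeline"), Path("pipelines"))
# -> e.g. [pipelines/defaults.yaml, pipelines/my-team/my-pipeline/defaults.yaml]
```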
defaults merge priority
Imagine the following folder structure, where the pipeline_base_dir is configured to pipelines:
The defaults codeblocks in this section contain the full set of settings that are specific to the component. If a setting already exists in a parent config, it will not be included in the child's.
# Base Kubernetes App\n#\n# Parent of: HelmApp\n# Child of: PipelineComponent\nkubernetes-app:\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example.\n app: # required\n image: exampleImage # Example\n debug: false # Example\n commandLine: {} # Example\n
# Base component for Kafka-based components.\n#\n# Parent of: ProducerApp, StreamsApp\n# Child of: KubernetesApp\nkafka-app:\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` can contain application-specific settings, hence the user is free to\n # add the key-value pairs they need.\n app: # required\n streams: # required\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n nameOverride: override-with-this-name # kafka-app-specific\n imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
# StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n # No arbitrary keys are allowed under `app`here\n # Allowed configs:\n # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n app: # required\n # Streams Bootstrap streams section\n streams: # required, streams-app-specific\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n inputTopics:\n - topic1\n - topic2\n outputTopic: output-topic\n inputPattern: input-pattern\n extraInputTopics:\n input_role1:\n - input_topic1\n - input_topic2\n input_role2:\n - input_topic3\n - input_topic4\n extraInputPatterns:\n pattern_role1: input_pattern1\n extraOutputTopics:\n output_role1: output_topic1\n output_role2: output_topic2\n errorTopic: error-topic\n config:\n my.streams.config: my.value\n nameOverride: override-with-this-name # streams-app-specific\n autoscaling: # streams-app-specific\n consumerGroup: consumer-group # required\n lagThreshold: 0 # Average target value to trigger scaling actions.\n enabled: false # Whether to enable auto-scaling using KEDA.\n # This is the interval to check each trigger on.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n pollingInterval: 30\n # The period to wait after the last trigger reported active before scaling\n # the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n cooldownPeriod: 300\n # The offset reset policy for the consumer if the the consumer group is\n # not yet subscribed to a partition.\n offsetResetPolicy: earliest\n # This setting is passed to the HPA definition that KEDA will create for a\n # given resource and holds the maximum number of replicas of the target resouce.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n maxReplicas: 1\n # Minimum number of replicas KEDA will scale the resource down to.\n # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n minReplicas: 0\n # If this property is set, KEDA will scale the resource down to this\n # number of replicas.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n idleReplicas: 0\n topics: # List of auto-generated Kafka Streams topics used by the streams app.\n - topic1\n - topic2\n
# Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example. Extensive documentation on\n # connectors: https://kafka.apache.org/documentation/#connectconfigs\n app: # required\n tasks.max: 1\n # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n # Image Tag etc.\n resetter_values:\n imageTag: \"1.2.3\"\n
# Kubernetes app managed through Helm with an associated Helm chart\n- type: helm-app\n name: helm-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example.\n app: # required\n image: exampleImage # Example\n debug: false # Example\n commandLine: {} # Example\n # Helm repository configuration (optional)\n # If not set the helm repo add will not be called. Useful when using local Helm charts\n repo_config:\n repository_name: bakdata-streams-bootstrap # required\n url: https://bakdata.github.io/streams-bootstrap/ # required\n repo_auth_flags:\n username: user\n password: pass\n ca_file: /home/user/path/to/ca-file\n insecure_skip_tls_verify: false\n version: \"1.0.0\" # Helm chart version\n
# Base component for Kafka-based components.\n# Producer or streaming apps should inherit from this class.\n- type: kafka-app # required\n name: kafka-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` can contain application-specific settings, hence the user is free to\n # add the key-value pairs they need.\n app: # required\n streams: # required\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n nameOverride: override-with-this-name # kafka-app-specific\n imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
KafkaConnector is a component that deploys Kafka Connectors. Since a connector is always either a sink or a source connector, it is not recommended to use KafkaConnector directly for deployment in pipeline.yaml. Instead, KafkaConnector should be used in defaults.yaml to set defaults for all connectors in the pipeline, as they can share some common settings.
# Kafka sink connector\n- type: kafka-sink-connector\n name: kafka-sink-connector # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example. Extensive documentation on\n # connectors: https://kafka.apache.org/documentation/#connectconfigs\n app: # required\n tasks.max: 1\n # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n # Image Tag etc.\n resetter_values:\n imageTag: \"1.2.3\"\n
# Kafka source connector\n- type: kafka-source-connector # required\n name: kafka-source-connector # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n # The source connector has no `from` section\n # from:\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example. Extensive documentation on\n # connectors: https://kafka.apache.org/documentation/#connectconfigs\n app: # required\n tasks.max: 1\n # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n # Image Tag etc.\n resetter_values:\n imageTag: \"1.2.3\"\n # offset.storage.topic\n # https://kafka.apache.org/documentation/#connect_running\n offset_topic: offset_topic\n
# Base Kubernetes App\n- type: kubernetes-app\n name: kubernetes-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example.\n app: # required\n image: exampleImage # Example\n debug: false # Example\n commandLine: {} # Example\n
# Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n name: producer-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n # from: # While the producer-app does inherit from kafka-app, it does not need a\n # `from` section, hence it does not support it.\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # Allowed configs:\n # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n app: # required\n streams: # required, producer-app-specific\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n outputTopic: output_topic\n extraOutputTopics:\n output_role1: output_topic1\n output_role2: output_topic2\n nameOverride: override-with-this-name # kafka-app-specific\n # Helm repository configuration (optional)\n # If not set the helm repo add will not be called. Useful when using local Helm charts\n repo_config:\n repository_name: bakdata-streams-bootstrap # required\n url: https://bakdata.github.io/streams-bootstrap/ # required\n repo_auth_flags:\n username: user\n password: pass\n ca_file: /home/user/path/to/ca-file\n insecure_skip_tls_verify: false\n version: \"2.12.0\" # Helm chart version\n
# StreamsApp component that configures a streams bootstrap app.\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\n- type: streams-app # required\n name: streams-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # No arbitrary keys are allowed under `app`here\n # Allowed configs:\n # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n app: # required\n # Streams Bootstrap streams section\n streams: # required, streams-app-specific\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n inputTopics:\n - topic1\n - topic2\n outputTopic: output-topic\n inputPattern: input-pattern\n extraInputTopics:\n input_role1:\n - input_topic1\n - input_topic2\n input_role2:\n - input_topic3\n - input_topic4\n extraInputPatterns:\n pattern_role1: input_pattern1\n extraOutputTopics:\n output_role1: output_topic1\n output_role2: output_topic2\n errorTopic: error-topic\n config:\n my.streams.config: my.value\n nameOverride: override-with-this-name # streams-app-specific\n autoscaling: # streams-app-specific\n consumerGroup: consumer-group # required\n lagThreshold: 0 # Average target value to trigger scaling actions.\n enabled: false # Whether to enable auto-scaling using KEDA.\n # This is the interval to check each trigger on.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n pollingInterval: 30\n # The period to wait after the last trigger reported active before scaling\n # the resource back to 0. 
https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n cooldownPeriod: 300\n # The offset reset policy for the consumer if the the consumer group is\n # not yet subscribed to a partition.\n offsetResetPolicy: earliest\n # This setting is passed to the HPA definition that KEDA will create for a\n # given resource and holds the maximum number of replicas of the target resouce.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n maxReplicas: 1\n # Minimum number of replicas KEDA will scale the resource down to.\n # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n minReplicas: 0\n # If this property is set, KEDA will scale the resource down to this\n # number of replicas.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n idleReplicas: 0\n topics: # List of auto-generated Kafka Streams topics used by the streams app.\n - topic1\n - topic2\n # Helm repository configuration (optional)\n # If not set the helm repo add will not be called. Useful when using local Helm charts\n repo_config:\n repository_name: bakdata-streams-bootstrap # required\n url: https://bakdata.github.io/streams-bootstrap/ # required\n repo_auth_flags:\n username: user\n password: pass\n ca_file: /home/user/path/to/ca-file\n insecure_skip_tls_verify: false\n version: \"2.12.0\" # Helm chart version\n
Environment variables can be set by using the export command in Linux or the set command in Windows.
dotenv files
KPOps currently supports .env files only for variables related to the config. Full support for .env files is on the roadmap. One possible way to use such a file is to export its contents manually with the following command: export $(xargs < .env). This works in bash, provided there are no spaces inside the values.
These variables take precedence over the settings in config.yaml. Variables marked as required can instead be set in the global config.
| Name | Default Value | Required | Description | Setting name |
|------|---------------|----------|-------------|--------------|
| KPOPS_COMPONENTS_MODULE | | False | Custom Python module defining project-specific KPOps components | components_module |
| KPOPS_PIPELINE_BASE_DIR | . | False | Base directory to the pipelines (default is current working directory) | pipeline_base_dir |
| KPOPS_KAFKA_BROKERS | | True | The comma separated Kafka brokers address. | kafka_brokers |
| KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME | ${pipeline.name}-${component.name} | False | Configures the value for the variable ${output_topic_name} | topic_name_config.default_output_topic_name |
| KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME | ${pipeline.name}-${component.name}-error | False | Configures the value for the variable ${error_topic_name} | topic_name_config.default_error_topic_name |
| KPOPS_SCHEMA_REGISTRY__ENABLED | False | False | Whether the Schema Registry handler should be initialized. | schema_registry.enabled |
| KPOPS_SCHEMA_REGISTRY__URL | http://localhost:8081/ | False | Address of the Schema Registry. | schema_registry.url |
| KPOPS_SCHEMA_REGISTRY__TIMEOUT | 30 | False | Operation timeout in seconds. | schema_registry.timeout |
| KPOPS_KAFKA_REST__URL | http://localhost:8082/ | False | Address of the Kafka REST Proxy. | kafka_rest.url |
| KPOPS_KAFKA_REST__TIMEOUT | 30 | False | Operation timeout in seconds. | kafka_rest.timeout |
| KPOPS_KAFKA_CONNECT__URL | http://localhost:8083/ | False | Address of Kafka Connect. | kafka_connect.url |
| KPOPS_KAFKA_CONNECT__TIMEOUT | 30 | False | Operation timeout in seconds. | kafka_connect.timeout |
| KPOPS_CREATE_NAMESPACE | False | False | Flag for helm upgrade --install. Create the release namespace if not present. | create_namespace |
| KPOPS_HELM_CONFIG__CONTEXT | | False | Name of kubeconfig context (--kube-context) | helm_config.context |
| KPOPS_HELM_CONFIG__DEBUG | False | False | Run Helm in Debug mode | helm_config.debug |
| KPOPS_HELM_CONFIG__API_VERSION | | False | Kubernetes API version used for Capabilities.APIVersions | helm_config.api_version |
| KPOPS_HELM_DIFF_CONFIG__IGNORE | | True | Set of keys that should not be checked. | helm_diff_config.ignore |
| KPOPS_RETAIN_CLEAN_JOBS | False | False | Whether to retain clean up jobs in the cluster or uninstall them after completion. | retain_clean_jobs |

config_env_vars.env -- Exhaustive list of all config-related environment variables:
# Global config environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# settings in `config.yaml`. Variables marked as required can instead\n# be set in the global config.\n#\n# components_module\n# Custom Python module defining project-specific KPOps components\nKPOPS_COMPONENTS_MODULE # No default value, not required\n# pipeline_base_dir\n# Base directory to the pipelines (default is current working\n# directory)\nKPOPS_PIPELINE_BASE_DIR=.\n# kafka_brokers\n# The comma separated Kafka brokers address.\nKPOPS_KAFKA_BROKERS # No default value, required\n# topic_name_config.default_output_topic_name\n# Configures the value for the variable ${output_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}\n# topic_name_config.default_error_topic_name\n# Configures the value for the variable ${error_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error\n# schema_registry.enabled\n# Whether the Schema Registry handler should be initialized.\nKPOPS_SCHEMA_REGISTRY__ENABLED=False\n# schema_registry.url\n# Address of the Schema Registry.\nKPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/\n# schema_registry.timeout\n# Operation timeout in seconds.\nKPOPS_SCHEMA_REGISTRY__TIMEOUT=30\n# kafka_rest.url\n# Address of the Kafka REST Proxy.\nKPOPS_KAFKA_REST__URL=http://localhost:8082/\n# kafka_rest.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_REST__TIMEOUT=30\n# kafka_connect.url\n# Address of Kafka Connect.\nKPOPS_KAFKA_CONNECT__URL=http://localhost:8083/\n# kafka_connect.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_CONNECT__TIMEOUT=30\n# create_namespace\n# Flag for `helm upgrade --install`. Create the release namespace if\n# not present.\nKPOPS_CREATE_NAMESPACE=False\n# helm_config.context\n# Name of kubeconfig context (`--kube-context`)\nKPOPS_HELM_CONFIG__CONTEXT # No default value, not required\n# helm_config.debug\n# Run Helm in Debug mode\nKPOPS_HELM_CONFIG__DEBUG=False\n# helm_config.api_version\n# Kubernetes API version used for `Capabilities.APIVersions`\nKPOPS_HELM_CONFIG__API_VERSION # No default value, not required\n# helm_diff_config.ignore\n# Set of keys that should not be checked.\nKPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required\n# retain_clean_jobs\n# Whether to retain clean up jobs in the cluster or uninstall the,\n# after completion.\nKPOPS_RETAIN_CLEAN_JOBS=False\n
These variables take precedence over the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.
| Name | Default Value | Required | Description |
|------|---------------|----------|-------------|
| KPOPS_CONFIG_PATH | . | False | Path to the dir containing config.yaml files |
| KPOPS_DOTENV_PATH | | False | Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. |
| KPOPS_ENVIRONMENT | | False | The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). |
| KPOPS_PIPELINE_PATHS | | True | Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. |
| KPOPS_PIPELINE_STEPS | | False | Comma separated list of steps to apply the command on |

cli_env_vars.env -- Exhaustive list of all cli-related environment variables:
# CLI Environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# commands' flags. If a variable is set, the corresponding flag does\n# not have to be specified in commands. Variables marked as required\n# can instead be set as flags.\n#\n# Path to the dir containing config.yaml files\nKPOPS_CONFIG_PATH=.\n# Path to dotenv file. Multiple files can be provided. The files will\n# be loaded in order, with each file overriding the previous one.\nKPOPS_DOTENV_PATH # No default value, not required\n# The environment you want to generate and deploy the pipeline to.\n# Suffix your environment files with this value (e.g.\n# defaults_development.yaml for environment=development).\nKPOPS_ENVIRONMENT # No default value, not required\n# Paths to dir containing 'pipeline.yaml' or files named\n# 'pipeline.yaml'.\nKPOPS_PIPELINE_PATHS # No default value, required\n# Comma separated list of steps to apply the command on\nKPOPS_PIPELINE_STEPS # No default value, not required\n
These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.
All of them are prefixed with component. and take the following form: component.{attribute_name}. If the attribute itself contains attributes, they can be referred to like this: component.{attribute_name}.{subattribute_name}.
These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.
All such variables are prefixed with config. and are of the same form as the component-specific variables.
Info
error_topic_name is an alias for config.topic_name_config.default_error_topic_name
output_topic_name is an alias for config.topic_name_config.default_output_topic_name
Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation, following the form ${ENV_VAR_NAME}. This, of course, includes variables like the ones relevant to the KPOps CLI that are exported by the user.
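For intuition only, a sketch of this kind of ${...} substitution using the Python standard library; it mimics the placeholder form described above and is not the substitution engine KPOps uses:

```python
import os
import re

def substitute(text: str, extra: dict[str, str] | None = None) -> str:
    """Replace ${NAME} placeholders from a mapping, falling back to os.environ."""
    mapping = {**os.environ, **(extra or {})}

    def replace(match: re.Match[str]) -> str:
        name = match.group(1)
        return mapping.get(name, match.group(0))  # leave unknown placeholders untouched

    return re.sub(r"\$\{([^}]+)\}", replace, text)

os.environ["MY_TOPIC_PREFIX"] = "team-a"
print(substitute("name: ${MY_TOPIC_PREFIX}-${pipeline.name}", {"pipeline.name": "word-count"}))
# -> name: team-a-word-count
```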
See all KPOps environment variables
Section: Pipeline name variables (user/core-concepts/variables/substitution/#pipeline-name-variables)
These are special variables that refer to the name and path of a pipeline.
${pipeline.name}: Concatenated path of the parent directories in which pipeline.yaml is defined. For instance, for ./data/pipelines/v1/pipeline.yaml the value of the variable would be data-pipelines-v1.
${pipeline_name_<level>}: Similar to the previous variable, each <level> contains a part of the path to the pipeline.yaml file. Considering the previous example, ${pipeline_name_0} would be data, ${pipeline_name_1} would be pipelines, and ${pipeline_name_2} would be v1.
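A small sketch of how these values can be derived from the pipeline path, matching the example above; illustrative only, not the KPOps implementation:

```python
from pathlib import Path

pipeline_yaml = Path("./data/pipelines/v1/pipeline.yaml")

# Path parts of the parent directory ("./" is dropped by pathlib)
levels = list(pipeline_yaml.parent.parts)

pipeline_name = "-".join(levels)
print(pipeline_name)  # data-pipelines-v1  -> ${pipeline.name}
for i, part in enumerate(levels):
    print(f"pipeline_name_{i} = {part}")  # data, pipelines, v1 -> ${pipeline_name_<level>}
```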
"}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
Chaining variables: It is possible to chain any number of variables, see the example above.
Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references.
ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blogpost. The one used in this example is re-built from scratch using bakdata's streams-bootstrap library.
Section: What this will demonstrate (user/examples/atm-fraud-pipeline/#what-this-will-demonstrate)
Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.
You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be deployed correctly.
Section: Check if the deployment is successful (user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful)
You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:
After that, open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:
An overview of ATM fraud pipeline shown in Streams Explorer
Attention
Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.
Word-count is a demo pipeline consisting of a producer that writes words to Kafka, a Kafka Streams app that counts how often each word occurs, and finally a Redis database into which the words are exported.
Section: What this will demonstrate (user/getting-started/quick-start/#what-this-will-demonstrate)
Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.
You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be deployed correctly.
Section: Check if the deployment is successful (user/getting-started/quick-start/#check-if-the-deployment-is-successful)
You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:
After that, open http://localhost:8080 in your browser.
You should be able to see the pipeline shown in the image below:
An overview of Word-count pipeline shown in Streams Explorer
Attention
Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.
k3d (Version 5.4.6+) and Docker (Version >= v20.10.5) or an existing Kubernetes cluster (>= 1.21.0)
kubectl (Compatible with server version 1.21.0)
Helm (Version 3.8.0+)
"}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "
If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.
For other ways of installing k3d, you can have a look at their installation guide.
The Kafka deployment needs a modified Docker image, so the image must be built and pushed to a Docker registry. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:
Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.
You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.
Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.
To allow connectivity to other systems, Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:
Create a Dockerfile with the following content:
FROM confluentinc/cp-kafka-connect:7.1.3\n\nRUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0\n
Build and push the modified image to your private Docker registry:
Install Kafka, Zookeeper, Confluent's Schema Registry, Kafka Rest Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example for the --values ./kafka.yaml file configuring the deployment accordingly. Deploy the services:
An example value configuration for Confluent's Helm chart. This configuration deploys a single Kafka Broker, a Schema Registry, Zookeeper, Kafka Rest Proxy, and Kafka Connect with minimal resources.
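The original kafka.yaml is not reproduced here. As a rough, hypothetical sketch only (the chart keys, registry address, and image tags below are assumptions based on Confluent's cp-helm-charts, not the original example):
cp-zookeeper:\n  servers: 1\ncp-kafka:\n  brokers: 1\ncp-schema-registry:\n  enabled: true\ncp-kafka-rest:\n  enabled: true\ncp-kafka-connect:\n  enabled: true\n  image: my-registry.localhost:12345/kafka-connect-jdbc # the modified image built above\n  imageTag: \"7.1.3\"\ncp-ksql-server:\n  enabled: false\ncp-control-center:\n  enabled: false\n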
Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster including the inspection of schemas and monitoring of metrics. First, add the Helm repository:
The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with Helm (see section Infrastructure).
Navigate to the examples folder and replace <name-of-the-example-directory> with the example you want to tear down, for example atm-fraud-detection.
Remove the pipeline
# Uncomment 1 line to either destroy, reset or clean.\n\n# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \\\n--config <name-of-the-example-directory>/config.yaml \\\n--execute\n
In case kpops destroy is not working, one can uninstall the pipeline services one by one. This is equivalent to running kpops destroy. In case a clean uninstall (like the one kpops clean does) is needed, one also needs to delete the topics and schemas created by the deployment of the pipeline.
"}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "
KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention, the type is the lowercase, kebab-cased name of the class.
class MyCoolStreamApp(StreamsApp):\n- type = \"my-cool-stream-app\"\n+ ...\n
Because of this new convention, producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.
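For example, a (hypothetical) producer entry changes like this in pipeline.yaml, and analogously the producer key in defaults.yaml:
# pipeline.yaml\n- type: producer\n+ type: producer-app\n  name: my-producer\n\n# defaults.yaml\n- producer:\n+ producer-app:\n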
"}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "
All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example of a pipeline.yaml in the following. Notice that the app section here remains untouched.
If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, the abstract function get_helm_chart that returns the chart for deploying the app using Helm is now a Python property and renamed to helm_chart.
If you previously set the environment variable KPOPS_KAFKA_BROKER, you now need to replace it with KPOPS_KAFKA_BROKERS.
"}, {"location": "user/migration-guide/v2-v3/", "title": "Migrate from V2 to V3", "text": "
Jump to the summary
"}, {"location": "user/migration-guide/v2-v3/#use-hash-and-trim-long-helm-release-names-instead-of-only-trimming", "title": "Use hash and trim long Helm release names instead of only trimming", "text": "
KPOps handles long Helm release names (more than 53 characters) differently. Helm will not find your (long) old release names anymore. Therefore, it is recommended to destroy your pipeline once with KPOps v2 to remove the old Helm release names. After a clean destroy, re-deploy your pipeline with KPOps v3.
For example, if you have a component with the Helm release name example-component-name-too-long-fake-fakefakefakefakefake, the new release name will shorten the original name to 53 characters and then replace the last 6 characters of the trimmed name with the first 5 characters of the result of SHA-1(helm_release_name).
All Helm-specific parts of the built-in KubernetesApp have been extracted to a new child component that is more appropriately named HelmApp. It has to be renamed in your existing pipeline definitions and custom components module.
- from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n ...\n
"}, {"location": "user/migration-guide/v2-v3/#create-streamsbootstrap-component-refactor-cleanup-jobs-as-individual-helmapp", "title": "Create StreamsBootstrap component & refactor cleanup jobs as individual HelmApp", "text": "
Previously the default KafkaApp component configured the streams-bootstrap Helm Charts. Now, this component is no longer tied to Helm (or Kubernetes). Instead, there is a new StreamsBootstrap component that configures the Helm Chart repository for the components that use it, e.g. StreamsApp and ProducerApp. If you are using non-default values for the Helm Chart repository or version, it has to be updated as shown below.
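A hedged sketch of that update in defaults.yaml, assuming a custom repository (the repository name, URL, and version are placeholders):
# defaults.yaml\n- kafka-app:\n-   repo_config:\n-     repository_name: my-custom-repo\n-     url: https://my-custom-repo.org/charts/\n-   version: \"2.9.0\"\n+ streams-bootstrap:\n+   repo_config:\n+     repository_name: my-custom-repo\n+     url: https://my-custom-repo.org/charts/\n+   version: \"2.9.0\"\n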
Internally, the Kafka Connector resetter is now its own standard HelmApp, removing a lot of the shared code. It is configured using the resetter_namespace (formerly namespace) and resetter_values attributes.
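A hedged sketch of the renamed attribute in defaults.yaml (the namespace value is a placeholder):
kafka-connector:\n-   namespace: my-resetter-namespace\n+   resetter_namespace: my-resetter-namespace\n    resetter_values:\n      imageTag: \"1.2.3\"\n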
The schema_registry_url is replaced with schema_registry.url (default http://localhost:8081) and schema_registry.enabled (default false).
kafka_rest_host is renamed to kafka_rest.url (default http://localhost:8082).
kafka_connect_host is replaced with kafka_connect.url (default http://localhost:8083).
brokers is renamed to kafka_brokers (see the sketch below).
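Put together, a sketch of the config.yaml migration (the hostnames are placeholders):
# config.yaml\n- schema_registry_url: \"http://my-schema-registry:8081\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-schema-registry:8081\"\n- kafka_rest_host: \"http://my-rest-proxy:8082\"\n+ kafka_rest:\n+   url: \"http://my-rest-proxy:8082\"\n- kafka_connect_host: \"http://my-kafka-connect:8083\"\n+ kafka_connect:\n+   url: \"http://my-kafka-connect:8083\"\n- brokers: \"my-broker:9092\"\n+ kafka_brokers: \"my-broker:9092\"\n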
The environment variable names of these config fields changed accordingly. Please refer to the environment variables documentation page to see the newest changes.
Specifying the environment is no longer mandatory. If not defined, only the global files will be used.
environment is no longer specified in config.yaml. Instead, it can be either set via the CLI flag --environment or with the environment variable KPOPS_ENVIRONMENT.
The --config flag in the CLI now points to the directory that contains config*.yaml files. The files to be used are resolved based on the provided (or not) environment.
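Regarding the environment key above, a hedged sketch of the change (the environment name and pipeline path are placeholders):
# config.yaml\n- environment: development\n\n# Instead, pass the environment on the command line or via an environment variable:\n# kpops deploy my-pipeline/pipeline.yaml --environment development\n# KPOPS_ENVIRONMENT=development\n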
"}, {"location": "user/migration-guide/v2-v3/#refactor-generate-template-for-python-api-usage", "title": "Refactor generate template for Python API usage", "text": "
The template method of every pipeline component has been renamed to manifest as it is no longer strictly tied to Helm template. Instead, it can be used to render the final resources of a component, such as Kubernetes manifests.
There is also a new kpops manifest command replacing the existing kpops generate --template flag.
If you're using this functionality in your custom components, it needs to be updated.
from kpops.components.base_components.models.resource import Resource\n\n @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n return [] # list of manifests\n
The global configuration variables are now namespaced under the config key, such as ${config.kafka_brokers}, ${config.schema_registry.url}. Same with pipeline variables, e.g. ${pipeline_name} \u2192 ${pipeline.name}. This would make it more uniform with the existing ${component.<key>} variables.
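A hedged sketch of the renaming inside a defaults or pipeline definition (the pre-v3 variable names on the removed lines are assumptions):
  app:\n    streams:\n-     brokers: ${brokers}\n+     brokers: ${config.kafka_brokers}\n-     schemaRegistryUrl: ${schema_registry_url}\n+     schemaRegistryUrl: ${config.schema_registry.url}\n  to:\n    topics:\n-     ${pipeline_name}-output-topic:\n+     ${pipeline.name}-output-topic:\n        type: output\n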
- from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n+ from kpops.components.base_components.models.resource import Resource\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n\n @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n return [] # list of manifests\n ...\n
"}, {"location": "user/migration-guide/v3-v4/", "title": "Migrate from V3 to V4", "text": ""}, {"location": "user/migration-guide/v3-v4/#distribute-defaults-across-multiple-files", "title": "Distribute defaults across multiple files", "text": "
Warning
The --defaults flag is removed
It is now possible to use multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These will be picked up by KPOps and merged into a single pipeline.yaml file. KPOps starts reading the defaults files from where the pipeline path is defined and picks up every defaults file on its way to where the pipeline_base_dir is defined.
For example, imagine the following folder structure:
"}, {"location": "user/migration-guide/v5-v6/", "title": "Migrate from V5 to V6", "text": ""}, {"location": "user/migration-guide/v5-v6/#deploy-multiple-pipelines", "title": "Deploy multiple pipelines", "text": "
KPOps can now deploy multiple pipelines in a single command. It is possible to pass one or more pipeline.yaml files, or a directory containing multiple pipeline.yaml files.
The environment variable KPOPS_PIPELINE_PATH is changed to KPOPS_PIPELINE_PATHS.
Read more:
CLI Usage
Environment variables
"}, {"location": "user/migration-guide/v5-v6/#separate-kpops-api-from-the-cli", "title": "Separate KPOps API from the CLI", "text": "
The KPOps Python API is now stable and separated from the CLI! \ud83c\udf89
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
Enrich pipeline steps with defaults. The enriched pipeline is used for all KPOps operations (deploy, destroy, ...).
Usage:
$ kpops generate [OPTIONS] PIPELINE_PATHS...\n
Arguments:
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
PATH: Path for a new KPOps project. It should lead to an empty (or non-existent) directory. The part of the path that doesn't exist will be created. [required]
Options:
--config-include-opt / --no-config-include-opt: Whether to include non-required settings in the generated 'config.yaml' [default: no-config-include-opt]
In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.
Usage:
$ kpops manifest [OPTIONS] PIPELINE_PATHS...\n
Arguments:
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
SCOPE:{pipeline|defaults|config}: Scope of the generated schema
pipeline: Schema of PipelineComponents. Includes the built-in KPOps components by default. To include custom components, provide components module in config.\n\nconfig: Schema of KpopsConfig. [required]\n
Options:
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--include-stock-components / --no-include-stock-components: Include the built-in KPOps components. [default: include-stock-components]
We provide a GitHub composite action bakdata/kpops that installs and executes KPOps commands with the given parameters.
"}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines config \u274c - string Directory containing the config*.yaml file(s) environment \u274c - string Environment to run KPOps in components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS parallel \u274c \"false\" string Whether to run pipelines in parallel python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
steps:\n # ...\n # This step is useful for debugging reasons\n - name: Generate Kafka pipeline\n uses: bakdata/kpops@main\n with:\n command: generate\n working-directory: home/my-kpops-root-dir\n pipeline: pipelines/my-pipeline-file.yaml\n kpops-version: 1.2.3\n\n # It is possible to use a pre-release KPOps version from TestPyPI https://test.pypi.org/project/kpops/#history\n - name: Deploy Kafka pipeline\n uses: bakdata/kpops@main\n with:\n command: deploy --execute\n working-directory: home/my-kpops-root-dir\n pipeline: pipelines/my-pipeline-file.yaml\n kpops-version: 1.2.5.dev20230707132709\n # ...\n
"}]}
{"location": "developer/api/", "title": "Python API", "text": ""}, {"location": "developer/api/#kpops.api", "title": "kpops.api", "text": ""}, {"location": "developer/api/#kpops.api.clean", "title": "clean", "text": "
Directory in which the project should be initiated.
TYPE: Path
config_include_opt
Whether to include non-required settings in the generated config file.
Source code in kpops/api/__init__.py
def init(\n path: Path,\n config_include_opt: bool = False,\n):\n \"\"\"Initiate a default empty project.\n\n :param path: Directory in which the project should be initiated.\n :param config_include_opt: Whether to include non-required settings\n in the generated config file.\n \"\"\"\n if not path.exists():\n path.mkdir(parents=False)\n elif next(path.iterdir(), False):\n log.warning(\"Please provide a path to an empty directory.\")\n return\n init_project(path, config_include_opt)\n
def build_execution_graph(\n self,\n runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],\n /,\n reverse: bool = False,\n) -> Awaitable[None]:\n async def run_parallel_tasks(\n coroutines: list[Coroutine[Any, Any, None]],\n ) -> None:\n tasks = []\n for coro in coroutines:\n tasks.append(asyncio.create_task(coro))\n await asyncio.gather(*tasks)\n\n async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:\n for pending_task in pending_tasks:\n await pending_task\n\n graph: nx.DiGraph = self._graph.copy() # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx\n\n # We add an extra node to the graph, connecting all the leaf nodes to it\n # in that way we make this node the root of the graph, avoiding backtracking\n root_node = \"root_node_bfs\"\n graph.add_node(root_node)\n\n for node in graph:\n predecessors = list(graph.predecessors(node))\n if not predecessors:\n graph.add_edge(root_node, node)\n\n layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))\n\n sorted_tasks = []\n for layer in layers_graph[1:]:\n if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):\n sorted_tasks.append(run_parallel_tasks(parallel_tasks))\n\n if reverse:\n sorted_tasks.reverse()\n\n return run_graph_tasks(sorted_tasks)\n
Filter pipeline components using a custom predicate.
PARAMETER DESCRIPTION predicate
Filter function, returns boolean value whether the component should be kept or removed
TYPE: ComponentFilterPredicate
Source code in kpops/pipeline/__init__.py
def filter(self, predicate: ComponentFilterPredicate) -> None:\n \"\"\"Filter pipeline components using a custom predicate.\n\n :param predicate: Filter function,\n returns boolean value whether the component should be kept or removed\n \"\"\"\n for component in self.components:\n # filter out components not matching the predicate\n if not predicate(component):\n self.remove(component.id)\n
Find pipeline components matching a custom predicate.
PARAMETER DESCRIPTION predicate
Filter function, returns boolean value whether the component should be kept or removed
TYPE: ComponentFilterPredicate
RETURNS DESCRIPTION Iterator[PipelineComponent]
Iterator of components matching the predicate
Source code in kpops/pipeline/__init__.py
def find(self, predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]:\n \"\"\"Find pipeline components matching a custom predicate.\n\n :param predicate: Filter function,\n returns boolean value whether the component should be kept or removed\n :returns: Iterator of components matching the predicate\n \"\"\"\n for component in self.components:\n if predicate(component):\n yield component\n
Auto generation happens mostly with pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto generate code for the documentation.
cli_env_vars.env -- All CLI environment variables in a dotenv file.
cli_env_vars.md -- All CLI environment variables in a table.
config_env_vars.env -- Almost all pipeline config environment variables in a dotenv file. The script checks for each field in KpopsConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it.
config_env_vars.md -- Almost all pipeline config environment variables in a table.
variable_substitution.yaml -- A copy of ./tests/pipeline/resources/component-type-substitution/pipeline.yaml used as an example of substitution.
Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.
"}, {"location": "developer/auto-generation/#pipeline-and-defaults-example-definitions", "title": "Pipeline and defaults example definitions", "text": "
Generates example pipeline.yaml and defaults.yaml for each individual component, stores them, and also concatenates them into one big pipeline definition and one big defaults definition.
User input
headers/*\\.yaml -- The top of each example. Includes a description comment, type and name. The headers for pipeline.yaml reside in the pipeline-components dir and the defaults.yaml headers reside in the pipeline-defaults dir. The names of the files must be equal to the respective component type.
sections/*\\.yaml -- Each YAML file contains a single section (component attribute) definition. The intention is to keep the minimal set of definitions there from which any component definition can be built. The names of the files must be equal to the respective component type and the attribute name. The sections are used for both defaults.yaml and pipeline.yaml generation and reside in the pipeline-components dir.
Generated
pipeline-components/dependencies/* -- Cached information about KPOps components
pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the pipeline.yaml generation.
defaults_pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the defaults.yaml generation.
kpops_structure.yaml -- Specifies the inheritance hierarchy of the components and what sections exist in each component.
pipeline-components/*\\.yaml -- All single-component pipeline definitions and one big (complete) pipeline.yaml that contains all of them.
pipeline-defaults/*\\.yaml -- All single-component defaults definitions and one big (complete) defaults.yaml that contains all of them.
"}, {"location": "developer/contributing/", "title": "How to contribute", "text": "
Welcome! We are glad to have you visit our contributing guide!
If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.
We are using git submodules to import the KPOps examples repository. You need to fetch the repository locally on your machine. To do so, use this command:
We advise that you stick to our pre-commit hooks for code linting, formatting, and auto-generation of documentation. After you install them using poetry run pre-commit install they're triggered automatically during git commit. Additionally, you can manually invoke them with poetry run pre-commit run -a. In order for dprint to work, you have to manually install it locally. It will work in the CI, so it is also possible to manually carry out formatting changes flagged by dprint in the CI and skip installing it locally.
Welcome! We are glad to have you visit our developer guide! If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.
Find more about our code-style or insights into KPOps' code base here in our developer guide.
Work in progress
The developer guide is still under construction. If you have a question left unanswered here, feel free to ask it by opening an issue.
"}, {"location": "user/what-is-kpops/", "title": "What is KPOps?", "text": "
With a couple of easy commands in the shell and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline to a Kubernetes cluster, but also reset, clean, or destroy it!
Deploy Kafka apps to Kubernetes: KPOps allows you to deploy consecutive Kafka Streams applications and producers using an easy-to-read and -write pipeline definition.
Manage Kafka Connectors: KPOps connects with your Kafka Connect cluster and deploys, validates, and deletes your connectors.
Configure multiple pipelines and steps: KPOps has various abstractions that simplify configuring multiple pipelines and steps within pipelines by sharing common configuration between different components, such as producers or streaming applications.
Handle your topics and schemas: KPOps not only creates and deletes your topics but also registers and deletes your schemas.
Clean termination of Kafka components: KPOps removes your pipeline components (i.e., Kafka Streams applications) from the Kubernetes cluster and cleans up the component-related states (i.e., removing/resetting the offsets of Kafka consumer groups).
Preview your pipeline changes: With the KPOps dry-run, you can ensure your pipeline definition is set up correctly. This helps to minimize downtime and prevent potential errors or issues that could impact your production environment.
"}, {"location": "user/what-is-kpops/#example", "title": "Example", "text": "An overview of Word-count pipeline shown in Streams Explorer Word-count pipeline.yaml
KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.
Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.
To learn about any of the available settings, take a look at the example below.
config.yaml
# CONFIGURATION\n#\n# Base directory to the pipelines (default is current working directory)\npipeline_base_dir: .\n# The Kafka brokers address.\n# REQUIRED\nkafka_brokers: \"http://broker1:9092,http://broker2:9092\"\n# Configure the topic name variables you can use in the pipeline definition.\ntopic_name_config:\n # Configures the value for the variable ${output_topic_name}\n default_output_topic_name: ${pipeline.name}-${component.name}\n # Configures the value for the variable ${error_topic_name}\n default_error_topic_name: ${pipeline.name}-${component.name}-error\n# Configuration for Schema Registry.\nschema_registry:\n # Whether the Schema Registry handler should be initialized.\n enabled: false\n # Address of the Schema Registry.\n url: \"http://localhost:8081\"\n# Configuration for the Kafka REST Proxy.\nkafka_rest:\n # Address of the Kafka REST Proxy.\n url: \"http://localhost:8082\"\n# Configuration for Kafka Connect.\nkafka_connect:\n # Address of Kafka Connect.\n url: \"http://localhost:8083\"\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n # Name of kubeconfig context (`--kube-context`)\n context: name\n # Run Helm in Debug mode.\n debug: false\n # Kubernetes API version used for Capabilities.APIVersions\n api_version: null\n# Configure Helm Diff.\nhelm_diff_config:\n # Set of keys that should not be checked.\n ignore:\n - name\n - imageTag\n# Whether to retain clean up jobs in the cluster or uninstall the, after\n# completion.\nretain_clean_jobs: false\n
Environment-specific pipeline definitions
Similarly to defaults, it is possible to have an unlimited number of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.
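For example, with environment set to development, a hypothetical layout could look like this:
pipeline.yaml             # base pipeline definition, used for every environment\npipeline_development.yaml # picked up when environment=development (analogous to defaults_development.yaml)\n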
KPOps has a very efficient way of dealing with repeating settings, which manifests as the defaults.yaml file. It provides the user with the power to set defaults for any and all components, thus eliminating the need to repeat the same settings in pipeline.yaml.
An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.
It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.
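A small sketch of that (not recommended) pattern, using the offset_topic attribute that only applies to source connectors:
# defaults.yaml\nkafka-connector:\n  # offset_topic is specific to kafka-source-connector,\n  # but is set here on the parent for all connectors\n  offset_topic: offset_topic\n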
KPOps allows using multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These will be picked up by KPOps and merged into a single pipeline.yaml file. KPOps starts reading the defaults files from where the pipeline path is defined and picks up every defaults file on its way to where the pipeline_base_dir is defined.
The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.
It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.
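A minimal, hypothetical sketch of the merge behaviour (the paths and values are made up):
# pipelines/defaults.yaml (closer to pipeline_base_dir)\nstreams-app:\n  app:\n    replicaCount: 3\n    imageTag: \"1.0.0\"\n\n# pipelines/my-team/defaults.yaml (deeper, i.e. closer to pipeline.yaml, wins on conflicts)\nstreams-app:\n  app:\n    replicaCount: 1\n\n# Effective defaults for streams-app:\n#   replicaCount: 1   (from the deeper file)\n#   imageTag: \"1.0.0\" (from the higher-level file)\n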
defaults merge priority
Imagine the following folder structure, where the pipeline_base_dir is configured to pipelines:
The defaults codeblocks in this section contain the full set of settings that are specific to the component. If a setting already exists in a parent config, it will not be included in the child's.
# Base Kubernetes App\n#\n# Parent of: HelmApp\n# Child of: PipelineComponent\nkubernetes-app:\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example.\n app: # required\n image: exampleImage # Example\n debug: false # Example\n commandLine: {} # Example\n
# Base component for Kafka-based components.\n#\n# Parent of: ProducerApp, StreamsApp\n# Child of: KubernetesApp\nkafka-app:\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` can contain application-specific settings, hence the user is free to\n # add the key-value pairs they need.\n app: # required\n streams: # required\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n nameOverride: override-with-this-name # kafka-app-specific\n imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
# StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n # No arbitrary keys are allowed under `app`here\n # Allowed configs:\n # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n app: # required\n # Streams Bootstrap streams section\n streams: # required, streams-app-specific\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n inputTopics:\n - topic1\n - topic2\n outputTopic: output-topic\n inputPattern: input-pattern\n extraInputTopics:\n input_role1:\n - input_topic1\n - input_topic2\n input_role2:\n - input_topic3\n - input_topic4\n extraInputPatterns:\n pattern_role1: input_pattern1\n extraOutputTopics:\n output_role1: output_topic1\n output_role2: output_topic2\n errorTopic: error-topic\n config:\n my.streams.config: my.value\n nameOverride: override-with-this-name # streams-app-specific\n autoscaling: # streams-app-specific\n consumerGroup: consumer-group # required\n lagThreshold: 0 # Average target value to trigger scaling actions.\n enabled: false # Whether to enable auto-scaling using KEDA.\n # This is the interval to check each trigger on.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n pollingInterval: 30\n # The period to wait after the last trigger reported active before scaling\n # the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n cooldownPeriod: 300\n # The offset reset policy for the consumer if the the consumer group is\n # not yet subscribed to a partition.\n offsetResetPolicy: earliest\n # This setting is passed to the HPA definition that KEDA will create for a\n # given resource and holds the maximum number of replicas of the target resouce.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n maxReplicas: 1\n # Minimum number of replicas KEDA will scale the resource down to.\n # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n minReplicas: 0\n # If this property is set, KEDA will scale the resource down to this\n # number of replicas.\n # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n idleReplicas: 0\n topics: # List of auto-generated Kafka Streams topics used by the streams app.\n - topic1\n - topic2\n
# Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example. Extensive documentation on\n # connectors: https://kafka.apache.org/documentation/#connectconfigs\n app: # required\n tasks.max: 1\n # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n # Image Tag etc.\n resetter_values:\n imageTag: \"1.2.3\"\n
# Kubernetes app managed through Helm with an associated Helm chart\n- type: helm-app\n name: helm-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example.\n app: # required\n image: exampleImage # Example\n debug: false # Example\n commandLine: {} # Example\n # Helm repository configuration (optional)\n # If not set the helm repo add will not be called. Useful when using local Helm charts\n repo_config:\n repository_name: bakdata-streams-bootstrap # required\n url: https://bakdata.github.io/streams-bootstrap/ # required\n repo_auth_flags:\n username: user\n password: pass\n ca_file: /home/user/path/to/ca-file\n insecure_skip_tls_verify: false\n version: \"1.0.0\" # Helm chart version\n
# Base component for Kafka-based components.\n# Producer or streaming apps should inherit from this class.\n- type: kafka-app # required\n name: kafka-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` can contain application-specific settings, hence the user is free to\n # add the key-value pairs they need.\n app: # required\n streams: # required\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n nameOverride: override-with-this-name # kafka-app-specific\n imageTag: \"1.0.0\" # Example values that are shared between streams-app and producer-app\n
KafkaConnector is a component that deploys Kafka Connectors. Since a connector is always either a sink or a source, it is not recommended to use KafkaConnector directly for deployment in pipeline.yaml. Instead, KafkaConnector should be used in defaults.yaml to set defaults for all connectors in the pipeline, as they can share some common settings.
# Kafka sink connector\n- type: kafka-sink-connector\n name: kafka-sink-connector # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example. Extensive documentation on\n # connectors: https://kafka.apache.org/documentation/#connectconfigs\n app: # required\n tasks.max: 1\n # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n # Image Tag etc.\n resetter_values:\n imageTag: \"1.2.3\"\n
# Kafka source connector\n- type: kafka-source-connector # required\n name: kafka-source-connector # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n # The source connector has no `from` section\n # from:\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example. Extensive documentation on\n # connectors: https://kafka.apache.org/documentation/#connectconfigs\n app: # required\n tasks.max: 1\n # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n # Image Tag etc.\n resetter_values:\n imageTag: \"1.2.3\"\n # offset.storage.topic\n # https://kafka.apache.org/documentation/#connect_running\n offset_topic: offset_topic\n
# Base Kubernetes App\n- type: kubernetes-app\n name: kubernetes-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n from: # Must not be null\n topics: # read from topic\n ${pipeline.name}-input-topic:\n type: input # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra\n ${pipeline.name}-input-pattern-topic:\n type: pattern # Implied to be an input pattern if `role` is undefined\n ${pipeline.name}-extra-pattern-topic:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n components: # read from specific component\n account-producer:\n type: input # Implied when role is NOT specified\n other-producer:\n role: some-role # Implies `type` to be extra\n component-as-input-pattern:\n type: pattern # Implied to be an input pattern if `role` is undefined\n component-as-extra-pattern:\n type: pattern # Implied to be an extra pattern if `role` is defined\n role: some-role\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # `app` contains application-specific settings, hence it does not have a rigid\n # structure. The fields below are just an example.\n app: # required\n image: exampleImage # Example\n debug: false # Example\n commandLine: {} # Example\n
# Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n name: producer-app # required\n # Pipeline prefix that will prefix every component name. If you wish to not\n # have any prefix you can specify an empty string.\n prefix: ${pipeline.name}-\n # from: # While the producer-app does inherit from kafka-app, it does not need a\n # `from` section, hence it does not support it.\n # Topic(s) into which the component will write output\n to:\n topics:\n ${pipeline.name}-output-topic:\n type: output # Implied when role is NOT specified\n ${pipeline.name}-extra-topic:\n role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n ${pipeline.name}-error-topic:\n type: error\n # Currently KPOps supports Avro and JSON schemas.\n key_schema: key-schema # must implement SchemaProvider to use\n value_schema: value-schema\n partitions_count: 1\n replication_factor: 1\n configs: # https://kafka.apache.org/documentation/#topicconfigs\n cleanup.policy: compact\n models: # SchemaProvider is initiated with the values given here\n model: model\n namespace: namespace # required\n # Allowed configs:\n # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n app: # required\n streams: # required, producer-app-specific\n brokers: ${config.kafka_brokers} # required\n schemaRegistryUrl: ${config.schema_registry.url}\n outputTopic: output_topic\n extraOutputTopics:\n output_role1: output_topic1\n output_role2: output_topic2\n nameOverride: override-with-this-name # kafka-app-specific\n # Helm repository configuration (optional)\n # If not set the helm repo add will not be called. Useful when using local Helm charts\n repo_config:\n repository_name: bakdata-streams-bootstrap # required\n url: https://bakdata.github.io/streams-bootstrap/ # required\n repo_auth_flags:\n username: user\n password: pass\n ca_file: /home/user/path/to/ca-file\n insecure_skip_tls_verify: false\n version: \"2.12.0\" # Helm chart version\n
# StreamsApp component that configures a streams bootstrap app.
# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap
- type: streams-app # required
  name: streams-app # required
  # Pipeline prefix that will prefix every component name. If you wish to not
  # have any prefix you can specify an empty string.
  prefix: ${pipeline.name}-
  from: # Must not be null
    topics: # read from topic
      ${pipeline.name}-input-topic:
        type: input # Implied when role is NOT specified
      ${pipeline.name}-extra-topic:
        role: topic-role # Implies `type` to be extra
      ${pipeline.name}-input-pattern-topic:
        type: pattern # Implied to be an input pattern if `role` is undefined
      ${pipeline.name}-extra-pattern-topic:
        type: pattern # Implied to be an extra pattern if `role` is defined
        role: some-role
    components: # read from specific component
      account-producer:
        type: input # Implied when role is NOT specified
      other-producer:
        role: some-role # Implies `type` to be extra
      component-as-input-pattern:
        type: pattern # Implied to be an input pattern if `role` is undefined
      component-as-extra-pattern:
        type: pattern # Implied to be an extra pattern if `role` is defined
        role: some-role
  # Topic(s) into which the component will write output
  to:
    topics:
      ${pipeline.name}-output-topic:
        type: output # Implied when role is NOT specified
      ${pipeline.name}-extra-topic:
        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined
      ${pipeline.name}-error-topic:
        type: error
        # Currently KPOps supports Avro and JSON schemas.
        key_schema: key-schema # must implement SchemaProvider to use
        value_schema: value-schema
        partitions_count: 1
        replication_factor: 1
        configs: # https://kafka.apache.org/documentation/#topicconfigs
          cleanup.policy: compact
    models: # SchemaProvider is initiated with the values given here
      model: model
  namespace: namespace # required
  # No arbitrary keys are allowed under `app` here
  # Allowed configs:
  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app
  app: # required
    # Streams Bootstrap streams section
    streams: # required, streams-app-specific
      brokers: ${config.kafka_brokers} # required
      schemaRegistryUrl: ${config.schema_registry.url}
      inputTopics:
        - topic1
        - topic2
      outputTopic: output-topic
      inputPattern: input-pattern
      extraInputTopics:
        input_role1:
          - input_topic1
          - input_topic2
        input_role2:
          - input_topic3
          - input_topic4
      extraInputPatterns:
        pattern_role1: input_pattern1
      extraOutputTopics:
        output_role1: output_topic1
        output_role2: output_topic2
      errorTopic: error-topic
      config:
        my.streams.config: my.value
    nameOverride: override-with-this-name # streams-app-specific
    autoscaling: # streams-app-specific
      consumerGroup: consumer-group # required
      lagThreshold: 0 # Average target value to trigger scaling actions.
      enabled: false # Whether to enable auto-scaling using KEDA.
      # This is the interval to check each trigger on.
      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval
      pollingInterval: 30
      # The period to wait after the last trigger reported active before scaling
      # the resource back to 0.
      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod
      cooldownPeriod: 300
      # The offset reset policy for the consumer if the consumer group is
      # not yet subscribed to a partition.
      offsetResetPolicy: earliest
      # This setting is passed to the HPA definition that KEDA will create for a
      # given resource and holds the maximum number of replicas of the target resource.
      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount
      maxReplicas: 1
      # Minimum number of replicas KEDA will scale the resource down to.
      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount
      minReplicas: 0
      # If this property is set, KEDA will scale the resource down to this
      # number of replicas.
      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount
      idleReplicas: 0
      topics: # List of auto-generated Kafka Streams topics used by the streams app.
        - topic1
        - topic2
  # Helm repository configuration (optional)
  # If not set the helm repo add will not be called. Useful when using local Helm charts
  repo_config:
    repository_name: bakdata-streams-bootstrap # required
    url: https://bakdata.github.io/streams-bootstrap/ # required
    repo_auth_flags:
      username: user
      password: pass
      ca_file: /home/user/path/to/ca-file
      insecure_skip_tls_verify: false
  version: "2.12.0" # Helm chart version
Environment variables can be set by using the export command in Linux or the set command in Windows.
dotenv files
KPOps currently supports .env files only for variables related to the config. Full support for .env files is on the roadmap. One possible way to use such a file is to export its contents manually with the following command: export $(xargs < .env). This works in bash, provided there are no spaces inside the values.
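For illustration, here is a minimal sketch of that manual export. The file contents are placeholders; only the documented KPOPS_* config variables are used, and the broker address is made up:

# create a hypothetical .env file (values are placeholders)
cat > .env <<'EOF'
KPOPS_KAFKA_BROKERS=broker-0.kafka:9092
KPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/
EOF

# export every variable from the file into the current shell session
export $(xargs < .env)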
These variables take precedence over the settings in config.yaml. Variables marked as required can instead be set in the global config.
| Name | Default Value | Required | Description | Setting name |
| ---- | ------------- | -------- | ----------- | ------------ |
| KPOPS_PIPELINE_BASE_DIR | . | False | Base directory to the pipelines (default is current working directory) | pipeline_base_dir |
| KPOPS_KAFKA_BROKERS | | True | The comma-separated Kafka brokers address. | kafka_brokers |
| KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME | ${pipeline.name}-${component.name} | False | Configures the value for the variable ${output_topic_name} | topic_name_config.default_output_topic_name |
| KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME | ${pipeline.name}-${component.name}-error | False | Configures the value for the variable ${error_topic_name} | topic_name_config.default_error_topic_name |
| KPOPS_SCHEMA_REGISTRY__ENABLED | False | False | Whether the Schema Registry handler should be initialized. | schema_registry.enabled |
| KPOPS_SCHEMA_REGISTRY__URL | http://localhost:8081/ | False | Address of the Schema Registry. | schema_registry.url |
| KPOPS_SCHEMA_REGISTRY__TIMEOUT | 30 | False | Operation timeout in seconds. | schema_registry.timeout |
| KPOPS_KAFKA_REST__URL | http://localhost:8082/ | False | Address of the Kafka REST Proxy. | kafka_rest.url |
| KPOPS_KAFKA_REST__TIMEOUT | 30 | False | Operation timeout in seconds. | kafka_rest.timeout |
| KPOPS_KAFKA_CONNECT__URL | http://localhost:8083/ | False | Address of Kafka Connect. | kafka_connect.url |
| KPOPS_KAFKA_CONNECT__TIMEOUT | 30 | False | Operation timeout in seconds. | kafka_connect.timeout |
| KPOPS_CREATE_NAMESPACE | False | False | Flag for helm upgrade --install. Create the release namespace if not present. | create_namespace |
| KPOPS_HELM_CONFIG__CONTEXT | | False | Name of kubeconfig context (--kube-context) | helm_config.context |
| KPOPS_HELM_CONFIG__DEBUG | False | False | Run Helm in Debug mode | helm_config.debug |
| KPOPS_HELM_CONFIG__API_VERSION | | False | Kubernetes API version used for Capabilities.APIVersions | helm_config.api_version |
| KPOPS_HELM_DIFF_CONFIG__IGNORE | | True | Set of keys that should not be checked. | helm_diff_config.ignore |
| KPOPS_RETAIN_CLEAN_JOBS | False | False | Whether to retain clean up jobs in the cluster or uninstall them after completion. | retain_clean_jobs |

config_env_vars.env (exhaustive list of all config-related environment variables)
# Global config environment variables
#
# The default setup is shown. These variables take precedence over the
# settings in `config.yaml`. Variables marked as required can instead
# be set in the global config.
#
# pipeline_base_dir
# Base directory to the pipelines (default is current working
# directory)
KPOPS_PIPELINE_BASE_DIR=.
# kafka_brokers
# The comma separated Kafka brokers address.
KPOPS_KAFKA_BROKERS # No default value, required
# topic_name_config.default_output_topic_name
# Configures the value for the variable ${output_topic_name}
KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}
# topic_name_config.default_error_topic_name
# Configures the value for the variable ${error_topic_name}
KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error
# schema_registry.enabled
# Whether the Schema Registry handler should be initialized.
KPOPS_SCHEMA_REGISTRY__ENABLED=False
# schema_registry.url
# Address of the Schema Registry.
KPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/
# schema_registry.timeout
# Operation timeout in seconds.
KPOPS_SCHEMA_REGISTRY__TIMEOUT=30
# kafka_rest.url
# Address of the Kafka REST Proxy.
KPOPS_KAFKA_REST__URL=http://localhost:8082/
# kafka_rest.timeout
# Operation timeout in seconds.
KPOPS_KAFKA_REST__TIMEOUT=30
# kafka_connect.url
# Address of Kafka Connect.
KPOPS_KAFKA_CONNECT__URL=http://localhost:8083/
# kafka_connect.timeout
# Operation timeout in seconds.
KPOPS_KAFKA_CONNECT__TIMEOUT=30
# create_namespace
# Flag for `helm upgrade --install`. Create the release namespace if
# not present.
KPOPS_CREATE_NAMESPACE=False
# helm_config.context
# Name of kubeconfig context (`--kube-context`)
KPOPS_HELM_CONFIG__CONTEXT # No default value, not required
# helm_config.debug
# Run Helm in Debug mode
KPOPS_HELM_CONFIG__DEBUG=False
# helm_config.api_version
# Kubernetes API version used for `Capabilities.APIVersions`
KPOPS_HELM_CONFIG__API_VERSION # No default value, not required
# helm_diff_config.ignore
# Set of keys that should not be checked.
KPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required
# retain_clean_jobs
# Whether to retain clean up jobs in the cluster or uninstall them
# after completion.
KPOPS_RETAIN_CLEAN_JOBS=False
These variables take precedence over the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.
| Name | Default Value | Required | Description |
| ---- | ------------- | -------- | ----------- |
| KPOPS_CONFIG_PATH | . | False | Path to the dir containing config.yaml files |
| KPOPS_DOTENV_PATH | | False | Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. |
| KPOPS_ENVIRONMENT | | False | The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). |
| KPOPS_PIPELINE_PATHS | | True | Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. |
| KPOPS_PIPELINE_STEPS | | False | Comma separated list of steps to apply the command on |

cli_env_vars.env (exhaustive list of all CLI-related environment variables)
# CLI Environment variables
#
# The default setup is shown. These variables take precedence over the
# commands' flags. If a variable is set, the corresponding flag does
# not have to be specified in commands. Variables marked as required
# can instead be set as flags.
#
# Path to the dir containing config.yaml files
KPOPS_CONFIG_PATH=.
# Path to dotenv file. Multiple files can be provided. The files will
# be loaded in order, with each file overriding the previous one.
KPOPS_DOTENV_PATH # No default value, not required
# The environment you want to generate and deploy the pipeline to.
# Suffix your environment files with this value (e.g.
# defaults_development.yaml for environment=development).
KPOPS_ENVIRONMENT # No default value, not required
# Paths to dir containing 'pipeline.yaml' or files named
# 'pipeline.yaml'.
KPOPS_PIPELINE_PATHS # No default value, required
# Comma separated list of steps to apply the command on
KPOPS_PIPELINE_STEPS # No default value, not required
These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.
All of them are prefixed with component. and take the form component.{attribute_name}. If the attribute itself contains attributes, they can be referred to as component.{attribute_name}.{subattribute_name}.
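As an illustration only (the defaults entry below is hypothetical), a component can reuse its own attributes like this:

# defaults.yaml (sketch): derive the error topic name from the component's own attributes
streams-app:
  to:
    topics:
      ${component.namespace}-${component.name}-error-topic:
        type: error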
These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.
All such variables are prefixed with config. and are of the same form as the component-specific variables.
Info
error_topic_name is an alias for config.topic_name_config.default_error_topic_name
output_topic_name is an alias for config.topic_name_config.default_output_topic_name
Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation, following the form ${ENV_VAR_NAME}. This, of course, includes variables like the ones relevant to the KPOps CLI that are exported by the user.
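A short sketch combining both kinds of substitution; the keys shown (brokers, schemaRegistryUrl, nameOverride) come from the component examples above, while MY_RELEASE_NAME is a hypothetical environment variable exported by the user:

- type: streams-app
  name: my-streams-app
  app:
    streams:
      brokers: ${config.kafka_brokers}                 # value from config.yaml
      schemaRegistryUrl: ${config.schema_registry.url} # nested config value
    nameOverride: ${MY_RELEASE_NAME}                   # plain environment variable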
See all KPOps environment variables
"}, {"location": "user/core-concepts/variables/substitution/#pipeline-name-variables", "title": "Pipeline name variables", "text": "
These are special variables that refer to the name and path of a pipeline.
${pipeline.name}: Concatenated path of the parent directory in which pipeline.yaml is defined. For instance, for ./data/pipelines/v1/pipeline.yaml the value of the variable would be data-pipelines-v1.
${pipeline_name_<level>}: Similar to the previous variable, each <level> contains one part of the path to the pipeline.yaml file. Considering the previous example, ${pipeline_name_0} would be data, ${pipeline_name_1} would be pipelines, and ${pipeline_name_2} equals v1.
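To make the mapping concrete, for a pipeline defined at ./data/pipelines/v1/pipeline.yaml the variables resolve as follows (illustrative snippet, not taken from a real pipeline):

# ${pipeline.name}   -> data-pipelines-v1
# ${pipeline_name_0} -> data
# ${pipeline_name_1} -> pipelines
# ${pipeline_name_2} -> v1
to:
  topics:
    ${pipeline.name}-output-topic: # becomes data-pipelines-v1-output-topic
      type: output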
"}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
Chaining variables: It is possible to chain any number of variables, see the example above.
Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references.
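One possible way to do this uses standard YAML anchors and aliases; this is plain YAML rather than a KPOps-specific feature, and the component names below are hypothetical:

- type: streams-app
  name: word-counter
  app: &counter-app
    streams:
      brokers: ${config.kafka_brokers}
      schemaRegistryUrl: ${config.schema_registry.url}
- type: streams-app
  name: word-counter-replica
  app: *counter-app # reuses the values defined for word-counter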
ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blog post. The one used in this example is rebuilt from scratch using bakdata's streams-bootstrap library.
"}, {"location": "user/examples/atm-fraud-pipeline/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.
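The service names and namespace below are placeholders for whatever your Kafka deployment created; the local ports match the default kafka_rest.url and kafka_connect.url settings:

# terminal 1: Kafka REST Proxy on http://localhost:8082
kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082

# terminal 2: Kafka Connect on http://localhost:8083
kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083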
You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be deployed correctly.
"}, {"location": "user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "
You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:
After that, open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:
An overview of ATM fraud pipeline shown in Streams Explorer
Attention
Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.
Word-count is a demo pipeline consisting of a producer producing words to Kafka, a Kafka streams app counting the number of times each word occurs, and finally a Redis database into which the words are exported.
"}, {"location": "user/getting-started/quick-start/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.
You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be deployed correctly.
"}, {"location": "user/getting-started/quick-start/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "
You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:
After that, open http://localhost:8080 in your browser.
You should be able to see the pipeline shown in the image below:
An overview of Word-count pipeline shown in Streams Explorer
Attention
Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.
k3d (Version 5.4.6+) and Docker (Version >= v20.10.5) or an existing Kubernetes cluster (>= 1.21.0)
kubectl (Compatible with server version 1.21.0)
Helm (Version 3.8.0+)
"}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "
If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.
For other ways of installing k3d, you can have a look at their installation guide.
The Kafka deployment needs a modified Docker image, which has to be built and pushed to a Docker registry that holds it. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:
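A minimal sketch; the registry name, port, and cluster name are placeholders you can change freely:

# create a local registry for the modified Kafka Connect image
k3d registry create kpops-registry.localhost --port 12345

# create a cluster that is allowed to pull from that registry
k3d cluster create kpops --registry-use kpops-registry.localhost:12345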
Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.
You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.
Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.
To allow connectivity to other systems Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:
Create a Dockerfile with the following content:
FROM confluentinc/cp-kafka-connect:7.1.3

RUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0
Build and push the modified image to your private Docker registry:
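For example, assuming the hypothetical local registry created in the k3d section above and the Dockerfile from the previous step:

docker build -t kpops-registry.localhost:12345/kafka-connect-jdbc:7.1.3 .
docker push kpops-registry.localhost:12345/kafka-connect-jdbc:7.1.3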
Install Kafka, ZooKeeper, Confluent's Schema Registry, Kafka REST Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example of the --values ./kafka.yaml file that configures the deployment accordingly. Deploy the services:
An example values configuration for Confluent's Helm chart. This configuration deploys a single Kafka broker, a Schema Registry, ZooKeeper, Kafka REST Proxy, and Kafka Connect with minimal resources.
Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster including the inspection of schemas and monitoring of metrics. First, add the Helm repository:
The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with Helm (see section Infrastructure).
Navigate to the examples folder. Replace <name-of-the-example-directory> with the example you want to tear down, for example atm-fraud-detection.
Remove the pipeline
# Uncomment 1 line to either destroy, reset or clean.

# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \
# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \
# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \
--config <name-of-the-example-directory>/config.yaml \
--execute
If kpops destroy does not work, you can uninstall the pipeline services one by one; this is equivalent to running kpops destroy. If a clean uninstall (like the one kpops clean performs) is needed, you also need to delete the topics and schemas created by the deployment of the pipeline.
"}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "
KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention, the type is the lowercase, kebab-cased name of the class.
class MyCoolStreamApp(StreamsApp):
-    type = "my-cool-stream-app"
+    ...
Because of this new convention, producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.
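A rough sketch of that naming convention (not KPOps' actual implementation, just an illustration of the lowercase kebab-case rule):

import re

def derive_component_type(cls: type) -> str:
    """Turn a class name like 'MyCoolStreamApp' into 'my-cool-stream-app'."""
    # insert a dash wherever a lowercase letter or digit is followed by an uppercase letter
    return re.sub(r"(?<=[a-z0-9])(?=[A-Z])", "-", cls.__name__).lower()

class MyCoolStreamApp:  # stand-in for a custom StreamsApp subclass
    ...

assert derive_component_type(MyCoolStreamApp) == "my-cool-stream-app"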
"}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "
All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example of a pipeline.yaml in the following. Notice that the app section here remains untouched.
If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, note that the abstract function get_helm_chart, which returns the chart for deploying the app using Helm, is now a Python property and has been renamed to helm_chart.
If you previously set the environment variable KPOPS_KAFKA_BROKER, you now need to replace it with KPOPS_KAFKA_BROKERS.
"}, {"location": "user/migration-guide/v2-v3/", "title": "Migrate from V2 to V3", "text": "
Jump to the summary
"}, {"location": "user/migration-guide/v2-v3/#use-hash-and-trim-long-helm-release-names-instead-of-only-trimming", "title": "Use hash and trim long Helm release names instead of only trimming", "text": "
KPOps now handles long Helm release names (more than 53 characters) differently. Helm will not find your (long) old release names anymore. Therefore, it is recommended to destroy your pipeline once with KPOps v2 to remove the old Helm release names. After a clean destroy, re-deploy your pipeline with KPOps v3.
For example, suppose you have a component with the Helm release name example-component-name-too-long-fake-fakefakefakefakefake. The new release name will shorten the original name to 53 characters and then replace the last 6 characters of the trimmed name with the first 5 characters of the result of SHA-1(helm_release_name).
All Helm-specific parts of the built-in KubernetesApp have been extracted to a new child component that is more appropriately named HelmApp. It has to be renamed in your existing pipeline definitions and custom components module.
- from kpops.components import KubernetesApp
+ from kpops.components import HelmApp


- class CustomHelmApp(KubernetesApp):
+ class CustomHelmApp(HelmApp):
      ...
"}, {"location": "user/migration-guide/v2-v3/#create-streamsbootstrap-component-refactor-cleanup-jobs-as-individual-helmapp", "title": "Create StreamsBootstrap component & refactor cleanup jobs as individual HelmApp", "text": "
Previously, the default KafkaApp component configured the streams-bootstrap Helm charts. Now, this component is no longer tied to Helm (or Kubernetes). Instead, there is a new StreamsBootstrap component that configures the Helm chart repository for the components that use it, e.g. StreamsApp and ProducerApp. If you are using non-default values for the Helm chart repository or version, they have to be updated as shown below.
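A sketch of what the updated defaults could look like, using the repo_config and version keys documented in the component examples above; the repository name, URL, and version are placeholders:

# defaults.yaml (sketch)
streams-bootstrap:
  repo_config:
    repository_name: my-custom-streams-bootstrap
    url: https://example.com/my-custom-streams-bootstrap/
  version: "2.13.0"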
Internally, the Kafka Connector resetter is now its own standard HelmApp, removing a lot of the shared code. It is configured using the resetter_namespace (formerly namespace) and resetter_values attributes.
The schema_registry_url is replaced with schema_registry.url (default http://localhost:8081) and schema_registry.enabled (default false).
kafka_rest_host is renamed to kafka_rest.url (default http://localhost:8082).
kafka_connect_host is replaced with kafka_connect.url (default http://localhost:8083).
brokers is renamed to kafka_brokers.
The environment variable names of these config fields changed accordingly. Please refer to the environment variables documentation page to see the newest changes.
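A before/after sketch of the renamed config fields; the values are the placeholder addresses used elsewhere on this page:

# config.yaml before (v2)
brokers: "http://broker1:9092,http://broker2:9092"
kafka_rest_host: "http://localhost:8082"
kafka_connect_host: "http://localhost:8083"
schema_registry_url: "http://localhost:8081"
---
# config.yaml after (v3)
kafka_brokers: "http://broker1:9092,http://broker2:9092"
kafka_rest:
  url: "http://localhost:8082"
kafka_connect:
  url: "http://localhost:8083"
schema_registry:
  enabled: true
  url: "http://localhost:8081"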
Specifying the environment is no longer mandatory. If not defined, only the global files will be used.
environment is no longer specified in config.yaml. Instead, it can be either set via the CLI flag --environment or with the environment variable KPOPS_ENVIRONMENT.
The --config flag in the CLI now points to the directory that contains config*.yaml files. The files to be used are resolved based on the provided (or not) environment.
"}, {"location": "user/migration-guide/v2-v3/#refactor-generate-template-for-python-api-usage", "title": "Refactor generate template for Python API usage", "text": "
The template method of every pipeline component has been renamed to manifest as it is no longer strictly tied to Helm template. Instead, it can be used to render the final resources of a component, such as Kubernetes manifests.
There is also a new kpops manifest command replacing the existing kpops generate --template flag.
If you're using this functionality in your custom components, it needs to be updated.
  from kpops.components.base_components.models.resource import Resource

  @override
- def template(self) -> None:
+ def manifest(self) -> Resource:
      """Render final component resources, e.g. Kubernetes manifests."""
      return []  # list of manifests
The global configuration variables are now namespaced under the config key, such as ${config.kafka_brokers} and ${config.schema_registry.url}. The same applies to pipeline variables, e.g. ${pipeline_name} → ${pipeline.name}. This makes them more uniform with the existing ${component.<key>} variables.
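A before/after sketch of a pipeline.yaml snippet, assuming the pre-v3 placeholders mirrored the old config field names:

# before (v2)
app:
  streams:
    brokers: ${brokers}
    schemaRegistryUrl: ${schema_registry_url}
to:
  topics:
    ${pipeline_name}-output-topic:
      type: output
---
# after (v3)
app:
  streams:
    brokers: ${config.kafka_brokers}
    schemaRegistryUrl: ${config.schema_registry.url}
to:
  topics:
    ${pipeline.name}-output-topic:
      type: output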
- from kpops.components import KubernetesApp
+ from kpops.components import HelmApp
+ from kpops.components.base_components.models.resource import Resource

- class CustomHelmApp(KubernetesApp):
+ class CustomHelmApp(HelmApp):

      @override
-     def template(self) -> None:
+     def manifest(self) -> Resource:
          """Render final component resources, e.g. Kubernetes manifests."""
          return []  # list of manifests
      ...
"}, {"location": "user/migration-guide/v3-v4/", "title": "Migrate from V3 to V4", "text": ""}, {"location": "user/migration-guide/v3-v4/#distribute-defaults-across-multiple-files", "title": "Distribute defaults across multiple files", "text": "
Warning
The --defaults flag is removed
It is now possible to use multiple defaults files. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple files. These are picked up by KPOps and merged into a single pipeline.yaml file. KPOps starts reading the defaults files from where the pipeline path is defined and picks up every defaults file on its way up to where the pipeline_base_dir is defined.
For example, imagine the following folder structure:
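A hypothetical layout (not the one from the original guide) to make the lookup order concrete:

.                                # pipeline_base_dir
├── defaults.yaml                # applies to every pipeline below
└── data/
    ├── defaults.yaml            # applies to all pipelines under data/
    └── pipelines/
        └── v1/
            ├── defaults.yaml    # applies only to this pipeline
            └── pipeline.yaml

In this sketch, generating data/pipelines/v1/pipeline.yaml would pick up all three defaults.yaml files on the way up to pipeline_base_dir.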
"}, {"location": "user/migration-guide/v5-v6/", "title": "Migrate from V5 to V6", "text": ""}, {"location": "user/migration-guide/v5-v6/#deploy-multiple-pipelines", "title": "Deploy multiple pipelines", "text": "
KPOps can now deploy multiple pipelines in a single command. You can pass one or more pipeline.yaml files, or a directory containing multiple pipeline.yaml files.
The environment variable KPOPS_PIPELINE_PATH is changed to KPOPS_PIPELINE_PATHS.
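For example (the paths are illustrative):

# pass several pipeline.yaml files explicitly ...
kpops deploy ./data/pipelines/v1/pipeline.yaml ./data/pipelines/v2/pipeline.yaml --execute

# ... or point KPOps at a directory containing them
kpops deploy ./data/pipelines/ --execute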
Read more:
CLI Usage
Environment variables
"}, {"location": "user/migration-guide/v5-v6/#separate-kpops-api-from-the-cli", "title": "Separate KPOps API from the CLI", "text": "
KPOps Python API is now stable and separated from the CLI! 🎉
"}, {"location": "user/migration-guide/v6-v7/", "title": "Migrate from V6 to V7", "text": ""}, {"location": "user/migration-guide/v6-v7/#automatic-loading-of-namespaced-custom-components", "title": "Automatic loading of namespaced custom components", "text": "
KPOps is now distributed as a Python namespace package (as defined by PEP 420). This allows us to standardize the namespace kpops.components for both builtin and custom pipeline components.
As a result of the restructure, some imports need to be adjusted:
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
Enrich pipeline steps with defaults. The enriched pipeline is used for all KPOps operations (deploy, destroy, ...).
Usage:
$ kpops generate [OPTIONS] PIPELINE_PATHS...
Arguments:
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
PATH: Path for a new KPOps project. It should lead to an empty (or non-existent) directory. The part of the path that doesn't exist will be created. [required]
Options:
--config-include-opt / --no-config-include-opt: Whether to include non-required settings in the generated 'config.yaml' [default: no-config-include-opt]
In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.
Usage:
$ kpops manifest [OPTIONS] PIPELINE_PATHS...
Arguments:
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
Options:
--dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
--steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
--filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: FilterType.INCLUDE]
--environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
--dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
--parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
SCOPE:{pipeline|defaults|config}: Scope of the generated schema
- pipeline: Schema of PipelineComponents for KPOps pipeline.yaml

- defaults: Schema of PipelineComponents for KPOps defaults.yaml

- config: Schema for KPOps config.yaml [required]
We provide a GitHub composite action, bakdata/kpops, that installs and executes KPOps commands with the given parameters.
"}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines config \u274c - string Directory containing the config*.yaml file(s) environment \u274c - string Environment to run KPOps in components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS parallel \u274c \"false\" string Whether to run pipelines in parallel python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
steps:
  # ...
  # This step is useful for debugging reasons
  - name: Generate Kafka pipeline
    uses: bakdata/kpops@main
    with:
      command: generate
      working-directory: home/my-kpops-root-dir
      pipeline: pipelines/my-pipeline-file.yaml
      kpops-version: 1.2.3

  # It is possible to use a pre-release KPOps version from TestPyPI https://test.pypi.org/project/kpops/#history
  - name: Deploy Kafka pipeline
    uses: bakdata/kpops@main
    with:
      command: deploy --execute
      working-directory: home/my-kpops-root-dir
      pipeline: pipelines/my-pipeline-file.yaml
      kpops-version: 1.2.5.dev20230707132709
  # ...
"}]}
\ No newline at end of file
# CONFIGURATION
#
-# Custom Python module defining project-specific KPOps components
-components_module:null
-# Base directory to the pipelines (default is current working directory)
-pipeline_base_dir:.
-# The Kafka brokers address.
-# REQUIRED
-kafka_brokers:"http://broker1:9092,http://broker2:9092"
-# Configure the topic name variables you can use in the pipeline definition.
-topic_name_config:
-# Configures the value for the variable ${output_topic_name}
-default_output_topic_name:${pipeline.name}-${component.name}
-# Configures the value for the variable ${error_topic_name}
-default_error_topic_name:${pipeline.name}-${component.name}-error
-# Configuration for Schema Registry.
-schema_registry:
-# Whether the Schema Registry handler should be initialized.
-enabled:false
-# Address of the Schema Registry.
-url:"http://localhost:8081"
-# Configuration for the Kafka REST Proxy.
-kafka_rest:
-# Address of the Kafka REST Proxy.
-url:"http://localhost:8082"
-# Configuration for Kafka Connect.
-kafka_connect:
-# Address of Kafka Connect.
-url:"http://localhost:8083"
-# The timeout in seconds that specifies when actions like deletion or deploy
-# timeout.
-timeout:300
-# Flag for `helm upgrade --install`.
-# Create the release namespace if not present.
-create_namespace:false
-# Global flags for Helm.
-helm_config:
-# Name of kubeconfig context (`--kube-context`)
-context:name
-# Run Helm in Debug mode.
-debug:false
-# Kubernetes API version used for Capabilities.APIVersions
-api_version:null
-# Configure Helm Diff.
-helm_diff_config:
-# Set of keys that should not be checked.
-ignore:
--name
--imageTag
-# Whether to retain clean up jobs in the cluster or uninstall the, after
-# completion.
-retain_clean_jobs:false
+# Base directory to the pipelines (default is current working directory)
+pipeline_base_dir:.
+# The Kafka brokers address.
+# REQUIRED
+kafka_brokers:"http://broker1:9092,http://broker2:9092"
+# Configure the topic name variables you can use in the pipeline definition.
+topic_name_config:
+# Configures the value for the variable ${output_topic_name}
+default_output_topic_name:${pipeline.name}-${component.name}
+# Configures the value for the variable ${error_topic_name}
+default_error_topic_name:${pipeline.name}-${component.name}-error
+# Configuration for Schema Registry.
+schema_registry:
+# Whether the Schema Registry handler should be initialized.
+enabled:false
+# Address of the Schema Registry.
+url:"http://localhost:8081"
+# Configuration for the Kafka REST Proxy.
+kafka_rest:
+# Address of the Kafka REST Proxy.
+url:"http://localhost:8082"
+# Configuration for Kafka Connect.
+kafka_connect:
+# Address of Kafka Connect.
+url:"http://localhost:8083"
+# Flag for `helm upgrade --install`.
+# Create the release namespace if not present.
+create_namespace:false
+# Global flags for Helm.
+helm_config:
+# Name of kubeconfig context (`--kube-context`)
+context:name
+# Run Helm in Debug mode.
+debug:false
+# Kubernetes API version used for Capabilities.APIVersions
+api_version:null
+# Configure Helm Diff.
+helm_diff_config:
+# Set of keys that should not be checked.
+ignore:
+-name
+-imageTag
+# Whether to retain clean up jobs in the cluster or uninstall them after
+# completion.
+retain_clean_jobs:false
# Global config environment variables
#
# The default setup is shown. These variables take precedence over the
# settings in `config.yaml`. Variables marked as required can instead
# be set in the global config.
#
-# components_module
-# Custom Python module defining project-specific KPOps components
-KPOPS_COMPONENTS_MODULE# No default value, not required
-# pipeline_base_dir
-# Base directory to the pipelines (default is current working
-# directory)
-KPOPS_PIPELINE_BASE_DIR=.
-# kafka_brokers
-# The comma separated Kafka brokers address.
-KPOPS_KAFKA_BROKERS# No default value, required
-# topic_name_config.default_output_topic_name
-# Configures the value for the variable ${output_topic_name}
-KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}
-# topic_name_config.default_error_topic_name
-# Configures the value for the variable ${error_topic_name}
-KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error
-# schema_registry.enabled
-# Whether the Schema Registry handler should be initialized.
-KPOPS_SCHEMA_REGISTRY__ENABLED=False
-# schema_registry.url
-# Address of the Schema Registry.
-KPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/
-# schema_registry.timeout
-# Operation timeout in seconds.
-KPOPS_SCHEMA_REGISTRY__TIMEOUT=30
-# kafka_rest.url
-# Address of the Kafka REST Proxy.
-KPOPS_KAFKA_REST__URL=http://localhost:8082/
-# kafka_rest.timeout
-# Operation timeout in seconds.
-KPOPS_KAFKA_REST__TIMEOUT=30
-# kafka_connect.url
-# Address of Kafka Connect.
-KPOPS_KAFKA_CONNECT__URL=http://localhost:8083/
-# kafka_connect.timeout
-# Operation timeout in seconds.
-KPOPS_KAFKA_CONNECT__TIMEOUT=30
-# create_namespace
-# Flag for `helm upgrade --install`. Create the release namespace if
-# not present.
-KPOPS_CREATE_NAMESPACE=False
-# helm_config.context
-# Name of kubeconfig context (`--kube-context`)
-KPOPS_HELM_CONFIG__CONTEXT# No default value, not required
-# helm_config.debug
-# Run Helm in Debug mode
-KPOPS_HELM_CONFIG__DEBUG=False
-# helm_config.api_version
-# Kubernetes API version used for `Capabilities.APIVersions`
-KPOPS_HELM_CONFIG__API_VERSION# No default value, not required
-# helm_diff_config.ignore
-# Set of keys that should not be checked.
-KPOPS_HELM_DIFF_CONFIG__IGNORE# No default value, required
-# retain_clean_jobs
-# Whether to retain clean up jobs in the cluster or uninstall the,
-# after completion.
-KPOPS_RETAIN_CLEAN_JOBS=False
+# pipeline_base_dir
+# Base directory to the pipelines (default is current working
+# directory)
+KPOPS_PIPELINE_BASE_DIR=.
+# kafka_brokers
+# The comma separated Kafka brokers address.
+KPOPS_KAFKA_BROKERS# No default value, required
+# topic_name_config.default_output_topic_name
+# Configures the value for the variable ${output_topic_name}
+KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}
+# topic_name_config.default_error_topic_name
+# Configures the value for the variable ${error_topic_name}
+KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error
+# schema_registry.enabled
+# Whether the Schema Registry handler should be initialized.
+KPOPS_SCHEMA_REGISTRY__ENABLED=False
+# schema_registry.url
+# Address of the Schema Registry.
+KPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/
+# schema_registry.timeout
+# Operation timeout in seconds.
+KPOPS_SCHEMA_REGISTRY__TIMEOUT=30
+# kafka_rest.url
+# Address of the Kafka REST Proxy.
+KPOPS_KAFKA_REST__URL=http://localhost:8082/
+# kafka_rest.timeout
+# Operation timeout in seconds.
+KPOPS_KAFKA_REST__TIMEOUT=30
+# kafka_connect.url
+# Address of Kafka Connect.
+KPOPS_KAFKA_CONNECT__URL=http://localhost:8083/
+# kafka_connect.timeout
+# Operation timeout in seconds.
+KPOPS_KAFKA_CONNECT__TIMEOUT=30
+# create_namespace
+# Flag for `helm upgrade --install`. Create the release namespace if
+# not present.
+KPOPS_CREATE_NAMESPACE=False
+# helm_config.context
+# Name of kubeconfig context (`--kube-context`)
+KPOPS_HELM_CONFIG__CONTEXT# No default value, not required
+# helm_config.debug
+# Run Helm in Debug mode
+KPOPS_HELM_CONFIG__DEBUG=False
+# helm_config.api_version
+# Kubernetes API version used for `Capabilities.APIVersions`
+KPOPS_HELM_CONFIG__API_VERSION# No default value, not required
+# helm_diff_config.ignore
+# Set of keys that should not be checked.
+KPOPS_HELM_DIFF_CONFIG__IGNORE# No default value, required
+# retain_clean_jobs
+# Whether to retain clean up jobs in the cluster or uninstall them
+# after completion.
+KPOPS_RETAIN_CLEAN_JOBS=False
KPOps is now distributed as a Python namespace package (as defined by PEP 420). This allows us to standardize the namespace kpops.components for both builtin and custom pipeline components.
+
As a result of the restructure, some imports need to be adjusted:
pipeline: Schema of PipelineComponents. Includes the built-in KPOps components by default. To include custom components, provide components module in config.
- pipeline: Schema of PipelineComponents for KPOps pipeline.yaml
-config: Schema of KpopsConfig. [required]
+- defaults: Schema of PipelineComponents for KPOps defaults.yaml
+
+- config: Schema for KPOps config.yaml [required]
Options:
-
--config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
-
--include-stock-components / --no-include-stock-components: Include the built-in KPOps components. [default: include-stock-components]