diff --git a/dev/developer/api/index.html b/dev/developer/api/index.html index 66378e327..1bae44e65 100644 --- a/dev/developer/api/index.html +++ b/dev/developer/api/index.html @@ -1616,6 +1616,13 @@ manifest + + +
  • + + patch + +
  • @@ -1664,13 +1671,6 @@ add -
  • - -
  • - - add_levels - -
  • @@ -1908,6 +1908,13 @@ manifest +
  • + +
  • + + patch + +
  • @@ -1956,13 +1963,6 @@ add -
  • - -
  • - - add_levels - -
  • @@ -3623,6 +3623,66 @@

    +

    + patch + + +

    +
    patch(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), environment: str | None = None, verbose: bool = True) -> list[Resource]
    +
    + +
    + +
    + Source code in kpops/api/__init__.py +
    def patch(
    +    pipeline_path: Path,
    +    dotenv: list[Path] | None = None,
    +    config: Path = Path(),
    +    environment: str | None = None,
    +    verbose: bool = True,
    +) -> list[Resource]:
    +    pipeline = generate(
    +        pipeline_path=pipeline_path,
    +        dotenv=dotenv,
    +        config=config,
    +        environment=environment,
    +        verbose=verbose,
    +    )
    +    resources: list[Resource] = []
    +
    +    for component in pipeline.components:
    +        resource = component.manifest_deploy()
    +        resources.append(resource)
    +    return resources
    +
    +
    +
    + +
    + +
    + +
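As an illustration of the new entrypoint above, here is a minimal usage sketch. It assumes `patch` is importable from `kpops.api` like the other API functions documented on this page; the pipeline path and environment name are placeholders.

```python
from pathlib import Path

from kpops.api import patch

# Placeholder pipeline definition and environment for illustration.
resources = patch(
    pipeline_path=Path("./pipeline.yaml"),
    environment="dev",
    verbose=False,
)
for resource in resources:
    print(resource)
```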

    reset @@ -4141,7 +4201,11 @@

    205 206 207 -208

    class Pipeline(BaseModel):
    +208
    +209
    +210
    +211
    +212
    class Pipeline(BaseModel):
         """Pipeline representation."""
     
         _component_index: dict[str, PipelineComponent] = {}
    @@ -4204,115 +4268,119 @@ 

    msg = "Pipeline is not a valid DAG." raise ValueError(msg) - def add_levels(self): - sync_wave = "sync-wave" - for node in nx.topological_sort(self._graph): - node_ = self._graph.nodes[node] - if not len(list(self._graph.predecessors(node))): - node_[sync_wave] = 1 - else: - node_[sync_wave] = ( - max( - self._graph.nodes[n][sync_wave] - for n in self._graph.predecessors(node) - ) - + 1 - ) - if p := self._component_index.get(node): - p.sync_wave = self._graph.nodes[node][sync_wave] - - def to_yaml(self) -> str: - return yaml.dump( - self.model_dump(mode="json", by_alias=True, exclude_none=True)["components"] - ) - - def build_execution_graph( - self, - runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]], - /, - reverse: bool = False, - ) -> Awaitable[None]: - async def run_parallel_tasks( - coroutines: list[Coroutine[Any, Any, None]], - ) -> None: - tasks: list[asyncio.Task[None]] = [] - for coro in coroutines: - tasks.append(asyncio.create_task(coro)) - await asyncio.gather(*tasks) - - async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None: - for pending_task in pending_tasks: - await pending_task + # class ArgoResource(BaseModel): + # sync_wave: int + # resource: Resource + + # def add_levels(self): + # sync_wave = "sync-wave" + # for node in nx.topological_sort(self._graph): + # node_ = self._graph.nodes[node] + # if not len(list(self._graph.predecessors(node))): + # node_[sync_wave] = 1 + # else: + # node_[sync_wave] = ( + # max( + # self._graph.nodes[n][sync_wave] + # for n in self._graph.predecessors(node) + # ) + # + 1 + # ) + # if p := self._component_index.get(node): + # p.sync_wave = self._graph.nodes[node][sync_wave] + + def to_yaml(self) -> str: + return yaml.dump( + self.model_dump(mode="json", by_alias=True, exclude_none=True)["components"] + ) + + def build_execution_graph( + self, + runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]], + /, + reverse: bool = False, + ) -> Awaitable[None]: + async def run_parallel_tasks( + coroutines: list[Coroutine[Any, Any, None]], + ) -> None: + tasks: list[asyncio.Task[None]] = [] + for coro in coroutines: + tasks.append(asyncio.create_task(coro)) + await asyncio.gather(*tasks) - graph: nx.DiGraph = self._graph.copy() # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx - - # We add an extra node to the graph, connecting all the leaf nodes to it - # in that way we make this node the root of the graph, avoiding backtracking - root_node = "root_node_bfs" - graph.add_node(root_node) - - for node in graph: - predecessors = list(graph.predecessors(node)) - if not predecessors: - graph.add_edge(root_node, node) - - layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node)) - - sorted_tasks: list[Awaitable[None]] = [] - for layer in layers_graph[1:]: - if parallel_tasks := self.__get_parallel_tasks_from(layer, runner): - sorted_tasks.append(run_parallel_tasks(parallel_tasks)) - - if reverse: - sorted_tasks.reverse() - - return run_graph_tasks(sorted_tasks) - - def __getitem__(self, component_id: str) -> PipelineComponent: - try: - return self._component_index[component_id] - except KeyError as exc: - msg = f"Component {component_id} not found" - raise ValueError(msg) from exc - - def __bool__(self) -> bool: - return bool(self._component_index) - - def __iter__(self) -> Iterator[PipelineComponent]: # pyright: ignore [reportIncompatibleMethodOverride] - yield from self._component_index.values() - - def __len__(self) -> int: - return 
len(self.components) - - def __add_to_graph(self, component: PipelineComponent): - self._graph.add_node(component.id) - - for input_topic in component.inputs: - self.__add_input(input_topic.id, component.id) - - for output_topic in component.outputs: - self.__add_output(output_topic.id, component.id) - - def __add_output(self, topic_id: str, source: str) -> None: - self._graph.add_node(topic_id) - self._graph.add_edge(source, topic_id) + async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None: + for pending_task in pending_tasks: + await pending_task + + graph: nx.DiGraph = self._graph.copy() # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx + + # We add an extra node to the graph, connecting all the leaf nodes to it + # in that way we make this node the root of the graph, avoiding backtracking + root_node = "root_node_bfs" + graph.add_node(root_node) + + for node in graph: + predecessors = list(graph.predecessors(node)) + if not predecessors: + graph.add_edge(root_node, node) + + layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node)) + + sorted_tasks: list[Awaitable[None]] = [] + for layer in layers_graph[1:]: + if parallel_tasks := self.__get_parallel_tasks_from(layer, runner): + sorted_tasks.append(run_parallel_tasks(parallel_tasks)) + + if reverse: + sorted_tasks.reverse() + + return run_graph_tasks(sorted_tasks) + + def __getitem__(self, component_id: str) -> PipelineComponent: + try: + return self._component_index[component_id] + except KeyError as exc: + msg = f"Component {component_id} not found" + raise ValueError(msg) from exc + + def __bool__(self) -> bool: + return bool(self._component_index) + + def __iter__(self) -> Iterator[PipelineComponent]: # pyright: ignore [reportIncompatibleMethodOverride] + yield from self._component_index.values() + + def __len__(self) -> int: + return len(self.components) + + def __add_to_graph(self, component: PipelineComponent): + self._graph.add_node(component.id) + + for input_topic in component.inputs: + self.__add_input(input_topic.id, component.id) + + for output_topic in component.outputs: + self.__add_output(output_topic.id, component.id) - def __add_input(self, topic_id: str, target: str) -> None: + def __add_output(self, topic_id: str, source: str) -> None: self._graph.add_node(topic_id) - self._graph.add_edge(topic_id, target) + self._graph.add_edge(source, topic_id) - def __get_parallel_tasks_from( - self, - layer: list[str], - runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]], - ) -> list[Coroutine[Any, Any, None]]: - def gen_parallel_tasks(): - for node_in_layer in layer: - # check if component, skip topics - if (component := self._component_index.get(node_in_layer)) is not None: - yield runner(component) - - return list(gen_parallel_tasks()) + def __add_input(self, topic_id: str, target: str) -> None: + self._graph.add_node(topic_id) + self._graph.add_edge(topic_id, target) + + def __get_parallel_tasks_from( + self, + layer: list[str], + runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]], + ) -> list[Coroutine[Any, Any, None]]: + def gen_parallel_tasks(): + for node_in_layer in layer: + # check if component, skip topics + if (component := self._component_index.get(node_in_layer)) is not None: + yield runner(component) + + return list(gen_parallel_tasks())

    @@ -4430,58 +4498,6 @@

    -

    - add_levels - - -

    -
    add_levels()
    -
    - -
    - -
    - Source code in kpops/pipeline/__init__.py -
    def add_levels(self):
    -    sync_wave = "sync-wave"
    -    for node in nx.topological_sort(self._graph):
    -        node_ = self._graph.nodes[node]
    -        if not len(list(self._graph.predecessors(node))):
    -            node_[sync_wave] = 1
    -        else:
    -            node_[sync_wave] = (
    -                max(
    -                    self._graph.nodes[n][sync_wave]
    -                    for n in self._graph.predecessors(node)
    -                )
    -                + 1
    -            )
    -        if p := self._component_index.get(node):
    -            p.sync_wave = self._graph.nodes[node][sync_wave]
    -
    -
    -
    - -
    - -
    - -

    build_execution_graph @@ -4494,11 +4510,7 @@

    Source code in kpops/pipeline/__init__.py -
    122
    -123
    -124
    -125
    -126
    +              
    126
     127
     128
     129
    @@ -4534,47 +4546,51 @@ 

    159 160 161 -162

    def build_execution_graph(
    -    self,
    -    runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],
    -    /,
    -    reverse: bool = False,
    -) -> Awaitable[None]:
    -    async def run_parallel_tasks(
    -        coroutines: list[Coroutine[Any, Any, None]],
    -    ) -> None:
    -        tasks: list[asyncio.Task[None]] = []
    -        for coro in coroutines:
    -            tasks.append(asyncio.create_task(coro))
    -        await asyncio.gather(*tasks)
    -
    -    async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:
    -        for pending_task in pending_tasks:
    -            await pending_task
    +162
    +163
    +164
    +165
    +166
    def build_execution_graph(
    +    self,
    +    runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],
    +    /,
    +    reverse: bool = False,
    +) -> Awaitable[None]:
    +    async def run_parallel_tasks(
    +        coroutines: list[Coroutine[Any, Any, None]],
    +    ) -> None:
    +        tasks: list[asyncio.Task[None]] = []
    +        for coro in coroutines:
    +            tasks.append(asyncio.create_task(coro))
    +        await asyncio.gather(*tasks)
     
    -    graph: nx.DiGraph = self._graph.copy()  # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx
    -
    -    # We add an extra node to the graph, connecting all the leaf nodes to it
    -    # in that way we make this node the root of the graph, avoiding backtracking
    -    root_node = "root_node_bfs"
    -    graph.add_node(root_node)
    -
    -    for node in graph:
    -        predecessors = list(graph.predecessors(node))
    -        if not predecessors:
    -            graph.add_edge(root_node, node)
    -
    -    layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))
    -
    -    sorted_tasks: list[Awaitable[None]] = []
    -    for layer in layers_graph[1:]:
    -        if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):
    -            sorted_tasks.append(run_parallel_tasks(parallel_tasks))
    -
    -    if reverse:
    -        sorted_tasks.reverse()
    -
    -    return run_graph_tasks(sorted_tasks)
    +    async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:
    +        for pending_task in pending_tasks:
    +            await pending_task
    +
    +    graph: nx.DiGraph = self._graph.copy()  # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx
    +
    +    # We add an extra node to the graph, connecting all the leaf nodes to it
    +    # in that way we make this node the root of the graph, avoiding backtracking
    +    root_node = "root_node_bfs"
    +    graph.add_node(root_node)
    +
    +    for node in graph:
    +        predecessors = list(graph.predecessors(node))
    +        if not predecessors:
    +            graph.add_edge(root_node, node)
    +
    +    layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))
    +
    +    sorted_tasks: list[Awaitable[None]] = []
    +    for layer in layers_graph[1:]:
    +        if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):
    +            sorted_tasks.append(run_parallel_tasks(parallel_tasks))
    +
    +    if reverse:
    +        sorted_tasks.reverse()
    +
    +    return run_graph_tasks(sorted_tasks)
     
    @@ -4804,13 +4820,13 @@

    Source code in kpops/pipeline/__init__.py -
    def to_yaml(self) -> str:
    -    return yaml.dump(
    -        self.model_dump(mode="json", by_alias=True, exclude_none=True)["components"]
    -    )
    +              
    def to_yaml(self) -> str:
    +    return yaml.dump(
    +        self.model_dump(mode="json", by_alias=True, exclude_none=True)["components"]
    +    )
     
    diff --git a/dev/objects.inv b/dev/objects.inv index 508a3bbf4..c76bbc2bb 100644 Binary files a/dev/objects.inv and b/dev/objects.inv differ diff --git a/dev/search/search_index.json b/dev/search/search_index.json index f7b6e8eb1..d573b4f88 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config": {"lang": ["en"], "separator": "[\\s\\-]+", "pipeline": ["stopWordFilter"]}, "docs": [{"location": "developer/api/", "title": "Python API", "text": ""}, {"location": "developer/api/#kpops.api", "title": "kpops.api", "text": ""}, {"location": "developer/api/#kpops.api.clean", "title": "clean", "text": "
    clean(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource] | None\n

    Clean pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def clean(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource] | None:\n    \"\"\"Clean pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n\n    list: list[Resource] = []\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        for component in reversed(pipeline.components):\n            clean = component.manifest_clean()\n            list.append(clean)\n\n        return list\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def clean_runner(component: PipelineComponent):\n            log_action(\"Clean\", component)\n            await component.clean(dry_run)\n\n        async def async_clean():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(\n                    clean_runner, reverse=True\n                )\n                await pipeline_tasks\n            else:\n                for component in reversed(pipeline.components):\n                    await clean_runner(component)\n\n        asyncio.run(async_clean())\n    return []\n
    "}, {"location": "developer/api/#kpops.api.deploy", "title": "deploy", "text": "
    deploy(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource]\n

    Deploy pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def deploy(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource]:\n    \"\"\"Deploy pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        resources: list[Resource] = []\n        for component in pipeline.components:\n            resource = component.manifest_deploy()\n            resources.append(resource)\n        return resources\n\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def deploy_runner(component: PipelineComponent):\n            log_action(\"Deploy\", component)\n            await component.deploy(dry_run)\n\n        async def async_deploy():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(deploy_runner)\n                await pipeline_tasks\n            else:\n                for component in pipeline.components:\n                    await deploy_runner(component)\n\n        asyncio.run(async_deploy())\n\n    return []\n
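A minimal sketch of calling `deploy` from Python, based on the signature above; the pipeline path and step name are placeholders, and `filter_type` is left at its INCLUDE default.

```python
from pathlib import Path

from kpops.api import deploy

# Dry-run a parallel deployment of one selected step (placeholder values).
deploy(
    pipeline_path=Path("./pipeline.yaml"),
    steps={"producer-app"},  # hypothetical step name
    dry_run=True,
    parallel=True,
)
```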
    "}, {"location": "developer/api/#kpops.api.destroy", "title": "destroy", "text": "
    destroy(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource] | None\n

    Destroy pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def destroy(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource] | None:\n    \"\"\"Destroy pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        resources: list[Resource] = []\n        for component in pipeline.components:\n            resource = component.manifest_destroy()\n            resources.append(resource)\n        return resources\n\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def destroy_runner(component: PipelineComponent):\n            log_action(\"Destroy\", component)\n            await component.destroy(dry_run)\n\n        async def async_destroy():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(\n                    destroy_runner, reverse=True\n                )\n                await pipeline_tasks\n            else:\n                for component in reversed(pipeline.components):\n                    await destroy_runner(component)\n\n        asyncio.run(async_destroy())\n    return []\n
    "}, {"location": "developer/api/#kpops.api.generate", "title": "generate", "text": "
    generate(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, verbose: bool = False) -> Pipeline\n

    Generate enriched pipeline representation.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: False

    RETURNS DESCRIPTION Pipeline

    Generated Pipeline object.

    Source code in kpops/api/__init__.py
    def generate(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    verbose: bool = False,\n) -> Pipeline:\n    \"\"\"Generate enriched pipeline representation.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :return: Generated `Pipeline` object.\n    \"\"\"\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n    pipeline = _create_pipeline(pipeline_path, kpops_config, environment)\n    log.info(f\"Picked up pipeline '{pipeline_path.parent.name}'\")\n    if steps:\n        component_names = steps\n        log.debug(\n            f\"KPOPS_PIPELINE_STEPS is defined with values: {component_names} and filter type of {filter_type.value}\"\n        )\n\n        predicate = filter_type.create_default_step_names_filter_predicate(\n            component_names\n        )\n        pipeline.filter(predicate)\n        log.info(f\"Filtered pipeline:\\n{pipeline.step_names}\")\n    return pipeline\n
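A short usage sketch of `generate` with placeholder paths; it shows the returned Pipeline being inspected via the properties documented further below.

```python
from pathlib import Path

from kpops.api import generate

# Placeholder pipeline definition and environment.
pipeline = generate(
    pipeline_path=Path("./pipeline.yaml"),
    environment="dev",
)
print(pipeline.step_names)  # names of the enriched pipeline steps
print(pipeline.to_yaml())   # enriched pipeline serialized as YAML
```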
    "}, {"location": "developer/api/#kpops.api.init", "title": "init", "text": "
    init(path: Path, config_include_opt: bool = False)\n

    Initiate a default empty project.

    PARAMETER DESCRIPTION path

    Directory in which the project should be initiated.

    TYPE: Path

config_include_opt

    Whether to include non-required settings in the generated config file.

    Source code in kpops/api/__init__.py
    def init(\n    path: Path,\n    config_include_opt: bool = False,\n):\n    \"\"\"Initiate a default empty project.\n\n    :param path: Directory in which the project should be initiated.\n    :param conf_incl_opt: Whether to include non-required settings\n        in the generated config file.\n    \"\"\"\n    if not path.exists():\n        path.mkdir(parents=False)\n    elif next(path.iterdir(), False):\n        log.warning(\"Please provide a path to an empty directory.\")\n        return\n    init_project(path, config_include_opt)\n
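A minimal sketch of scaffolding a project with `init`; the target directory is a placeholder and, as the source above shows, must be empty or not yet exist.

```python
from pathlib import Path

from kpops.api import init

# Create a default project skeleton, including the optional config settings.
init(Path("./my-kpops-project"), config_include_opt=True)
```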
    "}, {"location": "developer/api/#kpops.api.manifest", "title": "manifest", "text": "
    manifest(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, verbose: bool = False) -> list[Resource]\n

    Generate pipeline, return final resource representations for each step.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: False

    RETURNS DESCRIPTION list[Resource]

    Resources.

    Source code in kpops/api/__init__.py
    def manifest(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    verbose: bool = False,\n) -> list[Resource]:\n    \"\"\"Generate pipeline, return final resource representations for each step.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :return: Resources.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    resources: list[Resource] = []\n    for component in pipeline.components:\n        resource = component.manifest_deploy()\n        resources.append(resource)\n    return resources\n
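A brief sketch of `manifest`, assuming a placeholder pipeline path and a hypothetical step name passed through `steps`.

```python
from pathlib import Path

from kpops.api import manifest

# Collect the deploy-time resource representations of selected steps.
resources = manifest(
    pipeline_path=Path("./pipeline.yaml"),
    steps={"my-streams-app"},  # hypothetical step name
)
for resource in resources:
    print(resource)
```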
    "}, {"location": "developer/api/#kpops.api.reset", "title": "reset", "text": "
    reset(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource]\n

    Reset pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def reset(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource]:\n    \"\"\"Reset pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        resources: list[Resource] = []\n        for component in pipeline.components:\n            resource = component.manifest_reset()\n            resources.append(resource)\n        return resources\n\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def reset_runner(component: PipelineComponent):\n            log_action(\"Reset\", component)\n            await component.reset(dry_run)\n\n        async def async_reset():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(\n                    reset_runner, reverse=True\n                )\n                await pipeline_tasks\n            else:\n                for component in reversed(pipeline.components):\n                    await reset_runner(component)\n\n        asyncio.run(async_reset())\n\n    return []\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline", "title": "kpops.pipeline.Pipeline", "text": "

    Bases: BaseModel

    Pipeline representation.

    Source code in kpops/pipeline/__init__.py
    class Pipeline(BaseModel):\n    \"\"\"Pipeline representation.\"\"\"\n\n    _component_index: dict[str, PipelineComponent] = {}\n    _graph: nx.DiGraph = nx.DiGraph()\n\n    model_config = ConfigDict(arbitrary_types_allowed=True)\n\n    @property\n    def step_names(self) -> list[str]:\n        return [step.name for step in self.components]\n\n    @computed_field(title=\"Components\")\n    @property\n    def components(self) -> list[SerializeAsAny[PipelineComponent]]:\n        return list(self._component_index.values())\n\n    @property\n    def last(self) -> PipelineComponent:\n        return self.components[-1]\n\n    def add(self, component: PipelineComponent) -> None:\n        if self._component_index.get(component.id) is not None:\n            msg = (\n                f\"Pipeline steps must have unique id, '{component.id}' already exists.\"\n            )\n            raise ValidationError(msg)\n        self._component_index[component.id] = component\n        self.__add_to_graph(component)\n\n    def remove(self, component_id: str) -> None:\n        self._component_index.pop(component_id)\n\n    def get(self, component_id: str) -> PipelineComponent | None:\n        return self._component_index.get(component_id)\n\n    def find(self, predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]:\n        \"\"\"Find pipeline components matching a custom predicate.\n\n        :param predicate: Filter function,\n            returns boolean value whether the component should be kept or removed\n        :returns: Iterator of components matching the predicate\n        \"\"\"\n        for component in self.components:\n            if predicate(component):\n                yield component\n\n    def filter(self, predicate: ComponentFilterPredicate) -> None:\n        \"\"\"Filter pipeline components using a custom predicate.\n\n        :param predicate: Filter function,\n            returns boolean value whether the component should be kept or removed\n        \"\"\"\n        for component in self.components:\n            # filter out components not matching the predicate\n            if not predicate(component):\n                self.remove(component.id)\n\n    def validate(self) -> None:  # pyright: ignore [reportIncompatibleMethodOverride]\n        if not nx.is_directed_acyclic_graph(self._graph):\n            msg = \"Pipeline is not a valid DAG.\"\n            raise ValueError(msg)\n\n    def add_levels(self):\n        sync_wave = \"sync-wave\"\n        for node in nx.topological_sort(self._graph):\n            node_ = self._graph.nodes[node]\n            if not len(list(self._graph.predecessors(node))):\n                node_[sync_wave] = 1\n            else:\n                node_[sync_wave] = (\n                    max(\n                        self._graph.nodes[n][sync_wave]\n                        for n in self._graph.predecessors(node)\n                    )\n                    + 1\n                )\n            if p := self._component_index.get(node):\n                p.sync_wave = self._graph.nodes[node][sync_wave]\n\n    def to_yaml(self) -> str:\n        return yaml.dump(\n            self.model_dump(mode=\"json\", by_alias=True, exclude_none=True)[\"components\"]\n        )\n\n    def build_execution_graph(\n        self,\n        runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],\n        /,\n        reverse: bool = False,\n    ) -> Awaitable[None]:\n        async def run_parallel_tasks(\n            coroutines: list[Coroutine[Any, Any, None]],\n      
  ) -> None:\n            tasks: list[asyncio.Task[None]] = []\n            for coro in coroutines:\n                tasks.append(asyncio.create_task(coro))\n            await asyncio.gather(*tasks)\n\n        async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:\n            for pending_task in pending_tasks:\n                await pending_task\n\n        graph: nx.DiGraph = self._graph.copy()  # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx\n\n        # We add an extra node to the graph, connecting all the leaf nodes to it\n        # in that way we make this node the root of the graph, avoiding backtracking\n        root_node = \"root_node_bfs\"\n        graph.add_node(root_node)\n\n        for node in graph:\n            predecessors = list(graph.predecessors(node))\n            if not predecessors:\n                graph.add_edge(root_node, node)\n\n        layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))\n\n        sorted_tasks: list[Awaitable[None]] = []\n        for layer in layers_graph[1:]:\n            if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):\n                sorted_tasks.append(run_parallel_tasks(parallel_tasks))\n\n        if reverse:\n            sorted_tasks.reverse()\n\n        return run_graph_tasks(sorted_tasks)\n\n    def __getitem__(self, component_id: str) -> PipelineComponent:\n        try:\n            return self._component_index[component_id]\n        except KeyError as exc:\n            msg = f\"Component {component_id} not found\"\n            raise ValueError(msg) from exc\n\n    def __bool__(self) -> bool:\n        return bool(self._component_index)\n\n    def __iter__(self) -> Iterator[PipelineComponent]:  # pyright: ignore [reportIncompatibleMethodOverride]\n        yield from self._component_index.values()\n\n    def __len__(self) -> int:\n        return len(self.components)\n\n    def __add_to_graph(self, component: PipelineComponent):\n        self._graph.add_node(component.id)\n\n        for input_topic in component.inputs:\n            self.__add_input(input_topic.id, component.id)\n\n        for output_topic in component.outputs:\n            self.__add_output(output_topic.id, component.id)\n\n    def __add_output(self, topic_id: str, source: str) -> None:\n        self._graph.add_node(topic_id)\n        self._graph.add_edge(source, topic_id)\n\n    def __add_input(self, topic_id: str, target: str) -> None:\n        self._graph.add_node(topic_id)\n        self._graph.add_edge(topic_id, target)\n\n    def __get_parallel_tasks_from(\n        self,\n        layer: list[str],\n        runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],\n    ) -> list[Coroutine[Any, Any, None]]:\n        def gen_parallel_tasks():\n            for node_in_layer in layer:\n                # check if component, skip topics\n                if (component := self._component_index.get(node_in_layer)) is not None:\n                    yield runner(component)\n\n        return list(gen_parallel_tasks())\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.components", "title": "components property", "text": "
    components: list[SerializeAsAny[PipelineComponent]]\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.last", "title": "last property", "text": "
    last: PipelineComponent\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.step_names", "title": "step_names property", "text": "
    step_names: list[str]\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.add", "title": "add", "text": "
    add(component: PipelineComponent) -> None\n
    Source code in kpops/pipeline/__init__.py
    def add(self, component: PipelineComponent) -> None:\n    if self._component_index.get(component.id) is not None:\n        msg = (\n            f\"Pipeline steps must have unique id, '{component.id}' already exists.\"\n        )\n        raise ValidationError(msg)\n    self._component_index[component.id] = component\n    self.__add_to_graph(component)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.add_levels", "title": "add_levels", "text": "
    add_levels()\n
    Source code in kpops/pipeline/__init__.py
    def add_levels(self):\n    sync_wave = \"sync-wave\"\n    for node in nx.topological_sort(self._graph):\n        node_ = self._graph.nodes[node]\n        if not len(list(self._graph.predecessors(node))):\n            node_[sync_wave] = 1\n        else:\n            node_[sync_wave] = (\n                max(\n                    self._graph.nodes[n][sync_wave]\n                    for n in self._graph.predecessors(node)\n                )\n                + 1\n            )\n        if p := self._component_index.get(node):\n            p.sync_wave = self._graph.nodes[node][sync_wave]\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.build_execution_graph", "title": "build_execution_graph", "text": "
    build_execution_graph(runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]], /, reverse: bool = False) -> Awaitable[None]\n
    Source code in kpops/pipeline/__init__.py
    def build_execution_graph(\n    self,\n    runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],\n    /,\n    reverse: bool = False,\n) -> Awaitable[None]:\n    async def run_parallel_tasks(\n        coroutines: list[Coroutine[Any, Any, None]],\n    ) -> None:\n        tasks: list[asyncio.Task[None]] = []\n        for coro in coroutines:\n            tasks.append(asyncio.create_task(coro))\n        await asyncio.gather(*tasks)\n\n    async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:\n        for pending_task in pending_tasks:\n            await pending_task\n\n    graph: nx.DiGraph = self._graph.copy()  # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx\n\n    # We add an extra node to the graph, connecting all the leaf nodes to it\n    # in that way we make this node the root of the graph, avoiding backtracking\n    root_node = \"root_node_bfs\"\n    graph.add_node(root_node)\n\n    for node in graph:\n        predecessors = list(graph.predecessors(node))\n        if not predecessors:\n            graph.add_edge(root_node, node)\n\n    layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))\n\n    sorted_tasks: list[Awaitable[None]] = []\n    for layer in layers_graph[1:]:\n        if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):\n            sorted_tasks.append(run_parallel_tasks(parallel_tasks))\n\n    if reverse:\n        sorted_tasks.reverse()\n\n    return run_graph_tasks(sorted_tasks)\n
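To illustrate the layer-wise execution shown above, here is a sketch that feeds a custom runner coroutine into `build_execution_graph`; the pipeline path is a placeholder and the runner merely logs each component.

```python
import asyncio
from pathlib import Path

from kpops.api import generate

pipeline = generate(pipeline_path=Path("./pipeline.yaml"))  # placeholder path


async def log_component(component) -> None:
    # Runner invoked once per component; components within the same
    # BFS layer are awaited concurrently by build_execution_graph.
    print(f"visiting {component.id}")


asyncio.run(pipeline.build_execution_graph(log_component))                # source-to-sink order
asyncio.run(pipeline.build_execution_graph(log_component, reverse=True))  # sink-to-source order
```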
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.filter", "title": "filter", "text": "
    filter(predicate: ComponentFilterPredicate) -> None\n

    Filter pipeline components using a custom predicate.

    PARAMETER DESCRIPTION predicate

    Filter function, returns boolean value whether the component should be kept or removed

    TYPE: ComponentFilterPredicate

    Source code in kpops/pipeline/__init__.py
    def filter(self, predicate: ComponentFilterPredicate) -> None:\n    \"\"\"Filter pipeline components using a custom predicate.\n\n    :param predicate: Filter function,\n        returns boolean value whether the component should be kept or removed\n    \"\"\"\n    for component in self.components:\n        # filter out components not matching the predicate\n        if not predicate(component):\n            self.remove(component.id)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.find", "title": "find", "text": "
    find(predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]\n

    Find pipeline components matching a custom predicate.

    PARAMETER DESCRIPTION predicate

    Filter function, returns boolean value whether the component should be kept or removed

    TYPE: ComponentFilterPredicate

    RETURNS DESCRIPTION Iterator[PipelineComponent]

    Iterator of components matching the predicate

    Source code in kpops/pipeline/__init__.py
    def find(self, predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]:\n    \"\"\"Find pipeline components matching a custom predicate.\n\n    :param predicate: Filter function,\n        returns boolean value whether the component should be kept or removed\n    :returns: Iterator of components matching the predicate\n    \"\"\"\n    for component in self.components:\n        if predicate(component):\n            yield component\n
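A small sketch of `find` (and the closely related `filter`), using a hypothetical name prefix as the predicate and a placeholder pipeline path.

```python
from pathlib import Path

from kpops.api import generate

pipeline = generate(pipeline_path=Path("./pipeline.yaml"))  # placeholder path

# Yield all components whose name starts with a hypothetical prefix.
for component in pipeline.find(lambda c: c.name.startswith("word-count")):
    print(component.name)

# filter() takes the same predicate shape but removes non-matching components in place.
pipeline.filter(lambda c: c.name.startswith("word-count"))
```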
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.get", "title": "get", "text": "
    get(component_id: str) -> PipelineComponent | None\n
    Source code in kpops/pipeline/__init__.py
    def get(self, component_id: str) -> PipelineComponent | None:\n    return self._component_index.get(component_id)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.remove", "title": "remove", "text": "
    remove(component_id: str) -> None\n
    Source code in kpops/pipeline/__init__.py
    def remove(self, component_id: str) -> None:\n    self._component_index.pop(component_id)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.to_yaml", "title": "to_yaml", "text": "
    to_yaml() -> str\n
    Source code in kpops/pipeline/__init__.py
    def to_yaml(self) -> str:\n    return yaml.dump(\n        self.model_dump(mode=\"json\", by_alias=True, exclude_none=True)[\"components\"]\n    )\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.validate", "title": "validate", "text": "
    validate() -> None\n
    Source code in kpops/pipeline/__init__.py
    def validate(self) -> None:  # pyright: ignore [reportIncompatibleMethodOverride]\n    if not nx.is_directed_acyclic_graph(self._graph):\n        msg = \"Pipeline is not a valid DAG.\"\n        raise ValueError(msg)\n
    "}, {"location": "developer/auto-generation/", "title": "Auto generation", "text": "

    Auto generation happens mostly with pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto generate code for the documentation.

    "}, {"location": "developer/auto-generation/#generation-scripts-and-their-respective-files", "title": "Generation scripts and their respective files", "text": ""}, {"location": "developer/auto-generation/#documentation", "title": "Documentation", "text": ""}, {"location": "developer/auto-generation/#variables", "title": "Variables", "text": "
    • cli_env_vars.env -- All CLI environment variables in a dotenv file.
    • cli_env_vars.md -- All CLI environment variables in a table.
    • config_env_vars.env -- Almost all pipeline config environment variables in a dotenv file. The script checks for each field in KpopsConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it.
• config_env_vars.md -- Almost all pipeline config environment variables in a table.
    • variable_substitution.yaml -- A copy of ./tests/pipeline/resources/component-type-substitution/pipeline.yaml used as an example of substitution.
    "}, {"location": "developer/auto-generation/#cli-commands", "title": "CLI commands", "text": "

    Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.
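As a rough illustration of that mechanism (not the exact script used here), typer-cli can be driven through subprocess roughly like this; the module path, app name, and output file are hypothetical.

```python
import subprocess

# Hypothetical paths/names: render the CLI reference with typer-cli's docs utility.
subprocess.run(
    [
        "typer", "kpops.cli.main", "utils", "docs",
        "--name", "kpops",
        "--output", "docs/user/references/cli-commands.md",
    ],
    check=True,
)
```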

    "}, {"location": "developer/auto-generation/#pipeline-and-defaults-example-definitions", "title": "Pipeline and defaults example definitions", "text": "

Generates example pipeline.yaml and defaults.yaml for each individual component, stores them, and also concatenates them into one complete pipeline definition and one complete pipeline defaults definition.

    User input

• headers/*.yaml -- The top of each example. Includes a description comment, type and name. The headers for pipeline.yaml reside in the pipeline-components dir and the defaults.yaml headers reside in the pipeline-defaults dir. The names of the files must be equal to the respective component type.
• sections/*.yaml -- Each YAML file contains a single section (component attribute) definition. The intention is to keep the minimal set of definitions there from which any component definition can be built. The names of the files must be equal to the respective component type and the attribute name. The sections are used for both defaults.yaml and pipeline.yaml generation and reside in the pipeline-components dir.

    Generated

    • pipeline-components/dependencies/* Cached information about KPOps components
    • pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the pipeline.yaml generation.
    • defaults_pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the defaults.yaml generation.
    • kpops_structure.yaml -- Specifies the inheritance hierarchy of the components and what sections exist in each component.
• pipeline-components/*.yaml -- All single-component pipeline definitions and one big (complete) pipeline.yaml that contains all of them.
• pipeline-defaults/*.yaml -- All single-component defaults definitions and one big (complete) defaults.yaml that contains all of them.
    "}, {"location": "developer/auto-generation/#editor-integration", "title": "Editor integration", "text": ""}, {"location": "developer/auto-generation/#schemas", "title": "Schemas", "text": "
    • config.json
    • pipeline.json
    "}, {"location": "developer/contributing/", "title": "How to contribute", "text": "

    Welcome! We are glad to have you visit our contributing guide!

    If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

    "}, {"location": "developer/contributing/#git", "title": "git", "text": "

We use git submodules to import the KPOps examples repository. You need to fetch the repository locally on your machine. To do so, use this command:

    git submodule init\ngit submodule update --recursive\n

    This will fetch the resources under the examples folder.

    "}, {"location": "developer/contributing/#style", "title": "Style", "text": "

We advise that you stick to our pre-commit hooks for code linting, formatting, and auto-generation of documentation. After you install them using poetry run pre-commit install, they're triggered automatically during git commit. Additionally, you can manually invoke them with poetry run pre-commit run -a. For the dprint hook to run locally, you have to install dprint on your machine. It runs in the CI regardless, so you can also skip the local installation and instead apply the formatting changes that dprint flags in the CI.

    "}, {"location": "developer/contributing/#python", "title": "Python", "text": "

    To ensure a consistent Python code style, we use Ruff for both linting and formatting. The official docs contain a guide on editor integration.

    Our configuration can be found in KPOps' top-level pyproject.toml.

    "}, {"location": "developer/contributing/#markdown", "title": "Markdown", "text": "

    To ensure a consistent markdown style, we use dprint's Markdown code formatter. Our configuration can be found here.

    "}, {"location": "developer/contributing/#css", "title": "CSS", "text": "

To ensure a consistent CSS style, we use dprint's malva plugin. Our configuration can be found here.

    "}, {"location": "developer/contributing/#toml", "title": "TOML", "text": "

    To ensure a consistent TOML style, we use dprint's TOML code formatter. Our configuration can be found here.

    "}, {"location": "developer/getting-started/", "title": "Getting started", "text": "

    Welcome! We are glad to have you visit our developer guide! If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

    Find more about our code-style or insights into KPOps' code base here in our developer guide.

    Work in progress

    The developer guide is still under construction. If you have a question left unanswered here, feel free to ask it by opening an issue.

    "}, {"location": "user/changelog/", "title": "Changelog", "text": ""}, {"location": "user/changelog/#810-release-date-2024-10-25", "title": "8.1.0 - Release Date: [2024-10-25]", "text": ""}, {"location": "user/changelog/#dependencies", "title": "\ud83e\uddea Dependencies", "text": "
    • Upgrade typer to support union types - #533
    "}, {"location": "user/changelog/#miscellaneous", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Extend StreamsBootstrap model - #534
    "}, {"location": "user/changelog/#801-release-date-2024-08-22", "title": "8.0.1 - Release Date: [2024-08-22]", "text": ""}, {"location": "user/changelog/#documentation", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix changelog in docs - #532
    "}, {"location": "user/changelog/#800-release-date-2024-08-21", "title": "8.0.0 - Release Date: [2024-08-21]", "text": ""}, {"location": "user/changelog/#breaking-changes", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Make KafkaApp responsible for deploying/cleaning streams bootstrap components - https://github.com/bakdata/kpops/pull/522
    • Rename role to label - https://github.com/bakdata/kpops/pull/525
    • Fix Pyright warning about type override without default value - https://github.com/bakdata/kpops/pull/524
    • Remove -v3 suffix and suffix old streams bootstrap with -v2 - https://github.com/bakdata/kpops/pull/526
    "}, {"location": "user/changelog/#features", "title": "\ud83d\ude80 Features", "text": "
    • Add support for streams-bootstrap v3 - https://github.com/bakdata/kpops/pull/519
    "}, {"location": "user/changelog/#refactor", "title": "\ud83c\udfed Refactor", "text": "
    • Rename role to label - https://github.com/bakdata/kpops/pull/525
    • Fix Pyright warning about type override without default value - https://github.com/bakdata/kpops/pull/524
    "}, {"location": "user/changelog/#710-release-date-2024-08-15", "title": "7.1.0 - Release Date: [2024-08-15]", "text": ""}, {"location": "user/changelog/#dependencies_1", "title": "\ud83e\uddea Dependencies", "text": "
    • Update pytest - #527
    "}, {"location": "user/changelog/#refactor_1", "title": "\ud83c\udfed Refactor", "text": "
    • Improve incomplete type hints - #515

    • Fallback to user defined model when the validation of cluster model fails - #521

    • Replace kubernetes-asyncio with lightkube - #517

    "}, {"location": "user/changelog/#miscellaneous_1", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Fix incorrect parameter type annotation - #523
    "}, {"location": "user/changelog/#700-release-date-2024-07-23", "title": "7.0.0 - Release Date: [2024-07-23]", "text": ""}, {"location": "user/changelog/#breaking-changes_1", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Automatic loading of namespaced custom components - #500

    • Call destroy from inside of reset or clean - #501

    • Rename app field - #506

    "}, {"location": "user/changelog/#features_1", "title": "\ud83d\ude80 Features", "text": "
    • clean/reset streams-bootstrap components with cluster values - #498
    "}, {"location": "user/changelog/#refactor_2", "title": "\ud83c\udfed Refactor", "text": "
    • Call destroy from inside of reset or clean - #501

    • Rename app field - #506

    "}, {"location": "user/changelog/#610-release-date-2024-07-09", "title": "6.1.0 - Release Date: [2024-07-09]", "text": ""}, {"location": "user/changelog/#features_2", "title": "\ud83d\ude80 Features", "text": "
    • Add image tag field to streams-bootstrap app values - #499

    • Delete ignored keys from diff - #510

    "}, {"location": "user/changelog/#refactor_3", "title": "\ud83c\udfed Refactor", "text": "
    • Improve dataclass instance check - #507
    "}, {"location": "user/changelog/#602-release-date-2024-07-04", "title": "6.0.2 - Release Date: [2024-07-04]", "text": ""}, {"location": "user/changelog/#documentation_1", "title": "\ud83d\udcdd Documentation", "text": "
    • Generate developer docs for Python API - #503
    "}, {"location": "user/changelog/#miscellaneous_2", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update codeowners - #504
    "}, {"location": "user/changelog/#601-release-date-2024-06-12", "title": "6.0.1 - Release Date: [2024-06-12]", "text": ""}, {"location": "user/changelog/#fixes", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix connector resetter offset topic - #497
    "}, {"location": "user/changelog/#600-release-date-2024-06-06", "title": "6.0.0 - Release Date: [2024-06-06]", "text": ""}, {"location": "user/changelog/#breaking-changes_2", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • KPOps 6.0.0 - #496
    "}, {"location": "user/changelog/#features_3", "title": "\ud83d\ude80 Features", "text": "
    • KPOps 6.0.0 - #496
    "}, {"location": "user/changelog/#refactor_4", "title": "\ud83c\udfed Refactor", "text": "
    • KPOps 6.0.0 - #496
    "}, {"location": "user/changelog/#511-release-date-2024-05-22", "title": "5.1.1 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#fixes_1", "title": "\ud83d\udc1b Fixes", "text": "
    • Add YAML separator (---) to stdout - #491
    "}, {"location": "user/changelog/#510-release-date-2024-05-22", "title": "5.1.0 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#miscellaneous_3", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add computed field for Helm release name and name override - #490
    "}, {"location": "user/changelog/#501-release-date-2024-05-15", "title": "5.0.1 - Release Date: [2024-05-15]", "text": ""}, {"location": "user/changelog/#fixes_2", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix missing await on Kubernetes API - #488
    "}, {"location": "user/changelog/#500-release-date-2024-05-02", "title": "5.0.0 - Release Date: [2024-05-02]", "text": ""}, {"location": "user/changelog/#breaking-changes_3", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Allow custom timeout for external services - #485
    "}, {"location": "user/changelog/#miscellaneous_4", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update examples for v4 - #486
    "}, {"location": "user/changelog/#421-release-date-2024-04-25", "title": "4.2.1 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#features_4", "title": "\ud83d\ude80 Features", "text": "
    • Add support for cleaning StatefulSets with PVCs - #482
    "}, {"location": "user/changelog/#420-release-date-2024-04-25", "title": "4.2.0 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#refactor_5", "title": "\ud83c\udfed Refactor", "text": "
    • Improve type annotations for parallel pipeline jobs - #476
    "}, {"location": "user/changelog/#miscellaneous_5", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update Ruff - #475

    • Set Pyright to warn on unknown types - #480

    • Quiet faker debug logs in tests - #483

    • Add pyright matcher - #481

    "}, {"location": "user/changelog/#412-release-date-2024-03-11", "title": "4.1.2 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#documentation_2", "title": "\ud83d\udcdd Documentation", "text": "
    • fix(docs): Correct from.components.<component-name>.type to input - #473
    "}, {"location": "user/changelog/#411-release-date-2024-03-11", "title": "4.1.1 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#fixes_3", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix import errors - #472
    "}, {"location": "user/changelog/#refactor_6", "title": "\ud83c\udfed Refactor", "text": "
    • Fix import errors - #472
    "}, {"location": "user/changelog/#miscellaneous_6", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update httpx - #471
    "}, {"location": "user/changelog/#410-release-date-2024-03-07", "title": "4.1.0 - Release Date: [2024-03-07]", "text": ""}, {"location": "user/changelog/#documentation_3", "title": "\ud83d\udcdd Documentation", "text": "
    • Document precedence between env vars and config.yaml - #465
    "}, {"location": "user/changelog/#miscellaneous_7", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Create init command - #394
    "}, {"location": "user/changelog/#402-release-date-2024-03-04", "title": "4.0.2 - Release Date: [2024-03-04]", "text": ""}, {"location": "user/changelog/#documentation_4", "title": "\ud83d\udcdd Documentation", "text": "
    • Reference editor plugin for Neovim in docs - #464
    "}, {"location": "user/changelog/#miscellaneous_8", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add support for Python 3.12 - #467

    • Update Pyright - #468

    • Remove package classifiers that are automatically assigned by Poetry - #469

    • Validate autoscaling mandatory fields when enabled - #470

    "}, {"location": "user/changelog/#401-release-date-2024-02-29", "title": "4.0.1 - Release Date: [2024-02-29]", "text": ""}, {"location": "user/changelog/#fixes_4", "title": "\ud83d\udc1b Fixes", "text": "
    • Set supported Python cutoff to 3.11 - #466
    "}, {"location": "user/changelog/#400-release-date-2024-02-27", "title": "4.0.0 - Release Date: [2024-02-27]", "text": ""}, {"location": "user/changelog/#breaking-changes_4", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Distribute defaults across multiple files - #438
    "}, {"location": "user/changelog/#features_5", "title": "\ud83d\ude80 Features", "text": "
    • Distribute defaults across multiple files - #438
    "}, {"location": "user/changelog/#324-release-date-2024-02-26", "title": "3.2.4 - Release Date: [2024-02-26]", "text": ""}, {"location": "user/changelog/#fixes_5", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix docs CI to include the latest changes to a tagged version in the changelog - #459

    • Fix tempfile creation - #461

    • Fix symbolic link to CONTRIBUTING.md and parallel option in action.yaml - #462

    "}, {"location": "user/changelog/#refactor_7", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor Kafka topics - #447

    • Refactor PipelineGenerator to use component ids - #460

    "}, {"location": "user/changelog/#documentation_5", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix docs CI to include the latest changes to a tagged version in the changelog - #459
    "}, {"location": "user/changelog/#323-release-date-2024-02-19", "title": "3.2.3 - Release Date: [2024-02-19]", "text": ""}, {"location": "user/changelog/#fixes_6", "title": "\ud83d\udc1b Fixes", "text": "
    • Trim and hash Helm name override to 63 characters - #456
    "}, {"location": "user/changelog/#322-release-date-2024-02-12", "title": "3.2.2 - Release Date: [2024-02-12]", "text": ""}, {"location": "user/changelog/#fixes_7", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix nested substitution - #451
    "}, {"location": "user/changelog/#321-release-date-2024-02-08", "title": "3.2.1 - Release Date: [2024-02-08]", "text": ""}, {"location": "user/changelog/#fixes_8", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix order of pipeline steps for clean/reset - #450

    • Fix substitution - #449

    • Fix cleaner inheritance, parent model should be aliased during instantiation - #452

    "}, {"location": "user/changelog/#refactor_8", "title": "\ud83c\udfed Refactor", "text": "
    • Simplify execution graph logic - #446
    "}, {"location": "user/changelog/#320-release-date-2024-02-01", "title": "3.2.0 - Release Date: [2024-02-01]", "text": ""}, {"location": "user/changelog/#features_6", "title": "\ud83d\ude80 Features", "text": "
    • Refactor pipeline filter and add to public API - #405
    "}, {"location": "user/changelog/#refactor_9", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor enrichment using Pydantic model validator - #444

    • Refactor pipeline filter and add to public API - #405

    "}, {"location": "user/changelog/#documentation_6", "title": "\ud83d\udcdd Documentation", "text": "
    • Improve Sphinx docs highlighting using RST markup - #443
    "}, {"location": "user/changelog/#310-release-date-2024-01-30", "title": "3.1.0 - Release Date: [2024-01-30]", "text": ""}, {"location": "user/changelog/#features_7", "title": "\ud83d\ude80 Features", "text": "
    • Add support for pipeline steps parallelization - #312
    "}, {"location": "user/changelog/#fixes_9", "title": "\ud83d\udc1b Fixes", "text": "
    • Update poetry publish workflow version to latest - #430
    "}, {"location": "user/changelog/#refactor_10", "title": "\ud83c\udfed Refactor", "text": "
    • Simplify loading of defaults - #435
    "}, {"location": "user/changelog/#miscellaneous_9", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add custom PascalCase to snake_case alias generator - #436

    • Add parallel flag support to kpops runner - #439

    "}, {"location": "user/changelog/#302-release-date-2024-01-23", "title": "3.0.2 - Release Date: [2024-01-23]", "text": ""}, {"location": "user/changelog/#fixes_10", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix Helm diff output - #434
    "}, {"location": "user/changelog/#documentation_7", "title": "\ud83d\udcdd Documentation", "text": "
    • Add step for submodule initialization on the docs - #431
    "}, {"location": "user/changelog/#miscellaneous_10", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add message if examples git submodule is not initialized - #432

    • Update type annotation for deserialized pipeline - #433

    "}, {"location": "user/changelog/#301-release-date-2024-01-19", "title": "3.0.1 - Release Date: [2024-01-19]", "text": ""}, {"location": "user/changelog/#fixes_11", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix broken doc link - #427

    • Add warning log if SR handler is disabled but URL is set - #428

    "}, {"location": "user/changelog/#documentation_8", "title": "\ud83d\udcdd Documentation", "text": "
    • Update docs of word-count example for v3 & new folder structure - #423

    • Move ATM fraud to examples repo - #425

    • Fix broken doc link - #427

    "}, {"location": "user/changelog/#miscellaneous_11", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update pydantic dependency - #422

    • Add git submodule instructions to the contributing.md - #429

    "}, {"location": "user/changelog/#300-release-date-2024-01-17", "title": "3.0.0 - Release Date: [2024-01-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_5", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Move GitHub action to repository root - #356

    • Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config - #354

    • Create HelmApp component - #370

    • Change substitution variables separator to . - #388

    • Refactor pipeline generator & representation - #392

    • Define custom components module & pipeline base dir globally - #387

    • Use hash and trim long Helm release names instead of only trimming - #390

    • Refactor generate template for Python API usage - #380

    • Namespace substitution vars - #408

    • Refactor streams-bootstrap cleanup jobs as individual HelmApp - #398

    • Refactor Kafka Connector resetter as individual HelmApp - #400

    • Fix wrong Helm release name character limit - #418

    "}, {"location": "user/changelog/#features_8", "title": "\ud83d\ude80 Features", "text": "
    • Allow overriding config files - #391

    • Generate defaults schema - #402

    "}, {"location": "user/changelog/#fixes_12", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix missing component type in pipeline schema - #401

    • Fix enrichment of nested Pydantic BaseModel - #415

    • Fix wrong Helm release name character limit - #418

    • Update release workflow template to support custom changelog file path - #421

    "}, {"location": "user/changelog/#dependencies_2", "title": "\ud83e\uddea Dependencies", "text": "
    • Migrate to Pydantic v2 - #347
    "}, {"location": "user/changelog/#refactor_11", "title": "\ud83c\udfed Refactor", "text": "
    • Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config - #354

    • Migrate to Pydantic v2 - #347

    • Refactor pipeline generator & representation - #392

    • Use hash and trim long Helm release names instead of only trimming - #390

    • Refactor Helm nameOverride - #397

    • Mark component type as computed Pydantic field - #399

    • Refactor generate template for Python API usage - #380

    • Support multiple inheritance for doc generation - #406

    • Refactor streams-bootstrap cleanup jobs as individual HelmApp - #398

    • Refactor Kafka Connector resetter as individual HelmApp - #400

    "}, {"location": "user/changelog/#documentation_9", "title": "\ud83d\udcdd Documentation", "text": "
    • Move GitHub action to repository root - #356

    • Create HelmApp component - #370

    • Update docs for substitution variable usage in v3 - #409

    • Support multiple inheritance for doc generation - #406

    • Update docs for v3 - #416

    • Update tests resources - #417

    • Summarize all breaking changes in diffs at the top of the migration guide - #419

    "}, {"location": "user/changelog/#miscellaneous_12", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Replace black with ruff - #365

    • Add toml formatter to dprint - #386

    • Add malva to dprint - #385

    • Update KPOps runner with the new options - #395

    • Fix KPOps action to get package from testPyPI - #396

    • KPOps 3.0 - #420

    "}, {"location": "user/changelog/#2011-release-date-2023-10-24", "title": "2.0.11 - Release Date: [2023-10-24]", "text": ""}, {"location": "user/changelog/#fixes_13", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix early exit upon Helm exit code 1 - #376

    • Fix docs setup page list indentation - #377

    "}, {"location": "user/changelog/#documentation_10", "title": "\ud83d\udcdd Documentation", "text": "
    • Migrate deprecated mkdocs-material-extensions - #378

    • Fix docs setup page list indentation - #377

    • Exclude resources from docs search - #371

    "}, {"location": "user/changelog/#2010-release-date-2023-10-12", "title": "2.0.10 - Release Date: [2023-10-12]", "text": ""}, {"location": "user/changelog/#miscellaneous_13", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Fix environment variables documentation generation - #362

    • Introduce ruff - #363

    • Print details on connector name mismatch error - #369

    • Enable transparent OS environment lookups from internal environment - #368

    "}, {"location": "user/changelog/#209-release-date-2023-09-19", "title": "2.0.9 - Release Date: [2023-09-19]", "text": ""}, {"location": "user/changelog/#fixes_14", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix Kafka connect config name for deletion - #361
    "}, {"location": "user/changelog/#documentation_11", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix link to kpops-examples - #357
    "}, {"location": "user/changelog/#208-release-date-2023-09-06", "title": "2.0.8 - Release Date: [2023-09-06]", "text": ""}, {"location": "user/changelog/#fixes_15", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix config.yaml overriding environment variables - #353
    "}, {"location": "user/changelog/#refactor_12", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor component prefix & name - #326

    • Remove unnecessary condition during inflate - #328

    "}, {"location": "user/changelog/#207-release-date-2023-08-31", "title": "2.0.7 - Release Date: [2023-08-31]", "text": ""}, {"location": "user/changelog/#fixes_16", "title": "\ud83d\udc1b Fixes", "text": "
    • Print only rendered templates when --template flag is set - #350
    "}, {"location": "user/changelog/#documentation_12", "title": "\ud83d\udcdd Documentation", "text": "
    • Add migration guide - #352
    "}, {"location": "user/changelog/#206-release-date-2023-08-30", "title": "2.0.6 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#refactor_13", "title": "\ud83c\udfed Refactor", "text": "
    • Simplify deployment with local Helm charts - #349
    "}, {"location": "user/changelog/#205-release-date-2023-08-30", "title": "2.0.5 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#fixes_17", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix versioning of docs when releasing - #346
    "}, {"location": "user/changelog/#204-release-date-2023-08-29", "title": "2.0.4 - Release Date: [2023-08-29]", "text": ""}, {"location": "user/changelog/#fixes_18", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix GitHub ref variable for pushing docs to main branch - #343
    "}, {"location": "user/changelog/#documentation_13", "title": "\ud83d\udcdd Documentation", "text": "
    • Add dprint as the markdown formatter - #337

    • Publish pre-release docs for PRs & main branch - #339

    • Align docs colours - #345

    "}, {"location": "user/changelog/#miscellaneous_14", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Exclude abstract components from pipeline schema - #332
    "}, {"location": "user/changelog/#203-release-date-2023-08-24", "title": "2.0.3 - Release Date: [2023-08-24]", "text": ""}, {"location": "user/changelog/#fixes_19", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix GitHub action error in non-Python projects - #340
    "}, {"location": "user/changelog/#miscellaneous_15", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Lint GitHub action - #342
    "}, {"location": "user/changelog/#202-release-date-2023-08-23", "title": "2.0.2 - Release Date: [2023-08-23]", "text": ""}, {"location": "user/changelog/#documentation_14", "title": "\ud83d\udcdd Documentation", "text": "
    • Add version dropdown to the documentation - #336

    • Break the documentation down into smaller subsection - #329

    "}, {"location": "user/changelog/#201-release-date-2023-08-22", "title": "2.0.1 - Release Date: [2023-08-22]", "text": ""}, {"location": "user/changelog/#fixes_20", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix optional flags in GitHub action - #334
    "}, {"location": "user/changelog/#200-release-date-2023-08-17", "title": "2.0.0 - Release Date: [2023-08-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_6", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Remove camel case conversion of internal models - #308

    • Derive component type automatically from class name - #309

    • Refactor input/output types - #232

    • v2 - #321

    "}, {"location": "user/changelog/#features_9", "title": "\ud83d\ude80 Features", "text": "
    • Automatically support schema generation for custom components - #307

    • Derive component type automatically from class name - #309

    "}, {"location": "user/changelog/#refactor_14", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor input/output types - #232
    "}, {"location": "user/changelog/#documentation_15", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix editor integration example in docs - #273
    "}, {"location": "user/changelog/#172-release-date-2023-08-16", "title": "1.7.2 - Release Date: [2023-08-16]", "text": ""}, {"location": "user/changelog/#refactor_15", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor Kafka Connect handler - #322
    "}, {"location": "user/changelog/#documentation_16", "title": "\ud83d\udcdd Documentation", "text": "
    • Add KPOps Runner GitHub Action to the documentation - #325

    • Remove :type and :rtype from docstrings - #324

    "}, {"location": "user/changelog/#171-release-date-2023-08-15", "title": "1.7.1 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#documentation_17", "title": "\ud83d\udcdd Documentation", "text": "
    • Modularize and autogenerate examples for the documentation - #267

    • Update the variable documentation - #266

    "}, {"location": "user/changelog/#170-release-date-2023-08-15", "title": "1.7.0 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#features_10", "title": "\ud83d\ude80 Features", "text": "
    • Add flag to exclude pipeline steps - #300
    "}, {"location": "user/changelog/#160-release-date-2023-08-10", "title": "1.6.0 - Release Date: [2023-08-10]", "text": ""}, {"location": "user/changelog/#refactor_16", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor handling of Helm flags - #319
    "}, {"location": "user/changelog/#150-release-date-2023-08-10", "title": "1.5.0 - Release Date: [2023-08-10]", "text": ""}, {"location": "user/changelog/#features_11", "title": "\ud83d\ude80 Features", "text": "
    • Refactor Helm wrapper and add --set-file flag - #311
    "}, {"location": "user/changelog/#refactor_17", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor Helm wrapper and add --set-file flag - #311

    • Set default for ToSection topics - #313

    • Annotate types for ToSection models mapping - #315

    "}, {"location": "user/changelog/#miscellaneous_16", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Check Poetry lock file consistency - #316
    "}, {"location": "user/changelog/#140-release-date-2023-08-02", "title": "1.4.0 - Release Date: [2023-08-02]", "text": ""}, {"location": "user/changelog/#fixes_21", "title": "\ud83d\udc1b Fixes", "text": "
    • Validate unique step names - #292
    "}, {"location": "user/changelog/#refactor_18", "title": "\ud83c\udfed Refactor", "text": "
    • Order PipelineComponent fields - #290

    • Migrate requests to httpx - #302

    • Refactor CLI using dtyper - #306

    "}, {"location": "user/changelog/#miscellaneous_17", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update Black - #294

    • Fix vulnerability in mkdocs-material - #295

    • Move breaking changes section upper in the change log config - #287

    "}, {"location": "user/changelog/#132-release-date-2023-07-13", "title": "1.3.2 - Release Date: [2023-07-13]", "text": ""}, {"location": "user/changelog/#fixes_22", "title": "\ud83d\udc1b Fixes", "text": "
    • Exclude Helm tests from dry-run diff - #293
    "}, {"location": "user/changelog/#131-release-date-2023-07-11", "title": "1.3.1 - Release Date: [2023-07-11]", "text": ""}, {"location": "user/changelog/#refactor_19", "title": "\ud83c\udfed Refactor", "text": "
    • Remove workaround for pipeline steps - #276
    "}, {"location": "user/changelog/#miscellaneous_18", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update codeowners - #281

    • Reactivate Windows CI - #255

    • Downgrade Poetry version on the Windows CI pipeline - #286

    • Set ANSI theme for output of kpops generate - #289

    "}, {"location": "user/changelog/#130-release-date-2023-07-07", "title": "1.3.0 - Release Date: [2023-07-07]", "text": ""}, {"location": "user/changelog/#refactor_20", "title": "\ud83c\udfed Refactor", "text": "
    • Plural broker field in pipeline config - #278
    "}, {"location": "user/changelog/#documentation_18", "title": "\ud83d\udcdd Documentation", "text": "
    • Update KPOps runner readme for dev versions - #279
    "}, {"location": "user/changelog/#breaking-changes_7", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Plural broker field in pipeline config - #278
    "}, {"location": "user/changelog/#miscellaneous_19", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add breaking changes section to change log config - #280
    "}, {"location": "user/changelog/#124-release-date-2023-06-27", "title": "1.2.4 - Release Date: [2023-06-27]", "text": ""}, {"location": "user/changelog/#miscellaneous_20", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update changelog action to contain miscellaneous PRs - #269
    "}, {"location": "user/changelog/#123-release-date-2023-06-22", "title": "1.2.3 - Release Date: [2023-06-22]", "text": ""}, {"location": "user/changelog/#fixes_23", "title": "\ud83d\udc1b Fixes", "text": "
    • Refactor custom component validation & hide field from kpops output - #265
    "}, {"location": "user/changelog/#refactor_21", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor custom component validation & hide field from kpops output - #265
    "}, {"location": "user/changelog/#miscellaneous_21", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#122-release-date-2023-06-21", "title": "1.2.2 - Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#miscellaneous_22", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Create workflow to lint CI - #260

    • Fix update docs when releasing - #261

    • Rename change log message for uncategorized issues - #262

    "}, {"location": "user/changelog/#121-release-date-2023-06-21", "title": "1.2.1 - Release Date: [2023-06-21]", "text": "Uncategorized
    • Fix update docs in release workflow - #258
    "}, {"location": "user/changelog/#120-release-date-2023-06-21", "title": "1.2.0 - Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#features_12", "title": "\ud83d\ude80 Features", "text": "
    • Add helm repo update <repo-name> for Helm >3.7 - #239
    "}, {"location": "user/changelog/#fixes_24", "title": "\ud83d\udc1b Fixes", "text": "
    • add --namespace option to Helm template command - #237

    • Add missing type annotation for Pydantic attributes - #238

    • Fix helm version check - #242

    • Fix Helm Version Check - #244

    • Fix import from external module - #256

    "}, {"location": "user/changelog/#refactor_22", "title": "\ud83c\udfed Refactor", "text": "
    • Remove enable option from helm diff - #235

    • Refactor variable substitution - #198

    Uncategorized
    • Add background to docs home page - #236

    • Update Poetry version in CI - #247

    • Add pip cache in KPOps runner action - #249

    • Check types using Pyright - #251

    • Remove MyPy - #252

    • Disable broken Windows CI temporarily - #253

    • Update release and publish workflows - #254

    • Fix release & publish workflows - #257

    "}, {"location": "user/what-is-kpops/", "title": "What is KPOps?", "text": "

    With a couple of easy commands in the shell, and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline1 to a Kubernetes cluster, but also reset, clean or destroy it!

    "}, {"location": "user/what-is-kpops/#key-features", "title": "Key features", "text": "
    • Deploy Kafka apps to Kubernetes: KPOps allows you to deploy consecutive Kafka Streams applications and producers using an easy-to-read and -write pipeline definition.
    • Manage Kafka Connectors: KPOps connects with your Kafka Connect cluster and deploys, validates, and deletes your connectors.
    • Configure multiple pipelines and steps: KPOps has various abstractions that simplify configuring multiple pipelines and steps within pipelines by sharing common configuration between different components, such as producers or streaming applications.
    • Handle your topics and schemas: KPOps not only creates and deletes your topics but also registers and deletes your schemas.
    • Clean termination of Kafka components: KPOps removes your pipeline components (e.g., Kafka Streams applications) from the Kubernetes cluster and cleans up the component-related state (e.g., removing or resetting the offsets of Kafka consumer groups).
    • Preview your pipeline changes: With the KPOps dry-run, you can ensure your pipeline definition is set up correctly. This helps to minimize downtime and prevent potential errors or issues that could impact your production environment.
    "}, {"location": "user/what-is-kpops/#example", "title": "Example", "text": "An overview of the Word-count pipeline shown in Streams Explorer. Word-count pipeline.yaml
    - type: producer-app-v2\n  name: data-producer\n  values:\n    image: bakdata/kpops-demo-sentence-producer\n\n- type: streams-app-v2\n  name: word-counter\n  to:\n    topics:\n      ${output_topic_name}:\n        type: output\n        configs:\n          cleanup.policy: compact\n  values:\n    image: bakdata/kpops-demo-word-count-app\n    replicaCount: 1\n\n- type: kafka-sink-connector\n  name: redis-sink-connector\n  config:\n    connector.class: com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector\n    redis.hosts: redis-headless:6379\n    redis.database: 0\n    tasks.max: 1\n    key.converter: org.apache.kafka.connect.storage.StringConverter\n    value.converter: org.apache.kafka.connect.storage.StringConverter\n
    1. A Kafka pipeline can consist of consecutive streaming applications, producers, and connectors.\u00a0\u21a9

    "}, {"location": "user/core-concepts/config/", "title": "Configuration", "text": "

    KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.

    Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.
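
    For instance, if your editor uses yaml-language-server (e.g., via the Red Hat YAML extension), you can point it at a KPOps JSON schema with a modeline at the top of the file. The schema location below is only a placeholder, not an official URL; use the schema matching your KPOps version.

    config.yaml
    # yaml-language-server: $schema=<path-or-URL-to-the-KPOps-config-schema.json>\nkafka_brokers: \"http://broker1:9092\"\n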

    To learn about any of the available settings, take a look at the example below.

    config.yaml
    # CONFIGURATION\n#\n# Base directory to the pipelines (default is current working directory)\npipeline_base_dir: .\n# The Kafka brokers' address.\n# REQUIRED\nkafka_brokers: \"http://broker1:9092,http://broker2:9092\"\n# Configure the topic name variables you can use in the pipeline definition.\ntopic_name_config:\n  # Configures the value for the variable ${output_topic_name}\n  default_output_topic_name: ${pipeline.name}-${component.name}\n  # Configures the value for the variable ${error_topic_name}\n  default_error_topic_name: ${pipeline.name}-${component.name}-error\n# Configuration for Schema Registry.\nschema_registry:\n  # Whether the Schema Registry handler should be initialized.\n  enabled: false\n  # Address of the Schema Registry.\n  url: \"http://localhost:8081\"\n# Configuration for the Kafka REST Proxy.\nkafka_rest:\n  # Address of the Kafka REST Proxy.\n  url: \"http://localhost:8082\"\n# Configuration for Kafka Connect.\nkafka_connect:\n  # Address of Kafka Connect.\n  url: \"http://localhost:8083\"\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n  # Name of kubeconfig context (`--kube-context`)\n  context: name\n  # Run Helm in Debug mode.\n  debug: false\n  # Kubernetes API version used for Capabilities.APIVersions\n  api_version: null\n# Configure Helm Diff.\nhelm_diff_config:\n  # Set of keys that should not be checked.\n  ignore:\n    - name\n    - imageTag\n# Whether to retain clean-up jobs in the cluster or uninstall them after\n# completion.\nretain_clean_jobs: false\n

    Environment-specific pipeline definitions

    Similarly to defaults, it is possible to have an unlimited number of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.
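
    For example, alongside pipeline.yaml you could keep a hypothetical pipeline_dev.yaml that is picked up when the dev environment is selected and, similarly to defaults, only contains what differs for that environment (the values below are made up):

    pipeline_dev.yaml
    - type: streams-app-v2\n  name: word-counter\n  values:\n    replicaCount: 2\n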

    "}, {"location": "user/core-concepts/defaults/", "title": "Defaults", "text": "

    KPOps deals with repeated settings very efficiently through defaults.yaml. This file gives you the power to set defaults for any and all components, eliminating the need to repeat the same settings in pipeline.yaml.

    See real-world examples for defaults.

    "}, {"location": "user/core-concepts/defaults/#features", "title": "Features", "text": ""}, {"location": "user/core-concepts/defaults/#inheritance", "title": "Inheritance", "text": "

    An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.

    It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.
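
    As a rough defaults.yaml sketch with hypothetical values: a namespace set under kubernetes-app is inherited by every component below it in the hierarchy (helm-app, streams-app, producer-app), while streams-app only adds what is specific to it.

    defaults.yaml
    kubernetes-app:\n  namespace: my-namespace # inherited by all components that derive from kubernetes-app\nstreams-app:\n  values:\n    streams:\n      brokers: ${config.kafka_brokers}\n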

    "}, {"location": "user/core-concepts/defaults/#configuration", "title": "Configuration", "text": "

    KPOps supports multiple defaults files. The defaults.yaml (or defaults_<env>.yaml) settings can be distributed across multiple files, which KPOps picks up and merges when generating the pipeline. KPOps starts reading defaults from the directory in which the pipeline is defined and picks up every defaults file on its way up to the configured pipeline_base_dir.

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.

    defaults merge priority

    Imagine the following folder structure, where the pipeline_base_dir is configured to pipelines:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    KPOps picks up the defaults in the following order (high to low priority):

    • ./pipelines/distributed-defaults/pipeline-deep/defaults.yaml
    • ./pipelines/distributed-defaults/defaults_dev.yaml
    • ./pipelines/distributed-defaults/defaults.yaml
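
    To make the order above concrete, here is a sketch with hypothetical values: if both defaults files set a namespace under kubernetes-app, the deeper file wins for the pipeline under pipeline-deep.

    pipelines/distributed-defaults/defaults.yaml
    kubernetes-app:\n  namespace: shared-namespace\n

    pipelines/distributed-defaults/pipeline-deep/defaults.yaml
    kubernetes-app:\n  namespace: deep-namespace # takes precedence over shared-namespace\n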
    "}, {"location": "user/core-concepts/defaults/#components", "title": "Components", "text": "

    The defaults code blocks in this section contain the full set of settings that are specific to each component. If a setting already exists in a parent's configuration, it is not repeated in the child's.

    "}, {"location": "user/core-concepts/defaults/#kubernetesapp", "title": "KubernetesApp", "text": "defaults.yaml
    # Base Kubernetes App\n#\n# Parent of: HelmApp\n# Child of: PipelineComponent\nkubernetes-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  values: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/defaults/#streamsapp", "title": "StreamsApp", "text": "defaults.yaml
    # StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n  # No arbitrary keys are allowed under `values` here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  values: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resource.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n
    "}, {"location": "user/core-concepts/defaults/#producerapp", "title": "ProducerApp", "text": "defaults.yaml
    \n
    "}, {"location": "user/core-concepts/defaults/#kafkaconnector", "title": "KafkaConnector", "text": "defaults.yaml
    # Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # Full documentation on connectors: https://kafka.apache.org/documentation/#connectconfigs\n  config: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/defaults/#kafkasourceconnector", "title": "KafkaSourceConnector", "text": "defaults.yaml
    # Kafka source connector\n#\n# Child of: KafkaConnector\nkafka-source-connector:\n  # The source connector has no `from` section\n  # from:\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/defaults/#kafkasinkconnector", "title": "KafkaSinkConnector", "text": "defaults.yaml
    # Kafka sink connector\n#\n# Child of: KafkaConnector\nkafka-sink-connector:\n  # No settings differ from `kafka-connector`\n
    "}, {"location": "user/core-concepts/components/helm-app/", "title": "HelmApp", "text": ""}, {"location": "user/core-concepts/components/helm-app/#usage", "title": "Usage", "text": "

    Can be used to deploy any app in Kubernetes using Helm, for example, a REST service that serves Kafka data.

    "}, {"location": "user/core-concepts/components/helm-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kubernetes app managed through Helm with an associated Helm chart\n- type: helm-app\n  name: helm-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  values: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/helm-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/helm-app/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/helm-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/helm-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/helm-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-connector/", "title": "KafkaConnector", "text": "

    KafkaConnector is a component that deploys Kafka connectors. Since a connector must be either a sink or a source, it is not recommended to use KafkaConnector directly for deployment in pipeline.yaml. Instead, use KafkaConnector in defaults.yaml to set defaults for all connectors in the pipeline, as they can share some common settings.
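
    For instance, a minimal defaults.yaml sketch (hypothetical values) that shares settings between all connectors of a pipeline could look like this:

    defaults.yaml
    kafka-connector:\n  config:\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values, e.g. the image tag\n  resetter_values:\n    imageTag: \"1.2.3\"\n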

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/", "title": "KafkaSinkConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#usage", "title": "Usage", "text": "

    Lets other systems pull data from Apache Kafka.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka sink connector\n- type: kafka-sink-connector\n  name: kafka-sink-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # Full documentation on connectors: https://kafka.apache.org/documentation/#connectconfigs\n  config: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#deploy", "title": "deploy", "text": "
    • Add the sink connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema Registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#destroy", "title": "destroy", "text": "

    The associated sink connector is removed from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#reset", "title": "reset", "text": "

    Reset the consumer group offsets using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#clean", "title": "clean", "text": "
    • Delete associated consumer group
    • Delete configured error topics
    "}, {"location": "user/core-concepts/components/kafka-source-connector/", "title": "KafkaSourceConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#usage", "title": "Usage", "text": "

    Manages source connectors in your Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka source connector\n- type: kafka-source-connector # required\n  name: kafka-source-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # The source connector has no `from` section\n  # from:\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # Full documentation on connectors: https://kafka.apache.org/documentation/#connectconfigs\n  config: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#deploy", "title": "deploy", "text": "
    • Add the source connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema Registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#destroy", "title": "destroy", "text": "

    Remove the source connector from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#reset", "title": "reset", "text": "

    Delete state associated with the connector using bakdata's source resetter.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#clean", "title": "clean", "text": "
    • Delete all associated output topics
    • Delete all associated schemas in the Schema Registry
    • Delete state associated with the connector
    "}, {"location": "user/core-concepts/components/kubernetes-app/", "title": "KubernetesApp", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#usage", "title": "Usage", "text": "

    Can be used to create components for any Kubernetes app.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base Kubernetes App\n- type: kubernetes-app\n  name: kubernetes-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  values: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/components/kubernetes-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#deploy", "title": "deploy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#destroy", "title": "destroy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/overview/", "title": "Overview", "text": "

    This section explains the different components of KPOps, their usage and configuration in the pipeline definition pipeline.yaml.

    flowchart BT\n    KubernetesApp --> PipelineComponent\n    HelmApp --> KubernetesApp\n    StreamsBootstrap --> HelmApp\n    StreamsApp --> StreamsBootstrap\n    ProducerApp --> StreamsBootstrap\n    KafkaConnector --> PipelineComponent\n    KafkaSourceConnector --> KafkaConnector\n    KafkaSinkConnector --> KafkaConnector\n\n    click KubernetesApp \"./../kubernetes-app\"\n    click HelmApp \"./../helm-app\"\n    click StreamsBootstrap \"./../streams-bootstrap\"\n    click StreamsApp \"./../streams-app\"\n    click ProducerApp \"./../producer-app\"\n    click KafkaConnector \"./../kafka-connector\"\n    click KafkaSourceConnector \"./../kafka-source-connector\"\n    click KafkaSinkConnector \"./../kafka-sink-connector\"

    KPOps component hierarchy

    "}, {"location": "user/core-concepts/components/producer-app/", "title": "ProducerApp", "text": "

    Subclass of StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/producer-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka producer app

    "}, {"location": "user/core-concepts/components/producer-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n  name: producer-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # from: # While the producer-app does inherit from kafka-app, it does not need a\n  # `from` section, hence it does not support it.\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n  values: # required\n    streams: # required, producer-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      outputTopic: output_topic\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n    nameOverride: override-with-this-name # kafka-app-specific\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/producer-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/producer-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/producer-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/producer-app/#reset", "title": "reset", "text": "

    Do nothing, producers are stateless.

    "}, {"location": "user/core-concepts/components/producer-app/#clean", "title": "clean", "text": "
    • Delete the output topics of the Kafka producer
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/components/streams-app/", "title": "StreamsApp", "text": "

    Subclass of StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/streams-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka Streams app

    "}, {"location": "user/core-concepts/components/streams-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # StreamsApp component that configures a streams bootstrap app.\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\n- type: streams-app # required\n  name: streams-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  values: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n 
     # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resouce.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/streams-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/streams-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-app/#reset", "title": "reset", "text": "
    • Delete the consumer group offsets
    • Delete Kafka Streams state
    "}, {"location": "user/core-concepts/components/streams-app/#clean", "title": "clean", "text": "

    Similar to reset with two additional steps:

    • Delete the app's output topics
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/components/streams-bootstrap/", "title": "Streams Bootstrap", "text": "

    Subclass of HelmApp.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#usage", "title": "Usage", "text": "
    • Defines a streams-bootstrap component

    • Often used in defaults.yaml

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-bootstrap/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/variables/environment_variables/", "title": "Environment variables", "text": "

    Environment variables can be set by using the export command in Linux or the set command in Windows.

    dotenv files

    KPOps currently supports .env files only for variables related to the config. Full support for .env files is on the roadmap. One possible way to use such a file is to export its contents manually with the following command: export $(xargs < .env). This works in bash, provided there are no spaces inside the values.
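
    For example, on Linux or macOS you could set a single KPOps variable for the current shell session, or export everything from a .env file at once (a minimal sketch; KPOPS_ENVIRONMENT is documented in the CLI section below):

    # set a single KPOps variable for the current shell session\nexport KPOPS_ENVIRONMENT=development\n# export all variables from a .env file (assuming the values contain no spaces)\nexport $(xargs < .env)\n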

    "}, {"location": "user/core-concepts/variables/environment_variables/#config", "title": "Config", "text": "

    These variables take precedence over the settings in config.yaml. Variables marked as required can instead be set in the global config.

    Name Default Value Required Description Setting name KPOPS_PIPELINE_BASE_DIR . False Base directory to the pipelines (default is current working directory) pipeline_base_dir KPOPS_KAFKA_BROKERS True The comma separated Kafka brokers address. kafka_brokers KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME ${pipeline.name}-${component.name} False Configures the value for the variable ${output_topic_name} topic_name_config.default_output_topic_name KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME ${pipeline.name}-${component.name}-error False Configures the value for the variable ${error_topic_name} topic_name_config.default_error_topic_name KPOPS_SCHEMA_REGISTRY__ENABLED False False Whether the Schema Registry handler should be initialized. schema_registry.enabled KPOPS_SCHEMA_REGISTRY__URL http://localhost:8081/ False Address of the Schema Registry. schema_registry.url KPOPS_SCHEMA_REGISTRY__TIMEOUT 30 False Operation timeout in seconds. schema_registry.timeout KPOPS_KAFKA_REST__URL http://localhost:8082/ False Address of the Kafka REST Proxy. kafka_rest.url KPOPS_KAFKA_REST__TIMEOUT 30 False Operation timeout in seconds. kafka_rest.timeout KPOPS_KAFKA_CONNECT__URL http://localhost:8083/ False Address of Kafka Connect. kafka_connect.url KPOPS_KAFKA_CONNECT__TIMEOUT 30 False Operation timeout in seconds. kafka_connect.timeout KPOPS_CREATE_NAMESPACE False False Flag for helm upgrade --install. Create the release namespace if not present. create_namespace KPOPS_HELM_CONFIG__CONTEXT False Name of kubeconfig context (--kube-context) helm_config.context KPOPS_HELM_CONFIG__DEBUG False False Run Helm in Debug mode helm_config.debug KPOPS_HELM_CONFIG__API_VERSION False Kubernetes API version used for Capabilities.APIVersions helm_config.api_version KPOPS_HELM_DIFF_CONFIG__IGNORE True Set of keys that should not be checked. helm_diff_config.ignore KPOPS_RETAIN_CLEAN_JOBS False False Whether to retain clean up jobs in the cluster or uninstall the, after completion. retain_clean_jobs KPOPS_OPERATION_MODE Helm False The operation mode of KPOps operation_mode config_env_vars.env Exhaustive list of all config-related environment variables
    # Global config environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# settings in `config.yaml`. Variables marked as required can instead\n# be set in the global config.\n#\n# pipeline_base_dir\n# Base directory to the pipelines (default is current working\n# directory)\nKPOPS_PIPELINE_BASE_DIR=.\n# kafka_brokers\n# The comma separated Kafka brokers address.\nKPOPS_KAFKA_BROKERS # No default value, required\n# topic_name_config.default_output_topic_name\n# Configures the value for the variable ${output_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}\n# topic_name_config.default_error_topic_name\n# Configures the value for the variable ${error_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error\n# schema_registry.enabled\n# Whether the Schema Registry handler should be initialized.\nKPOPS_SCHEMA_REGISTRY__ENABLED=False\n# schema_registry.url\n# Address of the Schema Registry.\nKPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/\n# schema_registry.timeout\n# Operation timeout in seconds.\nKPOPS_SCHEMA_REGISTRY__TIMEOUT=30\n# kafka_rest.url\n# Address of the Kafka REST Proxy.\nKPOPS_KAFKA_REST__URL=http://localhost:8082/\n# kafka_rest.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_REST__TIMEOUT=30\n# kafka_connect.url\n# Address of Kafka Connect.\nKPOPS_KAFKA_CONNECT__URL=http://localhost:8083/\n# kafka_connect.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_CONNECT__TIMEOUT=30\n# create_namespace\n# Flag for `helm upgrade --install`. Create the release namespace if\n# not present.\nKPOPS_CREATE_NAMESPACE=False\n# helm_config.context\n# Name of kubeconfig context (`--kube-context`)\nKPOPS_HELM_CONFIG__CONTEXT # No default value, not required\n# helm_config.debug\n# Run Helm in Debug mode\nKPOPS_HELM_CONFIG__DEBUG=False\n# helm_config.api_version\n# Kubernetes API version used for `Capabilities.APIVersions`\nKPOPS_HELM_CONFIG__API_VERSION # No default value, not required\n# helm_diff_config.ignore\n# Set of keys that should not be checked.\nKPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required\n# retain_clean_jobs\n# Whether to retain clean up jobs in the cluster or uninstall the,\n# after completion.\nKPOPS_RETAIN_CLEAN_JOBS=False\n# operation_mode\n# The operation mode of KPOps\nKPOPS_OPERATION_MODE=Helm\n
    "}, {"location": "user/core-concepts/variables/environment_variables/#cli", "title": "CLI", "text": "

    These variables take precedence over the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.

    Name Default Value Required Description KPOPS_CONFIG_PATH . False Path to the dir containing config.yaml files KPOPS_DOTENV_PATH False Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. KPOPS_ENVIRONMENT False The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). KPOPS_PIPELINE_PATHS True Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. KPOPS_PIPELINE_STEPS False Comma separated list of steps to apply the command on cli_env_vars.env Exhaustive list of all cli-related environment variables
    # CLI Environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# commands' flags. If a variable is set, the corresponding flag does\n# not have to be specified in commands. Variables marked as required\n# can instead be set as flags.\n#\n# Path to the dir containing config.yaml files\nKPOPS_CONFIG_PATH=.\n# Path to dotenv file. Multiple files can be provided. The files will\n# be loaded in order, with each file overriding the previous one.\nKPOPS_DOTENV_PATH # No default value, not required\n# The environment you want to generate and deploy the pipeline to.\n# Suffix your environment files with this value (e.g.\n# defaults_development.yaml for environment=development).\nKPOPS_ENVIRONMENT # No default value, not required\n# Paths to dir containing 'pipeline.yaml' or files named\n# 'pipeline.yaml'.\nKPOPS_PIPELINE_PATHS # No default value, required\n# Comma separated list of steps to apply the command on\nKPOPS_PIPELINE_STEPS # No default value, not required\n
    "}, {"location": "user/core-concepts/variables/substitution/", "title": "Substitution", "text": "

    KPOps supports the usage of placeholders and environment variables in pipeline definition and defaults.

    "}, {"location": "user/core-concepts/variables/substitution/#component-specific-variables", "title": "Component-specific variables", "text": "

    These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.

    All of them are prefixed with component. and take the form component.{attribute_name}. If the attribute itself contains attributes, they can be referred to like this: component.{attribute_name}.{subattribute_name}.

    Example
    - type: scheduled-producer\n  values:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_schedule: \"${component.values.schedule}\"\n    commandLine:\n      FAKE_ARG: \"fake-arg-value\"\n    schedule: \"30 3/8 * * *\"\n- type: converter\n  values:\n    commandLine:\n      CONVERT_XML: true\n    resources:\n      limits:\n        memory: 2G\n      requests:\n        memory: 2G\n- type: filter\n  name: \"filter-app\"\n  values:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_resources_requests_memory: \"${component.values.resources.requests.memory}\"\n      ${component.type}: \"${component.values.labels.app_name}-${component.values.labels.app_type}\"\n      test_placeholder_in_placeholder: \"${component.values.labels.${component.type}}\"\n    commandLine:\n      TYPE: \"nothing\"\n    resources:\n      requests:\n        memory: 3G\n    replicaCount: 4\n    autoscaling:\n      minReplicas: 4\n      maxReplicas: 4\n
    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-config-specific-variables", "title": "Pipeline-config-specific variables", "text": "

    These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.

    All such variables are prefixed with config. and are of the same form as the component-specific variables.

    Info

    error_topic_name is an alias for config.topic_name_config.default_error_topic_name. output_topic_name is an alias for config.topic_name_config.default_output_topic_name.

    "}, {"location": "user/core-concepts/variables/substitution/#environment-variables", "title": "Environment variables", "text": "

    Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation, following the form ${ENV_VAR_NAME}. This, of course, includes variables like the ones relevant to the KPOps CLI that are exported by the user.

    See all KPOps environment variables

    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-name-variables", "title": "Pipeline name variables", "text": "

    These are special variables that refer to the name and path of a pipeline.

    • ${pipeline.name}: Concatenated path of the parent directory in which pipeline.yaml is defined. For instance, for ./data/pipelines/v1/pipeline.yaml the value of the variable would be data-pipelines-v1 (see the shell sketch after this list).

    • ${pipeline_name_<level>}: Similar to the previous variable, each <level> contains a part of the path to the pipeline.yaml file. Considering the previous example, ${pipeline_name_0} would be data, ${pipeline_name_1} would be pipelines, and ${pipeline_name_2} would be v1.
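
    As a rough shell illustration of this naming convention (not how KPOps derives the value internally, and assuming the path is given relative to the pipeline base directory):

    # derive the ${pipeline.name} value for ./data/pipelines/v1/pipeline.yaml\ndirname ./data/pipelines/v1/pipeline.yaml | cut -c3- | tr / -\n# prints: data-pipelines-v1\n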

    "}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
    1. Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
    2. Chaining variables: It is possible to chain any number of variables, see the example above.
    3. Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references.
    "}, {"location": "user/examples/atm-fraud-pipeline/", "title": "ATM fraud detection pipeline", "text": "

    ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blogpost. The one used in this example is re-built from scratch using bakdata's streams-bootstrap library.

    "}, {"location": "user/examples/atm-fraud-pipeline/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a PostgreSQL database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/examples/atm-fraud-pipeline/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/examples/atm-fraud-pipeline/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postgresql", "title": "PostgreSQL", "text": "

    Deploy PostgreSQL using the Bitnami Helm chart: Add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install PostgreSQL with Helm:

    helm upgrade --install -f ./postgresql.yaml \\\n--namespace kpops \\\npostgresql bitnami/postgresql\n
    PostgreSQL Example Helm chart values (postgresql.yaml)
    auth:\n  database: app_db\n  enablePostgresUser: true\n  password: AppPassword\n  postgresPassword: StrongPassword\n  username: app1\nprimary:\n  persistence:\n    enabled: false\n    existingClaim: postgresql-data-claim\nvolumePermissions:\n  enabled: true\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-detection-example-pipeline-setup", "title": "ATM fraud detection example pipeline setup", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#deploying-the-atm-fraud-detection-pipeline", "title": "Deploying the ATM fraud detection pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps: pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy atm-fraud/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be deployed correctly.
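
    For the pipeline above, a dry run would look like this:

    kpops deploy atm-fraud/pipeline.yaml --dry-run\n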

    "}, {"location": "user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that, open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:

    An overview of ATM fraud pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/examples/atm-fraud-pipeline/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postrgresql", "title": "PostrgreSQL", "text": "

    PostgreSQL can be uninstalled by running the following command:

    helm --namespace kpops uninstall postgresql\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-pipeline", "title": "ATM fraud pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean atm-fraud/pipeline.yaml --verbose  --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/examples/atm-fraud-pipeline/#common-errors", "title": "Common errors", "text": "
    • deploy fails:
    • Read the error message.
    • Try to correct the mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
    • Run clean.
    • Run deploy --dry-run to avoid having to clean again. If an error is dropped, start over from step 1.
    • If the dry-run is successful, run deploy.
    • clean fails:
    • Read the error message.
    • Try to correct the indicated mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
    • Run clean.
    • If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/quick-start/", "title": "Quick start", "text": ""}, {"location": "user/getting-started/quick-start/#word-count", "title": "Word-count", "text": "

    Word-count is a demo pipeline consisting of a producer producing words to Kafka, a Kafka Streams app counting the number of times each word occurs, and finally a Redis database into which the words are exported.

    "}, {"location": "user/getting-started/quick-start/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a Redis database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/getting-started/quick-start/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/getting-started/quick-start/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/getting-started/quick-start/#redis", "title": "Redis", "text": "

    Deploy Redis using the Bitnami Helm chart: Add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install Redis with Helm:

    helm upgrade --install -f ./values-redis.yaml \\\n--namespace kpops \\\nredis bitnami/redis\n
    Redis example Helm chart values (values-redis.yaml)
    architecture: standalone\nauth:\n  enabled: false\nmaster:\n  count: 1\n  configuration: \"databases 1\"\nimage:\n  tag: 7.0.8\n
    "}, {"location": "user/getting-started/quick-start/#word-count-example-pipeline-setup", "title": "Word-count example pipeline setup", "text": ""}, {"location": "user/getting-started/quick-start/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/getting-started/quick-start/#deploying-the-word-count-pipeline", "title": "Deploying the Word-count pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps: pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy word-count/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be deployed correctly.

    "}, {"location": "user/getting-started/quick-start/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser.

    You should be able to see the pipeline shown in the image below:

    An overview of Word-count pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/getting-started/quick-start/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/quick-start/#redis_1", "title": "Redis", "text": "

    Redis can be uninstalled by running the following command:

    helm --namespace kpops uninstall redis\n
    "}, {"location": "user/getting-started/quick-start/#word-count-pipeline", "title": "Word-count pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean word-count/pipeline.yaml --verbose --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to verify that your pipeline will be destroyed correctly.
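
    For example, to preview the clean without executing it:

    kpops clean word-count/pipeline.yaml --verbose --dry-run\n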

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/getting-started/quick-start/#common-errors", "title": "Common errors", "text": "
    • deploy fails:
      1. Read the error message.
      2. Try to correct the mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
      3. Run clean.
      4. Run deploy --dry-run to avoid having to clean again. If an error is dropped, start over from step 1.
      5. If the dry-run is successful, run deploy.
    • clean fails:
      1. Read the error message.
      2. Try to correct the indicated mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
      3. Run clean.
      4. If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/setup/", "title": "Setup KPOps", "text": "

    In this part, you will set up KPOps. This includes:

    • optionally creating a local Kubernetes cluster
    • running Apache Kafka and Confluent's Schema Registry
    • installing KPOps
    "}, {"location": "user/getting-started/setup/#prerequisites", "title": "Prerequisites", "text": "
    • k3d (Version 5.4.6+) and Docker (Version >= v20.10.5) or an existing Kubernetes cluster (>= 1.21.0)
    • kubectl (Compatible with server version 1.21.0)
    • Helm (Version 3.8.0+)
    "}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "

    If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.

    1. You can install k3d with its installation script:

      wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/v5.4.6/install.sh | bash\n

      For other ways of installing k3d, you can have a look at their installation guide.

    2. The Kafka deployment needs a modified Docker image. In that case the image is built and pushed to a Docker registry that holds it. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:

      k3d registry create kpops-registry.localhost --port 12345\n
    3. Now you can create a new cluster called kpops that uses the previously created Docker registry:

      k3d cluster create kpops --k3s-arg \"--no-deploy=traefik@server:*\" --registry-use k3d-kpops-registry.localhost:12345\n

    Note

    Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.

    You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.
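
    For reference, the command mentioned above:

    kubectl get pods -n kube-system\n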

    "}, {"location": "user/getting-started/setup/#deploy-kafka", "title": "Deploy Kafka", "text": "

    Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.

    1. To allow connectivity to other systems Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:

      1. Create a Dockerfile with the following content:

        FROM confluentinc/cp-kafka-connect:7.1.3\n\nRUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0\n
      2. Build and push the modified image to your private Docker registry:

        docker build . --tag localhost:12345/kafka-connect-jdbc:7.1.3 && \\\ndocker push localhost:12345/kafka-connect-jdbc:7.1.3\n

      Detailed instructions on building, tagging and pushing a docker image can be found in Docker docs.

    2. Add Confluent's Helm chart repository and update the index:

      helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/ && \\\nhelm repo update\n
    3. Install Kafka, Zookeeper, Confluent's Schema Registry, Kafka Rest Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example for the --values ./kafka.yaml file configuring the deployment accordingly. Deploy the services:

      helm upgrade \\\n    --install \\\n    --version 0.6.1 \\\n    --values ./kafka.yaml \\\n    --namespace kpops \\\n    --create-namespace \\\n    --wait \\\n    k8kafka confluentinc/cp-helm-charts\n
    Kafka Helm chart values (kafka.yaml)

    An example value configuration for Confluent's Helm chart. This configuration deploys a single Kafka Broker, a Schema Registry, Zookeeper, Kafka Rest Proxy, and Kafka Connect with minimal resources.

    cp-zookeeper:\n  enabled: true\n  servers: 1\n  imageTag: 7.1.3\n  heapOptions: \"-Xms124M -Xmx124M\"\n  overrideGroupId: k8kafka\n  fullnameOverride: \"k8kafka-cp-zookeeper\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.2G\n    limits:\n      cpu: 250m\n      memory: 0.2G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka:\n  enabled: true\n  brokers: 1\n  imageTag: 7.1.3\n  podManagementPolicy: Parallel\n  configurationOverrides:\n    \"auto.create.topics.enable\": false\n    \"offsets.topic.replication.factor\": 1\n    \"transaction.state.log.replication.factor\": 1\n    \"transaction.state.log.min.isr\": 1\n    \"confluent.metrics.reporter.topic.replicas\": 1\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.5G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n  persistence:\n    enabled: false\n\ncp-schema-registry:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-schema-registry\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.25G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-connect:\n  enabled: true\n  replicaCount: 1\n  image: k3d-kpops-registry.localhost:12345/kafka-connect-jdbc\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-kafka-connect\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 500m\n      memory: 0.25G\n    limits:\n      cpu: 500m\n      memory: 0.25G\n  configurationOverrides:\n    \"consumer.max.poll.records\": \"10\"\n    \"consumer.max.poll.interval.ms\": \"900000\"\n    \"config.storage.replication.factor\": \"1\"\n    \"offset.storage.replication.factor\": \"1\"\n    \"status.storage.replication.factor\": \"1\"\n  cp-schema-registry:\n    url: http://k8kafka-cp-schema-registry:8081\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-rest:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-rest\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-ksql-server:\n  enabled: false\ncp-control-center:\n  enabled: false\n
    "}, {"location": "user/getting-started/setup/#deploy-streams-explorer", "title": "Deploy Streams Explorer", "text": "

    Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster including the inspection of schemas and monitoring of metrics. First, add the Helm repository:

    helm repo add streams-explorer https://bakdata.github.io/streams-explorer && \\\nhelm repo update\n

    Below you can find an example for the --values ./streams-explorer.yaml file configuring the deployment accordingly. Now, deploy the service:

    helm upgrade \\\n    --install \\\n    --version 0.2.3 \\\n    --values ./streams-explorer.yaml \\\n    --namespace kpops \\\n    streams-explorer streams-explorer/streams-explorer\n
    Streams Explorer Helm chart values (streams-explorer.yaml)

    An example value configuration for the Streams Explorer Helm chart.

    imageTag: \"v2.1.2\"\nconfig:\n   K8S__deployment__cluster: true\n   SCHEMAREGISTRY__url: http://k8kafka-cp-schema-registry.kpops.svc.cluster.local:8081\n   KAFKACONNECT__url: http://k8kafka-cp-kafka-connect.kpops.svc.cluster.local:8083\nresources:\n   requests:\n       cpu: 200m\n       memory: 300Mi\n   limits:\n       cpu: 200m\n       memory: 300Mi\n
    "}, {"location": "user/getting-started/setup/#check-the-status-of-your-deployments", "title": "Check the status of your deployments", "text": "

    Now we will check if all the pods are running in our namespace. You can list all pods in the namespace with this command:

    kubectl --namespace kpops get pods\n

    Then you should see the following output in your terminal:

    NAME                                          READY   STATUS    RESTARTS   AGE\nk8kafka-cp-kafka-connect-8fc7d544f-8pjnt      1/1     Running   0          15m\nk8kafka-cp-zookeeper-0                        1/1     Running   0          15m\nk8kafka-cp-kafka-0                            1/1     Running   0          15m\nk8kafka-cp-schema-registry-588f8c65db-jdwbq   1/1     Running   0          15m\nk8kafka-cp-rest-6bbfd7b645-nwkf8              1/1     Running   0          15m\nstreams-explorer-54db878c67-s8wbz             1/1     Running   0          15m\n

    Pay attention to the STATUS column. The pods should have a status of Running.
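
    One quick way to spot pods that are not in the Running phase (a sketch, assuming the kpops namespace used throughout this guide; completed jobs would also show up here as Succeeded):

    kubectl --namespace kpops get pods --field-selector=status.phase!=Running\n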

    "}, {"location": "user/getting-started/setup/#install-kpops", "title": "Install KPOps", "text": "

    KPOps comes as a PyPI package. You can install it with pip:

    pip install kpops\n
    "}, {"location": "user/getting-started/teardown/", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/teardown/#kpops-teardown-commands", "title": "KPOps teardown commands", "text": "
    • destroy: Removes Kubernetes resources.
    • reset: Runs destroy, resets the states of Kafka Streams apps and resets offsets to zero.
    • clean: Runs reset and removes all Kafka resources.
    "}, {"location": "user/getting-started/teardown/#kpops-deployed-pipeline", "title": "KPOps-deployed pipeline", "text": "

    The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with helm (see section Infrastructure).

    1. Export environment variables.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Navigate to the examples folder. Replace the <name-of-the-example-directory> with the example you want to tear down. For example the atm-fraud-detection.

    3. Remove the pipeline

      # Uncomment 1 line to either destroy, reset or clean.\n\n# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \\\n--config <name-of-the-example-directory>/config.yaml \\\n--execute\n
    "}, {"location": "user/getting-started/teardown/#infrastructure", "title": "Infrastructure", "text": "

    Delete namespace:

    kubectl delete namespace kpops\n

    Note

    In case kpops destroy is not working, one can uninstall the pipeline services one by one. This is equivalent to running kpops destroy. If a clean uninstall (like the one kpops clean performs) is needed, one also needs to delete the topics and schemas created by the deployment of the pipeline.
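
    A minimal sketch of such a manual teardown, assuming the pipeline was deployed to the kpops namespace used in this guide; <release-name> is a placeholder for each release reported by helm list:

    # list the Helm releases belonging to the pipeline\nhelm --namespace kpops list\n# uninstall them one by one\nhelm --namespace kpops uninstall <release-name>\n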

    "}, {"location": "user/getting-started/teardown/#local-cluster", "title": "Local cluster", "text": "

    Delete local cluster:

    k3d cluster delete kpops\n
    "}, {"location": "user/getting-started/teardown/#local-image-registry", "title": "Local image registry", "text": "

    Delete local registry:

    k3d registry delete k3d-kpops-registry.localhost\n
    "}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "

    KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention, the type is the lowercase, kebab-cased name of the class.

    class MyCoolStreamApp(StreamsApp):\n-    type = \"my-cool-stream-app\"\n+    ...\n

    Because of this new convention producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.

    - producer:\n+ producer-app:\n    app:\n        streams:\n        outputTopic: output_topic\n        extraOutputTopics:\n            output_role1: output_topic1\n            output_role2: output_topic2\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-inputoutput-types", "title": "Refactor input/output types", "text": ""}, {"location": "user/migration-guide/v1-v2/#to-section", "title": "To section", "text": "

    In the to section these have changed:

    • The default type is output
    • If role is set, type is inferred to be extra
    • The type error needs to be defined explicitly
      to:\n    topics:\n      ${pipeline_name}-topic-1:\n-       type: extra\n        role: \"role-1\"\n        ...\n      ${pipeline_name}-topic-2:\n-       type: output\n        ...\n      ${pipeline_name}-topic-3:\n         type: error\n         ...\n
    "}, {"location": "user/migration-guide/v1-v2/#from-section", "title": "From section", "text": "

    In the from section these have changed:

    • The default type is input
    • input-pattern type is replaced by pattern
    • If role is set, type is inferred to be extra
    • If role is set and type is explicitly set to pattern, the type is inferred to be extra-pattern
      from:\n    topics:\n      ${pipeline_name}-input-topic:\n-       type: input\n        ...\n      ${pipeline_name}-extra-topic:\n-       type: extra\n        role: topic-role\n        ...\n      ${pipeline_name}-input-pattern-topic:\n-       type: input-pattern\n+       type: pattern\n        ...\n      ${pipeline_name}-extra-pattern-topic:\n-       type: extra-pattern\n+       type: pattern\n        role: some-role\n        ...\n
    "}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "

    All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example of a pipeline.yaml in the following. Notice that the app section here remains untouched.

    ...\ntype: streams-app\n  name: streams-app\n  namespace: namespace\n  app:\n    streams:\n      brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n     autoscaling:\n      consumerGroup: consumer-group\n      lagThreshold: 0\n      enabled: false\n      pollingInterval: 30\n\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: error\n-       keySchema: key-schema\n+       key_schema: key-schema\n-       valueSchema: value-schema\n+       value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs:\n          cleanup.policy: compact\n    models:\n      model: model\n  prefix: ${pipeline_name}-\n- repoConfig:\n+ repo_config:\n-   repositoryName: bakdata-streams-bootstrap\n+   repository_name: bakdata-streams-bootstrap\n    url: https://bakdata.github.io/streams-bootstrap/\n-   repoAuthFlags:\n+   repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.4\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-handling-of-helm-flags", "title": "Refactor handling of Helm flags", "text": "

    If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, the abstract function get_helm_chart that returns the chart for deploying the app using Helm is now a Python property and renamed to helm_chart.

    class MyCoolApp(KubernetesApp):\n\n+   @property\n    @override\n-   def get_helm_chart(self) -> str:\n+   def helm_chart(self) -> str:\n        return \"./charts/charts-folder\"\n
    "}, {"location": "user/migration-guide/v1-v2/#plural-broker-field-in-pipeline-config", "title": "Plural broker field in pipeline config", "text": "

    Since you can pass a comma-separated string of broker addresses, the broker field in KPOps is now plural. The pluralization has affected multiple areas:

    "}, {"location": "user/migration-guide/v1-v2/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- broker: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  kafka_connect_host: \"http://localhost:8083\"\n  kafka_rest_host: \"http://localhost:8082\"\n  schema_registry_url: \"http://localhost:8081\"\n
    "}, {"location": "user/migration-guide/v1-v2/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called brokers.

    ...\n  app:\n    streams:\n-     brokers: ${broker}\n+     brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#environment-variable", "title": "Environment variable", "text": "

    If you previously set the environment variable KPOPS_KAFKA_BROKER, you now need to replace it with KPOPS_KAFKA_BROKERS.
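
    For example, assuming the broker address used elsewhere in this guide:

    - export KPOPS_KAFKA_BROKER=\"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ export KPOPS_KAFKA_BROKERS=\"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n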

    "}, {"location": "user/migration-guide/v2-v3/", "title": "Migrate from V2 to V3", "text": "

    Jump to the summary

    "}, {"location": "user/migration-guide/v2-v3/#use-hash-and-trim-long-helm-release-names-instead-of-only-trimming", "title": "Use hash and trim long Helm release names instead of only trimming", "text": "

    KPOps now handles long (more than 53 characters) Helm release names differently. Helm will not find your (long) old release names anymore. Therefore, it is recommended to destroy your pipeline once with KPOps v2 to remove the old Helm release names. After a clean destroy, re-deploy your pipeline with KPOps v3.
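
    A sketch of that sequence (the pipeline path and the version specifier are placeholders):

    # while KPOps v2 is still installed, remove the old Helm releases\nkpops destroy pipeline.yaml --execute\n# upgrade to KPOps v3 and re-deploy\npip install --upgrade \"kpops>=3,<4\"\nkpops deploy pipeline.yaml --execute\n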

    For example, suppose you have a component with the Helm release name example-component-name-too-long-fake-fakefakefakefakefake. The new release name shortens the original name to 53 characters and then replaces the last 6 characters of the trimmed name with a dash followed by the first 5 characters of SHA-1(helm_release_name).

    example-component-name-too-long-fake-fakefakef-0a7fc ----> 53 chars\n---------------------------------------------- -----\n  ^Shortened helm_release_name                 ^first 5 characters of SHA1(helm_release_name)\n
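
    A rough bash illustration of the scheme (the release name is a placeholder; the exact hashing and encoding details in KPOps may differ):

    name=my-very-long-helm-release-name-that-exceeds-the-53-character-limit\nhash=$(printf '%s' \"$name\" | sha1sum | cut -c1-5)\necho \"${name:0:47}-${hash}\" # 47 kept characters + '-' + 5 hash characters = 53\n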
    "}, {"location": "user/migration-guide/v2-v3/#create-helmapp-component", "title": "Create HelmApp component", "text": "

    All Helm-specific parts of the built-in KubernetesApp have been extracted to a new child component that is more appropriately named HelmApp. It has to be renamed in your existing pipeline definitions and custom components module.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml", "title": "pipeline.yaml", "text": "
    -- type: kubernetes-app\n+- type: helm-app\n   name: foo\n
    "}, {"location": "user/migration-guide/v2-v3/#custom_modulepy", "title": "custom_module.py", "text": "
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n      ...\n
    "}, {"location": "user/migration-guide/v2-v3/#create-streamsbootstrap-component-refactor-cleanup-jobs-as-individual-helmapp", "title": "Create StreamsBootstrap component & refactor cleanup jobs as individual HelmApp", "text": "

    Previously the default KafkaApp component configured the streams-bootstrap Helm Charts. Now, this component is no longer tied to Helm (or Kubernetes). Instead, there is a new StreamsBootstrap component that configures the Helm Chart repository for the components that use it, e.g. StreamsApp and ProducerApp. If you are using non-default values for the Helm Chart repository or version, it has to be updated as shown below.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml", "title": "defaults.yaml", "text": "
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-kafka-connector-resetter-as-individual-helmapp", "title": "Refactor Kafka Connector resetter as individual HelmApp", "text": "

    Internally, the Kafka Connector resetter is now its own standard HelmApp, removing a lot of the shared code. It is configured using the resetter_namespace (formerly namespace) and resetter_values attributes.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml_1", "title": "defaults.yaml", "text": "
      kafka-connector:\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n
    "}, {"location": "user/migration-guide/v2-v3/#make-kafka-rest-proxy-kafka-connect-hosts-default-and-improve-schema-registry-config", "title": "Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config", "text": "

    The breaking changes target the config.yaml file:

    • The schema_registry_url is replaced with schema_registry.url (default http://localhost:8081) and schema_registry.enabled (default false).

    • kafka_rest_host is renamed to kafka_rest.url (default http://localhost:8082).

    • kafka_connect_host is replaced with kafka_connect.url (default http://localhost:8083).

    • brokers is renamed to kafka_brokers.

    The environment variable names of these config fields have changed accordingly. Please refer to the environment variables documentation page to see the newest changes.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called kafka_brokers.

    ...\n  app:\n    streams:\n-     brokers: ${brokers}\n+     brokers: ${kafka_brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v2-v3/#define-custom-components-module-pipeline-base-dir-globally", "title": "Define custom components module & pipeline base dir globally", "text": "

    Warning

    The previous CLI parameters have been removed.

    The options for a custom components_module and pipeline_base_dir are now global settings, defined in config.yaml.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_1", "title": "config.yaml", "text": "
      kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  environment: development\n+ components_module: components\n+ pipeline_base_dir: pipelines\n
    "}, {"location": "user/migration-guide/v2-v3/#move-github-action-to-repsitory-root", "title": "Move GitHub action to repsitory root", "text": "

    The location of the GitHub action has changed, and it's now available directly as bakdata/kpops.

    You'll need to change it in your GitHub CI workflows.

    steps:\n  - name: kpops deploy\n-   uses: bakdata/kpops/actions/kpops-runner@main\n+   uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      # ...\n
    "}, {"location": "user/migration-guide/v2-v3/#allow-overriding-config-files", "title": "Allow overriding config files", "text": "

    Specifying the environment is no longer mandatory. If not defined, only the global files will be used.

    environment is no longer specified in config.yaml. Instead, it can be set either via the CLI flag --environment or with the environment variable KPOPS_ENVIRONMENT.

    The --config flag in the CLI now points to the directory that contains config*.yaml files. The files to be used are resolved based on the provided environment (if any).

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_2", "title": "config.yaml", "text": "
    - environment: development\n  kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#change-substitution-variables-separator-to", "title": "Change substitution variables separator to .", "text": "

    The delimiter in the substitution variables has changed to . (dot).

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml_1", "title": "pipeline.yaml and default.yaml", "text": "
    steps:\n  - type: scheduled-producer\n    app:\n      labels:\n-       app_type: \"${component_type}\"\n-       app_name: \"${component_name}\"\n-       app_schedule: \"${component_app_schedule}\"\n+       app_type: \"${component.type}\"\n+       app_name: \"${component.name}\"\n+       app_schedule: \"${component.app.schedule}\"\n
    "}, {"location": "user/migration-guide/v2-v3/#configyaml_3", "title": "config.yaml", "text": "
    topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n- default_output_topic_name: \"${pipeline_name}-${component_name}-topic\"\n+ default_error_topic_name: \"${pipeline_name}-${component.name}-dead-letter-topic\"\n+ default_output_topic_name: \"${pipeline_name}-${component.name}-topic\"\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-generate-template-for-python-api-usage", "title": "Refactor generate template for Python API usage", "text": "

    The template method of every pipeline component has been renamed to manifest as it is no longer strictly tied to Helm template. Instead, it can be used to render the final resources of a component, such as Kubernetes manifests.

    There is also a new kpops manifest command replacing the existing kpops generate --template flag.

    If you're using this functionality in your custom components, it needs to be updated.

      from kpops.components.base_components.models.resource import Resource\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n
    "}, {"location": "user/migration-guide/v2-v3/#namespace-substitution-vars", "title": "Namespace substitution vars", "text": "

    The global configuration variables are now namespaced under the config key, such as ${config.kafka_brokers}, ${config.schema_registry.url}. The same applies to pipeline variables, e.g. ${pipeline_name} \u2192 ${pipeline.name}. This makes them more uniform with the existing ${component.<key>} variables.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml_1", "title": "pipeline.yaml", "text": "
      name: kafka-app\n- prefix: ${pipeline_name}-\n+ prefix: ${pipeline.name}-\n  app:\n    streams:\n-     brokers: ${kafka_brokers}\n-     schemaRegistryUrl: ${schema_registry.url}\n+     brokers: ${config.kafka_brokers}\n+     schemaRegistryUrl: ${config.schema_registry.url}\n
    "}, {"location": "user/migration-guide/v2-v3/#summary", "title": "Summary", "text": "

    Warning

    Helm will not find your (long) old release names anymore.

    defaults.yaml
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    pipeline.yaml
    - - type: kubernetes-app\n+ - type: helm-app\n  ...\n  - type: kafka-app\n    app:\n-     brokers: ${brokers}\n+     brokers: ${config.kafka_brokers}\n      labels:\n-       app_schedule: \"${component_app_schedule}\"\n+       app_schedule: \"${component.app.schedule}\"\n  ...\n  - type: kafka-connector\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n  ...\n
    config.yaml
    - environment: development\n\n+ components_module: components\n\n+ pipeline_base_dir: pipelines\n\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n\n  topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n+ default_error_topic_name: \"${pipeline.name}-${component.name}-dead-letter-topic\"\n  ...\n
    custom_module.py
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n+ from kpops.components.base_components.models.resource import Resource\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n  ...\n
    github_ci_workflow.yaml
      steps:\n    - name: ...\n-     uses: bakdata/kpops/actions/kpops-runner@main\n+     uses: bakdata/kpops@main\n  ...\n
    "}, {"location": "user/migration-guide/v3-v4/", "title": "Migrate from V3 to V4", "text": ""}, {"location": "user/migration-guide/v3-v4/#distribute-defaults-across-multiple-files", "title": "Distribute defaults across multiple files", "text": "

    Warning

    The --defaults flag is removed

    It is now possible to use multiple defaults files. The defaults.yaml (or defaults_<env>.yaml) values can be distributed across multiple files. These are picked up by KPOps and merged into a single pipeline.yaml file. KPOps starts reading the defaults files from the directory containing the pipeline and picks up every defaults file on its way up to the directory defined as pipeline_base_dir.

    For example, imagine the following folder structure:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    The pipeline_base_dir is set to pipelines. If we now generate this pipeline with the following command:

    kpops generate \\\n      --environment dev\n      ./pipelines/distributed-defaults/pipeline-deep/pipeline.yaml\n

    The defaults would be picked up in the following order (highest to lowest priority):

    • ./pipelines/distributed-defaults/pipeline-deep/defaults.yaml
    • ./pipelines/distributed-defaults/defaults_dev.yaml
    • ./pipelines/distributed-defaults/defaults.yaml

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    "}, {"location": "user/migration-guide/v4-v5/", "title": "Migrate from V4 to V5", "text": ""}, {"location": "user/migration-guide/v4-v5/#allow-custom-timeout-for-external-services", "title": "Allow custom timeout for external services", "text": "

    The global timeout setting has been removed. Instead, an individual timeout can be set for each external service. The default is 30 seconds.

    "}, {"location": "user/migration-guide/v4-v5/#configyaml", "title": "config.yaml", "text": "
    - timeout: 300\n\n  kafka_rest:\n    url: \"http://my-custom-rest.url:8082\"\n+   timeout: 30\n  kafka_connect:\n    url: \"http://my-custom-connect.url:8083\"\n+   timeout: 30\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n+   timeout: 30\n
    "}, {"location": "user/migration-guide/v5-v6/", "title": "Migrate from V5 to V6", "text": ""}, {"location": "user/migration-guide/v5-v6/#deploy-multiple-pipelines", "title": "Deploy multiple pipelines", "text": "

    KPOps can now deploy multiple pipelines in a single command. You can pass one or more pipeline.yaml files, or a directory containing multiple pipeline.yaml files.

    The environment variable KPOPS_PIPELINE_PATH is changed to KPOPS_PIPELINE_PATHS.

    Read more:

    • CLI Usage
    • Environment variables
    "}, {"location": "user/migration-guide/v5-v6/#separate-kpops-api-from-the-cli", "title": "Separate KPOps API from the CLI", "text": "

    The KPOps Python API is now stable and separated from the CLI! \ud83c\udf89

    "}, {"location": "user/migration-guide/v6-v7/", "title": "Migrate from V6 to V7", "text": ""}, {"location": "user/migration-guide/v6-v7/#automatic-loading-of-namespaced-custom-components", "title": "Automatic loading of namespaced custom components", "text": "

    KPOps is now distributed as a Python namespace package (as defined by PEP 420). This allows us to standardize the namespace kpops.components for both builtin and custom pipeline components.

    As a result of the restructure, some imports need to be adjusted:

    KPOps Python API

    - import kpops\n+ import kpops.api as kpops\n

    builtin KPOps components

    - from kpops.components import (\n-     HelmApp,\n-     KafkaApp,\n-     KafkaConnector,\n-     KafkaSinkConnector,\n-     KafkaSourceConnector,\n-     KubernetesApp,\n-     StreamsBootstrap,\n-     ProducerApp,\n-     StreamsApp,\n-     PipelineComponent,\n-     StreamsApp,\n-     ProducerApp,\n- )\n+ from kpops.components.base_components import (\n+     HelmApp,\n+     KafkaApp,\n+     KafkaConnector,\n+     KafkaSinkConnector,\n+     KafkaSourceConnector,\n+     KubernetesApp,\n+     PipelineComponent,\n+ )\n+ from kpops.components.streams_bootstrap import (\n+     StreamsBootstrap,\n+     StreamsApp,\n+     ProducerApp,\n+ )\n
    "}, {"location": "user/migration-guide/v6-v7/#your-custom-kpops-components", "title": "your custom KPOps components", "text": ""}, {"location": "user/migration-guide/v6-v7/#configyaml", "title": "config.yaml", "text": "
    - components_module: components\n
    "}, {"location": "user/migration-guide/v6-v7/#python-module", "title": "Python module", "text": "
    - components/__init__.py\n+ kpops/components/custom/__init__.py\n
    "}, {"location": "user/migration-guide/v6-v7/#rename-app-field", "title": "Rename app field", "text": "

    The app attribute of the builtin KPOps components has been renamed to better differentiate them. Both your pipeline.yaml and defaults.yaml files have to be updated, e.g.:

    "}, {"location": "user/migration-guide/v6-v7/#defaultsyaml", "title": "defaults.yaml", "text": "
      kubernetes-app:\n-   app: {}\n+   values: {}\n\n  helm-app:\n-   app: {}\n+   values: {}\n\n  kafka-app:\n-   app: {}\n+   values: {}\n\n  streams-app:\n-   app: {}\n+   values: {}\n\n  producer-app:\n-   app: {}\n+   values: {}\n\n  kafka-connector:\n-   app: {}\n+   config: {}\n\n  kafka-source-connector:\n-   app: {}\n+   config: {}\n\n  kafka-sink-connector:\n-   app: {}\n+   config: {}\n
    "}, {"location": "user/migration-guide/v6-v7/#call-destroy-from-inside-of-reset-or-clean", "title": "Call destroy from inside of reset or clean", "text": "

    Before v7, the KPOps CLI executed destroy before running reset/clean to ensure the component was destroyed.

    This logic has changed. The destroy method is now called within the PipelineComponent's reset/clean.

    When migrating to v7, check whether your custom components override the reset/clean methods. If so, you need to call the parent class's reset/clean (super().reset()/super().clean()) to trigger the destroy inside the parent class. Alternatively, if you are subclassing PipelineComponent directly, you need to call the destroy method at the beginning of your overridden reset/clean.

    "}, {"location": "user/migration-guide/v6-v7/#componentspy", "title": "components.py", "text": "

    For example, when creating a custom StreamsApp or ProducerApp (or any other custom component), you must call the parent's reset/clean so that the destroy in the parent class is executed. Otherwise, the destroy logic will not run!

    class MyStreamsApp(StreamsApp):\n\n    @override\n    async def clean(self, dry_run: bool) -> None:\n+       await super().clean(dry_run)\n        # Some custom clean logic\n        # ...\n\n\nclass MyCustomComponent(PipelineComponent):\n\n    @override\n    async def destroy(self, dry_run: bool) -> None:\n        # Some custom destroy logic\n        # ...\n\n    @override\n    async def clean(self, dry_run: bool) -> None:\n+       await super().clean(dry_run)\n        # Some custom clean logic\n        # ...\n
    "}, {"location": "user/migration-guide/v7-v8/", "title": "Migrate from V7 to V8", "text": ""}, {"location": "user/migration-guide/v7-v8/#add-support-for-streams-bootstrap-v3", "title": "Add support for streams-bootstrap v3", "text": "

    From now on, KPOps supports streams-bootstrap v3 as its default component. The previous streams-bootstrap version (below 3.x.x) is marked as deprecated and will be removed in a future version of KPOps. If you don't want to migrate your producer or streams apps to v3, you should suffix your components with -v2. Here is an example of a pipeline.yaml file.

    "}, {"location": "user/migration-guide/v7-v8/#pipelineyaml", "title": "pipeline.yaml", "text": "
    - - type: producer-app\n+ - type: producer-app-v2\n\n- - type: streams-app\n+ - type: streams-app-v2\n\n# rest of your pipeline\n
    "}, {"location": "user/migration-guide/v7-v8/#my-componentspy", "title": "my-components.py", "text": "
    - class MyStreamsApp(StreamsApp):\n+ class MyStreamsApp(StreamsAppV2):\n    ...\n

    Info

    The streams-bootstrap, streams-app, and producer-app components now all take the Helm values of streams-bootstrap version 3. You can find these values in the Helm charts documentation or by referring to the Base model definitions.

    "}, {"location": "user/migration-guide/v7-v8/#rename-role-to-label", "title": "Rename role to label", "text": "

    The keyword role is renamed to label. You need to replace it in your pipeline.yaml, defaults.yaml, and the Python components definition files. Here is a simple example of the defaults.yaml.

    "}, {"location": "user/migration-guide/v7-v8/#defaultsyaml", "title": "defaults.yaml", "text": "
    streams-app-v2:\n  values:\n    streams:\n      brokers: localhost:9092\n  from:\n    topics:\n      my-labeled-input-topic:\n-       role: my-input-topic-label\n+       label: my-input-topic-label\n      my-labeled-input-pattern:\n        type: pattern\n-       role: my-input-topic-labeled-pattern\n+       label: my-input-topic-labeled-pattern\n\n  to:\n    topics:\n      my-labeled-topic-output:\n-       role: my-output-topic-label\n+       label: my-output-topic-label\n\n# rest of your pipeline\n
    "}, {"location": "user/migration-guide/v7-v8/#make-kafkaapp-responsible-for-deployingcleaning-streams-bootstrap-components", "title": "Make KafkaApp responsible for deploying/cleaning streams bootstrap components", "text": "

    The KafkaApp component now only contains the deployment logic of the streams-bootstrap applications (streams-app, producer-app). It should not be used in the defaults.yaml or the pipeline.yaml. If you are using it, it should be replaced by streams-bootstrap.

    "}, {"location": "user/migration-guide/v7-v8/#defaultsyaml_1", "title": "defaults.yaml", "text": "
    - kafka-app:\n+ streams-bootstrap-v2:\n    values:\n      streams:\n        brokers: 127.0.0.1:9092\n        schemaRegistryUrl: 127.0.0.1:8081\n
    "}, {"location": "user/references/cli-commands/", "title": "CLI Usage", "text": "

    Usage:

    $ kpops [OPTIONS] COMMAND [ARGS]...\n

    Options:

    • -V, --version: Print KPOps version
    • --install-completion: Install completion for the current shell.
    • --show-completion: Show completion for the current shell, to copy it or customize the installation.
    • --help: Show this message and exit.

    Commands:

    • clean: Clean pipeline steps
    • deploy: Deploy pipeline steps
    • destroy: Destroy pipeline steps
    • generate: Generate enriched pipeline representation
    • init: Initialize a new KPOps project.
    • manifest: Render final resource representation
    • reset: Reset pipeline steps
    • schema: Generate JSON schema.
    "}, {"location": "user/references/cli-commands/#kpops-clean", "title": "kpops clean", "text": "

    Clean pipeline steps

    Usage:

    $ kpops clean [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-deploy", "title": "kpops deploy", "text": "

    Deploy pipeline steps

    Usage:

    $ kpops deploy [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-destroy", "title": "kpops destroy", "text": "

    Destroy pipeline steps

    Usage:

    $ kpops destroy [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-generate", "title": "kpops generate", "text": "

    Enrich pipeline steps with defaults. The enriched pipeline is used for all KPOps operations (deploy, destroy, ...).

    Usage:

    $ kpops generate [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-init", "title": "kpops init", "text": "

    Initialize a new KPOps project.

    Usage:

    $ kpops init [OPTIONS] PATH\n

    Arguments:

    • PATH: Path for a new KPOps project. It should lead to an empty (or non-existent) directory. The part of the path that doesn't exist will be created. [required]

    Options:

    • --config-include-opt / --no-config-include-opt: Whether to include non-required settings in the generated 'config.yaml' [default: no-config-include-opt]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-manifest", "title": "kpops manifest", "text": "

    In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.

    Usage:

    $ kpops manifest [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-reset", "title": "kpops reset", "text": "

    Reset pipeline steps

    Usage:

    $ kpops reset [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-schema", "title": "kpops schema", "text": "

    Generate JSON schema.

    The schemas can be used to enable support for KPOps files in a text editor.

    Usage:

    $ kpops schema [OPTIONS] SCOPE:{pipeline|defaults|config}\n

    Arguments:

    • SCOPE:{pipeline|defaults|config}: Scope of the generated schema
      - pipeline: Schema of PipelineComponents for KPOps pipeline.yaml\n\n- defaults: Schema of PipelineComponents for KPOps defaults.yaml\n\n- config: Schema for KPOps config.yaml  [required]\n

    Options:

    • --help: Show this message and exit.
    "}, {"location": "user/references/editor-integration/", "title": "Editor integration", "text": ""}, {"location": "user/references/editor-integration/#native", "title": "Native", "text": "

    We are working towards first-class editor support by providing plugins that work out of the box.

    • Neovim: kpops.nvim
    • Visual Studio Code: planned
    "}, {"location": "user/references/editor-integration/#manual-for-unsupported-editors-with-lsp", "title": "Manual (for unsupported editors with LSP)", "text": "
    1. Install the yaml-language-server in your editor of choice (requires LSP support).
    2. Configure the extension with the settings below.
    settings.json
    {\n    \"yaml.schemas\": {\n        \"https://bakdata.github.io/kpops/4.0/schema/pipeline.json\": [\n            \"pipeline.yaml\",\n            \"pipeline_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/defaults.json\": [\n            \"defaults.yaml\",\n            \"defaults_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/config.json\": [\n            \"config.yaml\",\n            \"config_*.yaml\"\n        ]\n    }\n}\n

    Advanced usage

    It is possible to generate schemas with the kpops schema command. This is useful for including custom components or when using a pre-release version of KPOps.

    "}, {"location": "user/references/editor-integration/#concepts", "title": "Concepts", "text": "

    KPOps provides JSON schemas that enable autocompletion and validation for all YAML files that the user must work with.

    "}, {"location": "user/references/editor-integration/#supported-files", "title": "Supported files", "text": "
    • pipeline.yaml
    • defaults.yaml
    • config.yaml
    "}, {"location": "user/references/ci-integration/github-actions/", "title": "GitHub Actions integration", "text": "

    We provide a GitHub composite action, bakdata/kpops, that installs and executes KPOps commands with the given parameters.

    "}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines config \u274c - string Directory containing the config*.yaml file(s) environment \u274c - string Environment to run KPOps in components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS parallel \u274c \"false\" string Whether to run pipelines in parallel python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
    steps:\n  # ...\n  # This step is useful for debugging reasons\n  - name: Generate Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: generate\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n\n  # It is possible to use a pre-release KPOps version from TestPyPI https://test.pypi.org/project/kpops/#history\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.5.dev20230707132709\n  # ...\n
    "}]} \ No newline at end of file +{"config": {"lang": ["en"], "separator": "[\\s\\-]+", "pipeline": ["stopWordFilter"]}, "docs": [{"location": "developer/api/", "title": "Python API", "text": ""}, {"location": "developer/api/#kpops.api", "title": "kpops.api", "text": ""}, {"location": "developer/api/#kpops.api.clean", "title": "clean", "text": "
    clean(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource] | None\n

    Clean pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def clean(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource] | None:\n    \"\"\"Clean pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n\n    list: list[Resource] = []\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        for component in reversed(pipeline.components):\n            clean = component.manifest_clean()\n            list.append(clean)\n\n        return list\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def clean_runner(component: PipelineComponent):\n            log_action(\"Clean\", component)\n            await component.clean(dry_run)\n\n        async def async_clean():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(\n                    clean_runner, reverse=True\n                )\n                await pipeline_tasks\n            else:\n                for component in reversed(pipeline.components):\n                    await clean_runner(component)\n\n        asyncio.run(async_clean())\n    return []\n
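    A minimal teardown sketch (hedged; the pipeline path below is a placeholder). In Helm operation mode the clean is only executed with dry_run=False and an empty list is returned, while in Argo operation mode the clean manifests are returned instead:

    import kpops.api as kpops\nfrom pathlib import Path\n\n# Clean all pipeline steps in reverse order.\nresources = kpops.clean(\n    pipeline_path=Path(\"pipelines/my-pipeline/pipeline.yaml\"),  # placeholder path\n    dry_run=False,\n    parallel=True,\n)\nif resources:  # only populated in Argo operation mode\n    for resource in resources:\n        print(resource)\n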
    "}, {"location": "developer/api/#kpops.api.deploy", "title": "deploy", "text": "
    deploy(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource]\n

    Deploy pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def deploy(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource]:\n    \"\"\"Deploy pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        resources: list[Resource] = []\n        for component in pipeline.components:\n            resource = component.manifest_deploy()\n            resources.append(resource)\n        return resources\n\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def deploy_runner(component: PipelineComponent):\n            log_action(\"Deploy\", component)\n            await component.deploy(dry_run)\n\n        async def async_deploy():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(deploy_runner)\n                await pipeline_tasks\n            else:\n                for component in pipeline.components:\n                    await deploy_runner(component)\n\n        asyncio.run(async_deploy())\n\n    return []\n
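    A minimal usage sketch (hedged; the pipeline path below is a placeholder). With the default dry_run=True nothing is executed; in Argo operation mode the deploy manifests are returned, in Helm operation mode an empty list:

    import kpops.api as kpops\nfrom pathlib import Path\n\n# Deploy all steps of a pipeline in pipeline order.\nresources = kpops.deploy(\n    pipeline_path=Path(\"pipelines/my-pipeline/pipeline.yaml\"),  # placeholder path\n    config=Path(\".\"),\n    dry_run=True,\n    parallel=False,\n)\n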
    "}, {"location": "developer/api/#kpops.api.destroy", "title": "destroy", "text": "
    destroy(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource] | None\n

    Destroy pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def destroy(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource] | None:\n    \"\"\"Destroy pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        resources: list[Resource] = []\n        for component in pipeline.components:\n            resource = component.manifest_destroy()\n            resources.append(resource)\n        return resources\n\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def destroy_runner(component: PipelineComponent):\n            log_action(\"Destroy\", component)\n            await component.destroy(dry_run)\n\n        async def async_destroy():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(\n                    destroy_runner, reverse=True\n                )\n                await pipeline_tasks\n            else:\n                for component in reversed(pipeline.components):\n                    await destroy_runner(component)\n\n        asyncio.run(async_destroy())\n    return []\n
    "}, {"location": "developer/api/#kpops.api.generate", "title": "generate", "text": "
    generate(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, verbose: bool = False) -> Pipeline\n

    Generate enriched pipeline representation.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: False

    RETURNS DESCRIPTION Pipeline

    Generated Pipeline object.

    Source code in kpops/api/__init__.py
    def generate(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    verbose: bool = False,\n) -> Pipeline:\n    \"\"\"Generate enriched pipeline representation.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :return: Generated `Pipeline` object.\n    \"\"\"\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n    pipeline = _create_pipeline(pipeline_path, kpops_config, environment)\n    log.info(f\"Picked up pipeline '{pipeline_path.parent.name}'\")\n    if steps:\n        component_names = steps\n        log.debug(\n            f\"KPOPS_PIPELINE_STEPS is defined with values: {component_names} and filter type of {filter_type.value}\"\n        )\n\n        predicate = filter_type.create_default_step_names_filter_predicate(\n            component_names\n        )\n        pipeline.filter(predicate)\n        log.info(f\"Filtered pipeline:\\n{pipeline.step_names}\")\n    return pipeline\n
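    A minimal usage sketch (hedged; the pipeline path and step name below are placeholders). generate only builds the enriched Pipeline object, it does not deploy anything:

    import kpops.api as kpops\nfrom pathlib import Path\n\npipeline = kpops.generate(\n    pipeline_path=Path(\"pipelines/my-pipeline/pipeline.yaml\"),  # placeholder path\n    environment=\"development\",  # resolves e.g. config_development.yaml / defaults_development.yaml\n    steps={\"my-streams-app\"},  # placeholder step name, optional subset of steps\n    verbose=True,\n)\nprint(pipeline.step_names)\nprint(pipeline.to_yaml())\n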
    "}, {"location": "developer/api/#kpops.api.init", "title": "init", "text": "
    init(path: Path, config_include_opt: bool = False)\n

    Initiate a default empty project.

    PARAMETER DESCRIPTION path

    Directory in which the project should be initiated.

    TYPE: Path

    config_include_opt

    Whether to include non-required settings in the generated config file.

    Source code in kpops/api/__init__.py
    def init(\n    path: Path,\n    config_include_opt: bool = False,\n):\n    \"\"\"Initiate a default empty project.\n\n    :param path: Directory in which the project should be initiated.\n    :param config_include_opt: Whether to include non-required settings\n        in the generated config file.\n    \"\"\"\n    if not path.exists():\n        path.mkdir(parents=False)\n    elif next(path.iterdir(), False):\n        log.warning(\"Please provide a path to an empty directory.\")\n        return\n    init_project(path, config_include_opt)\n
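    A minimal usage sketch (hedged; the project directory below is a placeholder and must be empty or non-existent):

    import kpops.api as kpops\nfrom pathlib import Path\n\n# Scaffold a new KPOps project, including optional settings in the generated config.yaml.\nkpops.init(Path(\"my-kpops-project\"), config_include_opt=True)\n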
    "}, {"location": "developer/api/#kpops.api.manifest", "title": "manifest", "text": "
    manifest(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, verbose: bool = False) -> list[Resource]\n

    Generate pipeline, return final resource representations for each step.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: False

    RETURNS DESCRIPTION list[Resource]

    Resources.

    Source code in kpops/api/__init__.py
    def manifest(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    verbose: bool = False,\n) -> list[Resource]:\n    \"\"\"Generate pipeline, return final resource representations for each step.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :return: Resources.\n    \"\"\"\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n    resources: list[Resource] = []\n    for component in pipeline.components:\n        resource = component.manifest_deploy()\n        resources.append(resource)\n    return resources\n
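    A minimal usage sketch (hedged; the pipeline path below is a placeholder). The returned Resource objects are the final representations (e.g. Kubernetes manifests) per pipeline step:

    import kpops.api as kpops\nfrom pathlib import Path\n\nresources = kpops.manifest(\n    pipeline_path=Path(\"pipelines/my-pipeline/pipeline.yaml\"),  # placeholder path\n    environment=\"development\",\n)\nfor resource in resources:\n    print(resource)\n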
    "}, {"location": "developer/api/#kpops.api.patch", "title": "patch", "text": "
    patch(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), environment: str | None = None, verbose: bool = True) -> list[Resource]\n
    Source code in kpops/api/__init__.py
    def patch(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    environment: str | None = None,\n    verbose: bool = True,\n) -> list[Resource]:\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        environment=environment,\n        verbose=verbose,\n    )\n    resources: list[Resource] = []\n\n    for component in pipeline.components:\n        resource = component.manifest_deploy()\n        resources.append(resource)\n    return resources\n
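    patch currently has no docstring; based on the source above it renders the deploy manifests of every component, similar to manifest but without step filtering. A minimal usage sketch (hedged; the pipeline path below is a placeholder):

    import kpops.api as kpops\nfrom pathlib import Path\n\nresources = kpops.patch(\n    pipeline_path=Path(\"pipelines/my-pipeline/pipeline.yaml\"),  # placeholder path\n)\nfor resource in resources:\n    print(resource)\n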
    "}, {"location": "developer/api/#kpops.api.reset", "title": "reset", "text": "
    reset(pipeline_path: Path, dotenv: list[Path] | None = None, config: Path = Path(), steps: set[str] | None = None, filter_type: FilterType = FilterType.INCLUDE, environment: str | None = None, dry_run: bool = True, verbose: bool = True, parallel: bool = False) -> list[Resource]\n

    Reset pipeline steps.

    PARAMETER DESCRIPTION pipeline_path

    Path to pipeline definition yaml file.

    TYPE: Path

    dotenv

    Paths to dotenv files.

    TYPE: list[Path] | None DEFAULT: None

    config

    Path to the dir containing config.yaml files.

    TYPE: Path DEFAULT: Path()

    steps

    Set of steps (components) to apply the command on.

    TYPE: set[str] | None DEFAULT: None

    filter_type

    Whether steps should include/exclude the steps.

    TYPE: FilterType DEFAULT: INCLUDE

    dry_run

    Whether to dry run the command or execute it.

    TYPE: bool DEFAULT: True

    environment

    The environment to generate and deploy the pipeline to.

    TYPE: str | None DEFAULT: None

    verbose

    Enable verbose printing.

    TYPE: bool DEFAULT: True

    parallel

    Enable or disable parallel execution of pipeline steps.

    TYPE: bool DEFAULT: False

    Source code in kpops/api/__init__.py
    def reset(\n    pipeline_path: Path,\n    dotenv: list[Path] | None = None,\n    config: Path = Path(),\n    steps: set[str] | None = None,\n    filter_type: FilterType = FilterType.INCLUDE,\n    environment: str | None = None,\n    dry_run: bool = True,\n    verbose: bool = True,\n    parallel: bool = False,\n) -> list[Resource]:\n    \"\"\"Reset pipeline steps.\n\n    :param pipeline_path: Path to pipeline definition yaml file.\n    :param dotenv: Paths to dotenv files.\n    :param config: Path to the dir containing config.yaml files.\n    :param steps: Set of steps (components) to apply the command on.\n    :param filter_type: Whether `steps` should include/exclude the steps.\n    :param dry_run: Whether to dry run the command or execute it.\n    :param environment: The environment to generate and deploy the pipeline to.\n    :param verbose: Enable verbose printing.\n    :param parallel: Enable or disable parallel execution of pipeline steps.\n    \"\"\"\n    kpops_config = KpopsConfig.create(\n        config,\n        dotenv,\n        environment,\n        verbose,\n    )\n    pipeline = generate(\n        pipeline_path=pipeline_path,\n        dotenv=dotenv,\n        config=config,\n        steps=steps,\n        filter_type=filter_type,\n        environment=environment,\n        verbose=verbose,\n    )\n\n    if kpops_config.operation_mode is OperationMode.ARGO:\n        resources: list[Resource] = []\n        for component in pipeline.components:\n            resource = component.manifest_reset()\n            resources.append(resource)\n        return resources\n\n    if kpops_config.operation_mode is OperationMode.HELM:\n\n        async def reset_runner(component: PipelineComponent):\n            log_action(\"Reset\", component)\n            await component.reset(dry_run)\n\n        async def async_reset():\n            if parallel:\n                pipeline_tasks = pipeline.build_execution_graph(\n                    reset_runner, reverse=True\n                )\n                await pipeline_tasks\n            else:\n                for component in reversed(pipeline.components):\n                    await reset_runner(component)\n\n        asyncio.run(async_reset())\n\n    return []\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline", "title": "kpops.pipeline.Pipeline", "text": "

    Bases: BaseModel

    Pipeline representation.

    Source code in kpops/pipeline/__init__.py
    class Pipeline(BaseModel):\n    \"\"\"Pipeline representation.\"\"\"\n\n    _component_index: dict[str, PipelineComponent] = {}\n    _graph: nx.DiGraph = nx.DiGraph()\n\n    model_config = ConfigDict(arbitrary_types_allowed=True)\n\n    @property\n    def step_names(self) -> list[str]:\n        return [step.name for step in self.components]\n\n    @computed_field(title=\"Components\")\n    @property\n    def components(self) -> list[SerializeAsAny[PipelineComponent]]:\n        return list(self._component_index.values())\n\n    @property\n    def last(self) -> PipelineComponent:\n        return self.components[-1]\n\n    def add(self, component: PipelineComponent) -> None:\n        if self._component_index.get(component.id) is not None:\n            msg = (\n                f\"Pipeline steps must have unique id, '{component.id}' already exists.\"\n            )\n            raise ValidationError(msg)\n        self._component_index[component.id] = component\n        self.__add_to_graph(component)\n\n    def remove(self, component_id: str) -> None:\n        self._component_index.pop(component_id)\n\n    def get(self, component_id: str) -> PipelineComponent | None:\n        return self._component_index.get(component_id)\n\n    def find(self, predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]:\n        \"\"\"Find pipeline components matching a custom predicate.\n\n        :param predicate: Filter function,\n            returns boolean value whether the component should be kept or removed\n        :returns: Iterator of components matching the predicate\n        \"\"\"\n        for component in self.components:\n            if predicate(component):\n                yield component\n\n    def filter(self, predicate: ComponentFilterPredicate) -> None:\n        \"\"\"Filter pipeline components using a custom predicate.\n\n        :param predicate: Filter function,\n            returns boolean value whether the component should be kept or removed\n        \"\"\"\n        for component in self.components:\n            # filter out components not matching the predicate\n            if not predicate(component):\n                self.remove(component.id)\n\n    def validate(self) -> None:  # pyright: ignore [reportIncompatibleMethodOverride]\n        if not nx.is_directed_acyclic_graph(self._graph):\n            msg = \"Pipeline is not a valid DAG.\"\n            raise ValueError(msg)\n\n    # class ArgoResource(BaseModel):\n    #     sync_wave: int\n    #     resource: Resource\n\n    # def add_levels(self):\n    #     sync_wave = \"sync-wave\"\n    #     for node in nx.topological_sort(self._graph):\n    #         node_ = self._graph.nodes[node]\n    #         if not len(list(self._graph.predecessors(node))):\n    #             node_[sync_wave] = 1\n    #         else:\n    #             node_[sync_wave] = (\n    #                 max(\n    #                     self._graph.nodes[n][sync_wave]\n    #                     for n in self._graph.predecessors(node)\n    #                 )\n    #                 + 1\n    #             )\n    #         if p := self._component_index.get(node):\n    #             p.sync_wave = self._graph.nodes[node][sync_wave]\n\n    def to_yaml(self) -> str:\n        return yaml.dump(\n            self.model_dump(mode=\"json\", by_alias=True, exclude_none=True)[\"components\"]\n        )\n\n    def build_execution_graph(\n        self,\n        runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],\n        /,\n        reverse: bool = False,\n  
  ) -> Awaitable[None]:\n        async def run_parallel_tasks(\n            coroutines: list[Coroutine[Any, Any, None]],\n        ) -> None:\n            tasks: list[asyncio.Task[None]] = []\n            for coro in coroutines:\n                tasks.append(asyncio.create_task(coro))\n            await asyncio.gather(*tasks)\n\n        async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:\n            for pending_task in pending_tasks:\n                await pending_task\n\n        graph: nx.DiGraph = self._graph.copy()  # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx\n\n        # We add an extra node to the graph, connecting all the leaf nodes to it\n        # in that way we make this node the root of the graph, avoiding backtracking\n        root_node = \"root_node_bfs\"\n        graph.add_node(root_node)\n\n        for node in graph:\n            predecessors = list(graph.predecessors(node))\n            if not predecessors:\n                graph.add_edge(root_node, node)\n\n        layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))\n\n        sorted_tasks: list[Awaitable[None]] = []\n        for layer in layers_graph[1:]:\n            if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):\n                sorted_tasks.append(run_parallel_tasks(parallel_tasks))\n\n        if reverse:\n            sorted_tasks.reverse()\n\n        return run_graph_tasks(sorted_tasks)\n\n    def __getitem__(self, component_id: str) -> PipelineComponent:\n        try:\n            return self._component_index[component_id]\n        except KeyError as exc:\n            msg = f\"Component {component_id} not found\"\n            raise ValueError(msg) from exc\n\n    def __bool__(self) -> bool:\n        return bool(self._component_index)\n\n    def __iter__(self) -> Iterator[PipelineComponent]:  # pyright: ignore [reportIncompatibleMethodOverride]\n        yield from self._component_index.values()\n\n    def __len__(self) -> int:\n        return len(self.components)\n\n    def __add_to_graph(self, component: PipelineComponent):\n        self._graph.add_node(component.id)\n\n        for input_topic in component.inputs:\n            self.__add_input(input_topic.id, component.id)\n\n        for output_topic in component.outputs:\n            self.__add_output(output_topic.id, component.id)\n\n    def __add_output(self, topic_id: str, source: str) -> None:\n        self._graph.add_node(topic_id)\n        self._graph.add_edge(source, topic_id)\n\n    def __add_input(self, topic_id: str, target: str) -> None:\n        self._graph.add_node(topic_id)\n        self._graph.add_edge(topic_id, target)\n\n    def __get_parallel_tasks_from(\n        self,\n        layer: list[str],\n        runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],\n    ) -> list[Coroutine[Any, Any, None]]:\n        def gen_parallel_tasks():\n            for node_in_layer in layer:\n                # check if component, skip topics\n                if (component := self._component_index.get(node_in_layer)) is not None:\n                    yield runner(component)\n\n        return list(gen_parallel_tasks())\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.components", "title": "components property", "text": "
    components: list[SerializeAsAny[PipelineComponent]]\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.last", "title": "last property", "text": "
    last: PipelineComponent\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.step_names", "title": "step_names property", "text": "
    step_names: list[str]\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.add", "title": "add", "text": "
    add(component: PipelineComponent) -> None\n
    Source code in kpops/pipeline/__init__.py
    def add(self, component: PipelineComponent) -> None:\n    if self._component_index.get(component.id) is not None:\n        msg = (\n            f\"Pipeline steps must have unique id, '{component.id}' already exists.\"\n        )\n        raise ValidationError(msg)\n    self._component_index[component.id] = component\n    self.__add_to_graph(component)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.build_execution_graph", "title": "build_execution_graph", "text": "
    build_execution_graph(runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]], /, reverse: bool = False) -> Awaitable[None]\n
    Source code in kpops/pipeline/__init__.py
    def build_execution_graph(\n    self,\n    runner: Callable[[PipelineComponent], Coroutine[Any, Any, None]],\n    /,\n    reverse: bool = False,\n) -> Awaitable[None]:\n    async def run_parallel_tasks(\n        coroutines: list[Coroutine[Any, Any, None]],\n    ) -> None:\n        tasks: list[asyncio.Task[None]] = []\n        for coro in coroutines:\n            tasks.append(asyncio.create_task(coro))\n        await asyncio.gather(*tasks)\n\n    async def run_graph_tasks(pending_tasks: list[Awaitable[None]]) -> None:\n        for pending_task in pending_tasks:\n            await pending_task\n\n    graph: nx.DiGraph = self._graph.copy()  # pyright: ignore[reportAssignmentType, reportGeneralTypeIssues] imprecise type hint in networkx\n\n    # We add an extra node to the graph, connecting all the leaf nodes to it\n    # in that way we make this node the root of the graph, avoiding backtracking\n    root_node = \"root_node_bfs\"\n    graph.add_node(root_node)\n\n    for node in graph:\n        predecessors = list(graph.predecessors(node))\n        if not predecessors:\n            graph.add_edge(root_node, node)\n\n    layers_graph: list[list[str]] = list(nx.bfs_layers(graph, root_node))\n\n    sorted_tasks: list[Awaitable[None]] = []\n    for layer in layers_graph[1:]:\n        if parallel_tasks := self.__get_parallel_tasks_from(layer, runner):\n            sorted_tasks.append(run_parallel_tasks(parallel_tasks))\n\n    if reverse:\n        sorted_tasks.reverse()\n\n    return run_graph_tasks(sorted_tasks)\n
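A minimal usage sketch (an illustration, not part of the generated API docs): it assumes pipeline is an already built kpops.pipeline.Pipeline and that the runner coroutine receives one component at a time, as in the source above.

import asyncio

async def deploy_component(component) -> None:
    # Hypothetical per-component action; replace with the real deployment logic.
    print(f"running step {component.id}")

async def run(pipeline) -> None:
    # Components within the same BFS layer are awaited concurrently,
    # while successive layers run one after another.
    await pipeline.build_execution_graph(deploy_component)
    # reverse=True flips the layer order, e.g. for teardown-style operations.
    await pipeline.build_execution_graph(deploy_component, reverse=True)

# asyncio.run(run(pipeline))  # requires a populated pipeline instance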
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.filter", "title": "filter", "text": "
    filter(predicate: ComponentFilterPredicate) -> None\n

    Filter pipeline components using a custom predicate.

    PARAMETER DESCRIPTION predicate

    Filter function, returns boolean value whether the component should be kept or removed

    TYPE: ComponentFilterPredicate

    Source code in kpops/pipeline/__init__.py
    def filter(self, predicate: ComponentFilterPredicate) -> None:\n    \"\"\"Filter pipeline components using a custom predicate.\n\n    :param predicate: Filter function,\n        returns boolean value whether the component should be kept or removed\n    \"\"\"\n    for component in self.components:\n        # filter out components not matching the predicate\n        if not predicate(component):\n            self.remove(component.id)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.find", "title": "find", "text": "
    find(predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]\n

    Find pipeline components matching a custom predicate.

    PARAMETER DESCRIPTION predicate

    Filter function, returns boolean value whether the component should be kept or removed

    TYPE: ComponentFilterPredicate

    RETURNS DESCRIPTION Iterator[PipelineComponent]

    Iterator of components matching the predicate

    Source code in kpops/pipeline/__init__.py
    def find(self, predicate: ComponentFilterPredicate) -> Iterator[PipelineComponent]:\n    \"\"\"Find pipeline components matching a custom predicate.\n\n    :param predicate: Filter function,\n        returns boolean value whether the component should be kept or removed\n    :returns: Iterator of components matching the predicate\n    \"\"\"\n    for component in self.components:\n        if predicate(component):\n            yield component\n
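A small, hypothetical example of both predicate-based helpers; it assumes a populated kpops.pipeline.Pipeline instance and components exposing the type and name attributes used elsewhere in this module:

def select_and_trim(pipeline) -> None:
    # Collect all streams-app components without modifying the pipeline.
    streams_apps = list(pipeline.find(lambda component: component.type == "streams-app"))
    print([component.name for component in streams_apps])

    # filter() mutates the pipeline in place, dropping non-matching components.
    pipeline.filter(lambda component: component.name.startswith("word-count"))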
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.get", "title": "get", "text": "
    get(component_id: str) -> PipelineComponent | None\n
    Source code in kpops/pipeline/__init__.py
    def get(self, component_id: str) -> PipelineComponent | None:\n    return self._component_index.get(component_id)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.remove", "title": "remove", "text": "
    remove(component_id: str) -> None\n
    Source code in kpops/pipeline/__init__.py
    def remove(self, component_id: str) -> None:\n    self._component_index.pop(component_id)\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.to_yaml", "title": "to_yaml", "text": "
    to_yaml() -> str\n
    Source code in kpops/pipeline/__init__.py
    def to_yaml(self) -> str:\n    return yaml.dump(\n        self.model_dump(mode=\"json\", by_alias=True, exclude_none=True)[\"components\"]\n    )\n
    "}, {"location": "developer/api/#kpops.pipeline.Pipeline.validate", "title": "validate", "text": "
    validate() -> None\n
    Source code in kpops/pipeline/__init__.py
    def validate(self) -> None:  # pyright: ignore [reportIncompatibleMethodOverride]\n    if not nx.is_directed_acyclic_graph(self._graph):\n        msg = \"Pipeline is not a valid DAG.\"\n        raise ValueError(msg)\n
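To tie the methods above together, here is a hedged end-to-end sketch; my_component stands in for any concrete PipelineComponent instance, and its construction is omitted on purpose:

from kpops.pipeline import Pipeline

def build(my_component) -> Pipeline:
    # `my_component` is a placeholder for any concrete PipelineComponent instance.
    pipeline = Pipeline()
    pipeline.add(my_component)        # raises ValidationError for duplicate ids
    assert pipeline.get(my_component.id) is my_component
    assert pipeline[my_component.id] is my_component  # __getitem__ raises ValueError when missing
    pipeline.validate()               # raises ValueError if the graph is not a DAG
    print(pipeline.to_yaml())
    return pipeline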
    "}, {"location": "developer/auto-generation/", "title": "Auto generation", "text": "

Auto generation happens mostly through pre-commit hooks. You can find the pre-commit configuration here. These pre-commit hooks call different Python scripts to auto-generate the documentation and related files.

    "}, {"location": "developer/auto-generation/#generation-scripts-and-their-respective-files", "title": "Generation scripts and their respective files", "text": ""}, {"location": "developer/auto-generation/#documentation", "title": "Documentation", "text": ""}, {"location": "developer/auto-generation/#variables", "title": "Variables", "text": "
    • cli_env_vars.env -- All CLI environment variables in a dotenv file.
    • cli_env_vars.md -- All CLI environment variables in a table.
    • config_env_vars.env -- Almost all pipeline config environment variables in a dotenv file. The script checks for each field in KpopsConfig whether it has an env attribute defined. The script is currently unable to visit the classes of fields like topic_name_config, hence any environment variables defined there would remain unknown to it.
• config_env_vars.md -- Almost all pipeline config environment variables in a table.
    • variable_substitution.yaml -- A copy of ./tests/pipeline/resources/component-type-substitution/pipeline.yaml used as an example of substitution.
    "}, {"location": "developer/auto-generation/#cli-commands", "title": "CLI commands", "text": "

    Generated by typer-cli from the code in main.py. It is called with Python's subprocess module.
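As an illustration of that mechanism (the file names and options below are assumptions, not the exact invocation KPOps uses), typer-cli's docs generator can be driven from Python roughly like this:

import subprocess

# Render the CLI reference defined in main.py into a Markdown file.
subprocess.run(
    ["typer", "main.py", "utils", "docs", "--name", "kpops", "--output", "cli-commands.md"],
    check=True,
)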

    "}, {"location": "developer/auto-generation/#pipeline-and-defaults-example-definitions", "title": "Pipeline and defaults example definitions", "text": "

Generates example pipeline.yaml and defaults.yaml files for each individual component, stores them, and concatenates them into one big pipeline definition and one big defaults definition.

    User input

    • headers/*\\.yaml -- The top of each example. Includes a description comment, type and name. The headers for pipeline.yaml reside in the pipeline-components dir and the defaults.yaml headers reside in the pipeline-defaults dir. The names of the files must be equal to the respective component type.
    • sections/*\\.yaml -- Each YAML file contains a single section (component attribute) definition. The intention is to keep the minimal set of definitions there from which any component definition can be built. The names of the files must be equal to the respective component type and the attribute name. The sections are used for both defaults.yaml and pipeline.yaml generation and reside in the pipeline-components dir.

    Generated

• pipeline-components/dependencies/* -- Cached information about KPOps components
    • pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the pipeline.yaml generation.
    • defaults_pipeline_component_dependencies.yaml -- Specifies per component which files in the sections dir should be used for the defaults.yaml generation.
    • kpops_structure.yaml -- Specifies the inheritance hierarchy of the components and what sections exist in each component.
    • pipeline-components/*\\.yaml -- All single-component pipeline definitions and one big (complete) pipeline.yaml that contains all of them.
    • pipeline-defaults/*\\.yaml -- All single-component defaults definitions and one big (complete) defaults.yaml that contains all of them.
    "}, {"location": "developer/auto-generation/#editor-integration", "title": "Editor integration", "text": ""}, {"location": "developer/auto-generation/#schemas", "title": "Schemas", "text": "
    • config.json
    • pipeline.json
    "}, {"location": "developer/contributing/", "title": "How to contribute", "text": "

    Welcome! We are glad to have you visit our contributing guide!

    If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

    "}, {"location": "developer/contributing/#git", "title": "git", "text": "

We use git submodules to import the KPOps examples repository. You need to fetch the repository locally on your machine. To do so, run the following commands:

    git submodule init\ngit submodule update --recursive\n

    This will fetch the resources under the examples folder.

    "}, {"location": "developer/contributing/#style", "title": "Style", "text": "

We advise that you stick to our pre-commit hooks for code linting, formatting, and auto-generation of documentation. After you install them using poetry run pre-commit install, they're triggered automatically during git commit. You can also invoke them manually with poetry run pre-commit run -a. For dprint to work, you have to install it locally. Since dprint also runs in the CI, you can alternatively skip the local installation and apply any formatting changes it flags there.

    "}, {"location": "developer/contributing/#python", "title": "Python", "text": "

    To ensure a consistent Python code style, we use Ruff for both linting and formatting. The official docs contain a guide on editor integration.

    Our configuration can be found in KPOps' top-level pyproject.toml.

    "}, {"location": "developer/contributing/#markdown", "title": "Markdown", "text": "

    To ensure a consistent markdown style, we use dprint's Markdown code formatter. Our configuration can be found here.

    "}, {"location": "developer/contributing/#css", "title": "CSS", "text": "

To ensure a consistent CSS style, we use the malva plugin for dprint. Our configuration can be found here.

    "}, {"location": "developer/contributing/#toml", "title": "TOML", "text": "

    To ensure a consistent TOML style, we use dprint's TOML code formatter. Our configuration can be found here.

    "}, {"location": "developer/getting-started/", "title": "Getting started", "text": "

    Welcome! We are glad to have you visit our developer guide! If you find any bugs or have suggestions for improvements, please open an issue and optionally a pull request (PR). In the case of a PR, we would appreciate it if you preface it with an issue outlining your goal and means of achieving it.

Find out more about our code style and gain insights into KPOps' code base here in our developer guide.

    Work in progress

    The developer guide is still under construction. If you have a question left unanswered here, feel free to ask it by opening an issue.

    "}, {"location": "user/changelog/", "title": "Changelog", "text": ""}, {"location": "user/changelog/#810-release-date-2024-10-25", "title": "8.1.0 - Release Date: [2024-10-25]", "text": ""}, {"location": "user/changelog/#dependencies", "title": "\ud83e\uddea Dependencies", "text": "
    • Upgrade typer to support union types - #533
    "}, {"location": "user/changelog/#miscellaneous", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Extend StreamsBootstrap model - #534
    "}, {"location": "user/changelog/#801-release-date-2024-08-22", "title": "8.0.1 - Release Date: [2024-08-22]", "text": ""}, {"location": "user/changelog/#documentation", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix changelog in docs - #532
    "}, {"location": "user/changelog/#800-release-date-2024-08-21", "title": "8.0.0 - Release Date: [2024-08-21]", "text": ""}, {"location": "user/changelog/#breaking-changes", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Make KafkaApp responsible for deploying/cleaning streams bootstrap components - https://github.com/bakdata/kpops/pull/522
    • Rename role to label - https://github.com/bakdata/kpops/pull/525
    • Fix Pyright warning about type override without default value - https://github.com/bakdata/kpops/pull/524
    • Remove -v3 suffix and suffix old streams bootstrap with -v2 - https://github.com/bakdata/kpops/pull/526
    "}, {"location": "user/changelog/#features", "title": "\ud83d\ude80 Features", "text": "
    • Add support for streams-bootstrap v3 - https://github.com/bakdata/kpops/pull/519
    "}, {"location": "user/changelog/#refactor", "title": "\ud83c\udfed Refactor", "text": "
    • Rename role to label - https://github.com/bakdata/kpops/pull/525
    • Fix Pyright warning about type override without default value - https://github.com/bakdata/kpops/pull/524
    "}, {"location": "user/changelog/#710-release-date-2024-08-15", "title": "7.1.0 - Release Date: [2024-08-15]", "text": ""}, {"location": "user/changelog/#dependencies_1", "title": "\ud83e\uddea Dependencies", "text": "
    • Update pytest - #527
    "}, {"location": "user/changelog/#refactor_1", "title": "\ud83c\udfed Refactor", "text": "
    • Improve incomplete type hints - #515

    • Fallback to user defined model when the validation of cluster model fails - #521

    • Replace kubernetes-asyncio with lightkube - #517

    "}, {"location": "user/changelog/#miscellaneous_1", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Fix incorrect parameter type annotation - #523
    "}, {"location": "user/changelog/#700-release-date-2024-07-23", "title": "7.0.0 - Release Date: [2024-07-23]", "text": ""}, {"location": "user/changelog/#breaking-changes_1", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Automatic loading of namespaced custom components - #500

    • Call destroy from inside of reset or clean - #501

    • Rename app field - #506

    "}, {"location": "user/changelog/#features_1", "title": "\ud83d\ude80 Features", "text": "
    • clean/reset streams-bootstrap components with cluster values - #498
    "}, {"location": "user/changelog/#refactor_2", "title": "\ud83c\udfed Refactor", "text": "
    • Call destroy from inside of reset or clean - #501

    • Rename app field - #506

    "}, {"location": "user/changelog/#610-release-date-2024-07-09", "title": "6.1.0 - Release Date: [2024-07-09]", "text": ""}, {"location": "user/changelog/#features_2", "title": "\ud83d\ude80 Features", "text": "
    • Add image tag field to streams-bootstrap app values - #499

    • Delete ignored keys from diff - #510

    "}, {"location": "user/changelog/#refactor_3", "title": "\ud83c\udfed Refactor", "text": "
    • Improve dataclass instance check - #507
    "}, {"location": "user/changelog/#602-release-date-2024-07-04", "title": "6.0.2 - Release Date: [2024-07-04]", "text": ""}, {"location": "user/changelog/#documentation_1", "title": "\ud83d\udcdd Documentation", "text": "
    • Generate developer docs for Python API - #503
    "}, {"location": "user/changelog/#miscellaneous_2", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update codeowners - #504
    "}, {"location": "user/changelog/#601-release-date-2024-06-12", "title": "6.0.1 - Release Date: [2024-06-12]", "text": ""}, {"location": "user/changelog/#fixes", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix connector resetter offset topic - #497
    "}, {"location": "user/changelog/#600-release-date-2024-06-06", "title": "6.0.0 - Release Date: [2024-06-06]", "text": ""}, {"location": "user/changelog/#breaking-changes_2", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • KPOps 6.0.0 - #496
    "}, {"location": "user/changelog/#features_3", "title": "\ud83d\ude80 Features", "text": "
    • KPOps 6.0.0 - #496
    "}, {"location": "user/changelog/#refactor_4", "title": "\ud83c\udfed Refactor", "text": "
    • KPOps 6.0.0 - #496
    "}, {"location": "user/changelog/#511-release-date-2024-05-22", "title": "5.1.1 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#fixes_1", "title": "\ud83d\udc1b Fixes", "text": "
    • Add YAML separator (---) to stdout - #491
    "}, {"location": "user/changelog/#510-release-date-2024-05-22", "title": "5.1.0 - Release Date: [2024-05-22]", "text": ""}, {"location": "user/changelog/#miscellaneous_3", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add computed field for Helm release name and name override - #490
    "}, {"location": "user/changelog/#501-release-date-2024-05-15", "title": "5.0.1 - Release Date: [2024-05-15]", "text": ""}, {"location": "user/changelog/#fixes_2", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix missing await on Kubernetes API - #488
    "}, {"location": "user/changelog/#500-release-date-2024-05-02", "title": "5.0.0 - Release Date: [2024-05-02]", "text": ""}, {"location": "user/changelog/#breaking-changes_3", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Allow custom timeout for external services - #485
    "}, {"location": "user/changelog/#miscellaneous_4", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update examples for v4 - #486
    "}, {"location": "user/changelog/#421-release-date-2024-04-25", "title": "4.2.1 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#features_4", "title": "\ud83d\ude80 Features", "text": "
    • Add support for cleaning StatefulSets with PVCs - #482
    "}, {"location": "user/changelog/#420-release-date-2024-04-25", "title": "4.2.0 - Release Date: [2024-04-25]", "text": ""}, {"location": "user/changelog/#refactor_5", "title": "\ud83c\udfed Refactor", "text": "
    • Improve type annotations for parallel pipeline jobs - #476
    "}, {"location": "user/changelog/#miscellaneous_5", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update Ruff - #475

    • Set Pyright to warn on unknown types - #480

    • Quiet faker debug logs in tests - #483

    • Add pyright matcher - #481

    "}, {"location": "user/changelog/#412-release-date-2024-03-11", "title": "4.1.2 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#documentation_2", "title": "\ud83d\udcdd Documentation", "text": "
    • fix(docs): Correct from.components.<component-name>.type to input - #473
    "}, {"location": "user/changelog/#411-release-date-2024-03-11", "title": "4.1.1 - Release Date: [2024-03-11]", "text": ""}, {"location": "user/changelog/#fixes_3", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix import errors - #472
    "}, {"location": "user/changelog/#refactor_6", "title": "\ud83c\udfed Refactor", "text": "
    • Fix import errors - #472
    "}, {"location": "user/changelog/#miscellaneous_6", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update httpx - #471
    "}, {"location": "user/changelog/#410-release-date-2024-03-07", "title": "4.1.0 - Release Date: [2024-03-07]", "text": ""}, {"location": "user/changelog/#documentation_3", "title": "\ud83d\udcdd Documentation", "text": "
    • Document precedence between env vars and config.yaml - #465
    "}, {"location": "user/changelog/#miscellaneous_7", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Create init command - #394
    "}, {"location": "user/changelog/#402-release-date-2024-03-04", "title": "4.0.2 - Release Date: [2024-03-04]", "text": ""}, {"location": "user/changelog/#documentation_4", "title": "\ud83d\udcdd Documentation", "text": "
    • Reference editor plugin for Neovim in docs - #464
    "}, {"location": "user/changelog/#miscellaneous_8", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add support for Python 3.12 - #467

    • Update Pyright - #468

    • Remove package classifiers that are automatically assigned by Poetry - #469

    • Validate autoscaling mandatory fields when enabled - #470

    "}, {"location": "user/changelog/#401-release-date-2024-02-29", "title": "4.0.1 - Release Date: [2024-02-29]", "text": ""}, {"location": "user/changelog/#fixes_4", "title": "\ud83d\udc1b Fixes", "text": "
    • Set supported Python cutoff to 3.11 - #466
    "}, {"location": "user/changelog/#400-release-date-2024-02-27", "title": "4.0.0 - Release Date: [2024-02-27]", "text": ""}, {"location": "user/changelog/#breaking-changes_4", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Distribute defaults across multiple files - #438
    "}, {"location": "user/changelog/#features_5", "title": "\ud83d\ude80 Features", "text": "
    • Distribute defaults across multiple files - #438
    "}, {"location": "user/changelog/#324-release-date-2024-02-26", "title": "3.2.4 - Release Date: [2024-02-26]", "text": ""}, {"location": "user/changelog/#fixes_5", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix docs CI to include the latest changes to a tagged version in the changelog - #459

    • Fix tempfile creation - #461

    • Fix symbolic link to CONTRIBUTING.md and parallel option in action.yaml - #462

    "}, {"location": "user/changelog/#refactor_7", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor Kafka topics - #447

    • Refactor PipelineGenerator to use component ids - #460

    "}, {"location": "user/changelog/#documentation_5", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix docs CI to include the latest changes to a tagged version in the changelog - #459
    "}, {"location": "user/changelog/#323-release-date-2024-02-19", "title": "3.2.3 - Release Date: [2024-02-19]", "text": ""}, {"location": "user/changelog/#fixes_6", "title": "\ud83d\udc1b Fixes", "text": "
    • Trim and hash Helm name override to 63 characters - #456
    "}, {"location": "user/changelog/#322-release-date-2024-02-12", "title": "3.2.2 - Release Date: [2024-02-12]", "text": ""}, {"location": "user/changelog/#fixes_7", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix nested substitution - #451
    "}, {"location": "user/changelog/#321-release-date-2024-02-08", "title": "3.2.1 - Release Date: [2024-02-08]", "text": ""}, {"location": "user/changelog/#fixes_8", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix order of pipeline steps for clean/reset - #450

    • Fix substitution - #449

    • Fix cleaner inheritance, parent model should be aliased during instantiation - #452

    "}, {"location": "user/changelog/#refactor_8", "title": "\ud83c\udfed Refactor", "text": "
    • Simplify execution graph logic - #446
    "}, {"location": "user/changelog/#320-release-date-2024-02-01", "title": "3.2.0 - Release Date: [2024-02-01]", "text": ""}, {"location": "user/changelog/#features_6", "title": "\ud83d\ude80 Features", "text": "
    • Refactor pipeline filter and add to public API - #405
    "}, {"location": "user/changelog/#refactor_9", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor enrichment using Pydantic model validator - #444

    • Refactor pipeline filter and add to public API - #405

    "}, {"location": "user/changelog/#documentation_6", "title": "\ud83d\udcdd Documentation", "text": "
    • Improve Sphinx docs highlighting using RST markup - #443
    "}, {"location": "user/changelog/#310-release-date-2024-01-30", "title": "3.1.0 - Release Date: [2024-01-30]", "text": ""}, {"location": "user/changelog/#features_7", "title": "\ud83d\ude80 Features", "text": "
    • Add support for pipeline steps parallelization - #312
    "}, {"location": "user/changelog/#fixes_9", "title": "\ud83d\udc1b Fixes", "text": "
    • Update poetry publish workflow version to latest - #430
    "}, {"location": "user/changelog/#refactor_10", "title": "\ud83c\udfed Refactor", "text": "
    • Simplify loading of defaults - #435
    "}, {"location": "user/changelog/#miscellaneous_9", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add custom PascalCase to snake_case alias generator - #436

    • Add parallel flag support to kpops runner - #439

    "}, {"location": "user/changelog/#302-release-date-2024-01-23", "title": "3.0.2 - Release Date: [2024-01-23]", "text": ""}, {"location": "user/changelog/#fixes_10", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix Helm diff output - #434
    "}, {"location": "user/changelog/#documentation_7", "title": "\ud83d\udcdd Documentation", "text": "
    • Add step for submodule initialization on the docs - #431
    "}, {"location": "user/changelog/#miscellaneous_10", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add message if examples git submodule is not initialized - #432

    • Update type annotation for deserialized pipeline - #433

    "}, {"location": "user/changelog/#301-release-date-2024-01-19", "title": "3.0.1 - Release Date: [2024-01-19]", "text": ""}, {"location": "user/changelog/#fixes_11", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix broken doc link - #427

    • Add warning log if SR handler is disabled but URL is set - #428

    "}, {"location": "user/changelog/#documentation_8", "title": "\ud83d\udcdd Documentation", "text": "
    • Update docs of word-count example for v3 & new folder structure - #423

    • Move ATM fraud to examples repo - #425

    • Fix broken doc link - #427

    "}, {"location": "user/changelog/#miscellaneous_11", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update pydantic dependency - #422

    • Add git submodule instructions to the contributing.md - #429

    "}, {"location": "user/changelog/#300-release-date-2024-01-17", "title": "3.0.0 - Release Date: [2024-01-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_5", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Move GitHub action to repository root - #356

    • Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config - #354

    • Create HelmApp component - #370

    • Change substitution variables separator to . - #388

    • Refactor pipeline generator & representation - #392

    • Define custom components module & pipeline base dir globally - #387

    • Use hash and trim long Helm release names instead of only trimming - #390

    • Refactor generate template for Python API usage - #380

    • Namespace substitution vars - #408

    • Refactor streams-bootstrap cleanup jobs as individual HelmApp - #398

    • Refactor Kafka Connector resetter as individual HelmApp - #400

    • Fix wrong Helm release name character limit - #418

    "}, {"location": "user/changelog/#features_8", "title": "\ud83d\ude80 Features", "text": "
    • Allow overriding config files - #391

    • Generate defaults schema - #402

    "}, {"location": "user/changelog/#fixes_12", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix missing component type in pipeline schema - #401

    • Fix enrichment of nested Pydantic BaseModel - #415

    • Fix wrong Helm release name character limit - #418

    • Update release workflow template to support custom changelog file path - #421

    "}, {"location": "user/changelog/#dependencies_2", "title": "\ud83e\uddea Dependencies", "text": "
    • Migrate to Pydantic v2 - #347
    "}, {"location": "user/changelog/#refactor_11", "title": "\ud83c\udfed Refactor", "text": "
    • Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config - #354

    • Migrate to Pydantic v2 - #347

    • Refactor pipeline generator & representation - #392

    • Use hash and trim long Helm release names instead of only trimming - #390

    • Refactor Helm nameOverride - #397

    • Mark component type as computed Pydantic field - #399

    • Refactor generate template for Python API usage - #380

    • Support multiple inheritance for doc generation - #406

    • Refactor streams-bootstrap cleanup jobs as individual HelmApp - #398

    • Refactor Kafka Connector resetter as individual HelmApp - #400

    "}, {"location": "user/changelog/#documentation_9", "title": "\ud83d\udcdd Documentation", "text": "
    • Move GitHub action to repository root - #356

    • Create HelmApp component - #370

    • Update docs for substitution variable usage in v3 - #409

    • Support multiple inheritance for doc generation - #406

    • Update docs for v3 - #416

    • Update tests resources - #417

    • Summarize all breaking changes in diffs at the top of the migration guide - #419

    "}, {"location": "user/changelog/#miscellaneous_12", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Replace black with ruff - #365

    • Add toml formatter to dprint - #386

    • Add malva to dprint - #385

    • Update KPOps runner with the new options - #395

    • Fix KPOps action to get package from testPyPI - #396

    • KPOps 3.0 - #420

    "}, {"location": "user/changelog/#2011-release-date-2023-10-24", "title": "2.0.11 - Release Date: [2023-10-24]", "text": ""}, {"location": "user/changelog/#fixes_13", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix early exit upon Helm exit code 1 - #376

    • Fix docs setup page list indentation - #377

    "}, {"location": "user/changelog/#documentation_10", "title": "\ud83d\udcdd Documentation", "text": "
    • Migrate deprecated mkdocs-material-extensions - #378

    • Fix docs setup page list indentation - #377

    • Exclude resources from docs search - #371

    "}, {"location": "user/changelog/#2010-release-date-2023-10-12", "title": "2.0.10 - Release Date: [2023-10-12]", "text": ""}, {"location": "user/changelog/#miscellaneous_13", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Fix environment variables documentation generation - #362

    • Introduce ruff - #363

    • Print details on connector name mismatch error - #369

    • Enable transparent OS environment lookups from internal environment - #368

    "}, {"location": "user/changelog/#209-release-date-2023-09-19", "title": "2.0.9 - Release Date: [2023-09-19]", "text": ""}, {"location": "user/changelog/#fixes_14", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix Kafka connect config name for deletion - #361
    "}, {"location": "user/changelog/#documentation_11", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix link to kpops-examples - #357
    "}, {"location": "user/changelog/#208-release-date-2023-09-06", "title": "2.0.8 - Release Date: [2023-09-06]", "text": ""}, {"location": "user/changelog/#fixes_15", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix config.yaml overriding environment variables - #353
    "}, {"location": "user/changelog/#refactor_12", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor component prefix & name - #326

    • Remove unnecessary condition during inflate - #328

    "}, {"location": "user/changelog/#207-release-date-2023-08-31", "title": "2.0.7 - Release Date: [2023-08-31]", "text": ""}, {"location": "user/changelog/#fixes_16", "title": "\ud83d\udc1b Fixes", "text": "
    • Print only rendered templates when --template flag is set - #350
    "}, {"location": "user/changelog/#documentation_12", "title": "\ud83d\udcdd Documentation", "text": "
    • Add migration guide - #352
    "}, {"location": "user/changelog/#206-release-date-2023-08-30", "title": "2.0.6 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#refactor_13", "title": "\ud83c\udfed Refactor", "text": "
    • Simplify deployment with local Helm charts - #349
    "}, {"location": "user/changelog/#205-release-date-2023-08-30", "title": "2.0.5 - Release Date: [2023-08-30]", "text": ""}, {"location": "user/changelog/#fixes_17", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix versioning of docs when releasing - #346
    "}, {"location": "user/changelog/#204-release-date-2023-08-29", "title": "2.0.4 - Release Date: [2023-08-29]", "text": ""}, {"location": "user/changelog/#fixes_18", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix GitHub ref variable for pushing docs to main branch - #343
    "}, {"location": "user/changelog/#documentation_13", "title": "\ud83d\udcdd Documentation", "text": "
    • Add dprint as the markdown formatter - #337

    • Publish pre-release docs for PRs & main branch - #339

    • Align docs colours - #345

    "}, {"location": "user/changelog/#miscellaneous_14", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Exclude abstract components from pipeline schema - #332
    "}, {"location": "user/changelog/#203-release-date-2023-08-24", "title": "2.0.3 - Release Date: [2023-08-24]", "text": ""}, {"location": "user/changelog/#fixes_19", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix GitHub action error in non-Python projects - #340
    "}, {"location": "user/changelog/#miscellaneous_15", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Lint GitHub action - #342
    "}, {"location": "user/changelog/#202-release-date-2023-08-23", "title": "2.0.2 - Release Date: [2023-08-23]", "text": ""}, {"location": "user/changelog/#documentation_14", "title": "\ud83d\udcdd Documentation", "text": "
    • Add version dropdown to the documentation - #336

    • Break the documentation down into smaller subsection - #329

    "}, {"location": "user/changelog/#201-release-date-2023-08-22", "title": "2.0.1 - Release Date: [2023-08-22]", "text": ""}, {"location": "user/changelog/#fixes_20", "title": "\ud83d\udc1b Fixes", "text": "
    • Fix optional flags in GitHub action - #334
    "}, {"location": "user/changelog/#200-release-date-2023-08-17", "title": "2.0.0 - Release Date: [2023-08-17]", "text": ""}, {"location": "user/changelog/#breaking-changes_6", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Remove camel case conversion of internal models - #308

    • Derive component type automatically from class name - #309

    • Refactor input/output types - #232

    • v2 - #321

    "}, {"location": "user/changelog/#features_9", "title": "\ud83d\ude80 Features", "text": "
    • Automatically support schema generation for custom components - #307

    • Derive component type automatically from class name - #309

    "}, {"location": "user/changelog/#refactor_14", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor input/output types - #232
    "}, {"location": "user/changelog/#documentation_15", "title": "\ud83d\udcdd Documentation", "text": "
    • Fix editor integration example in docs - #273
    "}, {"location": "user/changelog/#172-release-date-2023-08-16", "title": "1.7.2 - Release Date: [2023-08-16]", "text": ""}, {"location": "user/changelog/#refactor_15", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor Kafka Connect handler - #322
    "}, {"location": "user/changelog/#documentation_16", "title": "\ud83d\udcdd Documentation", "text": "
    • Add KPOps Runner GitHub Action to the documentation - #325

    • Remove :type and :rtype from docstrings - #324

    "}, {"location": "user/changelog/#171-release-date-2023-08-15", "title": "1.7.1 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#documentation_17", "title": "\ud83d\udcdd Documentation", "text": "
    • Modularize and autogenerate examples for the documentation - #267

    • Update the variable documentation - #266

    "}, {"location": "user/changelog/#170-release-date-2023-08-15", "title": "1.7.0 - Release Date: [2023-08-15]", "text": ""}, {"location": "user/changelog/#features_10", "title": "\ud83d\ude80 Features", "text": "
    • Add flag to exclude pipeline steps - #300
    "}, {"location": "user/changelog/#160-release-date-2023-08-10", "title": "1.6.0 - Release Date: [2023-08-10]", "text": ""}, {"location": "user/changelog/#refactor_16", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor handling of Helm flags - #319
    "}, {"location": "user/changelog/#150-release-date-2023-08-10", "title": "1.5.0 - Release Date: [2023-08-10]", "text": ""}, {"location": "user/changelog/#features_11", "title": "\ud83d\ude80 Features", "text": "
    • Refactor Helm wrapper and add --set-file flag - #311
    "}, {"location": "user/changelog/#refactor_17", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor Helm wrapper and add --set-file flag - #311

    • Set default for ToSection topics - #313

    • Annotate types for ToSection models mapping - #315

    "}, {"location": "user/changelog/#miscellaneous_16", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Check Poetry lock file consistency - #316
    "}, {"location": "user/changelog/#140-release-date-2023-08-02", "title": "1.4.0 - Release Date: [2023-08-02]", "text": ""}, {"location": "user/changelog/#fixes_21", "title": "\ud83d\udc1b Fixes", "text": "
    • Validate unique step names - #292
    "}, {"location": "user/changelog/#refactor_18", "title": "\ud83c\udfed Refactor", "text": "
    • Order PipelineComponent fields - #290

    • Migrate requests to httpx - #302

    • Refactor CLI using dtyper - #306

    "}, {"location": "user/changelog/#miscellaneous_17", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update Black - #294

    • Fix vulnerability in mkdocs-material - #295

    • Move breaking changes section upper in the change log config - #287

    "}, {"location": "user/changelog/#132-release-date-2023-07-13", "title": "1.3.2 - Release Date: [2023-07-13]", "text": ""}, {"location": "user/changelog/#fixes_22", "title": "\ud83d\udc1b Fixes", "text": "
    • Exclude Helm tests from dry-run diff - #293
    "}, {"location": "user/changelog/#131-release-date-2023-07-11", "title": "1.3.1 - Release Date: [2023-07-11]", "text": ""}, {"location": "user/changelog/#refactor_19", "title": "\ud83c\udfed Refactor", "text": "
    • Remove workaround for pipeline steps - #276
    "}, {"location": "user/changelog/#miscellaneous_18", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update codeowners - #281

    • Reactivate Windows CI - #255

    • Downgrade Poetry version on the Windows CI pipeline - #286

    • Set ANSI theme for output of kpops generate - #289

    "}, {"location": "user/changelog/#130-release-date-2023-07-07", "title": "1.3.0 - Release Date: [2023-07-07]", "text": ""}, {"location": "user/changelog/#refactor_20", "title": "\ud83c\udfed Refactor", "text": "
    • Plural broker field in pipeline config - #278
    "}, {"location": "user/changelog/#documentation_18", "title": "\ud83d\udcdd Documentation", "text": "
    • Update KPOps runner readme for dev versions - #279
    "}, {"location": "user/changelog/#breaking-changes_7", "title": "\ud83c\udfd7\ufe0f Breaking changes", "text": "
    • Plural broker field in pipeline config - #278
    "}, {"location": "user/changelog/#miscellaneous_19", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Add breaking changes section to change log config - #280
    "}, {"location": "user/changelog/#124-release-date-2023-06-27", "title": "1.2.4 - Release Date: [2023-06-27]", "text": ""}, {"location": "user/changelog/#miscellaneous_20", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Update changelog action to contain miscellaneous PRs - #269
    "}, {"location": "user/changelog/#123-release-date-2023-06-22", "title": "1.2.3 - Release Date: [2023-06-22]", "text": ""}, {"location": "user/changelog/#fixes_23", "title": "\ud83d\udc1b Fixes", "text": "
    • Refactor custom component validation & hide field from kpops output - #265
    "}, {"location": "user/changelog/#refactor_21", "title": "\ud83c\udfed Refactor", "text": "
    • Refactor custom component validation & hide field from kpops output - #265
    "}, {"location": "user/changelog/#miscellaneous_21", "title": "\ud83c\udf00 Miscellaneous", "text": ""}, {"location": "user/changelog/#122-release-date-2023-06-21", "title": "1.2.2 - Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#miscellaneous_22", "title": "\ud83c\udf00 Miscellaneous", "text": "
    • Create workflow to lint CI - #260

    • Fix update docs when releasing - #261

    • Rename change log message for uncategorized issues - #262

    "}, {"location": "user/changelog/#121-release-date-2023-06-21", "title": "1.2.1 - Release Date: [2023-06-21]", "text": "Uncategorized
    • Fix update docs in release workflow - #258
    "}, {"location": "user/changelog/#120-release-date-2023-06-21", "title": "1.2.0 - Release Date: [2023-06-21]", "text": ""}, {"location": "user/changelog/#features_12", "title": "\ud83d\ude80 Features", "text": "
    • Add helm repo update <repo-name> for Helm >3.7 - #239
    "}, {"location": "user/changelog/#fixes_24", "title": "\ud83d\udc1b Fixes", "text": "
    • add --namespace option to Helm template command - #237

    • Add missing type annotation for Pydantic attributes - #238

    • Fix helm version check - #242

    • Fix Helm Version Check - #244

    • Fix import from external module - #256

    "}, {"location": "user/changelog/#refactor_22", "title": "\ud83c\udfed Refactor", "text": "
    • Remove enable option from helm diff - #235

    • Refactor variable substitution - #198

    Uncategorized
    • Add background to docs home page - #236

    • Update Poetry version in CI - #247

    • Add pip cache in KPOps runner action - #249

    • Check types using Pyright - #251

    • Remove MyPy - #252

    • Disable broken Windows CI temporarily - #253

    • Update release and publish workflows - #254

    • Fix release & publish workflows - #257

    "}, {"location": "user/what-is-kpops/", "title": "What is KPOps?", "text": "

    With a couple of easy commands in the shell, and a pipeline.yaml of under 30 lines, KPOps can not only deploy a Kafka pipeline1 to a Kubernetes cluster, but also reset, clean or destroy it!

    "}, {"location": "user/what-is-kpops/#key-features", "title": "Key features", "text": "
• Deploy Kafka apps to Kubernetes: KPOps allows you to deploy consecutive Kafka Streams applications and producers using an easy-to-read and -write pipeline definition.
    • Manage Kafka Connectors: KPOps connects with your Kafka Connect cluster and deploys, validates, and deletes your connectors.
    • Configure multiple pipelines and steps: KPOps has various abstractions that simplify configuring multiple pipelines and steps within pipelines by sharing common configuration between different components, such as producers or streaming applications.
    • Handle your topics and schemas: KPOps not only creates and deletes your topics but also registers and deletes your schemas.
• Clean termination of Kafka components: KPOps removes your pipeline components (e.g., Kafka Streams applications) from the Kubernetes cluster and cleans up the component-related state (e.g., removing/resetting the offsets of Kafka consumer groups).
    • Preview your pipeline changes: With the KPOps dry-run, you can ensure your pipeline definition is set up correctly. This helps to minimize downtime and prevent potential errors or issues that could impact your production environment.
    "}, {"location": "user/what-is-kpops/#example", "title": "Example", "text": "An overview of Word-count pipeline shown in Streams Explorer Word-count pipeline.yaml
    - type: producer-app-v2\n  name: data-producer\n  values:\n    image: bakdata/kpops-demo-sentence-producer\n\n- type: streams-app-v2\n  name: word-counter\n  to:\n    topics:\n      ${output_topic_name}:\n        type: output\n        configs:\n          cleanup.policy: compact\n  values:\n    image: bakdata/kpops-demo-word-count-app\n    replicaCount: 1\n\n- type: kafka-sink-connector\n  name: redis-sink-connector\n  config:\n    connector.class: com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector\n    redis.hosts: redis-headless:6379\n    redis.database: 0\n    tasks.max: 1\n    key.converter: org.apache.kafka.connect.storage.StringConverter\n    value.converter: org.apache.kafka.connect.storage.StringConverter\n
1. A Kafka pipeline can consist of consecutive streaming applications, producers, and connectors.

    "}, {"location": "user/core-concepts/config/", "title": "Configuration", "text": "

    KPOps reads its global configuration that is unrelated to a pipeline's components from config.yaml.

    Consider enabling KPOps' editor integration feature to enjoy the benefits of autocompletion and validation when configuring your pipeline.

    To learn about any of the available settings, take a look at the example below.

    config.yaml
# CONFIGURATION\n#\n# Base directory to the pipelines (default is current working directory)\npipeline_base_dir: .\n# The Kafka brokers address.\n# REQUIRED\nkafka_brokers: \"http://broker1:9092,http://broker2:9092\"\n# Configure the topic name variables you can use in the pipeline definition.\ntopic_name_config:\n  # Configures the value for the variable ${output_topic_name}\n  default_output_topic_name: ${pipeline.name}-${component.name}\n  # Configures the value for the variable ${error_topic_name}\n  default_error_topic_name: ${pipeline.name}-${component.name}-error\n# Configuration for Schema Registry.\nschema_registry:\n  # Whether the Schema Registry handler should be initialized.\n  enabled: false\n  # Address of the Schema Registry.\n  url: \"http://localhost:8081\"\n# Configuration for the Kafka REST Proxy.\nkafka_rest:\n  # Address of the Kafka REST Proxy.\n  url: \"http://localhost:8082\"\n# Configuration for Kafka Connect.\nkafka_connect:\n  # Address of Kafka Connect.\n  url: \"http://localhost:8083\"\n# Flag for `helm upgrade --install`.\n# Create the release namespace if not present.\ncreate_namespace: false\n# Global flags for Helm.\nhelm_config:\n  # Name of kubeconfig context (`--kube-context`)\n  context: name\n  # Run Helm in Debug mode.\n  debug: false\n  # Kubernetes API version used for Capabilities.APIVersions\n  api_version: null\n# Configure Helm Diff.\nhelm_diff_config:\n  # Set of keys that should not be checked.\n  ignore:\n    - name\n    - imageTag\n# Whether to retain clean up jobs in the cluster or uninstall them after\n# completion.\nretain_clean_jobs: false\n

    Environment-specific pipeline definitions

Similarly to defaults, you can have any number of additional environment-specific pipeline definitions. The naming convention is the same: add a suffix of the form _{environment} to the filename.

    "}, {"location": "user/core-concepts/defaults/", "title": "Defaults", "text": "

KPOps has an efficient way of dealing with repeated settings, which manifests as defaults.yaml. This file gives you the power to set defaults for any and all components, eliminating the need to repeat the same settings in pipeline.yaml.

    See real-world examples for defaults.

    "}, {"location": "user/core-concepts/defaults/#features", "title": "Features", "text": ""}, {"location": "user/core-concepts/defaults/#inheritance", "title": "Inheritance", "text": "

    An important mechanic of KPOps is that defaults set for a component apply to all components that inherit from it.

    It is possible, although not recommended, to add settings that are specific to a component's subclass. An example would be configuring offset_topic under kafka-connector instead of kafka-source-connector.

    "}, {"location": "user/core-concepts/defaults/#configuration", "title": "Configuration", "text": "

KPOps allows using multiple defaults files. The defaults.yaml (or defaults_<env>.yaml) settings can be distributed across multiple files, which KPOps picks up and merges into a single pipeline definition. KPOps starts reading defaults files from the directory where the pipeline.yaml is located and picks up every defaults file on its way up to the directory defined by pipeline_base_dir.

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    It is important to note that defaults_{environment}.yaml overrides only the settings that are explicitly set to be different from the ones in the base defaults file.

    defaults merge priority

    Imagine the following folder structure, where the pipeline_base_dir is configured to pipelines:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    KPOps picks up the defaults in the following order (high to low priority):

    • ./pipelines/distributed-defaults/pipeline-deep/defaults.yaml
    • ./pipelines/distributed-defaults/defaults_dev.yaml
    • ./pipelines/distributed-defaults/defaults.yaml
    "}, {"location": "user/core-concepts/defaults/#components", "title": "Components", "text": "

The defaults code blocks in this section contain the full set of settings that are specific to the component. If a setting already exists in a parent's configuration, it is not repeated in the child's block.

    "}, {"location": "user/core-concepts/defaults/#kubernetesapp", "title": "KubernetesApp", "text": "defaults.yaml
    # Base Kubernetes App\n#\n# Parent of: HelmApp\n# Child of: PipelineComponent\nkubernetes-app:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  values: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/defaults/#streamsapp", "title": "StreamsApp", "text": "defaults.yaml
# StreamsApp component that configures a streams bootstrap app.\n#\n# Child of: KafkaApp\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\nstreams-app:\n  # No arbitrary keys are allowed under `values` here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  values: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n      # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resource.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n
    "}, {"location": "user/core-concepts/defaults/#producerapp", "title": "ProducerApp", "text": "defaults.yaml
    \n
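    A hedged sketch of producer-app defaults, based on the producer-app values shown later in this document (topic names are illustrative):
    # Holds configuration to use as values for the streams-bootstrap producer-app Helm chart.\nproducer-app:\n  values: # required\n    streams: # required, producer-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      outputTopic: output_topic\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n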
    "}, {"location": "user/core-concepts/defaults/#kafkaconnector", "title": "KafkaConnector", "text": "defaults.yaml
    # Kafka connector\n#\n# Parent of: KafkaSinkConnector, KafkaSourceConnector\n# Child of: PipelineComponent\nkafka-connector:\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # Full documentation on connectors: https://kafka.apache.org/documentation/#connectconfigs\n  config: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/defaults/#kafkasourceconnector", "title": "KafkaSourceConnector", "text": "defaults.yaml
    # Kafka source connector\n#\n# Child of: KafkaConnector\nkafka-source-connector:\n  # The source connector has no `from` section\n  # from:\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/defaults/#kafkasinkconnector", "title": "KafkaSinkConnector", "text": "defaults.yaml
    # Kafka sink connector\n#\n# Child of: KafkaConnector\nkafka-sink-connector:\n  # No settings differ from `kafka-connector`\n
    "}, {"location": "user/core-concepts/components/helm-app/", "title": "HelmApp", "text": ""}, {"location": "user/core-concepts/components/helm-app/#usage", "title": "Usage", "text": "

    Can be used to deploy any app in Kubernetes using Helm, for example, a REST service that serves Kafka data.

    "}, {"location": "user/core-concepts/components/helm-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kubernetes app managed through Helm with an associated Helm chart\n- type: helm-app\n  name: helm-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  values: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/helm-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/helm-app/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/helm-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/helm-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/helm-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kafka-connector/", "title": "KafkaConnector", "text": "

    KafkaConnector is a component that deploys Kafka Connectors. Because a connector is always either a sink or a source connector, it is not recommended to use KafkaConnector directly for deployment in pipeline.yaml. Instead, KafkaConnector should be used in defaults.yaml to set defaults for all connectors in the pipeline, as they share some common settings.
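    A hedged sketch of this pattern, with illustrative names and values: shared settings go in defaults.yaml, while the concrete sink and source connectors are declared in pipeline.yaml.
    # defaults.yaml: settings shared by every connector in the pipeline\nkafka-connector:\n  config:\n    tasks.max: 1\n  resetter_values:\n    imageTag: \"1.2.3\"\n\n# pipeline.yaml: each concrete connector only declares what differs\n- type: kafka-sink-connector\n  name: my-sink-connector # hypothetical name\n  config:\n    topics: ${pipeline.name}-output-topic # connector-specific settings\n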

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/", "title": "KafkaSinkConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#usage", "title": "Usage", "text": "

    Lets other systems pull data from Apache Kafka.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka sink connector\n- type: kafka-sink-connector\n  name: kafka-sink-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # Full documentation on connectors: https://kafka.apache.org/documentation/#connectconfigs\n  config: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-sink-connector/#deploy", "title": "deploy", "text": "
    • Add the sink connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema Registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#destroy", "title": "destroy", "text": "

    The associated sink connector is removed from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#reset", "title": "reset", "text": "

    Reset the consumer group offsets using bakdata's sink resetter.

    "}, {"location": "user/core-concepts/components/kafka-sink-connector/#clean", "title": "clean", "text": "
    • Delete associated consumer group
    • Delete configured error topics
    "}, {"location": "user/core-concepts/components/kafka-source-connector/", "title": "KafkaSourceConnector", "text": "

    Subclass of KafkaConnector.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#usage", "title": "Usage", "text": "

    Manages source connectors in your Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Kafka source connector\n- type: kafka-source-connector # required\n  name: kafka-source-connector # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # The source connector has no `from` section\n  # from:\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  # Full documentation on connectors: https://kafka.apache.org/documentation/#connectconfigs\n  config: # required\n    tasks.max: 1\n  # Overriding Kafka Connect Resetter Helm values. E.g. to override the\n  # Image Tag etc.\n  resetter_values:\n    imageTag: \"1.2.3\"\n  # offset.storage.topic\n  # https://kafka.apache.org/documentation/#connect_running\n  offset_topic: offset_topic\n
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kafka-source-connector/#deploy", "title": "deploy", "text": "
    • Add the source connector to the Kafka Connect cluster
    • Create the output topics if provided (optional)
    • Register schemas in the Schema Registry if provided (optional)
    "}, {"location": "user/core-concepts/components/kafka-source-connector/#destroy", "title": "destroy", "text": "

    Remove the source connector from the Kafka Connect cluster.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#reset", "title": "reset", "text": "

    Delete state associated with the connector using bakdata's Kafka Connect resetter.

    "}, {"location": "user/core-concepts/components/kafka-source-connector/#clean", "title": "clean", "text": "
    • Delete all associated output topics
    • Delete all associated schemas in the Schema Registry
    • Delete state associated with the connector
    "}, {"location": "user/core-concepts/components/kubernetes-app/", "title": "KubernetesApp", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#usage", "title": "Usage", "text": "

    Can be used to create components for any Kubernetes app.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Base Kubernetes App\n- type: kubernetes-app\n  name: kubernetes-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  values: # required\n    image: exampleImage # Example\n    debug: false # Example\n    commandLine: {} # Example\n
    "}, {"location": "user/core-concepts/components/kubernetes-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/kubernetes-app/#deploy", "title": "deploy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#destroy", "title": "destroy", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/kubernetes-app/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/overview/", "title": "Overview", "text": "

    This section explains the different components of KPOps, their usage and configuration in the pipeline definition pipeline.yaml.

    flowchart BT\n    KubernetesApp --> PipelineComponent\n    HelmApp --> KubernetesApp\n    StreamsBootstrap --> HelmApp\n    StreamsApp --> StreamsBootstrap\n    ProducerApp --> StreamsBootstrap\n    KafkaConnector --> PipelineComponent\n    KafkaSourceConnector --> KafkaConnector\n    KafkaSinkConnector --> KafkaConnector\n\n    click KubernetesApp \"./../kubernetes-app\"\n    click HelmApp \"./../helm-app\"\n    click StreamsBootstrap \"./../streams-bootstrap\"\n    click StreamsApp \"./../streams-app\"\n    click ProducerApp \"./../producer-app\"\n    click KafkaConnector \"./../kafka-connector\"\n    click KafkaSourceConnector \"./../kafka-source-connector\"\n    click KafkaSinkConnector \"./../kafka-sink-connector\"

    KPOps component hierarchy

    "}, {"location": "user/core-concepts/components/producer-app/", "title": "ProducerApp", "text": "

    Subclass of StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/producer-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka producer app.

    "}, {"location": "user/core-concepts/components/producer-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # Holds configuration to use as values for the streams bootstrap producer-app Helm\n# chart.\n# More documentation on ProducerApp:\n# https://github.com/bakdata/streams-bootstrap\n- type: producer-app\n  name: producer-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  # from: # While the producer-app does inherit from kafka-app, it does not need a\n  # `from` section, hence it does not support it.\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/producer-app\n  values: # required\n    streams: # required, producer-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      outputTopic: output_topic\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n    nameOverride: override-with-this-name # kafka-app-specific\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/producer-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/producer-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/producer-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/producer-app/#reset", "title": "reset", "text": "

    Do nothing, producers are stateless.

    "}, {"location": "user/core-concepts/components/producer-app/#clean", "title": "clean", "text": "
    • Delete the output topics of the Kafka producer
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/components/streams-app/", "title": "StreamsApp", "text": "

    Subclass of StreamsBootstrap.

    "}, {"location": "user/core-concepts/components/streams-app/#usage", "title": "Usage", "text": "

    Configures a streams-bootstrap Kafka Streams app.

    "}, {"location": "user/core-concepts/components/streams-app/#configuration", "title": "Configuration", "text": "pipeline.yaml
    # StreamsApp component that configures a streams bootstrap app.\n# More documentation on StreamsApp: https://github.com/bakdata/streams-bootstrap\n- type: streams-app # required\n  name: streams-app # required\n  # Pipeline prefix that will prefix every component name. If you wish to not\n  # have any prefix you can specify an empty string.\n  prefix: ${pipeline.name}-\n  from: # Must not be null\n    topics: # read from topic\n      ${pipeline.name}-input-topic:\n        type: input # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra\n      ${pipeline.name}-input-pattern-topic:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      ${pipeline.name}-extra-pattern-topic:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n    components: # read from specific component\n      account-producer:\n        type: input # Implied when role is NOT specified\n      other-producer:\n        role: some-role # Implies `type` to be extra\n      component-as-input-pattern:\n        type: pattern # Implied to be an input pattern if `role` is undefined\n      component-as-extra-pattern:\n        type: pattern # Implied to be an extra pattern if `role` is defined\n        role: some-role\n  # Topic(s) into which the component will write output\n  to:\n    topics:\n      ${pipeline.name}-output-topic:\n        type: output # Implied when role is NOT specified\n      ${pipeline.name}-extra-topic:\n        role: topic-role # Implies `type` to be extra; Will throw an error if `type` is defined\n      ${pipeline.name}-error-topic:\n        type: error\n        # Currently KPOps supports Avro and JSON schemas.\n        key_schema: key-schema # must implement SchemaProvider to use\n        value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs: # https://kafka.apache.org/documentation/#topicconfigs\n          cleanup.policy: compact\n    models: # SchemaProvider is initiated with the values given here\n      model: model\n  namespace: namespace # required\n  # No arbitrary keys are allowed under `app`here\n  # Allowed configs:\n  # https://github.com/bakdata/streams-bootstrap/tree/master/charts/streams-app\n  values: # required\n    # Streams Bootstrap streams section\n    streams: # required, streams-app-specific\n      brokers: ${config.kafka_brokers} # required\n      schemaRegistryUrl: ${config.schema_registry.url}\n      inputTopics:\n        - topic1\n        - topic2\n      outputTopic: output-topic\n      inputPattern: input-pattern\n      extraInputTopics:\n        input_role1:\n          - input_topic1\n          - input_topic2\n        input_role2:\n          - input_topic3\n          - input_topic4\n      extraInputPatterns:\n        pattern_role1: input_pattern1\n      extraOutputTopics:\n        output_role1: output_topic1\n        output_role2: output_topic2\n      errorTopic: error-topic\n      config:\n        my.streams.config: my.value\n    nameOverride: override-with-this-name # streams-app-specific\n    autoscaling: # streams-app-specific\n      consumerGroup: consumer-group # required\n      lagThreshold: 0 # Average target value to trigger scaling actions.\n      enabled: false # Whether to enable auto-scaling using KEDA.\n      # This is the interval to check each trigger on.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#pollinginterval\n      pollingInterval: 30\n 
     # The period to wait after the last trigger reported active before scaling\n      #  the resource back to 0. https://keda.sh/docs/2.9/concepts/scaling-deployments/#cooldownperiod\n      cooldownPeriod: 300\n      # The offset reset policy for the consumer if the the consumer group is\n      # not yet subscribed to a partition.\n      offsetResetPolicy: earliest\n      # This setting is passed to the HPA definition that KEDA will create for a\n      # given resource and holds the maximum number of replicas of the target resouce.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#maxreplicacount\n      maxReplicas: 1\n      # Minimum number of replicas KEDA will scale the resource down to.\n      # https://keda.sh/docs/2.7/concepts/scaling-deployments/#minreplicacount\n      minReplicas: 0\n      # If this property is set, KEDA will scale the resource down to this\n      # number of replicas.\n      # https://keda.sh/docs/2.9/concepts/scaling-deployments/#idlereplicacount\n      idleReplicas: 0\n      topics: # List of auto-generated Kafka Streams topics used by the streams app.\n        - topic1\n        - topic2\n  # Helm repository configuration (optional)\n  # If not set the helm repo add will not be called. Useful when using local Helm charts\n  repo_config:\n    repository_name: bakdata-streams-bootstrap # required\n    url: https://bakdata.github.io/streams-bootstrap/ # required\n    repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"2.12.0\" # Helm chart version\n
    "}, {"location": "user/core-concepts/components/streams-app/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-app/#deploy", "title": "deploy", "text": "

    In addition to KubernetesApp's deploy:

    • Create topics if provided (optional)
    • Submit Avro schemas to the registry if provided (optional)
    "}, {"location": "user/core-concepts/components/streams-app/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-app/#reset", "title": "reset", "text": "
    • Delete the consumer group offsets
    • Delete Kafka Streams state
    "}, {"location": "user/core-concepts/components/streams-app/#clean", "title": "clean", "text": "

    Similar to reset with two additional steps:

    • Delete the app's output topics
    • Delete all associated schemas in the Schema Registry
    "}, {"location": "user/core-concepts/components/streams-bootstrap/", "title": "Streams Bootstrap", "text": "

    Subclass of HelmApp.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#usage", "title": "Usage", "text": "
    • Defines a streams-bootstrap component

    • Often used in defaults.yaml

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#operations", "title": "Operations", "text": ""}, {"location": "user/core-concepts/components/streams-bootstrap/#deploy", "title": "deploy", "text": "

    Deploy using Helm.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#destroy", "title": "destroy", "text": "

    Uninstall Helm release.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#reset", "title": "reset", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/components/streams-bootstrap/#clean", "title": "clean", "text": "

    Do nothing.

    "}, {"location": "user/core-concepts/variables/environment_variables/", "title": "Environment variables", "text": "

    Environment variables can be set by using the export command in Linux or the set command in Windows.

    dotenv files

    KPOps currently supports .env files only for variables related to the config. Full support for .env files is on the roadmap. One possible way to use such a file is to export its contents manually with the following command: export $(xargs < .env). This works in bash, provided there are no spaces inside the values.

    "}, {"location": "user/core-concepts/variables/environment_variables/#config", "title": "Config", "text": "

    These variables take precedence over the settings in config.yaml. Variables marked as required can instead be set in the global config.

    • KPOPS_PIPELINE_BASE_DIR (default: ., required: False): Base directory to the pipelines (default is current working directory). Setting name: pipeline_base_dir
    • KPOPS_KAFKA_BROKERS (no default, required: True): The comma separated Kafka brokers address. Setting name: kafka_brokers
    • KPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME (default: ${pipeline.name}-${component.name}, required: False): Configures the value for the variable ${output_topic_name}. Setting name: topic_name_config.default_output_topic_name
    • KPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME (default: ${pipeline.name}-${component.name}-error, required: False): Configures the value for the variable ${error_topic_name}. Setting name: topic_name_config.default_error_topic_name
    • KPOPS_SCHEMA_REGISTRY__ENABLED (default: False, required: False): Whether the Schema Registry handler should be initialized. Setting name: schema_registry.enabled
    • KPOPS_SCHEMA_REGISTRY__URL (default: http://localhost:8081/, required: False): Address of the Schema Registry. Setting name: schema_registry.url
    • KPOPS_SCHEMA_REGISTRY__TIMEOUT (default: 30, required: False): Operation timeout in seconds. Setting name: schema_registry.timeout
    • KPOPS_KAFKA_REST__URL (default: http://localhost:8082/, required: False): Address of the Kafka REST Proxy. Setting name: kafka_rest.url
    • KPOPS_KAFKA_REST__TIMEOUT (default: 30, required: False): Operation timeout in seconds. Setting name: kafka_rest.timeout
    • KPOPS_KAFKA_CONNECT__URL (default: http://localhost:8083/, required: False): Address of Kafka Connect. Setting name: kafka_connect.url
    • KPOPS_KAFKA_CONNECT__TIMEOUT (default: 30, required: False): Operation timeout in seconds. Setting name: kafka_connect.timeout
    • KPOPS_CREATE_NAMESPACE (default: False, required: False): Flag for helm upgrade --install. Create the release namespace if not present. Setting name: create_namespace
    • KPOPS_HELM_CONFIG__CONTEXT (no default, required: False): Name of kubeconfig context (--kube-context). Setting name: helm_config.context
    • KPOPS_HELM_CONFIG__DEBUG (default: False, required: False): Run Helm in Debug mode. Setting name: helm_config.debug
    • KPOPS_HELM_CONFIG__API_VERSION (no default, required: False): Kubernetes API version used for Capabilities.APIVersions. Setting name: helm_config.api_version
    • KPOPS_HELM_DIFF_CONFIG__IGNORE (no default, required: True): Set of keys that should not be checked. Setting name: helm_diff_config.ignore
    • KPOPS_RETAIN_CLEAN_JOBS (default: False, required: False): Whether to retain clean up jobs in the cluster or uninstall them after completion. Setting name: retain_clean_jobs
    • KPOPS_OPERATION_MODE (default: Helm, required: False): The operation mode of KPOps. Setting name: operation_mode
    config_env_vars.env Exhaustive list of all config-related environment variables
    # Global config environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# settings in `config.yaml`. Variables marked as required can instead\n# be set in the global config.\n#\n# pipeline_base_dir\n# Base directory to the pipelines (default is current working\n# directory)\nKPOPS_PIPELINE_BASE_DIR=.\n# kafka_brokers\n# The comma separated Kafka brokers address.\nKPOPS_KAFKA_BROKERS # No default value, required\n# topic_name_config.default_output_topic_name\n# Configures the value for the variable ${output_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_OUTPUT_TOPIC_NAME=${pipeline.name}-${component.name}\n# topic_name_config.default_error_topic_name\n# Configures the value for the variable ${error_topic_name}\nKPOPS_TOPIC_NAME_CONFIG__DEFAULT_ERROR_TOPIC_NAME=${pipeline.name}-${component.name}-error\n# schema_registry.enabled\n# Whether the Schema Registry handler should be initialized.\nKPOPS_SCHEMA_REGISTRY__ENABLED=False\n# schema_registry.url\n# Address of the Schema Registry.\nKPOPS_SCHEMA_REGISTRY__URL=http://localhost:8081/\n# schema_registry.timeout\n# Operation timeout in seconds.\nKPOPS_SCHEMA_REGISTRY__TIMEOUT=30\n# kafka_rest.url\n# Address of the Kafka REST Proxy.\nKPOPS_KAFKA_REST__URL=http://localhost:8082/\n# kafka_rest.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_REST__TIMEOUT=30\n# kafka_connect.url\n# Address of Kafka Connect.\nKPOPS_KAFKA_CONNECT__URL=http://localhost:8083/\n# kafka_connect.timeout\n# Operation timeout in seconds.\nKPOPS_KAFKA_CONNECT__TIMEOUT=30\n# create_namespace\n# Flag for `helm upgrade --install`. Create the release namespace if\n# not present.\nKPOPS_CREATE_NAMESPACE=False\n# helm_config.context\n# Name of kubeconfig context (`--kube-context`)\nKPOPS_HELM_CONFIG__CONTEXT # No default value, not required\n# helm_config.debug\n# Run Helm in Debug mode\nKPOPS_HELM_CONFIG__DEBUG=False\n# helm_config.api_version\n# Kubernetes API version used for `Capabilities.APIVersions`\nKPOPS_HELM_CONFIG__API_VERSION # No default value, not required\n# helm_diff_config.ignore\n# Set of keys that should not be checked.\nKPOPS_HELM_DIFF_CONFIG__IGNORE # No default value, required\n# retain_clean_jobs\n# Whether to retain clean up jobs in the cluster or uninstall the,\n# after completion.\nKPOPS_RETAIN_CLEAN_JOBS=False\n# operation_mode\n# The operation mode of KPOps\nKPOPS_OPERATION_MODE=Helm\n
    "}, {"location": "user/core-concepts/variables/environment_variables/#cli", "title": "CLI", "text": "

    These variables take precedence over the commands' flags. If a variable is set, the corresponding flag does not have to be specified in commands. Variables marked as required can instead be set as flags.

    • KPOPS_CONFIG_PATH (default: ., required: False): Path to the dir containing config.yaml files
    • KPOPS_DOTENV_PATH (no default, required: False): Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one.
    • KPOPS_ENVIRONMENT (no default, required: False): The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development).
    • KPOPS_PIPELINE_PATHS (no default, required: True): Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'.
    • KPOPS_PIPELINE_STEPS (no default, required: False): Comma separated list of steps to apply the command on
    cli_env_vars.env Exhaustive list of all cli-related environment variables
    # CLI Environment variables\n#\n# The default setup is shown. These variables take precedence over the\n# commands' flags. If a variable is set, the corresponding flag does\n# not have to be specified in commands. Variables marked as required\n# can instead be set as flags.\n#\n# Path to the dir containing config.yaml files\nKPOPS_CONFIG_PATH=.\n# Path to dotenv file. Multiple files can be provided. The files will\n# be loaded in order, with each file overriding the previous one.\nKPOPS_DOTENV_PATH # No default value, not required\n# The environment you want to generate and deploy the pipeline to.\n# Suffix your environment files with this value (e.g.\n# defaults_development.yaml for environment=development).\nKPOPS_ENVIRONMENT # No default value, not required\n# Paths to dir containing 'pipeline.yaml' or files named\n# 'pipeline.yaml'.\nKPOPS_PIPELINE_PATHS # No default value, required\n# Comma separated list of steps to apply the command on\nKPOPS_PIPELINE_STEPS # No default value, not required\n
    "}, {"location": "user/core-concepts/variables/substitution/", "title": "Substitution", "text": "

    KPOps supports the usage of placeholders and environment variables in pipeline definition and defaults.

    "}, {"location": "user/core-concepts/variables/substitution/#component-specific-variables", "title": "Component-specific variables", "text": "

    These variables can be used in a component's definition to refer to any of its attributes, including ones that the user has defined in the defaults.

    All of them are prefixed with component. and take the form component.{attribute_name}. If the attribute itself contains attributes, they can be referred to like this: component.{attribute_name}.{subattribute_name}.

    Example
    - type: scheduled-producer\n  values:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_schedule: \"${component.values.schedule}\"\n    commandLine:\n      FAKE_ARG: \"fake-arg-value\"\n    schedule: \"30 3/8 * * *\"\n- type: converter\n  values:\n    commandLine:\n      CONVERT_XML: true\n    resources:\n      limits:\n        memory: 2G\n      requests:\n        memory: 2G\n- type: filter\n  name: \"filter-app\"\n  values:\n    labels:\n      app_type: \"${component.type}\"\n      app_name: \"${component.name}\"\n      app_resources_requests_memory: \"${component.values.resources.requests.memory}\"\n      ${component.type}: \"${component.values.labels.app_name}-${component.values.labels.app_type}\"\n      test_placeholder_in_placeholder: \"${component.values.labels.${component.type}}\"\n    commandLine:\n      TYPE: \"nothing\"\n    resources:\n      requests:\n        memory: 3G\n    replicaCount: 4\n    autoscaling:\n      minReplicas: 4\n      maxReplicas: 4\n
    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-config-specific-variables", "title": "Pipeline-config-specific variables", "text": "

    These variables include all fields in the config and refer to the pipeline configuration that is independent of the components.

    All such variables are prefixed with config. and are of the same form as the component-specific variables.

    Info

    error_topic_name is an alias for config.topic_name_config.default_error_topic_name
    output_topic_name is an alias for config.topic_name_config.default_output_topic_name
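    A minimal sketch of how these variables and aliases can appear inside a component definition (component type and topic layout follow the examples in this document):
    - type: streams-app\n  values:\n    streams:\n      brokers: ${config.kafka_brokers}\n      schemaRegistryUrl: ${config.schema_registry.url}\n  to:\n    topics:\n      ${output_topic_name}: # alias for ${config.topic_name_config.default_output_topic_name}\n        type: output\n      ${error_topic_name}: # alias for ${config.topic_name_config.default_error_topic_name}\n        type: error\n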

    "}, {"location": "user/core-concepts/variables/substitution/#environment-variables", "title": "Environment variables", "text": "

    Environment variables such as $PATH can be used in the pipeline definition and defaults without any transformation, following the form ${ENV_VAR_NAME}. This, of course, includes variables like the ones relevant to the KPOps CLI that are exported by the user.

    See all KPOps environment variables
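    For instance, assuming DOCKER_REGISTRY and NAMESPACE are exported as in the examples later in this document, a component could reference them like this (the app and image names are hypothetical):
    - type: helm-app\n  name: my-rest-service # hypothetical name\n  namespace: ${NAMESPACE}\n  values:\n    image: ${DOCKER_REGISTRY}/my-rest-service # hypothetical image\n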

    "}, {"location": "user/core-concepts/variables/substitution/#pipeline-name-variables", "title": "Pipeline name variables", "text": "

    These are special variables that refer to the name and path of a pipeline.

    • ${pipeline.name}: Concatenated path of the parent directories in which pipeline.yaml is defined. For instance, for ./data/pipelines/v1/pipeline.yaml the value of the variable would be data-pipelines-v1.

    • ${pipeline_name_<level>}: Similar to the previous variable, each <level> contains a part of the path to the pipeline.yaml file. Considering the previous example, ${pipeline_name_0} would be data, ${pipeline_name_1} would be pipelines, and ${pipeline_name_2} would be v1, as illustrated in the sketch below.
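    A minimal sketch for a pipeline defined in ./data/pipelines/v1/pipeline.yaml (topic names are illustrative):
    to:\n  topics:\n    ${pipeline.name}-output-topic: # resolves to data-pipelines-v1-output-topic\n      type: output\n    ${pipeline_name_0}-${pipeline_name_2}-error-topic: # resolves to data-v1-error-topic\n      type: error\n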

    "}, {"location": "user/core-concepts/variables/substitution/#advanced-use-cases", "title": "Advanced use cases", "text": "
    1. Refer to default component field values: As long as a value is assigned to a component attribute, it is possible to refer to it with a placeholder. To see all component fields, take a look at the pipeline schema.
    2. Chaining variables: It is possible to chain any number of variables; see the example above.
    3. Cross-component substitution: YAML is quite an intricate language and with some of its magic one could write cross-component references. One possible approach is sketched below.
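    A hedged sketch of one such approach, assuming plain YAML anchors and aliases are acceptable in your pipeline definition (component and topic names are illustrative):
    - type: producer-app\n  name: account-producer\n  values:\n    streams:\n      outputTopic: &accounts-topic ${pipeline.name}-accounts # anchor the topic name\n- type: streams-app\n  name: account-consumer # hypothetical consumer\n  values:\n    streams:\n      inputTopics:\n        - *accounts-topic # alias re-uses the producer's output topic\n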
    "}, {"location": "user/examples/atm-fraud-pipeline/", "title": "ATM fraud detection pipeline", "text": "

    ATM fraud is a demo pipeline for ATM fraud detection. The original by Confluent is written in KSQL and outlined in this blogpost. The one used in this example is re-built from scratch using bakdata's streams-bootstrap library.

    "}, {"location": "user/examples/atm-fraud-pipeline/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a PostgreSQL database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/examples/atm-fraud-pipeline/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/examples/atm-fraud-pipeline/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postgresql", "title": "PostgreSQL", "text": "

    Deploy PostgreSQL using the Bitnami Helm chart. First, add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install PostgreSQL with Helm:

    helm upgrade --install -f ./postgresql.yaml \\\n--namespace kpops \\\npostgresql bitnami/postgresql\n
    PostgreSQL Example Helm chart values (postgresql.yaml)
    auth:\n  database: app_db\n  enablePostgresUser: true\n  password: AppPassword\n  postgresPassword: StrongPassword\n  username: app1\nprimary:\n  persistence:\n    enabled: false\n    existingClaim: postgresql-data-claim\nvolumePermissions:\n  enabled: true\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-detection-example-pipeline-setup", "title": "ATM fraud detection example pipeline setup", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#deploying-the-atm-fraud-detection-pipeline", "title": "Deploying the ATM fraud detection pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps: pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy atm-fraud/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be deployed correctly.

    "}, {"location": "user/examples/atm-fraud-pipeline/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to see the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser. You should be able to see the pipeline shown in the image below:

    An overview of ATM fraud pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/examples/atm-fraud-pipeline/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/examples/atm-fraud-pipeline/#postrgresql", "title": "PostrgreSQL", "text": "

    PostgreSQL can be uninstalled by running the following command:

    helm --namespace kpops uninstall postgresql\n
    "}, {"location": "user/examples/atm-fraud-pipeline/#atm-fraud-pipeline", "title": "ATM fraud pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean atm-fraud/pipeline.yaml --verbose  --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/examples/atm-fraud-pipeline/#common-errors", "title": "Common errors", "text": "
    • deploy fails:
    • Read the error message.
    • Try to correct the mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
    • Run clean.
    • Run deploy --dry-run to avoid having to clean again. If an error is dropped, start over from step 1.
    • If the dry-run is successful, run deploy.
    • clean fails:
    • Read the error message.
    • Try to correct the indicated mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
    • Run clean.
    • If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/quick-start/", "title": "Quick start", "text": ""}, {"location": "user/getting-started/quick-start/#word-count", "title": "Word-count", "text": "

    Word-count is a demo pipeline consisting of a producer producing words to Kafka, a Kafka Streams app counting the number of times each word occurs, and finally a Redis database into which the words are exported.

    "}, {"location": "user/getting-started/quick-start/#what-this-will-demonstrate", "title": "What this will demonstrate", "text": "
    • Deploying a Redis database using Helm
    • Deploying a pipeline using KPOps
    • Destroying a pipeline using KPOps
    "}, {"location": "user/getting-started/quick-start/#prerequisites", "title": "Prerequisites", "text": "

    Completed all steps in the setup.

    "}, {"location": "user/getting-started/quick-start/#setup-and-deployment", "title": "Setup and deployment", "text": ""}, {"location": "user/getting-started/quick-start/#redis", "title": "Redis", "text": "

    Deploy Redis using the Bitnami Helm chart. First, add the Helm repository:

    helm repo add bitnami https://charts.bitnami.com/bitnami && \\\nhelm repo update\n

    Install Redis with Helm:

    helm upgrade --install -f ./values-redis.yaml \\\n--namespace kpops \\\nredis bitnami/redis\n
    Redis example Helm chart values (values-redis.yaml)
    architecture: standalone\nauth:\n  enabled: false\nmaster:\n  count: 1\n  configuration: \"databases 1\"\nimage:\n  tag: 7.0.8\n
    "}, {"location": "user/getting-started/quick-start/#word-count-example-pipeline-setup", "title": "Word-count example pipeline setup", "text": ""}, {"location": "user/getting-started/quick-start/#port-forwarding", "title": "Port forwarding", "text": "

    Before we deploy the pipeline, we need to forward the ports of kafka-rest-proxy and kafka-connect. Run the following commands in two different terminals.

    kubectl port-forward --namespace kpops service/k8kafka-cp-rest 8082:8082\n
    kubectl port-forward --namespace kpops service/k8kafka-cp-kafka-connect 8083:8083\n
    "}, {"location": "user/getting-started/quick-start/#deploying-the-word-count-pipeline", "title": "Deploying the Word-count pipeline", "text": "
    1. Clone the kpops-examples repository and cd into the directory.

    2. Install KPOps: pip install -r requirements.txt.

    3. Export environment variables in your terminal:

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    4. Deploy the pipeline

      kpops deploy word-count/pipeline.yaml --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be deployed correctly.

    "}, {"location": "user/getting-started/quick-start/#check-if-the-deployment-is-successful", "title": "Check if the deployment is successful", "text": "

    You can use the Streams Explorer to inspect the deployed pipeline. To do so, port-forward the service in a separate terminal session using the command below:

    kubectl port-forward -n kpops service/streams-explorer 8080:8080\n

    After that open http://localhost:8080 in your browser.

    You should be able to see the pipeline shown in the image below:

    An overview of Word-count pipeline shown in Streams Explorer

    Attention

    Kafka Connect needs some time to set up the connector. Moreover, Streams Explorer needs a while to scrape the information from Kafka Connect. Therefore, it might take a bit until you see the whole graph.

    "}, {"location": "user/getting-started/quick-start/#teardown-resources", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/quick-start/#redis_1", "title": "Redis", "text": "

    Redis can be uninstalled by running the following command:

    helm --namespace kpops uninstall redis\n
    "}, {"location": "user/getting-started/quick-start/#word-count-pipeline", "title": "Word-count pipeline", "text": "
    1. Export environment variables in your terminal.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Remove the pipeline

      kpops clean word-count/pipeline.yaml --verbose --execute\n

    Note

    You can use the --dry-run flag instead of the --execute flag and check the logs to see whether your pipeline will be destroyed correctly.

    Attention

    If you face any issues destroying this example see Teardown for manual deletion.

    "}, {"location": "user/getting-started/quick-start/#common-errors", "title": "Common errors", "text": "
    • deploy fails:
      1. Read the error message.
      2. Try to correct the mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
      3. Run clean.
      4. Run deploy --dry-run to avoid having to clean again. If an error is dropped, start over from step 1.
      5. If the dry-run is successful, run deploy.
    • clean fails:
      1. Read the error message.
      2. Try to correct the indicated mistakes if there were any. Likely the configuration is incorrect, or the port-forwarding is not working as intended.
      3. Run clean.
      4. If clean fails, follow the steps in teardown.
    "}, {"location": "user/getting-started/setup/", "title": "Setup KPOps", "text": "

    In this part, you will set up KPOps. This includes:

    • optionally creating a local Kubernetes cluster
    • running Apache Kafka and Confluent's Schema Registry
    • installing KPOps
    "}, {"location": "user/getting-started/setup/#prerequisites", "title": "Prerequisites", "text": "
    • k3d (Version 5.4.6+) and Docker (Version >= v20.10.5) or an existing Kubernetes cluster (>= 1.21.0)
    • kubectl (Compatible with server version 1.21.0)
    • Helm (Version 3.8.0+)
    "}, {"location": "user/getting-started/setup/#setup-kubernetes-with-k3d", "title": "Setup Kubernetes with k3d", "text": "

    If you don't have access to an existing Kubernetes cluster, this section will guide you through creating a local cluster. We recommend the lightweight Kubernetes distribution k3s for this. k3d is a wrapper around k3s in Docker that lets you get started fast.

    1. You can install k3d with its installation script:

      wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/v5.4.6/install.sh | bash\n

      For other ways of installing k3d, you can have a look at their installation guide.

    2. The Kafka deployment needs a modified Docker image. In this case, the image is built and pushed to a Docker registry that holds it. If you do not have access to an existing Docker registry, you can use k3d's Docker registry:

      k3d registry create kpops-registry.localhost --port 12345\n
    3. Now you can create a new cluster called kpops that uses the previously created Docker registry:

      k3d cluster create kpops --k3s-arg \"--no-deploy=traefik@server:*\" --registry-use k3d-kpops-registry.localhost:12345\n

    Note

    Creating a new k3d cluster automatically configures kubectl to connect to the local cluster by modifying your ~/.kube/config. In case you manually set the KUBECONFIG variable or don't want k3d to modify your config, k3d offers many other options.

    You can check the cluster status with kubectl get pods -n kube-system. If all returned elements have a STATUS of Running or Completed, then the cluster is up and running.

    "}, {"location": "user/getting-started/setup/#deploy-kafka", "title": "Deploy Kafka", "text": "

    Kafka is an open-source data streaming platform. More information about Kafka can be found in the documentation. To deploy Kafka, this guide uses Confluent's Helm chart.

    1. To allow connectivity to other systems Kafka Connect needs to be extended with drivers. You can install a JDBC driver for Kafka Connect by creating a new Docker image:

      1. Create a Dockerfile with the following content:

        FROM confluentinc/cp-kafka-connect:7.1.3\n\nRUN confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.6.0\n
      2. Build and push the modified image to your private Docker registry:

        docker build . --tag localhost:12345/kafka-connect-jdbc:7.1.3 && \\\ndocker push localhost:12345/kafka-connect-jdbc:7.1.3\n

      Detailed instructions on building, tagging and pushing a docker image can be found in Docker docs.

    2. Add Confluent's Helm chart repository and update the index:

      helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/ &&  \nhelm repo update\n
    3. Install Kafka, Zookeeper, Confluent's Schema Registry, Kafka Rest Proxy, and Kafka Connect. A single Helm chart installs all five components. Below you can find an example for the --values ./kafka.yaml file configuring the deployment accordingly. Deploy the services:

      helm upgrade \\\n    --install \\\n    --version 0.6.1 \\\n    --values ./kafka.yaml \\\n    --namespace kpops \\\n    --create-namespace \\\n    --wait \\\n    k8kafka confluentinc/cp-helm-charts\n
    Kafka Helm chart values (kafka.yaml)

    An example value configuration for Confluent's Helm chart. This configuration deploys a single Kafka Broker, a Schema Registry, Zookeeper, Kafka Rest Proxy, and Kafka Connect with minimal resources.

    cp-zookeeper:\n  enabled: true\n  servers: 1\n  imageTag: 7.1.3\n  heapOptions: \"-Xms124M -Xmx124M\"\n  overrideGroupId: k8kafka\n  fullnameOverride: \"k8kafka-cp-zookeeper\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.2G\n    limits:\n      cpu: 250m\n      memory: 0.2G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka:\n  enabled: true\n  brokers: 1\n  imageTag: 7.1.3\n  podManagementPolicy: Parallel\n  configurationOverrides:\n    \"auto.create.topics.enable\": false\n    \"offsets.topic.replication.factor\": 1\n    \"transaction.state.log.replication.factor\": 1\n    \"transaction.state.log.min.isr\": 1\n    \"confluent.metrics.reporter.topic.replicas\": 1\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.5G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n  persistence:\n    enabled: false\n\ncp-schema-registry:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-schema-registry\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.25G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-connect:\n  enabled: true\n  replicaCount: 1\n  image: k3d-kpops-registry.localhost:12345/kafka-connect-jdbc\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-kafka-connect\"\n  overrideGroupId: k8kafka\n  kafka:\n    bootstrapServers: \"PLAINTEXT://k8kafka-cp-kafka-headless:9092\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 500m\n      memory: 0.25G\n    limits:\n      cpu: 500m\n      memory: 0.25G\n  configurationOverrides:\n    \"consumer.max.poll.records\": \"10\"\n    \"consumer.max.poll.interval.ms\": \"900000\"\n    \"config.storage.replication.factor\": \"1\"\n    \"offset.storage.replication.factor\": \"1\"\n    \"status.storage.replication.factor\": \"1\"\n  cp-schema-registry:\n    url: http://k8kafka-cp-schema-registry:8081\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-kafka-rest:\n  enabled: true\n  imageTag: 7.1.3\n  fullnameOverride: \"k8kafka-cp-rest\"\n  heapOptions: \"-Xms256M -Xmx256M\"\n  resources:\n    requests:\n      cpu: 50m\n      memory: 0.25G\n    limits:\n      cpu: 250m\n      memory: 0.5G\n  prometheus:\n    jmx:\n      enabled: false\n\ncp-ksql-server:\n  enabled: false\ncp-control-center:\n  enabled: false\n
    "}, {"location": "user/getting-started/setup/#deploy-streams-explorer", "title": "Deploy Streams Explorer", "text": "

    Streams Explorer allows examining Apache Kafka data pipelines in a Kubernetes cluster, including the inspection of schemas and monitoring of metrics. First, add the Helm repository:

    helm repo add streams-explorer https://bakdata.github.io/streams-explorer && \\\nhelm repo update\n

    Below you can find an example for the --values ./streams-explorer.yaml file configuring the deployment accordingly. Now, deploy the service:

    helm upgrade \\\n    --install \\\n    --version 0.2.3 \\\n    --values ./streams-explorer.yaml \\\n    --namespace kpops \\\n    streams-explorer streams-explorer/streams-explorer\n
    Streams Explorer Helm chart values (streams-explorer.yaml)

    An example value configuration for the Streams Explorer Helm chart.

    imageTag: \"v2.1.2\"\nconfig:\n   K8S__deployment__cluster: true\n   SCHEMAREGISTRY__url: http://k8kafka-cp-schema-registry.kpops.svc.cluster.local:8081\n   KAFKACONNECT__url: http://k8kafka-cp-kafka-connect.kpops.svc.cluster.local:8083\nresources:\n   requests:\n       cpu: 200m\n       memory: 300Mi\n   limits:\n       cpu: 200m\n       memory: 300Mi\n
    "}, {"location": "user/getting-started/setup/#check-the-status-of-your-deployments", "title": "Check the status of your deployments", "text": "

    Now we will check if all the pods are running in our namespace. You can list all pods in the namespace with this command:

    kubectl --namespace kpops get pods\n

    Then you should see the following output in your terminal:

    NAME                                          READY   STATUS    RESTARTS   AGE\nk8kafka-cp-kafka-connect-8fc7d544f-8pjnt      1/1     Running   0          15m\nk8kafka-cp-zookeeper-0                        1/1     Running   0          15m\nk8kafka-cp-kafka-0                            1/1     Running   0          15m\nk8kafka-cp-schema-registry-588f8c65db-jdwbq   1/1     Running   0          15m\nk8kafka-cp-rest-6bbfd7b645-nwkf8              1/1     Running   0          15m\nstreams-explorer-54db878c67-s8wbz             1/1     Running   0          15m\n

    Pay attention to the STATUS column. The pods should have a status of Running.

    "}, {"location": "user/getting-started/setup/#install-kpops", "title": "Install KPOps", "text": "

    KPOps comes as a PyPI package. You can install it with pip:

    pip install kpops\n
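
    To verify the installation, you can print the installed KPOps version using the --version flag (see the CLI reference):

    kpops --version\n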
    "}, {"location": "user/getting-started/teardown/", "title": "Teardown resources", "text": ""}, {"location": "user/getting-started/teardown/#kpops-teardown-commands", "title": "KPOps teardown commands", "text": "
    • destroy: Removes Kubernetes resources.
    • reset: Runs destroy, resets the states of Kafka Streams apps and resets offsets to zero.
    • clean: Runs reset and removes all Kafka resources.
    "}, {"location": "user/getting-started/teardown/#kpops-deployed-pipeline", "title": "KPOps-deployed pipeline", "text": "

    The kpops CLI can be used to destroy a pipeline that was previously deployed with KPOps. In case that doesn't work, the pipeline can always be taken down manually with helm (see section Infrastructure).

    1. Export environment variables.

      export DOCKER_REGISTRY=bakdata && \\\nexport NAMESPACE=kpops\n
    2. Navigate to the examples folder. Replace <name-of-the-example-directory> with the example you want to tear down, for example the atm-fraud-detection.

    3. Remove the pipeline

      # Uncomment 1 line to either destroy, reset or clean.\n\n# poetry run kpops destroy <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops reset <name-of-the-example-directory>/pipeline.yaml \\\n# poetry run kpops clean <name-of-the-example-directory>/pipeline.yaml \\\n--config <name-of-the-example-directory>/config.yaml \\\n--execute\n
    "}, {"location": "user/getting-started/teardown/#infrastructure", "title": "Infrastructure", "text": "

    Delete namespace:

    kubectl delete namespace kpops\n

    Note

    In case kpops destroy is not working, you can uninstall the pipeline services one by one with Helm; this is equivalent to running kpops destroy. In case a clean uninstall (like the one kpops clean performs) is needed, you also need to delete the topics and schemas created by the deployment of the pipeline.
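
    For example, assuming the pipeline was deployed into the kpops namespace, a minimal manual teardown with Helm could look like this (the release name is a placeholder):

    # list the Helm releases in the namespace\nhelm --namespace kpops list\n\n# uninstall each pipeline release one by one\nhelm --namespace kpops uninstall <release-name>\n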

    "}, {"location": "user/getting-started/teardown/#local-cluster", "title": "Local cluster", "text": "

    Delete local cluster:

    k3d cluster delete kpops\n
    "}, {"location": "user/getting-started/teardown/#local-image-registry", "title": "Local image registry", "text": "

    Delete local registry:

    k3d registry delete k3d-kpops-registry.localhost\n
    "}, {"location": "user/migration-guide/v1-v2/", "title": "Migrate from V1 to V2", "text": ""}, {"location": "user/migration-guide/v1-v2/#derive-component-type-automatically-from-class-name", "title": "Derive component type automatically from class name", "text": "

    KPOps automatically infers the component type from the class name. Therefore, the type and schema_type attributes can be removed from your custom components. By convention, the type is the lowercase, kebab-cased name of the class.

    class MyCoolStreamApp(StreamsApp):\n-    type = \"my-cool-stream-app\"\n+    ...\n

    Because of this new convention, producer has been renamed to producer-app. This must be addressed in your pipeline.yaml and defaults.yaml.

    - producer:\n+ producer-app:\n    app:\n        streams:\n        outputTopic: output_topic\n        extraOutputTopics:\n            output_role1: output_topic1\n            output_role2: output_topic2\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-inputoutput-types", "title": "Refactor input/output types", "text": ""}, {"location": "user/migration-guide/v1-v2/#to-section", "title": "To section", "text": "

    In the to section these have changed:

    • The default type is output
    • If role is set, type is inferred to be extra
    • The type error needs to be defined explicitly
      to:\n    topics:\n      ${pipeline_name}-topic-1:\n-       type: extra\n        role: \"role-1\"\n        ...\n      ${pipeline_name}-topic-2:\n-       type: output\n        ...\n      ${pipeline_name}-topic-3:\n         type: error\n         ...\n
    "}, {"location": "user/migration-guide/v1-v2/#from-section", "title": "From section", "text": "

    In the from section these have changed:

    • The default type is input
    • input-pattern type is replaced by pattern
    • If role is set, type is inferred to be extra
    • If role is set and type is explicitly set to pattern, the type is inferred to be extra-pattern
      from:\n    topics:\n      ${pipeline_name}-input-topic:\n-       type: input\n        ...\n      ${pipeline_name}-extra-topic:\n-       type: extra\n        role: topic-role\n        ...\n      ${pipeline_name}-input-pattern-topic:\n-       type: input-pattern\n+       type: pattern\n        ...\n      ${pipeline_name}-extra-pattern-topic:\n-       type: extra-pattern\n+       type: pattern\n        role: some-role\n        ...\n
    "}, {"location": "user/migration-guide/v1-v2/#remove-camel-case-conversion-of-internal-models", "title": "Remove camel case conversion of internal models", "text": "

    All the internal KPOps models are now snake_case, and only Helm/Kubernetes values require camel casing. You can find an example pipeline.yaml below. Notice that the app section remains untouched.

    ...\ntype: streams-app\n  name: streams-app\n  namespace: namespace\n  app:\n    streams:\n      brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n     autoscaling:\n      consumerGroup: consumer-group\n      lagThreshold: 0\n      enabled: false\n      pollingInterval: 30\n\n  to:\n    topics:\n      ${pipeline_name}-output-topic:\n        type: error\n-       keySchema: key-schema\n+       key_schema: key-schema\n-       valueSchema: value-schema\n+       value_schema: value-schema\n        partitions_count: 1\n        replication_factor: 1\n        configs:\n          cleanup.policy: compact\n    models:\n      model: model\n  prefix: ${pipeline_name}-\n- repoConfig:\n+ repo_config:\n-   repositoryName: bakdata-streams-bootstrap\n+   repository_name: bakdata-streams-bootstrap\n    url: https://bakdata.github.io/streams-bootstrap/\n-   repoAuthFlags:\n+   repo_auth_flags:\n      username: user\n      password: pass\n      ca_file: /home/user/path/to/ca-file\n      insecure_skip_tls_verify: false\n  version: \"1.0.4\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#refactor-handling-of-helm-flags", "title": "Refactor handling of Helm flags", "text": "

    If you are using the KubernetesApp class to define your own Kubernetes resource to deploy, note that the abstract function get_helm_chart, which returns the chart for deploying the app using Helm, is now a Python property and has been renamed to helm_chart.

    class MyCoolApp(KubernetesApp):\n\n+   @property\n    @override\n-   def get_helm_chart(self) -> str:\n+   def helm_chart(self) -> str:\n        return \"./charts/charts-folder\"\n
    "}, {"location": "user/migration-guide/v1-v2/#plural-broker-field-in-pipeline-config", "title": "Plural broker field in pipeline config", "text": "

    Since you can pass a comma-separated string of broker addresses, the broker field in KPOps is now plural. The pluralization affects multiple areas:

    "}, {"location": "user/migration-guide/v1-v2/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- broker: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  kafka_connect_host: \"http://localhost:8083\"\n  kafka_rest_host: \"http://localhost:8082\"\n  schema_registry_url: \"http://localhost:8081\"\n
    "}, {"location": "user/migration-guide/v1-v2/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called brokers.

    ...\n  app:\n    streams:\n-     brokers: ${broker}\n+     brokers: ${brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v1-v2/#environment-variable", "title": "Environment variable", "text": "

    If you previously set the environment variable KPOPS_KAFKA_BROKER, you now need to replace it with KPOPS_KAFKA_BROKERS.
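
    For example, if you export the variable in your shell or CI environment, the rename looks as follows (the broker addresses are placeholders):

    # before (v1)\nexport KPOPS_KAFKA_BROKER=\"broker-1:9092,broker-2:9092\"\n\n# after (v2)\nexport KPOPS_KAFKA_BROKERS=\"broker-1:9092,broker-2:9092\"\n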

    "}, {"location": "user/migration-guide/v2-v3/", "title": "Migrate from V2 to V3", "text": "

    Jump to the summary

    "}, {"location": "user/migration-guide/v2-v3/#use-hash-and-trim-long-helm-release-names-instead-of-only-trimming", "title": "Use hash and trim long Helm release names instead of only trimming", "text": "

    KPOps handles long Helm release names (more than 53 characters) differently. Helm will not find your (long) old release names anymore. Therefore, it is recommended to destroy your pipeline once with KPOps v2 to remove the old Helm release names. After a clean destroy, re-deploy your pipeline with KPOps v3.

    For example, assume you have a component with the Helm release name example-component-name-too-long-fake-fakefakefakefakefake. The new release name is built by shortening the original name to 53 characters and then replacing the last 6 characters of the trimmed name with a dash followed by the first 5 characters of SHA-1(helm_release_name).

    example-component-name-too-long-fake-fakefakef-0a7fc ----> 53 chars\n---------------------------------------------- -----\n  ^Shortened helm_release_name                 ^first 5 characters of SHA1(helm_release_name)\n
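
    The following shell sketch illustrates the scheme described above, assuming the hash is computed over the full original release name; it is an illustration, not the exact KPOps implementation:

    name=\"example-component-name-too-long-fake-fakefakefakefakefake\"\n\n# first 5 hex characters of SHA-1 over the original release name\nhash=$(printf '%s' \"$name\" | sha1sum | cut -c1-5)\n\n# keep the first 47 characters (53 minus the replaced 6) and append \"-<hash>\"\necho \"${name:0:47}-${hash}\"\n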
    "}, {"location": "user/migration-guide/v2-v3/#create-helmapp-component", "title": "Create HelmApp component", "text": "

    All Helm-specific parts of the built-in KubernetesApp have been extracted to a new child component that is more appropriately named HelmApp. It has to be renamed in your existing pipeline definitions and custom components module.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml", "title": "pipeline.yaml", "text": "
    -- type: kubernetes-app\n+- type: helm-app\n   name: foo\n
    "}, {"location": "user/migration-guide/v2-v3/#custom_modulepy", "title": "custom_module.py", "text": "
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n      ...\n
    "}, {"location": "user/migration-guide/v2-v3/#create-streamsbootstrap-component-refactor-cleanup-jobs-as-individual-helmapp", "title": "Create StreamsBootstrap component & refactor cleanup jobs as individual HelmApp", "text": "

    Previously, the default KafkaApp component configured the streams-bootstrap Helm charts. Now, this component is no longer tied to Helm (or Kubernetes). Instead, there is a new StreamsBootstrap component that configures the Helm chart repository for the components that use it, e.g. StreamsApp and ProducerApp. If you are using non-default values for the Helm chart repository or version, they have to be updated as shown below.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml", "title": "defaults.yaml", "text": "
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-kafka-connector-resetter-as-individual-helmapp", "title": "Refactor Kafka Connector resetter as individual HelmApp", "text": "

    Internally, the Kafka Connector resetter is now its own standard HelmApp, removing a lot of the shared code. It is configured using the resetter_namespace (formerly namespace) and resetter_values attributes.

    "}, {"location": "user/migration-guide/v2-v3/#defaultsyaml_1", "title": "defaults.yaml", "text": "
      kafka-connector:\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n
    "}, {"location": "user/migration-guide/v2-v3/#make-kafka-rest-proxy-kafka-connect-hosts-default-and-improve-schema-registry-config", "title": "Make Kafka REST Proxy & Kafka Connect hosts default and improve Schema Registry config", "text": "

    The breaking changes target the config.yaml file:

    • The schema_registry_url is replaced with schema_registry.url (default http://localhost:8081) and schema_registry.enabled (default false).

    • kafka_rest_host is renamed to kafka_rest.url (default http://localhost:8082).

    • kafka_connect_host is replaced with kafka_connect.url (default http://localhost:8083).

    • brokers is renamed to kafka_brokers.

    The environment variable names of these config fields changed respectively. Please refer to the environment variables documentation page to see the newest changes.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml", "title": "config.yaml", "text": "
      environment: development\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml", "title": "pipeline.yaml and default.yaml", "text": "

    The variable is now called kafka_brokers.

    ...\n  app:\n    streams:\n-     brokers: ${brokers}\n+     brokers: ${kafka_brokers}\n      schemaRegistryUrl: ${schema_registry_url}\n    nameOverride: override-with-this-name\n    imageTag: \"1.0.0\"\n...\n
    "}, {"location": "user/migration-guide/v2-v3/#define-custom-components-module-pipeline-base-dir-globally", "title": "Define custom components module & pipeline base dir globally", "text": "

    Warning

    The previous CLI parameters have been removed.

    The options for a custom components_module and pipeline_base_dir are now global settings, defined in config.yaml.

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_1", "title": "config.yaml", "text": "
      kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  environment: development\n+ components_module: components\n+ pipeline_base_dir: pipelines\n
    "}, {"location": "user/migration-guide/v2-v3/#move-github-action-to-repsitory-root", "title": "Move GitHub action to repsitory root", "text": "

    The location of the GitHub action has changed, and it's now available directly as bakdata/kpops.

    You'll need to change it in your GitHub CI workflows.

    steps:\n  - name: kpops deploy\n-   uses: bakdata/kpops/actions/kpops-runner@main\n+   uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      # ...\n
    "}, {"location": "user/migration-guide/v2-v3/#allow-overriding-config-files", "title": "Allow overriding config files", "text": "

    Specifying the environment is no longer mandatory. If not defined, only the global files will be used.

    environment is no longer specified in config.yaml. Instead, it can be set either via the CLI flag --environment or with the environment variable KPOPS_ENVIRONMENT.

    The --config flag in the CLI now points to the directory that contains the config*.yaml files. The files to be used are resolved based on the provided environment, if any.
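
    For example (paths and file names are placeholders), pointing --config at a directory that contains config.yaml and config_development.yaml and selecting the environment via --environment could look like this:

    kpops deploy ./pipeline.yaml \\\n    --config ./config-dir \\\n    --environment development \\\n    --execute\n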

    "}, {"location": "user/migration-guide/v2-v3/#configyaml_2", "title": "config.yaml", "text": "
    - environment: development\n  kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n
    "}, {"location": "user/migration-guide/v2-v3/#change-substitution-variables-separator-to", "title": "Change substitution variables separator to .", "text": "

    The delimiter in the substitution variables is changed to ..

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml-and-defaultyaml_1", "title": "pipeline.yaml and default.yaml", "text": "
    steps:\n  - type: scheduled-producer\n    app:\n      labels:\n-       app_type: \"${component_type}\"\n-       app_name: \"${component_name}\"\n-       app_schedule: \"${component_app_schedule}\"\n+       app_type: \"${component.type}\"\n+       app_name: \"${component.name}\"\n+       app_schedule: \"${component.app.schedule}\"\n
    "}, {"location": "user/migration-guide/v2-v3/#configyaml_3", "title": "config.yaml", "text": "
    topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n- default_output_topic_name: \"${pipeline_name}-${component_name}-topic\"\n+ default_error_topic_name: \"${pipeline_name}-${component.name}-dead-letter-topic\"\n+ default_output_topic_name: \"${pipeline_name}-${component.name}-topic\"\n
    "}, {"location": "user/migration-guide/v2-v3/#refactor-generate-template-for-python-api-usage", "title": "Refactor generate template for Python API usage", "text": "

    The template method of every pipeline component has been renamed to manifest as it is no longer strictly tied to Helm template. Instead, it can be used to render the final resources of a component, such as Kubernetes manifests.

    There is also a new kpops manifest command replacing the existing kpops generate --template flag.

    If you're using this functionality in your custom components, it needs to be updated.

      from kpops.components.base_components.models.resource import Resource\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n
    "}, {"location": "user/migration-guide/v2-v3/#namespace-substitution-vars", "title": "Namespace substitution vars", "text": "

    The global configuration variables are now namespaced under the config key, such as ${config.kafka_brokers}, ${config.schema_registry.url}. The same applies to pipeline variables, e.g. ${pipeline_name} \u2192 ${pipeline.name}. This makes them more uniform with the existing ${component.<key>} variables.

    "}, {"location": "user/migration-guide/v2-v3/#pipelineyaml_1", "title": "pipeline.yaml", "text": "
      name: kafka-app\n- prefix: ${pipeline_name}-\n+ prefix: ${pipeline.name}-\n  app:\n    streams:\n-     brokers: ${kafka_brokers}\n-     schemaRegistryUrl: ${schema_registry.url}\n+     brokers: ${config.kafka_brokers}\n+     schemaRegistryUrl: ${config.schema_registry.url}\n
    "}, {"location": "user/migration-guide/v2-v3/#summary", "title": "Summary", "text": "

    Warning

    Helm will not find your (long) old release names anymore.

    defaults.yaml
      kafka-app:\n    app:\n      streams: ...\n\n+ streams-bootstrap:\n    repo_config: ...\n    version: ...\n
    pipeline.yaml
    - - type: kubernetes-app\n+ - type: helm-app\n  ...\n  - type: kafka-app\n    app:\n-     brokers: ${brokers}\n+     brokers: ${config.kafka_brokers}\n      labels:\n-       app_schedule: \"${component_app_schedule}\"\n+       app_schedule: \"${component.app.schedule}\"\n  ...\n  - type: kafka-connector\n-   namespace: my-namespace\n+   resetter_namespace: my-namespace\n  ...\n
    config.yaml
    - environment: development\n\n+ components_module: components\n\n+ pipeline_base_dir: pipelines\n\n- brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n+ kafka_brokers: \"http://k8kafka-cp-kafka-headless.kpops.svc.cluster.local:9092\"\n\n- kafka_rest_host: \"http://my-custom-rest.url:8082\"\n+ kafka_rest:\n+   url: \"http://my-custom-rest.url:8082\"\n\n- kafka_connect_host: \"http://my-custom-connect.url:8083\"\n+ kafka_connect:\n+   url: \"http://my-custom-connect.url:8083\"\n\n- schema_registry_url: \"http://my-custom-sr.url:8081\"\n+ schema_registry:\n+   enabled: true\n+   url: \"http://my-custom-sr.url:8081\"\n\n  topic_name_config:\n- default_error_topic_name: \"${pipeline_name}-${component_name}-dead-letter-topic\"\n+ default_error_topic_name: \"${pipeline.name}-${component.name}-dead-letter-topic\"\n  ...\n
    custom_module.py
    - from kpops.components import KubernetesApp\n+ from kpops.components import HelmApp\n+ from kpops.components.base_components.models.resource import Resource\n\n- class CustomHelmApp(KubernetesApp):\n+ class CustomHelmApp(HelmApp):\n\n  @override\n- def template(self) -> None:\n+ def manifest(self) -> Resource:\n  \"\"\"Render final component resources, e.g. Kubernetes manifests.\"\"\"\n      return []  # list of manifests\n  ...\n
    github_ci_workflow.yaml
      steps:\n    - name: ...\n-     uses: bakdata/kpops/actions/kpops-runner@main\n+     uses: bakdata/kpops@main\n  ...\n
    "}, {"location": "user/migration-guide/v3-v4/", "title": "Migrate from V3 to V4", "text": ""}, {"location": "user/migration-guide/v3-v4/#distribute-defaults-across-multiple-files", "title": "Distribute defaults across multiple files", "text": "

    Warning

    The --defaults flag is removed

    It is now possible to use multiple default values. The defaults.yaml (or defaults_<env>.yaml) files can be distributed across multiple folders. These will be picked up by KPOps and merged into a single pipeline.yaml file. KPOps starts reading the defaults files from the directory of the pipeline and picks up every defaults file on its way up to the directory where the pipeline_base_dir is defined.

    For example, imagine the following folder structure:

    \u2514\u2500 pipelines\n   \u2514\u2500\u2500 distributed-defaults\n       \u251c\u2500\u2500 defaults.yaml\n       \u251c\u2500\u2500 defaults_dev.yaml\n       \u2514\u2500\u2500 pipeline-deep\n           \u251c\u2500\u2500 defaults.yaml\n           \u2514\u2500\u2500 pipeline.yaml\n

    The pipeline_base_dir is configured to pipelines. Now if we generate this pipeline with the following command:

    kpops generate \\\n      --environment dev \\\n      ./pipelines/distributed-defaults/pipeline-deep/pipeline.yaml\n

    The defaults would be picked up in the following order (highest to lowest priority):

    • ./pipelines/distributed-defaults/pipeline-deep/defaults.yaml
    • ./pipelines/distributed-defaults/defaults_dev.yaml
    • ./pipelines/distributed-defaults/defaults.yaml

    The deepest defaults.yaml file in the folder hierarchy (i.e., the closest one to the pipeline.yaml) overwrites the higher-level defaults' values.

    "}, {"location": "user/migration-guide/v4-v5/", "title": "Migrate from V4 to V5", "text": ""}, {"location": "user/migration-guide/v4-v5/#allow-custom-timeout-for-external-services", "title": "Allow custom timeout for external services", "text": "

    The global timeout setting has been removed. Instead, an individual timeout can be set for each external service. The default is 30 seconds.

    "}, {"location": "user/migration-guide/v4-v5/#configyaml", "title": "config.yaml", "text": "
    - timeout: 300\n\n  kafka_rest:\n    url: \"http://my-custom-rest.url:8082\"\n+   timeout: 30\n  kafka_connect:\n    url: \"http://my-custom-connect.url:8083\"\n+   timeout: 30\n  schema_registry:\n    enabled: true\n    url: \"http://my-custom-sr.url:8081\"\n+   timeout: 30\n
    "}, {"location": "user/migration-guide/v5-v6/", "title": "Migrate from V5 to V6", "text": ""}, {"location": "user/migration-guide/v5-v6/#deploy-multiple-pipelines", "title": "Deploy multiple pipelines", "text": "

    KPOps can now deploy multiple pipelines in a single command. You can pass one or more pipeline.yaml files, or a directory containing multiple pipeline.yaml files.

    The environment variable KPOPS_PIPELINE_PATH is changed to KPOPS_PIPELINE_PATHS.
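
    For example (paths are placeholders), you can pass several pipeline files at once or point KPOps at a directory:

    # deploy two pipelines in one command\nkpops deploy ./pipelines/pipeline-a/pipeline.yaml ./pipelines/pipeline-b/pipeline.yaml --execute\n\n# or deploy every pipeline.yaml found inside a directory\nkpops deploy ./pipelines --execute\n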

    Read more:

    • CLI Usage
    • Environment variables
    "}, {"location": "user/migration-guide/v5-v6/#separate-kpops-api-from-the-cli", "title": "Separate KPOps API from the CLI", "text": "

    The KPOps Python API is now stable and separated from the CLI! \ud83c\udf89

    "}, {"location": "user/migration-guide/v6-v7/", "title": "Migrate from V6 to V7", "text": ""}, {"location": "user/migration-guide/v6-v7/#automatic-loading-of-namespaced-custom-components", "title": "Automatic loading of namespaced custom components", "text": "

    KPOps is now distributed as a Python namespace package (as defined by PEP 420). This allows us to standardize the namespace kpops.components for both builtin and custom pipeline components.

    As a result of the restructure, some imports need to be adjusted:

    KPOps Python API

    - import kpops\n+ import kpops.api as kpops\n

    builtin KPOps components

    - from kpops.components import (\n-     HelmApp,\n-     KafkaApp,\n-     KafkaConnector,\n-     KafkaSinkConnector,\n-     KafkaSourceConnector,\n-     KubernetesApp,\n-     StreamsBootstrap,\n-     ProducerApp,\n-     StreamsApp,\n-     PipelineComponent,\n-     StreamsApp,\n-     ProducerApp,\n- )\n+ from kpops.components.base_components import (\n+     HelmApp,\n+     KafkaApp,\n+     KafkaConnector,\n+     KafkaSinkConnector,\n+     KafkaSourceConnector,\n+     KubernetesApp,\n+     PipelineComponent,\n+ )\n+ from kpops.components.streams_bootstrap import (\n+     StreamsBootstrap,\n+     StreamsApp,\n+     ProducerApp,\n+ )\n
    "}, {"location": "user/migration-guide/v6-v7/#your-custom-kpops-components", "title": "your custom KPOps components", "text": ""}, {"location": "user/migration-guide/v6-v7/#configyaml", "title": "config.yaml", "text": "
    - components_module: components\n
    "}, {"location": "user/migration-guide/v6-v7/#python-module", "title": "Python module", "text": "
    - components/__init__.py\n+ kpops/components/custom/__init__.py\n
    "}, {"location": "user/migration-guide/v6-v7/#rename-app-field", "title": "Rename app field", "text": "

    The app attribute of the builtin KPOps components has been renamed to better differentiate them. Both your pipeline.yaml and defaults.yaml files have to be updated, e.g.:

    "}, {"location": "user/migration-guide/v6-v7/#defaultsyaml", "title": "defaults.yaml", "text": "
      kubernetes-app:\n-   app: {}\n+   values: {}\n\n  helm-app:\n-   app: {}\n+   values: {}\n\n  kafka-app:\n-   app: {}\n+   values: {}\n\n  streams-app:\n-   app: {}\n+   values: {}\n\n  producer-app:\n-   app: {}\n+   values: {}\n\n  kafka-connector:\n-   app: {}\n+   config: {}\n\n  kafka-source-connector:\n-   app: {}\n+   config: {}\n\n  kafka-sink-connector:\n-   app: {}\n+   config: {}\n
    "}, {"location": "user/migration-guide/v6-v7/#call-destroy-from-inside-of-reset-or-clean", "title": "Call destroy from inside of reset or clean", "text": "

    Before v7, the KPOps CLI executed destroy before running reset/clean to ensure the component was destroyed.

    This logic has changed. The destroy method is now called within the PipelineComponent's reset/clean.

    When migrating to v7, check your custom components to see whether they override the reset/clean methods. If so, you need to call the super method reset/clean to trigger destroy inside the parent class. Alternatively, if you are implementing the PipelineComponent class directly, you need to call the destroy method at the beginning of your reset/clean method.

    "}, {"location": "user/migration-guide/v6-v7/#componentspy", "title": "components.py", "text": "

    For example, when creating a custom StreamsApp or ProducerApp (or any other custom component), you must call the super method reset/clean to execute destroy in the parent class. Otherwise, the destroy logic will not be executed!

    class MyStreamsApp(StreamsApp):\n\n    @override\n    async def clean(self, dry_run: bool) -> None:\n+       await super().clean(dry_run)\n        # Some custom clean logic\n        # ...\n\n\nclass MyCustomComponent(PipelineComponent):\n\n    @override\n    async def destroy(self, dry_run: bool) -> None:\n        # Some custom destroy logic\n        # ...\n\n    @override\n    async def clean(self, dry_run: bool) -> None:\n+       await super().clean(dry_run)\n        # Some custom clean logic\n        # ...\n
    "}, {"location": "user/migration-guide/v7-v8/", "title": "Migrate from V7 to V8", "text": ""}, {"location": "user/migration-guide/v7-v8/#add-support-for-streams-bootstrap-v3", "title": "Add support for streams-bootstrap v3", "text": "

    From now on, KPOps supports streams-bootstrap v3 as its default component. The previous streams-bootstrap version (below 3.x.x) is marked as deprecated and will be removed in a future version of KPOps. If you don't want to migrate your producer or streams app to v3, you should suffix your components with -v2. Here is an example of a pipeline.yaml file.

    "}, {"location": "user/migration-guide/v7-v8/#pipelineyaml", "title": "pipeline.yaml", "text": "
    - - type: producer-app\n+ - type: producer-app-v2\n\n- - type: streams-app\n+ - type: streams-app-v2\n\n# rest of your pipeline\n
    "}, {"location": "user/migration-guide/v7-v8/#my-componentspy", "title": "my-components.py", "text": "
    - class MyStreamsApp(StreamsApp):\n+ class MyStreamsApp(StreamsAppV2):\n    ...\n

    Info

    The streams-bootstrap, streams-app, and producer-app components now all take the Helm values of streams-bootstrap version 3. You can find these values under the Helm charts documentation or by referring to the Base model definitions.

    "}, {"location": "user/migration-guide/v7-v8/#rename-role-to-label", "title": "Rename role to label", "text": "

    The keyword role is renamed to label. You need to replace it in your pipeline.yaml, defaults.yaml, and the Python components definition files. Here is a simple example of the defaults.yaml.

    "}, {"location": "user/migration-guide/v7-v8/#defaultsyaml", "title": "defaults.yaml", "text": "
    streams-app-v2:\n  values:\n    streams:\n      brokers: localhost:9092\n  from:\n    topics:\n      my-labeled-input-topic:\n-       role: my-input-topic-label\n+       label: my-input-topic-label\n      my-labeled-input-pattern:\n        type: pattern\n-       role: my-input-topic-labeled-pattern\n+       label: my-input-topic-labeled-pattern\n\n  to:\n    topics:\n      my-labeled-topic-output:\n-       role: my-output-topic-label\n+       label: my-output-topic-label\n\n# rest of your pipeline\n
    "}, {"location": "user/migration-guide/v7-v8/#make-kafkaapp-responsible-for-deployingcleaning-streams-bootstrap-components", "title": "Make KafkaApp responsible for deploying/cleaning streams bootstrap components", "text": "

    The KafkaApp component now only contains the deployment logic of the streams-bootstrap applications (streams-app, producer-app). It should not be used in defaults.yaml or pipeline.yaml. If you are using it, replace it with streams-bootstrap.

    "}, {"location": "user/migration-guide/v7-v8/#defaultsyaml_1", "title": "defaults.yaml", "text": "
    - kafka-app:\n+ streams-bootstrap-v2:\n    values:\n      streams:\n        brokers: 127.0.0.1:9092\n        schemaRegistryUrl: 127.0.0.1:8081\n
    "}, {"location": "user/references/cli-commands/", "title": "CLI Usage", "text": "

    Usage:

    $ kpops [OPTIONS] COMMAND [ARGS]...\n

    Options:

    • -V, --version: Print KPOps version
    • --install-completion: Install completion for the current shell.
    • --show-completion: Show completion for the current shell, to copy it or customize the installation.
    • --help: Show this message and exit.

    Commands:

    • clean: Clean pipeline steps
    • deploy: Deploy pipeline steps
    • destroy: Destroy pipeline steps
    • generate: Generate enriched pipeline representation
    • init: Initialize a new KPOps project.
    • manifest: Render final resource representation
    • patch: Render final resource representation
    • reset: Reset pipeline steps
    • schema: Generate JSON schema.
    "}, {"location": "user/references/cli-commands/#kpops-clean", "title": "kpops clean", "text": "

    Clean pipeline steps

    Usage:

    $ kpops clean [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-deploy", "title": "kpops deploy", "text": "

    Deploy pipeline steps

    Usage:

    $ kpops deploy [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-destroy", "title": "kpops destroy", "text": "

    Destroy pipeline steps

    Usage:

    $ kpops destroy [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-generate", "title": "kpops generate", "text": "

    Enrich pipeline steps with defaults. The enriched pipeline is used for all KPOps operations (deploy, destroy, ...).

    Usage:

    $ kpops generate [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-init", "title": "kpops init", "text": "

    Initialize a new KPOps project.

    Usage:

    $ kpops init [OPTIONS] PATH\n

    Arguments:

    • PATH: Path for a new KPOps project. It should lead to an empty (or non-existent) directory. The part of the path that doesn't exist will be created. [required]

    Options:

    • --config-include-opt / --no-config-include-opt: Whether to include non-required settings in the generated 'config.yaml' [default: no-config-include-opt]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-manifest", "title": "kpops manifest", "text": "

    In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.

    Usage:

    $ kpops manifest [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-patch", "title": "kpops patch", "text": "

    In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.

    Usage:

    $ kpops patch [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-reset", "title": "kpops reset", "text": "

    Reset pipeline steps

    Usage:

    $ kpops reset [OPTIONS] PIPELINE_PATHS...\n

    Arguments:

    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]

    Options:

    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • --steps TEXT: Comma separated list of steps to apply the command on [env var: KPOPS_PIPELINE_STEPS]
    • --filter-type [include|exclude]: Whether the --steps option should include/exclude the steps [default: include]
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • --dry-run / --execute: Whether to dry run the command or execute it [default: dry-run]
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • --parallel / --no-parallel: Enable or disable parallel execution of pipeline steps. If enabled, multiple steps can be processed concurrently. If disabled, steps will be processed sequentially. [default: no-parallel]
    • --help: Show this message and exit.
    "}, {"location": "user/references/cli-commands/#kpops-schema", "title": "kpops schema", "text": "

    Generate JSON schema.

    The schemas can be used to enable support for KPOps files in a text editor.

    Usage:

    $ kpops schema [OPTIONS] SCOPE:{pipeline|defaults|config}\n

    Arguments:

    • SCOPE:{pipeline|defaults|config}: Scope of the generated schema
      - pipeline: Schema of PipelineComponents for KPOps pipeline.yaml\n\n- defaults: Schema of PipelineComponents for KPOps defaults.yaml\n\n- config: Schema for KPOps config.yaml  [required]\n

    Options:

    • --help: Show this message and exit.
    "}, {"location": "user/references/editor-integration/", "title": "Editor integration", "text": ""}, {"location": "user/references/editor-integration/#native", "title": "Native", "text": "

    We are working towards first-class editor support by providing plugins that work out of the box.

    • Neovim: kpops.nvim
    • Visual Studio Code: planned
    "}, {"location": "user/references/editor-integration/#manual-for-unsupported-editors-with-lsp", "title": "Manual (for unsupported editors with LSP)", "text": "
    1. Install the yaml-language-server in your editor of choice (requires LSP support).
    2. Configure the extension with the settings below.
    settings.json
    {\n    \"yaml.schemas\": {\n        \"https://bakdata.github.io/kpops/4.0/schema/pipeline.json\": [\n            \"pipeline.yaml\",\n            \"pipeline_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/defaults.json\": [\n            \"defaults.yaml\",\n            \"defaults_*.yaml\"\n        ],\n        \"https://bakdata.github.io/kpops/4.0/schema/config.json\": [\n            \"config.yaml\",\n            \"config_*.yaml\"\n        ]\n    }\n}\n

    Advanced usage

    It is possible to generate schemas with the kpops schema command. This is useful for including custom components or when using a pre-release version of KPOps.
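
    For example, assuming the schema is printed to stdout, you could generate local schema files and point yaml-language-server at them instead of the hosted URLs:

    kpops schema pipeline > pipeline-schema.json\nkpops schema defaults > defaults-schema.json\nkpops schema config > config-schema.json\n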

    "}, {"location": "user/references/editor-integration/#concepts", "title": "Concepts", "text": "

    KPOps provides JSON schemas that enable autocompletion and validation for all YAML files that the user must work with.

    "}, {"location": "user/references/editor-integration/#supported-files", "title": "Supported files", "text": "
    • pipeline.yaml
    • defaults.yaml
    • config.yaml
    "}, {"location": "user/references/ci-integration/github-actions/", "title": "GitHub Actions integration", "text": "

    We provide a GitHub composite action, bakdata/kpops, that installs and executes KPOps commands with the given parameters.

    "}, {"location": "user/references/ci-integration/github-actions/#input-parameters", "title": "Input Parameters", "text": "Name Required Default Value Type Description command \u2705 - string KPOps command to run. generate, deploy, destroy, reset, clean are possible values. Flags such as --dry-run and --execute need to be specified pipeline \u2705 - string Pipeline to run by KPOps working-directory \u274c . string root directory used by KPOps to run pipelines config \u274c - string Directory containing the config*.yaml file(s) environment \u274c - string Environment to run KPOps in components \u274c - string components package path filter-type \u274c - string Whether to include/exclude the steps defined in KPOPS_PIPELINE_STEPS parallel \u274c \"false\" string Whether to run pipelines in parallel python-version \u274c \"3.11.x\" string Python version to install (Defaults to the latest stable version of Python 3.11) kpops-version \u274c latest string KPOps version to install helm-version \u274c latest string Helm version to install token \u274c latest string secrets.GITHUB_TOKEN, needed for setup-helm action if helm-version is set to latest"}, {"location": "user/references/ci-integration/github-actions/#usage", "title": "Usage", "text": "
    steps:\n  # ...\n  # This step is useful for debugging reasons\n  - name: Generate Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: generate\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.3\n\n  # It is possible to use a pre-release KPOps version from TestPyPI https://test.pypi.org/project/kpops/#history\n  - name: Deploy Kafka pipeline\n    uses: bakdata/kpops@main\n    with:\n      command: deploy --execute\n      working-directory: home/my-kpops-root-dir\n      pipeline: pipelines/my-pipeline-file.yaml\n      kpops-version: 1.2.5.dev20230707132709\n  # ...\n
    "}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml index e6cd21786..c781ae941 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -2,207 +2,207 @@ https://bakdata.github.io/kpops/dev/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/developer/api/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/developer/auto-generation/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/developer/contributing/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/developer/getting-started/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/resources/architecture/components-hierarchy/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/resources/examples/defaults/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/resources/examples/pipeline/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/resources/pipeline-components/pipeline/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/resources/pipeline-defaults/defaults/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/resources/variables/cli_env_vars/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/resources/variables/config_env_vars/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/changelog/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/what-is-kpops/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/config/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/defaults/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/helm-app/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kafka-connector/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kafka-sink-connector/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kafka-source-connector/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/kubernetes-app/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/overview/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/producer-app/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/streams-app/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/components/streams-bootstrap/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/variables/environment_variables/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/core-concepts/variables/substitution/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/examples/atm-fraud-pipeline/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/getting-started/quick-start/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/getting-started/setup/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/getting-started/teardown/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/migration-guide/v1-v2/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/migration-guide/v2-v3/ - 2024-10-29 + 2024-11-25 
daily https://bakdata.github.io/kpops/dev/user/migration-guide/v3-v4/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/migration-guide/v4-v5/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/migration-guide/v5-v6/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/migration-guide/v6-v7/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/migration-guide/v7-v8/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/references/cli-commands/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/references/editor-integration/ - 2024-10-29 + 2024-11-25 daily https://bakdata.github.io/kpops/dev/user/references/ci-integration/github-actions/ - 2024-10-29 + 2024-11-25 daily \ No newline at end of file diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index 94013f440..97db7b7b5 100644 Binary files a/dev/sitemap.xml.gz and b/dev/sitemap.xml.gz differ diff --git a/dev/user/references/cli-commands/index.html b/dev/user/references/cli-commands/index.html index 93622ca20..39757ee54 100644 --- a/dev/user/references/cli-commands/index.html +++ b/dev/user/references/cli-commands/index.html @@ -1418,6 +1418,13 @@ kpops manifest + + +
  • + + kpops patch + +
  • @@ -1799,6 +1806,13 @@ kpops manifest +
  • + +
  • + + kpops patch + +
  • @@ -1852,6 +1866,7 @@

    CLI Usagekpops manifestkpops patch

    +

    In addition to generate, render final resource representation for each pipeline step, e.g. Kubernetes manifests.

    +

    Usage:

    +
    $ kpops patch [OPTIONS] PIPELINE_PATHS...
    +
    +

    Arguments:

    +
      +
    • PIPELINE_PATHS...: Paths to dir containing 'pipeline.yaml' or files named 'pipeline.yaml'. [env var: KPOPS_PIPELINE_PATHS;required]
    • +
    +

    Options:

    +
      +
    • --dotenv FILE: Path to dotenv file. Multiple files can be provided. The files will be loaded in order, with each file overriding the previous one. [env var: KPOPS_DOTENV_PATH]
    • +
    • --config DIRECTORY: Path to the dir containing config.yaml files [env var: KPOPS_CONFIG_PATH; default: .]
    • +
    • --environment TEXT: The environment you want to generate and deploy the pipeline to. Suffix your environment files with this value (e.g. defaults_development.yaml for environment=development). [env var: KPOPS_ENVIRONMENT]
    • +
    • --verbose / --no-verbose: Enable verbose printing [default: no-verbose]
    • +
    • --help: Show this message and exit.
    • +

    kpops reset

    Reset pipeline steps

    Usage:

    -
    $ kpops reset [OPTIONS] PIPELINE_PATHS...
    +
    $ kpops reset [OPTIONS] PIPELINE_PATHS...
     

    Arguments: