diff --git a/.github/bin/tree b/.github/bin/tree
index 07f31fa..ad6a1e1 100755
--- a/.github/bin/tree
+++ b/.github/bin/tree
@@ -11,4 +11,4 @@ else
 fi
 
 set -x
-$TREE_BIN -a -I .git -I .mypy_cache -I .github -I demos -I .pytest_cache -I .pixi -I .actrc -I .cursorrules -I .cursorignore -I __pycache__
+$TREE_BIN -a -I .git -I .mypy_cache -I .github -I demos -I .pytest_cache -I .pixi -I .actrc -I .cursorrules -I .cursorignore -I __pycache__ "$@"
diff --git a/__main__.py b/__main__.py
index 01610a8..4aeb0ed 100644
--- a/__main__.py
+++ b/__main__.py
@@ -36,6 +36,14 @@ def main() -> None:
         log.info(f"Deploying modules: {modules_to_deploy}")
         deployment_manager = DeploymentManager(init_config, config_manager)
         deployment_manager.deploy_modules(modules_to_deploy)
+
+        # Test k8s provider
+        k8s_provider = deployment_manager.get_k8s_provider()
+        if k8s_provider:
+            log.info("Successfully retrieved k8s_provider from EKS cluster")
+        else:
+            log.warn("No k8s_provider available - EKS cluster may not be enabled")
+
     else:
         # Log and proceed with core IaC execution even if no modules are deployed
         log.info("No modules to deploy. Proceeding with core IaC execution...")
diff --git a/modules/aws/deployment.py b/modules/aws/deployment.py
index 7b0ef34..85e66b2 100644
--- a/modules/aws/deployment.py
+++ b/modules/aws/deployment.py
@@ -4,6 +4,7 @@
 from pulumi import log
 import pulumi_aws as aws
 from pulumi import ResourceOptions
+import json
 
 from modules.core.interfaces import ModuleInterface, ModuleDeploymentResult
 from modules.core.types import InitializationConfig
@@ -66,6 +67,10 @@ def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
             log.info(f"Successfully authenticated as: {caller_identity.arn}")
             log.info(f"AWS Account ID: {caller_identity.account_id}")
 
+            # Initialize metadata dict
+            aws_metadata = {}
+            k8s_provider = None
+
             # Deploy EKS if enabled
             if aws_config.eks and aws_config.eks.enabled:
                 log.info(f"Deploying EKS cluster: {aws_config.eks.name}")
@@ -77,42 +82,48 @@ def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
                     scaling_config=aws_config.eks.node_groups[0].scaling_config if aws_config.eks.node_groups else None,
                 )
 
+                # Store k8s_provider and EKS info in metadata
+                k8s_provider = eks_resources["k8s_provider"]
+                aws_metadata["k8s_provider"] = k8s_provider
+                aws_metadata["eks_cluster_name"] = aws_config.eks.name
+
                 # Export EKS outputs
                 pulumi.export("eks_cluster_name", eks_resources["cluster"].name)
                 pulumi.export("eks_cluster_endpoint", eks_resources["cluster"].endpoint)
                 pulumi.export("eks_cluster_vpc_id", eks_resources["vpc"].id)
 
             # Get Git info as dictionary
-            # this is required code for initializing the git info, do not remove
+            # retain this code for now, do not remove
             git_info = init_config.git_info.model_dump()
 
             # Collect metadata for resource tagging
             global_metadata = collect_global_metadata()
-            aws_metadata = collect_module_metadata(
+
+            # Update aws_metadata with caller identity info
+            caller_identity = provider.get_caller_identity()
+            aws_metadata.update({
+                "account_id": caller_identity.account_id,
+                "user_id": caller_identity.user_id,
+                "arn": caller_identity.arn,
+            })
+
+            # Merge with global metadata
+            combined_metadata = collect_module_metadata(
                 global_metadata=global_metadata,
-                modules_metadata={
-                    "aws": {
-                        "account_id": caller_identity.account_id,
-                        "user_id": caller_identity.user_id,
-                        "arn": caller_identity.arn,
-                    }
-                },
+                modules_metadata={"aws": aws_metadata},
             )
 
             # Sanitize resource tags
-            resource_tags = provider.sanitize_tags(
-                {
-                    "Project": aws_metadata["project_name"],
-                    "Stack": aws_metadata["stack_name"],
-                    "GitCommit": aws_metadata.get("commit_hash", ""),
-                    "AWSAccountID": aws_metadata.get("aws", {}).get("account_id", ""),
-                    "Compliance:Framework": "NIST",
-                    "Compliance:Controls": "AC-2, AC-3",
-                }
-            )
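+            # NOTE: sanitize_tags is assumed to normalize these values to AWS tag constraints
+            # (key/value length limits and the allowed character set) before they are applied.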
aws_metadata["project_name"], - "Stack": aws_metadata["stack_name"], - "GitCommit": aws_metadata.get("commit_hash", ""), - "AWSAccountID": aws_metadata.get("aws", {}).get("account_id", ""), - "Compliance:Framework": "NIST", - "Compliance:Controls": "AC-2, AC-3", - } - ) + resource_tags = provider.sanitize_tags({ + "Project": combined_metadata["project_name"], + "Stack": combined_metadata["stack_name"], + "GitCommit": combined_metadata.get("commit_hash", ""), + "AWSAccountID": aws_metadata["account_id"], + "Compliance:Framework": "NIST", + "Compliance:Controls": "AC-2, AC-3", + }) # Define AWS resources with sanitized tags - # Bucket is for demonstration purposes only during development bucket_name = f"konductor-{init_config.stack_name}-{provider.region}" s3_bucket = aws.s3.Bucket( bucket_name, @@ -124,48 +135,89 @@ def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult: # Export outputs pulumi.export("aws_s3_bucket_name", s3_bucket.id) - # Collect AWS caller identity - caller_identity = aws.get_caller_identity(opts=pulumi.InvokeOptions(provider=provider.provider)) + # Parse compliance config + compliance_config = ComplianceConfig.model_validate(config.get("compliance", {})) - # Log success - log.info(f"Successfully authenticated as: {caller_identity.arn}") + # Create IAM role for Crossplane AWS Provider if EKS is enabled + crossplane_role = None + if aws_config.eks and aws_config.eks.enabled: + # Get the OIDC issuer URL and clean it properly + oidc_url = eks_resources['cluster'].identities[0].oidcs[0].issuer.apply( + lambda x: x.removeprefix('https://') + ) - # Collect AWS-specific metadata - aws_metadata = { - "aws_account_id": caller_identity.account_id, - "aws_user_id": caller_identity.user_id, - "aws_arn": caller_identity.arn, - } + # Get the OIDC provider ARN + oidc_provider_arn = pulumi.Output.all(account_id=caller_identity.account_id, url=oidc_url).apply( + lambda args: f"arn:aws:iam::{args['account_id']}:oidc-provider/{args['url']}" + ) - # Collect resource identifiers - provider_urn = str(provider.provider.urn) - bucket_name = str(s3_bucket.id) + # Create the trust policy with proper formatting + crossplane_trust_policy = pulumi.Output.all(provider_arn=oidc_provider_arn, url=oidc_url).apply( + lambda args: { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": args['provider_arn'] + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + f"{args['url']}:sub": "system:serviceaccount:crossplane-system:provider-aws", + f"{args['url']}:aud": "sts.amazonaws.com" + } + } + } + ] + } + ) - # Update metadata to include EKS info if deployed - if aws_config.eks and aws_config.eks.enabled: - aws_metadata["eks_cluster_name"] = aws_config.eks.name + # Create the IAM role with the fixed trust policy + crossplane_role = aws.iam.Role( + "crossplane-provider-aws", + assume_role_policy=pulumi.Output.json_dumps(crossplane_trust_policy), + tags=resource_tags, + opts=ResourceOptions( + provider=provider.provider, + depends_on=[eks_resources['cluster']] + ) + ) - # Parse compliance config - compliance_config = ComplianceConfig.model_validate(config.get("compliance", {})) + # Attach required policies + aws.iam.RolePolicyAttachment( + "crossplane-provider-aws-admin", + role=crossplane_role.name, + policy_arn="arn:aws:iam::aws:policy/AdministratorAccess", # Note: Consider limiting this in production + opts=ResourceOptions( + provider=provider.provider, + depends_on=[crossplane_role] + ) + ) + + # Add role ARN to 
+                # Add role ARN to metadata
+                aws_metadata["crossplane_provider_role_arn"] = crossplane_role.arn
 
-            # Return deployment result without version
-            # Return deployment result without version
+            # Return deployment result
             return ModuleDeploymentResult(
                 success=True,
                 version="0.0.1",
-                resources=[provider_urn, bucket_name],
+                resources=[str(provider.provider.urn), str(s3_bucket.id)],
                 metadata={
                     "compliance": compliance_config.model_dump(),
                     "aws_account_id": caller_identity.account_id,
                     "aws_user_id": caller_identity.user_id,
                     "aws_arn": caller_identity.arn,
+                    "k8s_provider": k8s_provider,
                     **aws_metadata,
                 },
             )
 
         except Exception as e:
             return ModuleDeploymentResult(
-                success=False, version="", errors=[str(e)]  # Empty string since AWS module doesn't use versions
+                success=False,
+                version="",
+                errors=[str(e)]
             )
 
     def get_dependencies(self) -> List[str]:
diff --git a/modules/aws/eks.py b/modules/aws/eks.py
index 01cdb38..554fa4c 100644
--- a/modules/aws/eks.py
+++ b/modules/aws/eks.py
@@ -469,6 +469,10 @@ def deploy_cluster(
             opts=ResourceOptions(parent=cluster),
         )
 
+        # Export the kubeconfig for use in other resources.
+        secret_kubeconfig = pulumi.Output.secret(internal_kubeconfig)
+        pulumi.export("kubeconfig", secret_kubeconfig)
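+
+        # The kubeconfig is wrapped in Output.secret() so Pulumi masks it in state and CLI
+        # output; consumers can read it with `pulumi stack output kubeconfig --show-secrets`.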
Must be one of {valid_versions}") return v diff --git a/modules/core/config.py b/modules/core/config.py index 5febd9c..8c2a40d 100644 --- a/modules/core/config.py +++ b/modules/core/config.py @@ -533,9 +533,17 @@ def get_enabled_modules(self) -> List[str]: # Import module's config module_config = self.get_module_config(module_name) - # Check if module is enabled + # Check if module is enabled either directly or through submodules is_enabled = module_config.get("enabled", False) + # For kubernetes module, also check submodules + if module_name == "kubernetes": + # Check if any kubernetes submodule is enabled + for submodule, subconfig in module_config.items(): + if isinstance(subconfig, dict) and subconfig.get("enabled", False): + is_enabled = True + break + if is_enabled: log.info(f"{module_name} module is enabled in configuration") enabled_modules.append(module_name) diff --git a/modules/core/deployment.py b/modules/core/deployment.py index 40c392b..d673df5 100644 --- a/modules/core/deployment.py +++ b/modules/core/deployment.py @@ -1,10 +1,12 @@ # ./modules/core/deployment.py -from typing import List, Any +from typing import List, Any, Dict from pulumi import log from modules.core.types import InitializationConfig from modules.core.interfaces import ModuleInterface from modules.core.exceptions import ModuleDeploymentError +from modules.kubernetes.deployment import KubernetesModule +from modules.kubernetes.provider import KubernetesProvider class DeploymentManager: @@ -21,26 +23,77 @@ def __init__(self, init_config: InitializationConfig, config_manager: Any): self.init_config = init_config self.config_manager = config_manager self.modules_metadata = {} + self.k8s_provider = None def deploy_modules(self, modules_to_deploy: List[str]) -> None: for module_name in modules_to_deploy: try: - module_class = self.load_module(module_name) - module_config = self.config_manager.get_module_config(module_name) + if module_name == "kubernetes": + # Get kubernetes config + k8s_config = self.config_manager.get_module_config(module_name) - # Add compliance config to module config - module_config["compliance"] = self.init_config.compliance_config.model_dump() + # Deploy each enabled kubernetes submodule + if k8s_config.get("prometheus", {}).get("enabled"): + self.deploy_k8s_submodule("prometheus", k8s_config.get("prometheus", {})) - module_instance = module_class(init_config=self.init_config) - result = module_instance.deploy(module_config) + if k8s_config.get("flux", {}).get("enabled"): + self.deploy_k8s_submodule("flux", k8s_config.get("flux", {})) - if result.success: - self.modules_metadata[module_name] = result.metadata + if k8s_config.get("crossplane", {}).get("enabled"): + self.deploy_k8s_submodule("crossplane", k8s_config.get("crossplane", {})) else: - raise ModuleDeploymentError(f"Module {module_name} deployment failed.") + # Standard module deployment + module_class = self.load_module(module_name) + module_config = self.config_manager.get_module_config(module_name) + module_config["compliance"] = self.init_config.compliance_config.model_dump() + + module_instance = module_class(init_config=self.init_config) + result = module_instance.deploy(module_config) + + if result.success: + self.modules_metadata[module_name] = result.metadata + if module_name == "aws" and "k8s_provider" in result.metadata: + self.k8s_provider = result.metadata["k8s_provider"] + else: + raise ModuleDeploymentError(f"Module {module_name} deployment failed.") except Exception as e: raise ModuleDeploymentError(f"Error deploying 
module {module_name}: {str(e)}") from e + def deploy_k8s_submodule(self, submodule_name: str, submodule_config: Dict[str, Any]) -> None: + """Deploy a Kubernetes submodule.""" + try: + # Load the submodule + if submodule_name == "prometheus": + module = __import__("modules.kubernetes.prometheus.deployment", fromlist=[""]) + module_class = getattr(module, "PrometheusModule") + elif submodule_name == "flux": + module = __import__("modules.kubernetes.flux.deployment", fromlist=[""]) + module_class = getattr(module, "FluxModule") + elif submodule_name == "crossplane": + module = __import__("modules.kubernetes.crossplane.deployment", fromlist=[""]) + module_class = getattr(module, "CrossplaneModule") + else: + raise ValueError(f"Unknown kubernetes submodule: {submodule_name}") + + # Initialize and configure the module + module_instance = module_class(init_config=self.init_config) + if self.k8s_provider: + module_instance.set_provider(KubernetesProvider(self.k8s_provider)) + + # Deploy the submodule + result = module_instance.deploy(submodule_config) + if result.success: + self.modules_metadata[f"kubernetes_{submodule_name}"] = result.metadata + else: + raise ModuleDeploymentError(f"Kubernetes submodule {submodule_name} deployment failed.") + + except Exception as e: + raise ModuleDeploymentError(f"Error deploying kubernetes submodule {submodule_name}: {str(e)}") + + def get_k8s_provider(self): + """Get the Kubernetes provider if available""" + return self.k8s_provider + # Dynamically load module classes from modules//deployment.py # This allows us to maintain a shared interface for all modules while # still allowing each module to be implemented independently @@ -50,10 +103,37 @@ def load_module(self, module_name: str) -> ModuleInterface: Dynamically load module classes from modules//deployment.py """ try: - module = __import__(f"modules.{module_name}.deployment", fromlist=[""]) - module_class = getattr(module, f"{module_name.capitalize()}Module") - log.info(f"Successfully loaded module: {module_name}") - return module_class + if module_name == "kubernetes": + # Get kubernetes config to check which submodule to load + k8s_config = self.config_manager.get_module_config(module_name) + + # Check for enabled submodules + # TODO: re-enable dynamic submodule loading without hardcoding submodule names + if k8s_config.get("prometheus", {}).get("enabled"): + # Import and return Prometheus module + module = __import__(f"modules.kubernetes.prometheus.deployment", fromlist=[""]) + module_class = getattr(module, "PrometheusModule") + log.info(f"Successfully loaded kubernetes submodule: prometheus") + return module_class + if k8s_config.get("flux", {}).get("enabled"): + # Import and return Flux module + module = __import__(f"modules.kubernetes.flux.deployment", fromlist=[""]) + module_class = getattr(module, "FluxModule") + log.info(f"Successfully loaded kubernetes submodule: flux") + return module_class + if k8s_config.get("crossplane", {}).get("enabled"): + # Import and return Crossplane module + module = __import__(f"modules.kubernetes.crossplane.deployment", fromlist=[""]) + module_class = getattr(module, "CrossplaneModule") + log.info(f"Successfully loaded kubernetes submodule: crossplane") + return module_class + else: + # Standard module loading + module = __import__(f"modules.{module_name}.deployment", fromlist=[""]) + module_class = getattr(module, f"{module_name.capitalize()}Module") + log.info(f"Successfully loaded module: {module_name}") + return module_class + except ImportError as e: 
log.error(f"Failed to load module {module_name}: {str(e)}") raise diff --git a/modules/core/interfaces.py b/modules/core/interfaces.py index af2b288..02582c2 100644 --- a/modules/core/interfaces.py +++ b/modules/core/interfaces.py @@ -29,10 +29,13 @@ class ModuleDeploymentResult(BaseModel): success: bool version: Optional[str] = Field(default="") - resources: List[str] = Field(default_factory=list) + resources: List[str] = Field(default_factory=list, description="List of resource names/identifiers") errors: List[str] = Field(default_factory=list) metadata: Dict[str, Any] = Field(default_factory=dict) + class Config: + arbitrary_types_allowed = True + class DeploymentContext(Protocol): """Protocol defining the deployment context interface.""" diff --git a/modules/kubernetes/__init__.py b/modules/kubernetes/__init__.py index e69de29..dfb796e 100644 --- a/modules/kubernetes/__init__.py +++ b/modules/kubernetes/__init__.py @@ -0,0 +1,4 @@ +# ./modules/kubernetes/__init__.py +""" +Kubernetes submodule +""" diff --git a/modules/kubernetes/cert_manager/README.md b/modules/kubernetes/cert_manager/README.md deleted file mode 100644 index 24bb224..0000000 --- a/modules/kubernetes/cert_manager/README.md +++ /dev/null @@ -1,198 +0,0 @@ -# Cert Manager Module Guide - -# TODO: Convert from Kargo to generalized Konductor Framework Template repo docs content - -Welcome to the **Cert Manager Module** for the Konductor IaC Framework! This guide is tailored for both newcomers to DevOps and experienced developers, providing a comprehensive overview of how to deploy and configure the Cert Manager module within the Kargo platform. - ---- - -## Table of Contents - -- [Introduction](#introduction) -- [Why Use Cert Manager?](#why-use-cert-manager) -- [Getting Started](#getting-started) -- [Enabling the Module](#enabling-the-module) -- [Configuration Options](#configuration-options) - - [Default Settings](#default-settings) - - [Customizing Your Deployment](#customizing-your-deployment) - -- [Module Components Explained](#module-components-explained) - - [Namespace Creation](#namespace-creation) - - [Helm Chart Deployment](#helm-chart-deployment) - - [Self-Signed Cluster Issuer Setup](#self-signed-cluster-issuer-setup) - -- [Using the Module](#using-the-module) - - [Example Usage](#example-usage) - -- [Troubleshooting and FAQs](#troubleshooting-and-faqs) -- [Additional Resources](#additional-resources) -- [Conclusion](#conclusion) - ---- - -## Introduction - -The Cert Manager module automates the management of SSL/TLS certificates in your Kubernetes cluster using [cert-manager](https://cert-manager.io/). It simplifies the process of obtaining, renewing, and managing certificates, enhancing the security of your applications without manual intervention. - ---- - -## Why Use Cert Manager? - -- **Automation**: Automatically provisions and renews certificates. -- **Integration**: Works seamlessly with Kubernetes Ingress resources and other services. -- **Security**: Enhances security by ensuring certificates are always up-to-date. -- **Compliance**: Helps meet compliance requirements by managing PKI effectively. - ---- - -## Getting Started - -### Prerequisites - -- **Kubernetes Cluster**: Ensure you have access to a Kubernetes cluster. -- **Pulumi CLI**: Install the Pulumi CLI and configure it. -- **Kubeconfig**: Your kubeconfig file should be properly set up. - -### Setup Steps - -1. **Navigate to the Kargo Pulumi Directory**: - -```bash -cd Kargo/pulumi -``` - -2. 
-2. **Install Dependencies**:
-
-```bash
-pip install -r requirements.txt
-```
-
-3. **Initialize Pulumi Stack**:
-
-```bash
-pulumi stack init dev
-```
-
----
-
-## Enabling the Module
-
-The Cert Manager module is enabled by default. To verify or modify its enabled status, adjust your Pulumi configuration.
-
-### Verifying Module Enablement
-
-```yaml
-# Pulumi.<stack-name>.yaml
-
-config:
-  cert_manager:
-    enabled: true  # Set to false to disable
-```
-
-Alternatively, use the Pulumi CLI:
-
-```bash
-pulumi config set --path cert_manager.enabled true
-```
-
----
-
-## Configuration Options
-
-### Default Settings
-
-The module is designed to work out-of-the-box with default settings:
-
-- **Namespace**: `cert-manager`
-- **Version**: Defined in `default_versions.json`
-- **Cluster Issuer Name**: `cluster-selfsigned-issuer`
-- **Install CRDs**: `true`
-
-### Customizing Your Deployment
-
-You can tailor the module to fit your specific needs by customizing its configuration.
-
-#### Available Configuration Parameters
-
-- **enabled** *(bool)*: Enable or disable the module.
-- **namespace** *(string)*: Kubernetes namespace for cert-manager.
-- **version** *(string)*: Helm chart version to deploy. Use `'latest'` to fetch the most recent stable version.
-- **cluster_issuer** *(string)*: Name of the ClusterIssuer resource.
-- **install_crds** *(bool)*: Whether to install Custom Resource Definitions.
-
-#### Example Custom Configuration
-
-```yaml
-config:
-  cert_manager:
-    enabled: true
-    namespace: "my-cert-manager"
-    version: "1.15.3"
-    cluster_issuer: "my-cluster-issuer"
-    install_crds: true
-```
-
----
-
-## Module Components Explained
-
-### Namespace Creation
-
-A dedicated namespace is created to isolate cert-manager resources.
-
-- **Why?**: Ensures better organization and avoids conflicts.
-- **Customizable**: Change the namespace using the `namespace` parameter.
-
-### Helm Chart Deployment
-
-Deploys cert-manager using Helm.
-
-- **Chart Repository**: `https://charts.jetstack.io`
-- **Version Management**: Specify a version or use `'latest'`.
-- **Custom Values**: Resource requests and limits are set for optimal performance.
-
-### Self-Signed Cluster Issuer Setup
-
-Sets up a self-signed ClusterIssuer for certificate provisioning.
-
-- **Root ClusterIssuer**: Creates a root issuer.
-- **CA Certificate**: Generates a CA certificate stored in a Kubernetes Secret.
-- **Primary ClusterIssuer**: Issues certificates for your applications using the CA certificate.
-- **Exported Values**: CA certificate data is exported for use in other modules.
-
----
-
-## Using the Module
-
-### Example Usage
-
-After enabling and configuring the module, deploy it using Pulumi:
-
-```bash
-pulumi up
-```
-
----
-
-## Troubleshooting and FAQs
-
-**Q1: Cert-manager pods are not running.**
-
-- **A**: Check the namespace and ensure that CRDs are installed. Verify the Kubernetes version compatibility.
-
-**Q2: Certificates are not being issued.**
-
-- **A**: Ensure that the ClusterIssuer is correctly configured and that your Ingress resources reference it.
-
-**Q3: How do I update cert-manager to a newer version?**
-
-- **A**: Update the `version` parameter in your configuration and run `pulumi up`.
-
----
-
-## Additional Resources
-
-- **cert-manager Documentation**: [cert-manager.io/docs](https://cert-manager.io/docs/)
-- **Kargo Project**: [Kargo GitHub Repository](https://github.com/ContainerCraft/Kargo)
-- **Pulumi Kubernetes Provider**: [Pulumi Kubernetes Docs](https://www.pulumi.com/docs/reference/pkg/kubernetes/)
-- **Helm Charts Repository**: [Artifact Hub - cert-manager](https://artifacthub.io/packages/helm/cert-manager/cert-manager)
diff --git a/modules/kubernetes/cert_manager/deploy.py b/modules/kubernetes/cert_manager/deploy.py
deleted file mode 100644
index e6a98e7..0000000
--- a/modules/kubernetes/cert_manager/deploy.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# pulumi/modules/cert_manager/deploy.py
-
-"""
-Deploys the cert-manager module with proper dependency management.
-"""
-
-from typing import List, Dict, Any, Tuple, Optional, cast
-
-import pulumi
-import pulumi_kubernetes as k8s
-from pulumi import log
-
-from core.types import NamespaceConfig
-from core.utils import get_latest_helm_chart_version, wait_for_crds
-from core.resource_helpers import (
-    create_namespace,
-    create_helm_release,
-    create_custom_resource,
-    create_secret,
-)
-
-from .types import CertManagerConfig
-
-
-def deploy_cert_manager_module(
-    config_cert_manager: CertManagerConfig,
-    global_depends_on: List[pulumi.Resource],
-    k8s_provider: k8s.Provider,
-) -> Tuple[str, k8s.helm.v3.Release, str]:
-    """
-    Deploys the cert-manager module and returns the version, release resource, and CA certificate.
-    """
-    # TODO: Create module specific dependencies object to avoid blocking global resources on k8s_provider or other module specific dependencies
-
-    # Deploy cert-manager
-    cert_manager_version, release, ca_cert_b64 = deploy_cert_manager(
-        config_cert_manager=config_cert_manager,
-        depends_on=global_depends_on,  # Correctly pass the global dependencies
-        k8s_provider=k8s_provider,
-    )
-
-    # Update global dependencies
-    global_depends_on.append(release)
-
-    return cert_manager_version, release, ca_cert_b64
-
-
-def deploy_cert_manager(
-    config_cert_manager: CertManagerConfig,
-    depends_on: List[pulumi.Resource],
-    k8s_provider: k8s.Provider,
-) -> Tuple[str, k8s.helm.v3.Release, str]:
-    """
-    Deploys cert-manager using Helm and sets up cluster issuers,
-    ensuring that CRDs are available before creating custom resources.
-    """
- """ - namespace = config_cert_manager.namespace - version = config_cert_manager.version - cluster_issuer_name = config_cert_manager.cluster_issuer - install_crds = config_cert_manager.install_crds - - # Create Namespace using the helper function - namespace_resource = create_namespace( - name=namespace, - k8s_provider=k8s_provider, - parent=k8s_provider, - depends_on=depends_on, - ) - # TODO: consider adding k8s_provider to module_depends_on dependencies - - # Get Helm Chart Version - # TODO: set the chart name and repo URL as variables in the CertManagerConfig class to allow for user configuration - chart_name = "cert-manager" - chart_repo_url = "https://charts.jetstack.io" - - # TODO: re-implement into the get_module_config function and adopt across all modules to reduce code duplication - if version == "latest" or version is None: - version = get_latest_helm_chart_version(chart_repo_url, chart_name) - log.info(f"Setting cert-manager chart version to latest: {version}") - else: - log.info(f"Using cert-manager chart version: {version}") - - # Generate Helm values - helm_values = generate_helm_values(config_cert_manager) - - # Create Helm Release using the helper function - release = create_helm_release( - name=chart_name, - args=k8s.helm.v3.ReleaseArgs( - chart=chart_name, - version=version, - namespace=namespace, - skip_await=False, - repository_opts=k8s.helm.v3.RepositoryOptsArgs(repo=chart_repo_url), - values=helm_values, - ), - opts=pulumi.ResourceOptions( - parent=namespace_resource, - custom_timeouts=pulumi.CustomTimeouts( - create="8m", update="4m", delete="4m" - ), - ), - k8s_provider=k8s_provider, - depends_on=[namespace_resource] + depends_on, - ) - - # Wait for the CRDs to be registered - # TODO: re-evaluate effectiveness of approach to wait for CRDs and complete the wait_for_crds implementation until it's effective. - # The current implementation fails to wait for the CRDs to be registered before continuing with child and dependent resources. - crds = wait_for_crds( - crd_names=[ - "certificaterequests.cert-manager.io", - "certificates.cert-manager.io", - "challenges.acme.cert-manager.io", - "clusterissuers.cert-manager.io", - "issuers.cert-manager.io", - "orders.acme.cert-manager.io", - ], - k8s_provider=k8s_provider, - depends_on=[release], - parent=release, - ) - - # Create Cluster Issuers using the helper function - # TODO: - # - make self-signed-issuer configurable enabled/disabled from boolean set in cert_manager/types.py CertManagerConfig class, default to enabled. - ( - cluster_issuer_root, - cluster_issuer_ca_certificate, - cluster_issuer, - ca_secret, - ) = create_cluster_issuers( - cluster_issuer_name, namespace, release, crds, k8s_provider - ) - - # Extract the CA certificate from the secret - # TODO: - # - re-evaluate relevance. IIRC this is used to return unwrapped secret values as b64 encoded strings for OpenUnison configuration. - # - consider maintaining the secret object as a return value for future use in other modules without exporting the secret values. - # - if user need requires the CA for client secret trust then consider documenting the use case and user instructions for utilization. 
-    if ca_secret:
-        ca_data_tls_crt_b64 = ca_secret.data.apply(lambda data: data["tls.crt"])
-    else:
-        ca_data_tls_crt_b64 = ""
-
-    return version, release, ca_data_tls_crt_b64
-
-
-def create_cluster_issuers(
-    cluster_issuer_name: str,
-    namespace: str,
-    release: k8s.helm.v3.Release,
-    crds: List[pulumi.Resource],
-    k8s_provider: k8s.Provider,
-) -> Tuple[
-    Optional[k8s.apiextensions.CustomResource],
-    Optional[k8s.apiextensions.CustomResource],
-    Optional[k8s.apiextensions.CustomResource],
-    Optional[k8s.core.v1.Secret],
-]:
-    """
-    Creates cluster issuers required for cert-manager, ensuring dependencies on CRDs.
-
-    Args:
-        cluster_issuer_name (str): The name of the cluster issuer.
-        namespace (str): The Kubernetes namespace.
-        release (k8s.helm.v3.Release): The Helm release resource.
-        crds (List[pulumi.Resource]): List of CRDs.
-        k8s_provider (k8s.Provider): Kubernetes provider.
-
-    Returns:
-        Tuple containing:
-            - ClusterIssuer for the self-signed root.
-            - ClusterIssuer's CA certificate.
-            - Primary ClusterIssuer.
-            - The secret resource containing the CA certificate.
-    """
-    try:
-        # SelfSigned Root Issuer
-        cluster_issuer_root = create_custom_resource(
-            name="cluster-selfsigned-issuer-root",
-            args={
-                "apiVersion": "cert-manager.io/v1",
-                "kind": "ClusterIssuer",
-                "metadata": {
-                    "name": "cluster-selfsigned-issuer-root",
-                },
-                "spec": {"selfSigned": {}},
-            },
-            opts=pulumi.ResourceOptions(
-                parent=release,
-                provider=k8s_provider,
-                depends_on=crds,
-                custom_timeouts=pulumi.CustomTimeouts(
-                    create="5m", update="5m", delete="5m"
-                ),
-            ),
-        )
-
-        # CA Certificate Issuer
-        cluster_issuer_ca_certificate = create_custom_resource(
-            name="cluster-selfsigned-issuer-ca",
-            args={
-                "apiVersion": "cert-manager.io/v1",
-                "kind": "Certificate",
-                "metadata": {
-                    "name": "cluster-selfsigned-issuer-ca",
-                    "namespace": namespace,
-                },
-                "spec": {
-                    "commonName": "cluster-selfsigned-issuer-ca",
-                    "duration": "2160h0m0s",
-                    "isCA": True,
-                    "issuerRef": {
-                        "group": "cert-manager.io",
-                        "kind": "ClusterIssuer",
-                        "name": "cluster-selfsigned-issuer-root",
-                    },
-                    "privateKey": {"algorithm": "RSA", "size": 2048},
-                    "renewBefore": "360h0m0s",
-                    "secretName": "cluster-selfsigned-issuer-ca",
-                },
-            },
-            opts=pulumi.ResourceOptions(
-                parent=cluster_issuer_root,
-                provider=k8s_provider,
-                depends_on=[cluster_issuer_root],
-                custom_timeouts=pulumi.CustomTimeouts(
-                    create="5m", update="5m", delete="10m"
-                ),
-            ),
-        )
-
-        # Main Cluster Issuer
-        cluster_issuer = create_custom_resource(
-            name=cluster_issuer_name,
-            args={
-                "apiVersion": "cert-manager.io/v1",
-                "kind": "ClusterIssuer",
-                "metadata": {
-                    "name": cluster_issuer_name,
-                },
-                "spec": {
-                    "ca": {"secretName": "cluster-selfsigned-issuer-ca"},
-                },
-            },
-            opts=pulumi.ResourceOptions(
-                parent=cluster_issuer_ca_certificate,
-                provider=k8s_provider,
-                depends_on=[cluster_issuer_ca_certificate],
-                custom_timeouts=pulumi.CustomTimeouts(
-                    create="5m", update="5m", delete="5m"
-                ),
-            ),
-        )
-
-        # Fetch CA Secret if not in dry-run
-        if not pulumi.runtime.is_dry_run():
-            ca_secret = k8s.core.v1.Secret.get(
-                resource_name="cluster-selfsigned-issuer-ca",
-                id=f"{namespace}/cluster-selfsigned-issuer-ca",
-                opts=pulumi.ResourceOptions(
-                    parent=cluster_issuer_ca_certificate,
-                    provider=k8s_provider,
-                    depends_on=[cluster_issuer_ca_certificate],
-                ),
-            )
-        else:
-            ca_secret = None
-
-        return (
-            cluster_issuer_root,
-            cluster_issuer_ca_certificate,
-            cluster_issuer,
-            ca_secret,
-        )
-
-    except Exception as e:
-        log.error(f"Error during the creation of cluster issuers: {str(e)}")
of cluster issuers: {str(e)}") - return None, None, None, None - - -def generate_helm_values(config_cert_manager: CertManagerConfig) -> Dict[str, Any]: - """ - Generates Helm values for the CertManager deployment. - """ - return { - "replicaCount": 1, - "installCRDs": config_cert_manager.install_crds, - "resources": { - "limits": {"cpu": "500m", "memory": "1024Mi"}, - "requests": {"cpu": "250m", "memory": "512Mi"}, - }, - } diff --git a/modules/kubernetes/cert_manager/types.py b/modules/kubernetes/cert_manager/types.py deleted file mode 100644 index 34fb2cc..0000000 --- a/modules/kubernetes/cert_manager/types.py +++ /dev/null @@ -1,34 +0,0 @@ -# ./pulumi/modules/cert_manager/types.py -""" -Merges user-provided configuration with default configuration. - -Args: - user_config (Dict[str, Any]): The user-provided configuration. - -Returns: - CertManagerConfig: The merged configuration object. -""" - -from dataclasses import dataclass -from typing import Optional, Dict, Any -import pulumi - - -@dataclass -class CertManagerConfig: - version: Optional[str] = "latest" - namespace: str = "cert-manager" - cluster_issuer: str = "cluster-selfsigned-issuer" - install_crds: bool = True - - @staticmethod - def merge(user_config: Dict[str, Any]) -> "CertManagerConfig": - default_config = CertManagerConfig() - for key, value in user_config.items(): - if hasattr(default_config, key): - setattr(default_config, key, value) - else: - pulumi.log.warn( - f"Unknown configuration key '{key}' in cert_manager config." - ) - return default_config diff --git a/modules/kubernetes/cert_manager/__init__.py b/modules/kubernetes/crossplane/__init__.py similarity index 100% rename from modules/kubernetes/cert_manager/__init__.py rename to modules/kubernetes/crossplane/__init__.py diff --git a/modules/kubernetes/crossplane/deployment.py b/modules/kubernetes/crossplane/deployment.py new file mode 100644 index 0000000..cb95176 --- /dev/null +++ b/modules/kubernetes/crossplane/deployment.py @@ -0,0 +1,108 @@ +# modules/kubernetes/crossplane/deployment.py +from typing import Dict, Any, List +from pulumi import log, ResourceOptions +import pulumi_kubernetes as k8s + +from ..deployment import KubernetesModule +from .types import CrossplaneConfig +from modules.core.interfaces import ModuleDeploymentResult + + +class CrossplaneModule(KubernetesModule): + """Crossplane module implementation.""" + + def __init__(self, init_config): + super().__init__(init_config) + self.name = "crossplane" + + def validate_config(self, config: Dict[str, Any]) -> List[str]: + """Validate Crossplane configuration.""" + try: + if config is None: + config = {} + CrossplaneConfig(**config) + return [] + except Exception as e: + return [str(e)] + + def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult: + """Deploy Crossplane system.""" + try: + # Parse config + crossplane_config = CrossplaneConfig(**config) + + # Deploy namespace + namespace = k8s.core.v1.Namespace( + f"{self.name}-namespace", + metadata=k8s.meta.v1.ObjectMetaArgs( + name=crossplane_config.namespace, + labels={ + "app.kubernetes.io/name": "crossplane", + "app.kubernetes.io/part-of": "crossplane", + } + ), + opts=ResourceOptions(provider=self.provider.provider), + ) + + # Configure Helm values + helm_values = { + "metrics": { + "enabled": True, + }, + "resourcesCrossplane": crossplane_config.resource_limits, + "serviceAccount": { + "create": True, + "name": "crossplane" + }, + "provider": { + "packages": [] + } + } + + # Add feature flags if enabled + if 
+            # Add feature flags if enabled
+            if crossplane_config.enable_external_secret_stores:
+                helm_values["args"] = ["--enable-external-secret-stores"]
+            if crossplane_config.enable_composition_revisions:
+                helm_values["args"] = helm_values.get("args", []) + ["--enable-composition-revisions"]
+
+            # Deploy Helm release
+            log.info(f"Deploying Crossplane Helm release to namespace {crossplane_config.namespace}")
+            release = k8s.helm.v3.Release(
+                f"{self.name}-system",
+                k8s.helm.v3.ReleaseArgs(
+                    chart="crossplane",
+                    repository_opts=k8s.helm.v3.RepositoryOptsArgs(
+                        repo="https://charts.crossplane.io/stable"
+                    ),
+                    version=crossplane_config.version,
+                    namespace=crossplane_config.namespace,
+                    values=helm_values,
+                    wait_for_jobs=True,
+                ),
+                opts=ResourceOptions(
+                    provider=self.provider.provider,
+                    parent=namespace,
+                    depends_on=[namespace],
+                ),
+            )
+
+            return ModuleDeploymentResult(
+                success=True,
+                version=crossplane_config.version,
+                resources=[
+                    f"{self.name}-namespace",
+                    f"{self.name}-system",
+                ],
+                metadata={
+                    "namespace": crossplane_config.namespace,
+                    "version": crossplane_config.version,
+                }
+            )
+
+        except Exception as e:
+            log.error(f"Failed to deploy Crossplane: {str(e)}")
+            return ModuleDeploymentResult(
+                success=False,
+                version="",
+                errors=[str(e)]
+            )
diff --git a/modules/kubernetes/crossplane/types.py b/modules/kubernetes/crossplane/types.py
new file mode 100644
index 0000000..509bde4
--- /dev/null
+++ b/modules/kubernetes/crossplane/types.py
@@ -0,0 +1,53 @@
+# modules/kubernetes/crossplane/types.py
+from typing import Optional, Dict, Any, List
+from pydantic import BaseModel, Field
+from ..types import KubernetesConfig
+from pulumi import log
+
+
+class ProviderConfig(BaseModel):
+    """Configuration for a Crossplane provider."""
+    name: str
+    version: str
+    config: Dict[str, Any] = Field(default_factory=dict)
+
+
+class CrossplaneConfig(KubernetesConfig):
+    """Crossplane module configuration."""
+
+    namespace: str = Field(default="crossplane-system")
+    version: str = Field(default="1.14.1")
+    providers: List[str] = Field(default=["provider-aws", "provider-kubernetes"])
+    aws_provider_version: str = Field(default="v0.43.1")
+    kubernetes_provider_version: str = Field(default="v0.10.0")
+    aws_provider_config: Dict[str, Any] = Field(default_factory=dict)
+    enable_external_secret_stores: bool = Field(default=True)
+    enable_composition_revisions: bool = Field(default=True)
+    metrics_enabled: bool = Field(default=True)
+    debug_mode: bool = Field(default=False)
+    resource_limits: Dict[str, str] = Field(
+        default_factory=lambda: {
+            "cpu": "100m",
+            "memory": "512Mi"
+        }
+    )
+    resource_requests: Dict[str, str] = Field(
+        default_factory=lambda: {
+            "cpu": "100m",
+            "memory": "256Mi"
+        }
+    )
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    @classmethod
+    def merge(cls, user_config: Dict[str, Any]) -> "CrossplaneConfig":
+        """Merge user configuration with defaults."""
+        config = cls()
+        for key, value in user_config.items():
+            if hasattr(config, key):
+                setattr(config, key, value)
+            else:
+                log.warn(f"Unknown configuration key '{key}' in crossplane config.")
+        return config
diff --git a/modules/kubernetes/deployment.py b/modules/kubernetes/deployment.py
new file mode 100644
index 0000000..cfeff61
--- /dev/null
+++ b/modules/kubernetes/deployment.py
@@ -0,0 +1,43 @@
+# ./modules/kubernetes/deployment.py
+"""
+Kubernetes submodule deployment handler
+"""
+from typing import Dict, Any, List, Optional
+from pulumi import log
+from ..core.interfaces import ModuleInterface, ModuleDeploymentResult
+from ..core.types import InitializationConfig
+from .provider import KubernetesProvider
+from .types import KubernetesConfig
+
+
+class KubernetesModule(ModuleInterface):
+    """Base Kubernetes module implementation."""
+
+    def __init__(self, init_config: InitializationConfig):
+        self.name = "kubernetes"
+        self.init_config = init_config
+        self._provider: Optional[KubernetesProvider] = None
+
+    def set_provider(self, provider: KubernetesProvider) -> None:
+        """Set the Kubernetes provider."""
+        self._provider = provider
+
+    @property
+    def provider(self) -> KubernetesProvider:
+        if not self._provider:
+            raise RuntimeError("Kubernetes provider not initialized")
+        return self._provider
+
+    def validate_config(self, config: Dict[str, Any]) -> List[str]:
+        """Validate Kubernetes configuration."""
+        try:
+            if config is None:
+                config = {}
+            KubernetesConfig(**config)
+            return []
+        except Exception as e:
+            return [str(e)]
+
+    def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
+        """Deploy Kubernetes resources."""
+        raise NotImplementedError("Subclasses must implement deploy()")
diff --git a/modules/kubernetes/flux/__init__.py b/modules/kubernetes/flux/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/modules/kubernetes/flux/deployment.py b/modules/kubernetes/flux/deployment.py
new file mode 100644
index 0000000..857d728
--- /dev/null
+++ b/modules/kubernetes/flux/deployment.py
@@ -0,0 +1,153 @@
+# modules/kubernetes/flux/deployment.py
+from typing import Dict, Any, List
+from pulumi import log, ResourceOptions, CustomTimeouts
+import pulumi_kubernetes as k8s
+
+from ..deployment import KubernetesModule
+from .types import FluxConfig
+from modules.core.interfaces import ModuleDeploymentResult
+
+
+class FluxModule(KubernetesModule):
+    """Flux module implementation."""
+
+    def __init__(self, init_config):
+        super().__init__(init_config)
+        self.name = "flux"
+
+    def validate_config(self, config: Dict[str, Any]) -> List[str]:
+        """Validate Flux configuration."""
+        try:
+            if config is None:
+                config = {}
+            FluxConfig(**config)
+            return []
+        except Exception as e:
+            return [str(e)]
+
+    def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
+        """Deploy Flux system."""
+        try:
+            # Parse config
+            flux_config = FluxConfig(**config)
+
+            # Deploy namespace
+            namespace = k8s.core.v1.Namespace(
+                f"{self.name}-namespace",
+                metadata=k8s.meta.v1.ObjectMetaArgs(
+                    name=flux_config.namespace,
+                    labels=flux_config.labels,
+                    annotations=flux_config.annotations,
+                ),
+                opts=ResourceOptions(provider=self.provider.provider)
+            )
+
+            # Deploy Flux components using Helm
+            chart_name = "flux2"
+            chart_url = "https://fluxcd-community.github.io/helm-charts"
+
+            # Configure Helm values
+            helm_values = {
+                "git": {
+                    "url": flux_config.git_repository,
+                    "branch": flux_config.git_branch,
+                    "path": flux_config.git_path,
+                },
+                "interval": flux_config.interval,
+                "components": {
+                    component: {"enabled": component in flux_config.components}
+                    for component in [
+                        "source-controller",
+                        "kustomize-controller",
+                        "helm-controller",
+                        "notification-controller",
+                    ]
+                },
+            }
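+
+            # TODO: verify this values layout against the fluxcd-community flux2 chart schema;
+            # the chart may expect per-controller toggles (e.g. sourceController.create) rather
+            # than a components map, so the keys above are a best-effort assumption.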
create="2m", + update="2m", + delete="2m" + ), + ), + ) + + # TODO: Uncomment this when we have a git repository + ## Create GitRepository resource + #git_repo = k8s.apiextensions.CustomResource( + # f"{self.name}-gitrepository", + # api_version="source.toolkit.fluxcd.io/v1", + # kind="GitRepository", + # metadata=k8s.meta.v1.ObjectMetaArgs( + # name="flux-system", + # namespace=flux_config.namespace, + # ), + # spec={ + # "interval": flux_config.interval, + # "ref": { + # "branch": flux_config.git_branch, + # }, + # "url": flux_config.git_repository, + # }, + # opts=ResourceOptions(provider=self.provider.provider, parent=release, depends_on=[release]), + #) + + ## Create Kustomization resource + #kustomization = k8s.apiextensions.CustomResource( + # f"{self.name}-kustomization", + # api_version="kustomize.toolkit.fluxcd.io/v1", + # kind="Kustomization", + # metadata=k8s.meta.v1.ObjectMetaArgs( + # name="flux-system", + # namespace=flux_config.namespace, + # ), + # spec={ + # "interval": flux_config.interval, + # "path": flux_config.git_path, + # "prune": True, + # "sourceRef": { + # "kind": "GitRepository", + # "name": "flux-system", + # }, + # }, + # opts=ResourceOptions(provider=self.provider.provider, parent=git_repo, depends_on=[git_repo]), + #) + + # Return deployment result + resource_ids = [ + f"{self.name}-namespace", + f"{self.name}-system", + f"{self.name}-gitrepository", + f"{self.name}-kustomization", + ] + + return ModuleDeploymentResult( + success=True, + version=flux_config.version or "latest", + resources=resource_ids, + metadata={ + "namespace": flux_config.namespace, + "git_repository": flux_config.git_repository, + "git_branch": flux_config.git_branch, + "components": flux_config.components, + }, + ) + + except Exception as e: + log.error(f"Failed to deploy Flux: {str(e)}") + return ModuleDeploymentResult(success=False, version="", errors=[str(e)]) diff --git a/modules/kubernetes/flux/types.py b/modules/kubernetes/flux/types.py new file mode 100644 index 0000000..162f16f --- /dev/null +++ b/modules/kubernetes/flux/types.py @@ -0,0 +1,34 @@ +# modules/kubernetes/flux/types.py +from typing import Optional, Dict, Any, List +from pydantic import Field + +from pulumi import log + +from ..types import KubernetesConfig + +class FluxConfig(KubernetesConfig): + """Flux module configuration.""" + + namespace: str = Field(default="flux-system") + version: Optional[str] = Field(default=None) + git_repository: str = Field(...) 
diff --git a/modules/kubernetes/flux/types.py b/modules/kubernetes/flux/types.py
new file mode 100644
index 0000000..162f16f
--- /dev/null
+++ b/modules/kubernetes/flux/types.py
@@ -0,0 +1,34 @@
+# modules/kubernetes/flux/types.py
+from typing import Optional, Dict, Any, List
+from pydantic import Field
+
+from pulumi import log
+
+from ..types import KubernetesConfig
+
+
+class FluxConfig(KubernetesConfig):
+    """Flux module configuration."""
+
+    namespace: str = Field(default="flux-system")
+    version: Optional[str] = Field(default=None)
+    git_repository: str = Field(...)  # Required
+    git_branch: str = Field(default="main")
+    git_path: str = Field(default="./")
+    interval: str = Field(default="1m")
+    components: List[str] = Field(
+        default=["source-controller", "kustomize-controller", "helm-controller", "notification-controller"]
+    )
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    @classmethod
+    def merge(cls, user_config: Dict[str, Any]) -> "FluxConfig":
+        """Merge user configuration with defaults."""
+        config = cls()
+        for key, value in user_config.items():
+            if hasattr(config, key):
+                setattr(config, key, value)
+            else:
+                log.warn(f"Unknown configuration key '{key}' in flux config.")
+        return config
diff --git a/modules/kubernetes/prometheus/__init__.py b/modules/kubernetes/prometheus/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/modules/kubernetes/prometheus/deployment.py b/modules/kubernetes/prometheus/deployment.py
new file mode 100644
index 0000000..76edefd
--- /dev/null
+++ b/modules/kubernetes/prometheus/deployment.py
@@ -0,0 +1,202 @@
+# ./modules/kubernetes/prometheus/deployment.py
+
+"""
+Deploys the Prometheus module following the shared design patterns.
+"""
+
+from typing import Dict, Any, List
+from pulumi import log, ResourceOptions
+import pulumi_kubernetes as k8s
+
+from ..deployment import KubernetesModule
+from ..types import KubernetesConfig
+from .types import PrometheusConfig
+from modules.core.interfaces import ModuleDeploymentResult
+
+
+class PrometheusModule(KubernetesModule):
+    """Prometheus module implementation."""
+
+    def __init__(self, init_config):
+        super().__init__(init_config)
+        self.name = "prometheus"
+
+    def validate_config(self, config: Dict[str, Any]) -> List[str]:
+        """Validate Prometheus configuration."""
+        try:
+            if config is None:
+                config = {}
+            PrometheusConfig(**config)
+            return []
+        except Exception as e:
+            return [str(e)]
+
+    def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
+        """Deploy Prometheus stack."""
+        try:
+            # Parse config
+            prometheus_config = PrometheusConfig(**config)
+
+            # Define service definitions that will be created
+            service_definitions = [
+                {
+                    "name": "grafana",
+                    "port": 80,
+                    "targetPort": 3000,
+                    "selector": "app.kubernetes.io/name",
+                },
+                {
+                    "name": "alertmanager",
+                    "port": 9093,
+                    "targetPort": 9093,
+                    "selector": "app.kubernetes.io/name",
+                },
+                {
+                    "name": "prometheus",
+                    "port": 9090,
+                    "targetPort": 9090,
+                    "selector": "app.kubernetes.io/name",
+                },
+            ]
+
+            # Deploy namespace
+            namespace = k8s.core.v1.Namespace(
+                f"{self.name}-namespace",
+                metadata=k8s.meta.v1.ObjectMetaArgs(
+                    name=prometheus_config.namespace,
+                    labels=prometheus_config.labels,
+                    annotations=prometheus_config.annotations,
+                ),
+                opts=ResourceOptions(provider=self.provider.provider)
+            )
+
+            # Deploy Prometheus stack
+            chart_name = "kube-prometheus-stack"
+            chart_url = "https://prometheus-community.github.io/helm-charts"
+
+            # Configure Helm values
+            helm_values = {}
+            if prometheus_config.openunison_enabled:
+                helm_values["grafana"] = {
+                    "grafana.ini": {
+                        "users": {
+                            "allow_sign_up": False,
+                            "auto_assign_org": True,
+                            "auto_assign_org_role": "Admin",
+                        },
+                        "auth.proxy": {
+                            "enabled": True,
+                            "header_name": "X-WEBAUTH-USER",
+                            "auto_sign_up": True,
+                            "headers": "Groups:X-WEBAUTH-GROUPS",
+                        },
+                    }
+                }
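+
+            # Grafana's auth.proxy mode trusts identity headers injected by an authenticating
+            # reverse proxy (OpenUnison in this setup): X-WEBAUTH-USER carries the login and
+            # X-WEBAUTH-GROUPS the group memberships mapped by the Groups header rule above.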
+            # Deploy Helm release
+            release = k8s.helm.v3.Release(
+                chart_name,
+                k8s.helm.v3.ReleaseArgs(
+                    chart=chart_name,
+                    version=prometheus_config.version,
+                    namespace=prometheus_config.namespace,
+                    repository_opts=k8s.helm.v3.RepositoryOptsArgs(
+                        repo=chart_url
+                    ),
+                    values=helm_values,
+                ),
+                opts=ResourceOptions(
+                    provider=self.provider.provider,
+                    parent=namespace,
+                    depends_on=[namespace]
+                )
+            )
+
+            # Create services
+            services = self._create_services(prometheus_config, namespace, release)
+
+            # Convert Pulumi Output objects to strings for resource IDs
+            resource_ids = [
+                f"{self.name}-namespace",  # Use resource names instead of IDs
+                f"{chart_name}",
+            ]
+            resource_ids.extend([f"{self.name}-{svc_def['name']}" for svc_def in service_definitions])
+
+            return ModuleDeploymentResult(
+                success=True,
+                version=prometheus_config.version or "latest",
+                resources=resource_ids,  # Use string resource names
+                metadata={
+                    "namespace": prometheus_config.namespace,
+                    "chart_version": prometheus_config.version,
+                    "services": [svc_def["name"] for svc_def in service_definitions]
+                }
+            )
+
+        except Exception as e:
+            log.error(f"Failed to deploy Prometheus: {str(e)}")
+            return ModuleDeploymentResult(
+                success=False,
+                version="",
+                errors=[str(e)]
+            )
+
+    def _create_services(
+        self,
+        config: PrometheusConfig,
+        namespace: k8s.core.v1.Namespace,
+        release: k8s.helm.v3.Release
+    ) -> List[k8s.core.v1.Service]:
+        """Create Prometheus services."""
+        services = []
+
+        service_definitions = [
+            {
+                "name": "grafana",
+                "port": 80,
+                "targetPort": 3000,
+                "selector": "app.kubernetes.io/name",
+            },
+            {
+                "name": "alertmanager",
+                "port": 9093,
+                "targetPort": 9093,
+                "selector": "app.kubernetes.io/name",
+            },
+            {
+                "name": "prometheus",
+                "port": 9090,
+                "targetPort": 9090,
+                "selector": "app.kubernetes.io/name",
+            },
+        ]
+
+        for svc_def in service_definitions:
+            service = k8s.core.v1.Service(
+                f"{self.name}-{svc_def['name']}",
+                metadata=k8s.meta.v1.ObjectMetaArgs(
+                    name=svc_def["name"],
+                    namespace=config.namespace,
+                    labels=config.labels,
+                    annotations=config.annotations,
+                ),
+                spec=k8s.core.v1.ServiceSpecArgs(
+                    type="ClusterIP",
+                    ports=[
+                        k8s.core.v1.ServicePortArgs(
+                            name="http-web",
+                            port=svc_def["port"],
+                            protocol="TCP",
+                            target_port=svc_def["targetPort"],
+                        )
+                    ],
+                    selector={svc_def["selector"]: svc_def["name"]},
+                ),
+                opts=ResourceOptions(
+                    provider=self.provider.provider,
+                    parent=namespace,
+                    depends_on=[release]
+                ),
+            )
+            services.append(service)
+
+        return services
diff --git a/modules/kubernetes/prometheus/types.py b/modules/kubernetes/prometheus/types.py
new file mode 100644
index 0000000..18c8081
--- /dev/null
+++ b/modules/kubernetes/prometheus/types.py
@@ -0,0 +1,34 @@
+# modules/kubernetes/prometheus/types.py
+
+"""
+Defines the data structure for the Prometheus module configuration.
+"""
+
+from typing import Optional, Dict, Any
+from pydantic import Field
+from ..types import KubernetesConfig
+from pulumi import log
+
+
+class PrometheusConfig(KubernetesConfig):
+    """Prometheus module configuration."""
+
+    namespace: str = Field(default="monitoring")
+    version: Optional[str] = Field(default=None)
+    openunison_enabled: bool = Field(default=False)
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    @classmethod
+    def merge(cls, user_config: Dict[str, Any]) -> "PrometheusConfig":
+        """Merge user configuration with defaults."""
+        config = cls()
+        for key, value in user_config.items():
+            if hasattr(config, key):
+                setattr(config, key, value)
+            else:
+                log.warn(
+                    f"Unknown configuration key '{key}' in prometheus config."
+                )
+        return config
diff --git a/modules/kubernetes/provider.py b/modules/kubernetes/provider.py
new file mode 100644
index 0000000..f402fcc
--- /dev/null
+++ b/modules/kubernetes/provider.py
@@ -0,0 +1,28 @@
+# ./modules/kubernetes/provider.py
+"""
+Kubernetes submodule authentication provider
+"""
+from typing import Optional
+
+import pulumi_kubernetes as k8s
+from pulumi import ResourceOptions, log
+
+
+class KubernetesProvider:
+    """Manages Kubernetes provider initialization and configuration."""
+
+    def __init__(self, k8s_provider: Optional[k8s.Provider] = None):
+        """
+        Initialize Kubernetes provider.
+
+        Args:
+            k8s_provider: Optional existing provider (e.g. from EKS)
+        """
+        self._provider = k8s_provider
+
+    @property
+    def provider(self) -> k8s.Provider:
+        """Get the Kubernetes provider instance."""
+        if not self._provider:
+            raise RuntimeError("Kubernetes Provider not initialized")
+        return self._provider
diff --git a/modules/kubernetes/types.py b/modules/kubernetes/types.py
new file mode 100644
index 0000000..80b7ce0
--- /dev/null
+++ b/modules/kubernetes/types.py
@@ -0,0 +1,26 @@
+# ./modules/kubernetes/types.py
+"""
+Kubernetes submodule shared types
+"""
+from typing import Dict, Any, Optional
+from pydantic import BaseModel, Field
+
+from ..core.types import ComplianceConfig
+
+
+class KubernetesConfig(BaseModel):
+    """Base Kubernetes configuration."""
+
+    enabled: bool = Field(default=True)
+    provider_type: str = Field(default="eks")  # eks, kind, etc
+    namespace: str = Field(default="default")
+    labels: Dict[str, str] = Field(default_factory=dict)
+    annotations: Dict[str, Any] = Field(default_factory=dict)
+    compliance: ComplianceConfig = Field(default_factory=ComplianceConfig)
+
+    @classmethod
+    def merge(cls, user_config: Dict[str, Any]) -> "KubernetesConfig":
+        """Merge user configuration with defaults."""
+        config = cls()
+        for key, value in user_config.items():
+            if hasattr(config, key):
+                setattr(config, key, value)
+        return config
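+
+# Example (illustrative): build a base config with user overrides. Note that unknown keys
+# are silently ignored here, unlike the submodule merge() implementations, which log a warning.
+#   cfg = KubernetesConfig.merge({"namespace": "apps", "labels": {"team": "platform"}})
+#   assert cfg.namespace == "apps"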