working prometheus, flux, and crossplane iac deployment
usrbinkat committed Dec 10, 2024
1 parent b515722 commit 277a4f3
Showing 24 changed files with 901 additions and 587 deletions.
2 changes: 1 addition & 1 deletion .github/bin/tree
@@ -11,4 +11,4 @@ else
fi

set -x
$TREE_BIN -a -I .git -I .mypy_cache -I .github -I demos -I .pytest_cache -I .pixi -I .actrc -I .cursorrules -I .cursorignore -I __pycache__
$TREE_BIN -a -I .git -I .mypy_cache -I .github -I demos -I .pytest_cache -I .pixi -I .actrc -I .cursorrules -I .cursorignore -I __pycache__ $@
8 changes: 8 additions & 0 deletions __main__.py
@@ -36,6 +36,14 @@ def main() -> None:
log.info(f"Deploying modules: {modules_to_deploy}")
deployment_manager = DeploymentManager(init_config, config_manager)
deployment_manager.deploy_modules(modules_to_deploy)

# Test k8s provider
k8s_provider = deployment_manager.get_k8s_provider()
if k8s_provider:
log.info("Successfully retrieved k8s_provider from EKS cluster")
else:
log.warn("No k8s_provider available - EKS cluster may not be enabled")

else:
# Log and proceed with core IaC execution even if no modules are deployed
log.info("No modules to deploy.. Proceeding with core IaC execution...")
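For reference, a minimal sketch of the get_k8s_provider accessor used above, assuming the AWS module stashes its provider on the manager after an EKS deploy; the attribute name and wiring here are assumptions, not the repository's actual implementation:

from typing import Any, Optional
import pulumi_kubernetes as k8s

class DeploymentManager:
    # Constructor args elided; the real class is built with (init_config, config_manager).
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Assumed wiring: the AWS module stores the EKS-derived provider here;
        # it stays None when no module produced one, matching the log.warn branch.
        self._k8s_provider: Optional[k8s.Provider] = None

    def get_k8s_provider(self) -> Optional[k8s.Provider]:
        return self._k8s_provider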
136 changes: 94 additions & 42 deletions modules/aws/deployment.py
@@ -4,6 +4,7 @@
from pulumi import log
import pulumi_aws as aws
from pulumi import ResourceOptions
import json

from modules.core.interfaces import ModuleInterface, ModuleDeploymentResult
from modules.core.types import InitializationConfig
Expand Down Expand Up @@ -66,6 +67,10 @@ def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
log.info(f"Successfully authenticated as: {caller_identity.arn}")
log.info(f"AWS Account ID: {caller_identity.account_id}")

# Initialize metadata dict
aws_metadata = {}
k8s_provider = None

# Deploy EKS if enabled
if aws_config.eks and aws_config.eks.enabled:
log.info(f"Deploying EKS cluster: {aws_config.eks.name}")
@@ -77,42 +82,48 @@ def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
scaling_config=aws_config.eks.node_groups[0].scaling_config if aws_config.eks.node_groups else None,
)

# Store k8s_provider and EKS info in metadata
k8s_provider = eks_resources["k8s_provider"]
aws_metadata["k8s_provider"] = k8s_provider
aws_metadata["eks_cluster_name"] = aws_config.eks.name

# Export EKS outputs
pulumi.export("eks_cluster_name", eks_resources["cluster"].name)
pulumi.export("eks_cluster_endpoint", eks_resources["cluster"].endpoint)
pulumi.export("eks_cluster_vpc_id", eks_resources["vpc"].id)

# Get Git info as dictionary
# this is required code for initializing the git info, do not remove
# retain this code for now, do not remove
git_info = init_config.git_info.model_dump()

# Collect metadata for resource tagging
global_metadata = collect_global_metadata()
aws_metadata = collect_module_metadata(

# Update aws_metadata with caller identity info
caller_identity = provider.get_caller_identity()
aws_metadata.update({
"account_id": caller_identity.account_id,
"user_id": caller_identity.user_id,
"arn": caller_identity.arn,
})

# Merge with global metadata
combined_metadata = collect_module_metadata(
global_metadata=global_metadata,
modules_metadata={
"aws": {
"account_id": caller_identity.account_id,
"user_id": caller_identity.user_id,
"arn": caller_identity.arn,
}
},
modules_metadata={"aws": aws_metadata},
)

# Sanitize resource tags
resource_tags = provider.sanitize_tags(
{
"Project": aws_metadata["project_name"],
"Stack": aws_metadata["stack_name"],
"GitCommit": aws_metadata.get("commit_hash", ""),
"AWSAccountID": aws_metadata.get("aws", {}).get("account_id", ""),
"Compliance:Framework": "NIST",
"Compliance:Controls": "AC-2, AC-3",
}
)
resource_tags = provider.sanitize_tags({
"Project": combined_metadata["project_name"],
"Stack": combined_metadata["stack_name"],
"GitCommit": combined_metadata.get("commit_hash", ""),
"AWSAccountID": aws_metadata["account_id"],
"Compliance:Framework": "NIST",
"Compliance:Controls": "AC-2, AC-3",
})

# Define AWS resources with sanitized tags
# Bucket is for demonstration purposes only during development
bucket_name = f"konductor-{init_config.stack_name}-{provider.region}"
s3_bucket = aws.s3.Bucket(
bucket_name,
@@ -124,48 +135,89 @@ def deploy(self, config: Dict[str, Any]) -> ModuleDeploymentResult:
# Export outputs
pulumi.export("aws_s3_bucket_name", s3_bucket.id)

# Collect AWS caller identity
caller_identity = aws.get_caller_identity(opts=pulumi.InvokeOptions(provider=provider.provider))
# Parse compliance config
compliance_config = ComplianceConfig.model_validate(config.get("compliance", {}))

# Log success
log.info(f"Successfully authenticated as: {caller_identity.arn}")
# Create IAM role for Crossplane AWS Provider if EKS is enabled
crossplane_role = None
if aws_config.eks and aws_config.eks.enabled:
# Get the OIDC issuer URL and clean it properly
oidc_url = eks_resources['cluster'].identities[0].oidcs[0].issuer.apply(
lambda x: x.removeprefix('https://')
)

# Collect AWS-specific metadata
aws_metadata = {
"aws_account_id": caller_identity.account_id,
"aws_user_id": caller_identity.user_id,
"aws_arn": caller_identity.arn,
}
# Get the OIDC provider ARN
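# Renders as e.g. arn:aws:iam::123456789012:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE
# (illustrative account ID and issuer host, not values from this stack)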
oidc_provider_arn = pulumi.Output.all(account_id=caller_identity.account_id, url=oidc_url).apply(
lambda args: f"arn:aws:iam::{args['account_id']}:oidc-provider/{args['url']}"
)

# Collect resource identifiers
provider_urn = str(provider.provider.urn)
bucket_name = str(s3_bucket.id)
# Create the trust policy with proper formatting
crossplane_trust_policy = pulumi.Output.all(provider_arn=oidc_provider_arn, url=oidc_url).apply(
lambda args: {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": args['provider_arn']
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
f"{args['url']}:sub": "system:serviceaccount:crossplane-system:provider-aws",
f"{args['url']}:aud": "sts.amazonaws.com"
}
}
}
]
}
)

# Update metadata to include EKS info if deployed
if aws_config.eks and aws_config.eks.enabled:
aws_metadata["eks_cluster_name"] = aws_config.eks.name
# Create the IAM role with the fixed trust policy
crossplane_role = aws.iam.Role(
"crossplane-provider-aws",
assume_role_policy=pulumi.Output.json_dumps(crossplane_trust_policy),
tags=resource_tags,
opts=ResourceOptions(
provider=provider.provider,
depends_on=[eks_resources['cluster']]
)
)

# Parse compliance config
compliance_config = ComplianceConfig.model_validate(config.get("compliance", {}))
# Attach required policies
aws.iam.RolePolicyAttachment(
"crossplane-provider-aws-admin",
role=crossplane_role.name,
policy_arn="arn:aws:iam::aws:policy/AdministratorAccess", # Note: Consider limiting this in production
opts=ResourceOptions(
provider=provider.provider,
depends_on=[crossplane_role]
)
)

# Add role ARN to metadata
aws_metadata["crossplane_provider_role_arn"] = crossplane_role.arn

# Return deployment result without version
# Return deployment result
return ModuleDeploymentResult(
success=True,
version="0.0.1",
resources=[provider_urn, bucket_name],
resources=[str(provider.provider.urn), str(s3_bucket.id)],
metadata={
"compliance": compliance_config.model_dump(),
"aws_account_id": caller_identity.account_id,
"aws_user_id": caller_identity.user_id,
"aws_arn": caller_identity.arn,
"k8s_provider": k8s_provider if k8s_provider else None,
**aws_metadata,
},
)

except Exception as e:
return ModuleDeploymentResult(
success=False, version="", errors=[str(e)] # Empty string since AWS module doesn't use versions
success=False,
version="",
errors=[str(e)]
)

def get_dependencies(self) -> List[str]:
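The trust policy above scopes the role to the system:serviceaccount:crossplane-system:provider-aws subject, so the Crossplane AWS provider pod assumes it via IRSA. A minimal sketch of the consuming side, assuming the ServiceAccount is managed from the same program and that crossplane_role and k8s_provider are in scope from the code above (none of this is in the commit itself):

import pulumi
import pulumi_kubernetes as k8s

# Annotating the ServiceAccount makes EKS inject web-identity credentials
# for the role created above (IRSA); name/namespace mirror the trust policy.
provider_aws_sa = k8s.core.v1.ServiceAccount(
    "crossplane-provider-aws-sa",
    metadata=k8s.meta.v1.ObjectMetaArgs(
        name="provider-aws",
        namespace="crossplane-system",
        annotations={"eks.amazonaws.com/role-arn": crossplane_role.arn},
    ),
    opts=pulumi.ResourceOptions(provider=k8s_provider),
)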
4 changes: 4 additions & 0 deletions modules/aws/eks.py
@@ -469,6 +469,10 @@ def deploy_cluster(
opts=ResourceOptions(parent=cluster),
)

# Export the kubeconfig for use in other resources.
secret_kubeconfig = pulumi.Output.secret(internal_kubeconfig)
pulumi.export("kubeconfig", secret_kubeconfig)

# Step 7: Deploy test nginx pod to verify the cluster.
self.deploy_test_nginx(k8s_provider, name)

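Since the kubeconfig is exported through pulumi.Output.secret, it stays encrypted in state; pulumi stack output kubeconfig --show-secrets prints the decrypted value. A short sketch of cross-stack consumption, with a hypothetical stack path:

import pulumi

# Read the secret kubeconfig exported above from another stack.
ref = pulumi.StackReference("org/konductor/dev")  # hypothetical org/project/stack
kubeconfig = ref.get_output("kubeconfig")         # remains secret-wrapped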
4 changes: 2 additions & 2 deletions modules/aws/types.py
@@ -167,7 +167,7 @@ class EksClusterConfig(BaseModel):

enabled: bool = Field(default=True)
name: str = Field(..., description="Name of the EKS cluster")
version: str = Field(default="1.27", description="Kubernetes version")
version: str = Field(default="1.29", description="Kubernetes version")
subnet_ids: Optional[List[str]] = Field(default=None)
endpoint_private_access: bool = Field(default=True)
endpoint_public_access: bool = Field(default=True)
Expand All @@ -176,7 +176,7 @@ class EksClusterConfig(BaseModel):

@validator("version")
def validate_version(cls, v):
valid_versions = ["1.27", "1.26", "1.25"] # Add supported versions
valid_versions = ["1.29", "1.28", "1.27", "1.26", "1.25"] # Add supported versions
if v not in valid_versions:
raise ValueError(f"Invalid EKS version: {v}. Must be one of {valid_versions}")
return v
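A quick illustration of what the widened validator accepts and rejects, assuming name is the only required field and that pydantic's ValidationError (a ValueError subclass) surfaces the message:

from modules.aws.types import EksClusterConfig

EksClusterConfig(name="demo", version="1.29")  # accepted: the new default
try:
    EksClusterConfig(name="demo", version="1.24")
except ValueError as err:
    print(err)  # Invalid EKS version: 1.24. Must be one of ['1.29', ...]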
10 changes: 9 additions & 1 deletion modules/core/config.py
@@ -533,9 +533,17 @@ def get_enabled_modules(self) -> List[str]:
# Import module's config
module_config = self.get_module_config(module_name)

# Check if module is enabled
# Check if module is enabled either directly or through submodules
is_enabled = module_config.get("enabled", False)

# For kubernetes module, also check submodules
if module_name == "kubernetes":
# Check if any kubernetes submodule is enabled
for submodule, subconfig in module_config.items():
if isinstance(subconfig, dict) and subconfig.get("enabled", False):
is_enabled = True
break

if is_enabled:
log.info(f"{module_name} module is enabled in configuration")
enabled_modules.append(module_name)
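In effect, a parent module with no enabled flag of its own is switched on by any enabled child. A self-contained sketch of the config shape this check expects (submodule names are illustrative):

module_config = {
    "flux": {"enabled": True},        # one enabled submodule is enough
    "crossplane": {"enabled": False},
}
is_enabled = module_config.get("enabled", False) or any(
    isinstance(sub, dict) and sub.get("enabled", False)
    for sub in module_config.values()
)
assert is_enabled  # kubernetes is treated as enabled because flux is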
(Diffs for the remaining 18 changed files were not loaded.)
