From 7981c07e0f83669c656b72d946e3253340b44253 Mon Sep 17 00:00:00 2001
From: Peter Balogh
Date: Thu, 1 Aug 2024 17:00:40 +0200
Subject: [PATCH 1/5] feat(cfg): add EKS 1.5.0
---
cfg/eks-1.5.0/config.yaml | 9 +
cfg/eks-1.5.0/controlplane.yaml | 32 ++
cfg/eks-1.5.0/managedservices.yaml | 227 +++++++++++++++
cfg/eks-1.5.0/master.yaml | 6 +
cfg/eks-1.5.0/node.yaml | 453 +++++++++++++++++++++++++++++
cfg/eks-1.5.0/policies.yaml | 250 ++++++++++++++++
6 files changed, 977 insertions(+)
create mode 100644 cfg/eks-1.5.0/config.yaml
create mode 100644 cfg/eks-1.5.0/controlplane.yaml
create mode 100644 cfg/eks-1.5.0/managedservices.yaml
create mode 100644 cfg/eks-1.5.0/master.yaml
create mode 100644 cfg/eks-1.5.0/node.yaml
create mode 100644 cfg/eks-1.5.0/policies.yaml
diff --git a/cfg/eks-1.5.0/config.yaml b/cfg/eks-1.5.0/config.yaml
new file mode 100644
index 000000000..17301a751
--- /dev/null
+++ b/cfg/eks-1.5.0/config.yaml
@@ -0,0 +1,9 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
+## These settings are required if you are using the --asff option to report findings to AWS Security Hub
+## AWS account number is required.
+AWS_ACCOUNT: ""
+## AWS region is required.
+AWS_REGION: ""
+## EKS Cluster ARN is required.
+CLUSTER_ARN: ""
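
For reference, a populated eks-1.5.0/config.yaml used with --asff might look like the sketch below; the account number, region, and cluster ARN are placeholder values, not ones taken from this change:

    ---
    AWS_ACCOUNT: "123456789012"
    AWS_REGION: "us-east-1"
    CLUSTER_ARN: "arn:aws:eks:us-east-1:123456789012:cluster/my-cluster"
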
diff --git a/cfg/eks-1.5.0/controlplane.yaml b/cfg/eks-1.5.0/controlplane.yaml
new file mode 100644
index 000000000..6323f03c6
--- /dev/null
+++ b/cfg/eks-1.5.0/controlplane.yaml
@@ -0,0 +1,32 @@
+---
+controls:
+version: "eks-1.5.0"
+id: 2
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+ - id: 2.1
+ text: "Logging"
+ checks:
+ - id: 2.1.1
+ text: "Enable audit Logs (Automated)"
+ remediation: |
+ From Console:
+ 1. For each EKS Cluster in each region;
+ 2. Go to 'Amazon EKS' > 'Clusters' > '<ClusterName>' > 'Configuration' > 'Logging'.
+ 3. Click 'Manage logging'.
+ 4. Ensure that all options are toggled to 'Enabled'.
+ API server: Enabled
+ Audit: Enabled
+ Authenticator: Enabled
+ Controller manager: Enabled
+ Scheduler: Enabled
+ 5. Click 'Save Changes'.
+
+ From CLI:
+ # For each EKS Cluster in each region;
+ aws eks update-cluster-config \
+ --region '${REGION_CODE}' \
+ --name '${CLUSTER_NAME}' \
+ --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'
+ scored: false
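
For clusters managed with eksctl, the logging remediation in check 2.1.1 can also be expressed declaratively; a minimal sketch, assuming an eksctl-managed cluster with placeholder name and region:

    apiVersion: eksctl.io/v1alpha5
    kind: ClusterConfig
    metadata:
      name: my-cluster      # placeholder
      region: us-east-1     # placeholder
    cloudWatch:
      clusterLogging:
        # enable all five control plane log types checked by 2.1.1
        enableTypes: ["api", "audit", "authenticator", "controllerManager", "scheduler"]
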
diff --git a/cfg/eks-1.5.0/managedservices.yaml b/cfg/eks-1.5.0/managedservices.yaml
new file mode 100644
index 000000000..4ad870acd
--- /dev/null
+++ b/cfg/eks-1.5.0/managedservices.yaml
@@ -0,0 +1,227 @@
+---
+controls:
+version: "eks-1.5.0"
+id: 5
+text: "Managed Services"
+type: "managedservices"
+groups:
+ - id: 5.1
+ text: "Image Registry and Image Scanning"
+ checks:
+ - id: 5.1.1
+ text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third party provider (Automated)"
+ type: "manual"
+ remediation: |
+ To utilize AWS ECR for image scanning, please follow the steps below:
+
+ To create a repository configured for scan on push (AWS CLI):
+ aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
+
+ To edit the settings of an existing repository (AWS CLI):
+ aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE
+
+ Use the following steps to start a manual image scan using the AWS Management Console.
+
+ 1. Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories.
+ 2. From the navigation bar, choose the Region to create your repository in.
+ 3. In the navigation pane, choose Repositories.
+ 4. On the Repositories page, choose the repository that contains the image to scan.
+ 5. On the Images page, select the image to scan and then choose Scan.
+ scored: false
+
+ - id: 5.1.2
+ text: "Minimize user access to Amazon ECR (Manual)"
+ type: "manual"
+ remediation: |
+ Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
+ are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
+ AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
+ scored: false
+
+ - id: 5.1.3
+ text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
+ type: "manual"
+ remediation: |
+ You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.
+
+ The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
+ the following IAM policy permissions for Amazon ECR.
+
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ecr:BatchCheckLayerAvailability",
+ "ecr:BatchGetImage",
+ "ecr:GetDownloadUrlForLayer",
+ "ecr:GetAuthorizationToken"
+ ],
+ "Resource": "*"
+ }
+ ]
+ }
+ scored: false
+
+ - id: 5.1.4
+ text: "Minimize Container Registries to only those approved (Manual)"
+ type: "manual"
+ remediation: |
+ To minimize AWS ECR container registries to only those approved, you can follow these steps:
+
+ 1. Define your approval criteria: Determine the criteria that containers must meet to
+ be considered approved. This can include factors such as security, compliance,
+ compatibility, and other requirements.
+ 2. Identify all existing ECR registries: Identify all ECR registries that are currently
+ being used in your organization.
+ 3. Evaluate ECR registries against approval criteria: Evaluate each ECR registry
+ against your approval criteria to determine whether it should be approved or not.
+ This can be done by reviewing the registry settings and configuration, as well as
+ conducting security assessments and vulnerability scans.
+ 4. Establish policies and procedures: Establish policies and procedures that outline
+ how ECR registries will be approved, maintained, and monitored. This should
+ include guidelines for developers to follow when selecting a registry for their
+ container images.
+ 5. Implement access controls: Implement access controls to ensure that only
+ approved ECR registries are used to store and distribute container images. This
+ can be done by setting up IAM policies and roles that restrict access to
+ unapproved registries or create a whitelist of approved registries.
+ 6. Monitor and review: Continuously monitor and review the use of ECR registries
+ to ensure that they continue to meet your approval criteria. This can include periodic reviews, vulnerability scans, and security assessments of the registries in use.
+ scored: false
+
+ - id: 5.2
+ text: "Identity and Access Management (IAM)"
+ checks:
+ - id: 5.2.1
+ text: "Prefer using dedicated Amazon EKS Service Accounts (Automated)"
+ type: "manual"
+ remediation: |
+ With IAM roles for service accounts on Amazon EKS clusters, you can associate an
+ IAM role with a Kubernetes service account. This service account can then provide
+ AWS permissions to the containers in any pod that uses that service account. With this
+ feature, you no longer need to provide extended permissions to the worker node IAM
+ role so that pods on that node can call AWS APIs.
+ Applications must sign their AWS API requests with AWS credentials. This feature
+ provides a strategy for managing credentials for your applications, similar to the way
+ that Amazon EC2 instance profiles provide credentials to Amazon EC2 instances.
+ Instead of creating and distributing your AWS credentials to the containers or using the
+ Amazon EC2 instance’s role, you can associate an IAM role with a Kubernetes service
+ account. The applications in the pod’s containers can then use an AWS SDK or the
+ AWS CLI to make API requests to authorized AWS services.
+
+ The IAM roles for service accounts feature provides the following benefits:
+
+ - Least privilege - By using the IAM roles for service accounts feature, you no
+ longer need to provide extended permissions to the worker node IAM role so that
+ pods on that node can call AWS APIs. You can scope IAM permissions to a
+ service account, and only pods that use that service account have access to
+ those permissions. This feature also eliminates the need for third-party solutions
+ such as kiam or kube2iam.
+ - Credential isolation - A container can only retrieve credentials for the IAM role
+ that is associated with the service account to which it belongs. A container never
+ has access to credentials that are intended for another container that belongs to
+ another pod.
+ - Auditability - Access and event logging is available through CloudTrail to help
+ ensure retrospective auditing.
+ scored: false
+
+ - id: 5.3
+ text: "AWS EKS Key Management Service"
+ checks:
+ - id: 5.3.1
+ text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
+ type: "manual"
+ remediation: |
+ This process can only be performed during Cluster Creation.
+
+ Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
+ in the links within the 'References' section.
+ scored: false
+
+ - id: 5.4
+ text: "Cluster Networking"
+ checks:
+ - id: 5.4.1
+ text: "Restrict Access to the Control Plane Endpoint (Automated)"
+ type: "manual"
+ remediation: |
+ By enabling private endpoint access to the Kubernetes API server, all communication
+ between your nodes and the API server stays within your VPC. You can also limit the IP
+ addresses that can access your API server from the internet, or completely disable
+ internet access to the API server.
+ With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
+ that Private Endpoint Access is enabled.
+ If you choose to also enable Public Endpoint Access then you should also configure a
+ list of allowable CIDR blocks, resulting in restricted access from the internet. If you
+ specify no CIDR blocks, then the public API server endpoint is able to receive and
+ process requests from all IP addresses by defaulting to ['0.0.0.0/0'].
+ For example, the following command would enable private access to the Kubernetes
+ API as well as limited public access over the internet from a single IP address (noting
+ the /32 CIDR suffix):
+ aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32"
+
+ Note: The CIDR blocks specified cannot include reserved addresses.
+ There is a maximum number of CIDR blocks that you can specify. For more information,
+ see the EKS Service Quotas link in the references section.
+ For more detailed information, see the EKS Cluster Endpoint documentation link in the
+ references section.
+ scored: false
+
+ - id: 5.4.2
+ text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Automated)"
+ type: "manual"
+ remediation: |
+ By enabling private endpoint access to the Kubernetes API server, all communication
+ between your nodes and the API server stays within your VPC.
+ With this in mind, you can update your cluster accordingly using the AWS CLI to ensure
+ that Private Endpoint Access is enabled.
+ For example, the following command would enable private access to the Kubernetes
+ API and ensure that no public access is permitted:
+ aws eks update-cluster-config --region $AWS_REGION --name $CLUSTER_NAME --resources-vpc-config endpointPrivateAccess=true,endpointPublicAccess=false
+
+ Note: For more detailed information, see the EKS Cluster Endpoint documentation link
+ in the references section.
+ scored: false
+
+ - id: 5.4.3
+ text: "Ensure clusters are created with Private Nodes (Automated)"
+ type: "manual"
+ remediation: |
+ aws eks update-cluster-config \
+ --region region-code \
+ --name my-cluster \
+ --resources-vpc-config endpointPublicAccess=true,publicAccessCidrs="203.0.113.5/32",endpointPrivateAccess=true
+ scored: false
+
+ - id: 5.4.4
+ text: "Ensure Network Policy is Enabled and set as appropriate (Automated)"
+ type: "manual"
+ remediation: |
+ Utilize Calico or other network policy engine to segment and isolate your traffic.
+ scored: false
+
+ - id: 5.4.5
+ text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
+ type: "manual"
+ remediation: |
+ Your load balancer vendor can provide details on configuring HTTPS with TLS.
+ scored: false
+
+ - id: 5.5
+ text: "Authentication and Authorization"
+ checks:
+ - id: 5.5.1
+ text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes or Upgrade to AWS CLI v1.16.156 or greater (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation.
+
+ Note: If using AWS CLI version 1.16.156 or later there is no need to install the AWS
+ IAM Authenticator anymore.
+ The relevant AWS CLI commands, depending on the use case, are:
+ aws eks update-kubeconfig
+ aws eks get-token
+ scored: false
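
For check 5.5.1, IAM principals are commonly mapped to Kubernetes RBAC groups through the aws-auth ConfigMap in kube-system; a minimal sketch, with the role ARN as a placeholder value:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: aws-auth
      namespace: kube-system
    data:
      mapRoles: |
        # placeholder node role mapping; replace the ARN with your worker node role
        - rolearn: arn:aws:iam::123456789012:role/eks-node-role
          username: system:node:{{EC2PrivateDNSName}}
          groups:
            - system:bootstrappers
            - system:nodes
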
diff --git a/cfg/eks-1.5.0/master.yaml b/cfg/eks-1.5.0/master.yaml
new file mode 100644
index 000000000..8aba89c7e
--- /dev/null
+++ b/cfg/eks-1.5.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "eks-1.5.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/eks-1.5.0/node.yaml b/cfg/eks-1.5.0/node.yaml
new file mode 100644
index 000000000..8d470d499
--- /dev/null
+++ b/cfg/eks-1.5.0/node.yaml
@@ -0,0 +1,453 @@
+---
+controls:
+version: "eks-1.5.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+ - id: 3.1
+ text: "Worker Node Configuration Files"
+ checks:
+ - id: 3.1.1
+ text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "644"
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chmod 644 $kubeletkubeconfig
+ scored: false
+
+ - id: 3.1.2
+ text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chown root:root $kubeletkubeconfig
+ scored: false
+
+ - id: 3.1.3
+ text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "644"
+ remediation: |
+ Run the following command (using the config file location identified in the Audit step)
+ chmod 644 $kubeletconf
+ scored: false
+
+ - id: 3.1.4
+ text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the following command (using the config file location identified in the Audit step)
+ chown root:root $kubeletconf
+ scored: false
+
+ - id: 3.2
+ text: "Kubelet"
+ checks:
+ - id: 3.2.1
+ text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ path: '{.authentication.anonymous.enabled}'
+ set: true
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ Remediation Method 1:
+ If configuring via the Kubelet config file, you first need to locate the file.
+ To do this, SSH to each node and execute the following command to find the kubelet
+ process:
+ ps -ef | grep kubelet
+ The output of the above command provides details of the active kubelet process, from
+ which we can see the location of the configuration file provided to the kubelet service
+ with the --config argument. The file can be viewed with a command such as more or
+ less, like so:
+ sudo less /path/to/kubelet-config.json
+ Disable Anonymous Authentication by setting the following parameter:
+ "authentication": { "anonymous": { "enabled": false } }
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file on each worker node and
+ ensure the below parameters are part of the KUBELET_ARGS variable string.
+ For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
+ Bottlerocket AMIs, this file can be found at
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
+ you may need to look up documentation for your chosen operating system to determine
+ which service manager is configured:
+ --anonymous-auth=false
+
+ For Both Remediation Steps:
+ Based on your system, restart the kubelet service and check the service status.
+ The following example is for operating systems using systemd, such as the Amazon
+ EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
+ command. If systemctl is not available then you will need to look up documentation for
+ your chosen operating system to determine which service manager is configured:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: true
+
+ - id: 3.2.2
+ text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --authorization-mode
+ path: '{.authorization.mode}'
+ set: true
+ compare:
+ op: nothave
+ value: AlwaysAllow
+ remediation: |
+ Remediation Method 1:
+ If configuring via the Kubelet config file, you first need to locate the file.
+ To do this, SSH to each node and execute the following command to find the kubelet
+ process:
+ ps -ef | grep kubelet
+ The output of the above command provides details of the active kubelet process, from
+ which we can see the location of the configuration file provided to the kubelet service
+ with the --config argument. The file can be viewed with a command such as more or
+ less, like so:
+ sudo less /path/to/kubelet-config.json
+ Enable Webhook Authentication by setting the following parameter:
+ "authentication": { "webhook": { "enabled": true } }
+ Next, set the Authorization Mode to Webhook by setting the following parameter:
+ "authorization": { "mode": "Webhook" }
+ Finer detail of the authentication and authorization fields can be found in the
+ Kubelet Configuration documentation.
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file on each worker node and
+ ensure the below parameters are part of the KUBELET_ARGS variable string.
+ For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
+ Bottlerocket AMIs, this file can be found at
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
+ you may need to look up documentation for your chosen operating system to determine
+ which service manager is configured:
+ --authentication-token-webhook
+ --authorization-mode=Webhook
+
+ For Both Remediation Steps:
+ Based on your system, restart the kubelet service and check the service status.
+ The following example is for operating systems using systemd, such as the Amazon
+ EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
+ command. If systemctl is not available then you will need to look up documentation for
+ your chosen operating system to determine which service manager is configured:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: true
+
+ - id: 3.2.3
+ text: "Ensure that a Client CA File is Configured (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --client-ca-file
+ path: '{.authentication.x509.clientCAFile}'
+ set: true
+ remediation: |
+ Remediation Method 1:
+ If configuring via the Kubelet config file, you first need to locate the file.
+ To do this, SSH to each node and execute the following command to find the kubelet
+ process:
+ ps -ef | grep kubelet
+ The output of the above command provides details of the active kubelet process, from
+ which we can see the location of the configuration file provided to the kubelet service
+ with the --config argument. The file can be viewed with a command such as more or
+ less, like so:
+ sudo less /path/to/kubelet-config.json
+ Configure the client certificate authority file by setting the following parameter
+ appropriately:
+ "authentication": { "x509": { "clientCAFile": "<path/to/client-ca-file>" } }
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file on each worker node and
+ ensure the below parameters are part of the KUBELET_ARGS variable string.
+ For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
+ Bottlerocket AMIs, this file can be found at
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
+ you may need to look up documentation for your chosen operating system to determine
+ which service manager is configured:
+ --client-ca-file=<path/to/client-ca-file>
+
+ For Both Remediation Steps:
+ Based on your system, restart the kubelet service and check the service status.
+ The following example is for operating systems using systemd, such as the Amazon
+ EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
+ command. If systemctl is not available then you will need to look up documentation for
+ your chosen operating system to determine which service manager is configured:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: false
+
+ - id: 3.2.4
+ text: "Ensure that the --read-only-port is disabled (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: "--read-only-port"
+ path: '{.readOnlyPort}'
+ set: true
+ compare:
+ op: eq
+ value: 0
+ remediation: |
+ If modifying the Kubelet config file, edit the kubelet-config.json file
+ /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0
+ "readOnlyPort": 0
+ If using executable arguments, edit the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
+ worker node and add the below parameter at the end of the KUBELET_ARGS variable
+ string.
+ --read-only-port=0
+
+ Based on your system, restart the kubelet service and check status
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: false
+
+ - id: 3.2.5
+ text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --streaming-connection-idle-timeout
+ path: '{.streamingConnectionIdleTimeout}'
+ set: true
+ compare:
+ op: noteq
+ value: 0
+ - flag: --streaming-connection-idle-timeout
+ path: '{.streamingConnectionIdleTimeout}'
+ set: false
+ bin_op: or
+ remediation: |
+ Remediation Method 1:
+ If modifying the Kubelet config file, edit the kubelet-config.json file
+ /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to a
+ non-zero value in the format of #h#m#s
+ "streamingConnectionIdleTimeout": "4h0m0s"
+ You should ensure that the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not
+ specify a --streaming-connection-idle-timeout argument because it would
+ override the Kubelet config file.
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
+ worker node and add the below parameter at the end of the KUBELET_ARGS variable
+ string.
+ --streaming-connection-idle-timeout=4h0m0s
+
+ Remediation Method 3:
+ If using the api configz endpoint consider searching for the status of
+ "streamingConnectionIdleTimeout": by extracting the live configuration from the
+ nodes running kubelet.
+ See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
+ Live Cluster, and then rerun the curl statement from the audit process to check for kubelet
+ configuration changes
+ kubectl proxy --port=8001 &
+ export HOSTNAME_PORT=localhost:8001 (example host and port number)
+ export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
+ curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
+
+ For all three remediations:
+ Based on your system, restart the kubelet service and check status
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: true
+
+ - id: 3.2.6
+ text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --make-iptables-util-chains
+ path: '{.makeIPTablesUtilChains}'
+ set: true
+ compare:
+ op: eq
+ value: true
+ - flag: --make-iptables-util-chains
+ path: '{.makeIPTablesUtilChains}'
+ set: false
+ bin_op: or
+ remediation: |
+ Remediation Method 1:
+ If modifying the Kubelet config file, edit the kubelet-config.json file
+ /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
+ true
+ "makeIPTablesUtilChains": true
+ Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf
+ does not set the --make-iptables-util-chains argument because that would
+ override your Kubelet config file.
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
+ worker node and add the below parameter at the end of the KUBELET_ARGS variable
+ string.
+ --make-iptables-util-chains=true
+
+ Remediation Method 3:
+ If using the api configz endpoint consider searching for the status of
+ "makeIPTablesUtilChains": true by extracting the live configuration from the nodes
+ running kubelet.
+ See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
+ Live Cluster, and then rerun the curl statement from the audit process to check for kubelet
+ configuration changes
+ kubectl proxy --port=8001 &
+ export HOSTNAME_PORT=localhost:8001 (example host and port number)
+ export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
+ curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
+
+ For all three remediations:
+ Based on your system, restart the kubelet service and check status
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: true
+
+ - id: 3.2.7
+ text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --event-qps
+ path: '{.eventRecordQPS}'
+ set: true
+ compare:
+ op: gte
+ value: 0
+ remediation: |
+ If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate
+ level.
+ If using command line arguments, edit the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node
+ and set the --event-qps parameter in the KUBELET_SYSTEM_PODS_ARGS variable to an appropriate level.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+
+ - id: 3.2.8
+ text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --rotate-certificates
+ path: '{.rotateCertificates}'
+ set: true
+ compare:
+ op: eq
+ value: true
+ - flag: --rotate-certificates
+ path: '{.rotateCertificates}'
+ set: false
+ bin_op: or
+ remediation: |
+ Remediation Method 1:
+ If modifying the Kubelet config file, edit the kubelet-config.json file
+ /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
+ true
+ "rotateCertificates": true
+ Additionally, ensure that the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates
+ executable argument to false because this would override the Kubelet
+ config file.
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
+ worker node and add the below parameter at the end of the KUBELET_ARGS variable
+ string.
+ --rotate-certificates=true
+ scored: false
+
+ - id: 3.2.9
+ text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: RotateKubeletServerCertificate
+ path: '{.featureGates.RotateKubeletServerCertificate}'
+ set: true
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ Remediation Method 1:
+ If modifying the Kubelet config file, edit the kubelet-config.json file
+ /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to
+ true
+
+ "featureGates": {
+ "RotateKubeletServerCertificate":true
+ },
+
+ Additionally, ensure that the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set
+ the --rotate-kubelet-server-certificate executable argument to false because
+ this would override the Kubelet config file.
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each
+ worker node and add the below parameter at the end of the KUBELET_ARGS variable
+ string.
+ --rotate-kubelet-server-certificate=true
+
+ Remediation Method 3:
+ If using the api configz endpoint consider searching for the status of
+ "RotateKubeletServerCertificate": by extracting the live configuration from the
+ nodes running kubelet.
+ See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a
+ Live Cluster, and then rerun the curl statement from the audit process to check for kubelet
+ configuration changes
+ kubectl proxy --port=8001 &
+ export HOSTNAME_PORT=localhost:8001 (example host and port number)
+ export NODE_NAME=ip-192.168.31.226.ec2.internal (example node name from "kubectl get nodes")
+ curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz"
+
+ For all three remediation methods:
+ Restart the kubelet service and check status. The example below is for when using
+ systemctl to manage services:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: false
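
Taken together, the kubelet settings audited in group 3.2 correspond roughly to the configuration below. EKS AMIs typically ship the kubelet config as JSON at /etc/kubernetes/kubelet/kubelet-config.json; the equivalent fields are shown here in YAML form, with the client CA path and event QPS level as assumptions:

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    authentication:
      anonymous:
        enabled: false                              # 3.2.1
      webhook:
        enabled: true                               # 3.2.2
      x509:
        clientCAFile: "/etc/kubernetes/pki/ca.crt"  # 3.2.3, path is an assumption
    authorization:
      mode: Webhook                                 # 3.2.2
    readOnlyPort: 0                                 # 3.2.4
    streamingConnectionIdleTimeout: 4h0m0s          # 3.2.5
    makeIPTablesUtilChains: true                    # 3.2.6
    eventRecordQPS: 5                               # 3.2.7, any level that captures required events
    rotateCertificates: true                        # 3.2.8
    featureGates:
      RotateKubeletServerCertificate: true          # 3.2.9
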
diff --git a/cfg/eks-1.5.0/policies.yaml b/cfg/eks-1.5.0/policies.yaml
new file mode 100644
index 000000000..6dd7fbe8e
--- /dev/null
+++ b/cfg/eks-1.5.0/policies.yaml
@@ -0,0 +1,250 @@
+---
+controls:
+version: "eks-1.5.0"
+id: 4
+text: "Policies"
+type: "policies"
+groups:
+ - id: 4.1
+ text: "RBAC and Service Accounts"
+ checks:
+ - id: 4.1.1
+ text: "Ensure that the cluster-admin role is only used where required (Automated)"
+ type: "manual"
+ remediation: |
+ Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if
+ they need this role or if they could use a role with fewer privileges.
+ Where possible, first bind users to a lower privileged role and then remove the
+ clusterrolebinding to the cluster-admin role :
+ kubectl delete clusterrolebinding [name]
+ scored: false
+
+ - id: 4.1.2
+ text: "Minimize access to secrets (Automated)"
+ type: "manual"
+ remediation: |
+ Where possible, remove get, list and watch access to secret objects in the cluster.
+ scored: false
+
+ - id: 4.1.3
+ text: "Minimize wildcard use in Roles and ClusterRoles (Automated)"
+ type: "manual"
+ remediation: |
+ Where possible replace any use of wildcards in clusterroles and roles with specific
+ objects or actions.
+ scored: false
+
+ - id: 4.1.4
+ text: "Minimize access to create pods (Automated)"
+ type: "manual"
+ remediation: |
+ Where possible, remove create access to pod objects in the cluster.
+ scored: false
+
+ - id: 4.1.5
+ text: "Ensure that default service accounts are not actively used (Automated)"
+ type: "manual"
+ remediation: |
+ Create explicit service accounts wherever a Kubernetes workload requires specific
+ access to the Kubernetes API server.
+ Modify the configuration of each default service account to include this value
+ automountServiceAccountToken: false
+
+ Automatic remediation for the default account:
+ kubectl patch serviceaccount default -p $'automountServiceAccountToken: false'
+ scored: false
+
+ - id: 4.1.6
+ text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
+ type: "manual"
+ remediation: |
+ Modify the definition of pods and service accounts which do not need to mount service
+ account tokens to disable it.
+ scored: false
+
+ - id: 4.1.7
+ text: "Avoid use of system:masters group (Automated)"
+ type: "manual"
+ remediation: |
+ Remove the system:masters group from all users in the cluster.
+ scored: false
+
+ - id: 4.1.8
+ text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove the impersonate, bind and escalate rights from subjects.
+ scored: false
+
+ - id: 4.2
+ text: "Pod Security Standards"
+ checks:
+ - id: 4.2.1
+ text: "Minimize the admission of privileged containers (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of privileged containers.
+ To enable PSA for a namespace in your cluster, set the pod-security.kubernetes.io/enforce
+ label with the policy value you want to enforce.
+ kubectl label --overwrite ns NAMESPACE pod-security.kubernetes.io/enforce=restricted
+ The above command enforces the restricted policy for the NAMESPACE namespace.
+ You can also enable Pod Security Admission for all your namespaces. For example:
+ kubectl label --overwrite ns --all pod-security.kubernetes.io/warn=baseline
+ scored: false
+
+ - id: 4.2.2
+ text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of hostPID containers.
+ scored: false
+
+ - id: 4.2.3
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of hostIPC containers.
+ scored: false
+
+ - id: 4.2.4
+ text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.hostNetwork field is omitted or set to false.
+ scored: false
+
+ - id: 4.2.5
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with .spec.allowPrivilegeEscalation set to true.
+ scored: false
+
+ - id: 4.3
+ text: "CNI Plugin"
+ checks:
+ - id: 4.3.1
+ text: "Ensure CNI plugin supports network policies (Manual)"
+ type: "manual"
+ remediation: |
+ As with RBAC policies, network policies should adhere to the policy of least privileged
+ access. Start by creating a deny all policy that restricts all inbound and outbound traffic
+ from a namespace or create a global policy using Calico.
+ scored: false
+
+ - id: 4.3.2
+ text: "Ensure that all Namespaces have Network Policies defined (Automated)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 4.4
+ text: "Secrets Management"
+ checks:
+ - id: 4.4.1
+ text: "Prefer using secrets as files over secrets as environment variables (Automated)"
+ type: "manual"
+ remediation: |
+ If possible, rewrite application code to read secrets from mounted secret files, rather than
+ from environment variables.
+ scored: false
+
+ - id: 4.4.2
+ text: "Consider external secret storage (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the secrets management options offered by your cloud provider or a third-party
+ secrets management solution.
+ scored: false
+
+ - id: 4.5
+ text: "General Policies"
+ checks:
+ - id: 4.5.1
+ text: "Create administrative boundaries between resources using namespaces (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you need
+ them.
+ scored: false
+
+ - id: 4.5.2
+ text: "Apply Security Context to Your Pods and Containers (Manual)"
+ type: "manual"
+ remediation: |
+ As a best practice we recommend that you scope the binding for privileged pods to
+ service accounts within a particular namespace, e.g. kube-system, and limit access
+ to that namespace. For all other serviceaccounts/namespaces, we recommend
+ implementing a more restrictive policy such as this:
+
+ apiVersion: policy/v1beta1
+ kind: PodSecurityPolicy
+ metadata:
+ name: restricted
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ spec:
+ privileged: false
+ # Required to prevent escalations to root.
+ allowPrivilegeEscalation: false
+ # This is redundant with non-root + disallow privilege escalation,
+ # but we can provide it for defense in depth.
+ requiredDropCapabilities:
+ - ALL
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ # Assume that persistentVolumes set up by the cluster admin are safe to use.
+ - 'persistentVolumeClaim'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Require the container to run without root privileges.
+ rule: 'MustRunAsNonRoot'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+
+ This policy prevents pods from running as privileged or escalating privileges. It also
+ restricts the types of volumes that can be mounted and the root supplemental groups
+ that can be added.
+ Another, albeit similar, approach is to start with a policy that locks everything down and
+ incrementally add exceptions for applications that need looser restrictions such as
+ logging agents which need the ability to mount a host path.
+ scored: false
+
+ - id: 4.5.3
+ text: "The default namespace should not be used (Automated)"
+ type: "manual"
+ remediation: |
+ Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+ resources and that all new resources are created in a specific namespace.
+ scored: false
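
As a starting point for checks 4.3.1 and 4.3.2, a namespace-wide default-deny policy can be expressed as below; the namespace name is a placeholder:

    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: default-deny-all
      namespace: my-app        # placeholder namespace
    spec:
      podSelector: {}          # selects every pod in the namespace
      policyTypes:
        - Ingress
        - Egress
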
From 43aba407bdefcc8ca13acc969344568cf751af75 Mon Sep 17 00:00:00 2001
From: Peter Balogh
Date: Fri, 2 Aug 2024 14:25:11 +0200
Subject: [PATCH 2/5] fix(cfg): target map
---
cfg/config.yaml | 1 +
job-eks.yaml | 6 +++---
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/cfg/config.yaml b/cfg/config.yaml
index 9ccd9e0c0..283a4db8b 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -287,6 +287,7 @@ version_mapping:
"eks-1.0.1": "eks-1.0.1"
"eks-1.1.0": "eks-1.1.0"
"eks-1.2.0": "eks-1.2.0"
+ "eks-1.5.0": "eks-1.5.0"
"gke-1.0": "gke-1.0"
"gke-1.2.0": "gke-1.2.0"
"ocp-3.10": "rh-0.7"
diff --git a/job-eks.yaml b/job-eks.yaml
index beaf39194..d3fe277cb 100644
--- a/job-eks.yaml
+++ b/job-eks.yaml
@@ -11,16 +11,16 @@ spec:
- name: kube-bench
# Push the image to your ECR and then refer to it here
# image:
- image: docker.io/aquasec/kube-bench:latest
+ image: docker.io/poke/kube-bench:7981c07
# To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead
command:
[
"kube-bench",
"run",
"--targets",
- "node",
+ "node,policies,managedservices,controlplane",
"--benchmark",
- "eks-1.2.0",
+ "eks-1.5.0",
]
volumeMounts:
- name: var-lib-kubelet
From aca0ae528fc80036dc7f9e279c8aab36d7f8a70e Mon Sep 17 00:00:00 2001
From: Peter Balogh
Date: Fri, 2 Aug 2024 14:27:22 +0200
Subject: [PATCH 3/5] fix: update eks job
---
job-eks.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/job-eks.yaml b/job-eks.yaml
index d3fe277cb..e5fad34da 100644
--- a/job-eks.yaml
+++ b/job-eks.yaml
@@ -11,7 +11,7 @@ spec:
- name: kube-bench
# Push the image to your ECR and then refer to it here
# image:
- image: docker.io/poke/kube-bench:7981c07
+ image: docker.io/poke/kube-bench:43aba40
# To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead
command:
[
From 489865ce3deeb759449cfb0a15fe12f90c95fdac Mon Sep 17 00:00:00 2001
From: Peter Balogh
Date: Fri, 2 Aug 2024 14:31:08 +0200
Subject: [PATCH 4/5] fix: target mapping
---
cfg/config.yaml | 6 ++++++
job-eks.yaml | 2 +-
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/cfg/config.yaml b/cfg/config.yaml
index 283a4db8b..ce950cb9b 100644
--- a/cfg/config.yaml
+++ b/cfg/config.yaml
@@ -400,6 +400,12 @@ target_mapping:
- "controlplane"
- "policies"
- "managedservices"
+ "eks-1.5.0":
+ - "master"
+ - "node"
+ - "controlplane"
+ - "policies"
+ - "managedservices"
"rh-0.7":
- "master"
- "node"
diff --git a/job-eks.yaml b/job-eks.yaml
index e5fad34da..068be2914 100644
--- a/job-eks.yaml
+++ b/job-eks.yaml
@@ -11,7 +11,7 @@ spec:
- name: kube-bench
# Push the image to your ECR and then refer to it here
# image:
- image: docker.io/poke/kube-bench:43aba40
+ image: docker.io/aquasec/kube-bench:latest
# To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead
command:
[
From cf0c87bbbe4cfcbfc3c43b9a392e1799d9c09fbe Mon Sep 17 00:00:00 2001
From: Peter Balogh
Date: Mon, 5 Aug 2024 15:16:57 +0200
Subject: [PATCH 5/5] feat: use CIS EKS 1.5.0 by default
---
cfg/eks-1.5.0/policies.yaml | 4 ++--
cmd/common_test.go | 6 ++++++
cmd/util.go | 2 +-
cmd/util_test.go | 2 +-
4 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/cfg/eks-1.5.0/policies.yaml b/cfg/eks-1.5.0/policies.yaml
index 6dd7fbe8e..69c273b72 100644
--- a/cfg/eks-1.5.0/policies.yaml
+++ b/cfg/eks-1.5.0/policies.yaml
@@ -114,8 +114,8 @@ groups:
text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
type: "manual"
remediation: |
- Create a PSP as described in the Kubernetes documentation, ensuring that the
- .spec.hostNetwork field is omitted or set to false.
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of hostNetwork containers.
scored: false
- id: 4.2.5
diff --git a/cmd/common_test.go b/cmd/common_test.go
index 53793a000..009462bcd 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -460,6 +460,12 @@ func TestValidTargets(t *testing.T) {
targets: []string{"node", "policies", "controlplane", "managedservices"},
expected: true,
},
+ {
+ name: "eks-1.5.0 valid",
+ benchmark: "eks-1.5.0",
+ targets: []string{"node", "policies", "controlplane", "managedservices"},
+ expected: true,
+ },
}
for _, c := range cases {
diff --git a/cmd/util.go b/cmd/util.go
index 95c0b2639..8f09da087 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -489,7 +489,7 @@ func getPlatformBenchmarkVersion(platform Platform) string {
glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform)
switch platform.Name {
case "eks":
- return "eks-1.2.0"
+ return "eks-1.5.0"
case "gke":
switch platform.Version {
case "1.15", "1.16", "1.17", "1.18", "1.19":
diff --git a/cmd/util_test.go b/cmd/util_test.go
index 2c24a7a95..aea9d6462 100644
--- a/cmd/util_test.go
+++ b/cmd/util_test.go
@@ -650,7 +650,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) {
args: args{
platform: Platform{Name: "eks"},
},
- want: "eks-1.2.0",
+ want: "eks-1.5.0",
},
{
name: "gke 1.19",