From c7de51d073f164e0fed978fe5373e605c67fd10f Mon Sep 17 00:00:00 2001 From: Scott Ford Date: Wed, 4 Oct 2023 10:02:59 -0700 Subject: [PATCH] Updates K8s security checks Signed-off-by: Scott Ford --- core/mondoo-kubernetes-security.mql.yaml | 1077 ++++------------------ 1 file changed, 187 insertions(+), 890 deletions(-) diff --git a/core/mondoo-kubernetes-security.mql.yaml b/core/mondoo-kubernetes-security.mql.yaml index 7ff9c4f9..d427ba0b 100644 --- a/core/mondoo-kubernetes-security.mql.yaml +++ b/core/mondoo-kubernetes-security.mql.yaml @@ -809,10 +809,7 @@ queries: - uid: mondoo-kubernetes-security-statefulset-docker-socket title: Container should not mount the Docker socket impact: 100 - mql: | - k8s.statefulset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') - } + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -857,10 +854,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-docker-socket title: Container should not mount the Docker socket impact: 100 - mql: | - k8s.deployment { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') - } + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') docs: desc: | Do not mount the container runtime socket into any container. 
@@ -905,10 +899,7 @@ queries: - uid: mondoo-kubernetes-security-job-docker-socket title: Container should not mount the Docker socket impact: 100 - mql: | - k8s.job { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') - } + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -953,10 +944,7 @@ queries: - uid: mondoo-kubernetes-security-replicaset-docker-socket title: Container should not mount the Docker socket impact: 100 - mql: | - k8s.replicaset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') - } + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1001,10 +989,7 @@ queries: - uid: mondoo-kubernetes-security-daemonset-docker-socket title: Container should not mount the Docker socket impact: 100 - mql: | - k8s.daemonset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') - } + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1049,10 +1034,7 @@ queries: - uid: mondoo-kubernetes-security-pod-containerd-socket title: Container should not mount the containerd socket impact: 100 - mql: | - k8s.pod { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') - } + mql: k8s.pod.podSpec['volumes'] == null || k8s.pod.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') docs: desc: | Do not mount the container runtime socket into any container. 
@@ -1097,10 +1079,7 @@ queries: - uid: mondoo-kubernetes-security-cronjob-containerd-socket title: Container should not mount the containerd socket impact: 100 - mql: | - k8s.cronjob { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') - } + mql: k8s.cronjob.podSpec['volumes'] == null || k8s.cronjob.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1145,10 +1124,7 @@ queries: - uid: mondoo-kubernetes-security-statefulset-containerd-socket title: Container should not mount the containerd socket impact: 100 - mql: | - k8s.statefulset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') - } + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1193,10 +1169,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-containerd-socket title: Container should not mount the containerd socket impact: 100 - mql: | - k8s.deployment { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') - } + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') docs: desc: | Do not mount the container runtime socket into any container. 
@@ -1241,10 +1214,7 @@ queries: - uid: mondoo-kubernetes-security-job-containerd-socket title: Container should not mount the containerd socket impact: 100 - mql: | - k8s.job { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') - } + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1289,10 +1259,7 @@ queries: - uid: mondoo-kubernetes-security-replicaset-containerd-socket title: Container should not mount the containerd socket impact: 100 - mql: | - k8s.replicaset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') - } + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1337,10 +1304,7 @@ queries: - uid: mondoo-kubernetes-security-daemonset-containerd-socket title: Container should not mount the containerd socket impact: 100 - mql: | - k8s.daemonset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') - } + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') docs: desc: | Do not mount the container runtime socket into any container. 
@@ -1385,10 +1349,7 @@ queries: - uid: mondoo-kubernetes-security-pod-crio-socket title: Container should not mount the CRI-O socket impact: 100 - mql: | - k8s.pod { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') - } + mql: k8s.pod.podSpec['volumes'] == null || k8s.pod.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1433,10 +1394,7 @@ queries: - uid: mondoo-kubernetes-security-cronjob-crio-socket title: Container should not mount the CRI-O socket impact: 100 - mql: | - k8s.cronjob { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') - } + mql: k8s.cronjob.podSpec['volumes'] == null || k8s.cronjob.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1481,10 +1439,7 @@ queries: - uid: mondoo-kubernetes-security-statefulset-crio-socket title: Container should not mount the CRI-O socket impact: 100 - mql: | - k8s.statefulset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') - } + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1529,10 +1484,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-crio-socket title: Container should not mount the CRI-O socket impact: 100 - mql: | - k8s.deployment { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') - } + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') docs: desc: | Do not mount the container runtime socket into any container. 
@@ -1577,10 +1529,7 @@ queries: - uid: mondoo-kubernetes-security-job-crio-socket title: Container should not mount the CRI-O socket impact: 100 - mql: | - k8s.job { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') - } + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1625,10 +1574,7 @@ queries: - uid: mondoo-kubernetes-security-replicaset-crio-socket title: Container should not mount the CRI-O socket impact: 100 - mql: | - k8s.replicaset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') - } + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') docs: desc: | Do not mount the container runtime socket into any container. @@ -1673,10 +1619,7 @@ queries: - uid: mondoo-kubernetes-security-daemonset-crio-socket title: Container should not mount the CRI-O socket impact: 100 - mql: | - k8s.daemonset { - podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') - } + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') docs: desc: | Do not mount the container runtime socket into any container. 
@@ -1722,17 +1665,9 @@ queries: title: Container should not allow privilege escalation impact: 100 mql: | - k8s.pod { - ephemeralContainers { - securityContext['allowPrivilegeEscalation'] != true - } - initContainers { - securityContext['allowPrivilegeEscalation'] != true - } - containers { - securityContext['allowPrivilegeEscalation'] != true - } - } + k8s.pod.ephemeralContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.pod.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.pod.containers.all( securityContext['allowPrivilegeEscalation'] != true ) docs: desc: | Do not allow privilege escalation in containers. @@ -1772,14 +1707,8 @@ queries: title: Container should not allow privilege escalation impact: 100 mql: | - k8s.cronjob { - initContainers { - securityContext['allowPrivilegeEscalation'] != true - } - containers { - securityContext['allowPrivilegeEscalation'] != true - } - } + k8s.cronjob.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.cronjob.containers.all( securityContext['allowPrivilegeEscalation'] != true ) docs: desc: | Do not allow privilege escalation in containers. @@ -1819,14 +1748,8 @@ queries: title: Container should not allow privilege escalation impact: 100 mql: | - k8s.statefulset { - initContainers { - securityContext['allowPrivilegeEscalation'] != true - } - containers { - securityContext['allowPrivilegeEscalation'] != true - } - } + k8s.statefulset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.statefulset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) docs: desc: | Do not allow privilege escalation in containers. 
@@ -1866,18 +1789,11 @@ queries: title: Container should not allow privilege escalation impact: 100 mql: | - k8s.deployment { - initContainers { - securityContext['allowPrivilegeEscalation'] != true - } - containers { - securityContext['allowPrivilegeEscalation'] != true - } - } + k8s.deployment.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.deployment.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) docs: desc: | - Do not allow privilege escalation in containers. - Even, when the container is not running as root, it could still escalate privileges. + AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. audit: | Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: @@ -1913,14 +1829,8 @@ queries: title: Container should not allow privilege escalation impact: 100 mql: | - k8s.job { - initContainers { - securityContext['allowPrivilegeEscalation'] != true - } - containers { - securityContext['allowPrivilegeEscalation'] != true - } - } + k8s.job.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.job.containers.all( securityContext['allowPrivilegeEscalation'] != true ) docs: desc: | Do not allow privilege escalation in containers. 
@@ -1960,14 +1870,8 @@ queries: title: Container should not allow privilege escalation impact: 100 mql: | - k8s.replicaset { - initContainers { - securityContext['allowPrivilegeEscalation'] != true - } - containers { - securityContext['allowPrivilegeEscalation'] != true - } - } + k8s.replicaset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.replicaset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) docs: desc: | Do not allow privilege escalation in containers. @@ -2007,14 +1911,8 @@ queries: title: Container should not allow privilege escalation impact: 100 mql: | - k8s.daemonset { - initContainers { - securityContext['allowPrivilegeEscalation'] != true - } - containers { - securityContext['allowPrivilegeEscalation'] != true - } - } + k8s.daemonset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.daemonset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) docs: desc: | Do not allow privilege escalation in containers. @@ -2054,17 +1952,9 @@ queries: title: Container should not run as a privileged container impact: 100 mql: | - k8s.pod { - ephemeralContainers { - securityContext['privileged'] != true - } - initContainers { - securityContext['privileged'] != true - } - containers { - securityContext['privileged'] != true - } - } + k8s.pod.ephemeralContainers.all( securityContext['privileged'] != true ) + k8s.pod.initContainers.all( securityContext['privileged'] != true ) + k8s.pod.containers.all( securityContext['privileged'] != true ) docs: desc: | Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. 
@@ -2115,14 +2005,8 @@ queries: title: Container should not run as a privileged container impact: 100 mql: | - k8s.cronjob { - initContainers { - securityContext['privileged'] != true - } - containers { - securityContext['privileged'] != true - } - } + k8s.cronjob.initContainers.all( securityContext['privileged'] != true ) + k8s.cronjob.containers.all( securityContext['privileged'] != true ) docs: desc: | Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. @@ -2173,14 +2057,8 @@ queries: title: Container should not run as a privileged container impact: 100 mql: | - k8s.statefulset { - initContainers { - securityContext['privileged'] != true - } - containers { - securityContext['privileged'] != true - } - } + k8s.statefulset.initContainers.all( securityContext['privileged'] != true ) + k8s.statefulset.containers.all( securityContext['privileged'] != true ) docs: desc: | Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. @@ -2231,14 +2109,8 @@ queries: title: Container should not run as a privileged container impact: 100 mql: | - k8s.deployment { - initContainers { - securityContext['privileged'] != true - } - containers { - securityContext['privileged'] != true - } - } + k8s.deployment.containers.all( securityContext['privileged'] != true ) + k8s.deployment.initContainers.all( securityContext['privileged'] != true ) docs: desc: | Running a privileged container means that the container has the host's capabilities including access to all devices and the host's network. 
@@ -2289,14 +2161,8 @@ queries: title: Container should not run as a privileged container impact: 100 mql: | - k8s.job { - initContainers { - securityContext['privileged'] != true - } - containers { - securityContext['privileged'] != true - } - } + k8s.job.initContainers.all( securityContext['privileged'] != true ) + k8s.job.containers.all( securityContext['privileged'] != true ) docs: desc: | Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. @@ -2347,14 +2213,8 @@ queries: title: Container should not run as a privileged container impact: 100 mql: | - k8s.replicaset { - initContainers { - securityContext['privileged'] != true - } - containers { - securityContext['privileged'] != true - } - } + k8s.replicaset.initContainers.all( securityContext['privileged'] != true ) + k8s.replicaset.containers.all( securityContext['privileged'] != true ) docs: desc: | Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. @@ -2405,14 +2265,8 @@ queries: title: Container should not run as a privileged container impact: 100 mql: | - k8s.daemonset { - initContainers { - securityContext['privileged'] != true - } - containers { - securityContext['privileged'] != true - } - } + k8s.daemonset.initContainers.all( securityContext['privileged'] != true ) + k8s.daemonset.containers.all( securityContext['privileged'] != true ) docs: desc: | Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. 
@@ -2463,17 +2317,9 @@ queries: title: Container should use an immutable root filesystem impact: 80 mql: | - k8s.pod { - ephemeralContainers { - securityContext['readOnlyRootFilesystem'] == true - } - initContainers { - securityContext['readOnlyRootFilesystem'] == true - } - containers { - securityContext['readOnlyRootFilesystem'] == true - } - } + k8s.pod.ephemeralContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.pod.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.pod.containers.all( securityContext['readOnlyRootFilesystem'] == true ) docs: desc: | Running a container with an immutable (read-only) file system prevents the modification of running containers. @@ -2512,14 +2358,8 @@ queries: title: Container should use an immutable root filesystem impact: 80 mql: | - k8s.cronjob { - initContainers { - securityContext['readOnlyRootFilesystem'] == true - } - containers { - securityContext['readOnlyRootFilesystem'] == true - } - } + k8s.cronjob.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.cronjob.containers.all( securityContext['readOnlyRootFilesystem'] == true ) docs: desc: | Running a container with an immutable (read-only) file system prevents the modification of running containers. @@ -2558,14 +2398,8 @@ queries: title: Container should use an immutable root filesystem impact: 80 mql: | - k8s.statefulset { - initContainers { - securityContext['readOnlyRootFilesystem'] == true - } - containers { - securityContext['readOnlyRootFilesystem'] == true - } - } + k8s.statefulset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.statefulset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) docs: desc: | Running a container with an immutable (read-only) file system prevents the modification of running containers. 
@@ -2604,14 +2438,8 @@ queries: title: Container should use an immutable root filesystem impact: 80 mql: | - k8s.deployment { - initContainers { - securityContext['readOnlyRootFilesystem'] == true - } - containers { - securityContext['readOnlyRootFilesystem'] == true - } - } + k8s.deployment.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.deployment.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) docs: desc: | Running a container with an immutable (read-only) file system prevents the modification of running containers. @@ -2650,14 +2478,8 @@ queries: title: Container should use an immutable root filesystem impact: 80 mql: | - k8s.job { - initContainers { - securityContext['readOnlyRootFilesystem'] == true - } - containers { - securityContext['readOnlyRootFilesystem'] == true - } - } + k8s.job.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.job.containers.all( securityContext['readOnlyRootFilesystem'] == true ) docs: desc: | Running a container with an immutable (read-only) file system prevents the modification of running containers. @@ -2696,14 +2518,8 @@ queries: title: Container should use an immutable root filesystem impact: 80 mql: | - k8s.replicaset { - initContainers { - securityContext['readOnlyRootFilesystem'] == true - } - containers { - securityContext['readOnlyRootFilesystem'] == true - } - } + k8s.replicaset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.replicaset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) docs: desc: | Running a container with an immutable (read-only) file system prevents the modification of running containers. 
@@ -2742,14 +2558,8 @@ queries: title: Container should use an immutable root filesystem impact: 80 mql: | - k8s.daemonset { - initContainers { - securityContext['readOnlyRootFilesystem'] == true - } - containers { - securityContext['readOnlyRootFilesystem'] == true - } - } + k8s.daemonset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.daemonset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) docs: desc: | Running a container with an immutable (read-only) file system prevents the modification of running containers. @@ -3037,19 +2847,8 @@ queries: title: Container should not run as root impact: 100 mql: | - k8s.deployment { - podSecurityContext=podSpec['securityContext'] - initContainers { - a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null - res = securityContext['runAsNonRoot'] == true || a - res == true - } - containers { - a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null - res = securityContext['runAsNonRoot'] == true || a - res == true - } - } + k8s.deployment.containers.all( securityContext['runAsNonRoot'] == true ) + k8s.deployment.initContainers.all( securityContext['runAsNonRoot'] == true ) docs: desc: | Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. @@ -3358,10 +3157,7 @@ queries: - uid: mondoo-kubernetes-security-pod-hostnetwork title: Pod should not run with hostNetwork impact: 80 - mql: | - k8s.pod { - podSpec['hostNetwork'] != true - } + mql: k8s.pod.podSpec['hostNetwork'] != true docs: desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network including loopback devices. This capability can be used to intercept network traffic including the traffic of other pods. 
audit: | @@ -3396,10 +3192,7 @@ queries: - uid: mondoo-kubernetes-security-cronjob-hostnetwork title: Pod should not run with hostNetwork impact: 80 - mql: | - k8s.cronjob { - podSpec['hostNetwork'] != true - } + mql: k8s.cronjob.podSpec['hostNetwork'] != true docs: desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. audit: | @@ -3434,10 +3227,7 @@ queries: - uid: mondoo-kubernetes-security-statefulset-hostnetwork title: Pod should not run with hostNetwork impact: 80 - mql: | - k8s.statefulset { - podSpec['hostNetwork'] != true - } + mql: k8s.statefulset.podSpec['hostNetwork'] != true docs: desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. audit: | @@ -3472,10 +3262,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-hostnetwork title: Pod should not run with hostNetwork impact: 80 - mql: | - k8s.deployment { - podSpec['hostNetwork'] != true - } + mql: k8s.deployment.podSpec['hostNetwork'] != true docs: desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. audit: | @@ -3510,10 +3297,7 @@ queries: - uid: mondoo-kubernetes-security-job-hostnetwork title: Pod should not run with hostNetwork impact: 80 - mql: | - k8s.job { - podSpec['hostNetwork'] != true - } + mql: k8s.job.podSpec['hostNetwork'] != true docs: desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. 
audit: | @@ -3549,9 +3333,7 @@ queries: title: Pod should not run with hostNetwork impact: 80 mql: | - k8s.replicaset { - podSpec['hostNetwork'] != true - } + k8s.replicaset.podSpec['hostNetwork'] != true docs: desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. audit: | @@ -3587,9 +3369,7 @@ queries: title: Pod should not run with hostNetwork impact: 80 mql: | - k8s.daemonset { - podSpec['hostNetwork'] != true - } + k8s.daemonset.podSpec['hostNetwork'] != true docs: desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. audit: | @@ -3624,10 +3404,7 @@ queries: - uid: mondoo-kubernetes-security-pod-hostpid title: Pod should not run with hostPID impact: 80 - mql: | - k8s.pod { - podSpec['hostPID'] != true - } + mql: k8s.pod.podSpec['hostPID'] != true docs: desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. audit: | @@ -3660,10 +3437,7 @@ queries: - uid: mondoo-kubernetes-security-cronjob-hostpid title: Pod should not run with hostPID impact: 80 - mql: | - k8s.cronjob { - podSpec['hostPID'] != true - } + mql: k8s.cronjob.podSpec['hostPID'] != true docs: desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
audit: | @@ -3696,10 +3470,7 @@ queries: - uid: mondoo-kubernetes-security-statefulset-hostpid title: Pod should not run with hostPID impact: 80 - mql: | - k8s.statefulset { - podSpec['hostPID'] != true - } + mql: k8s.statefulset.podSpec['hostPID'] != true docs: desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. audit: | @@ -3732,10 +3503,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-hostpid title: Pod should not run with hostPID impact: 80 - mql: | - k8s.deployment { - podSpec['hostPID'] != true - } + mql: k8s.deployment.podSpec['hostPID'] != true docs: desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. audit: | @@ -3768,10 +3536,7 @@ queries: - uid: mondoo-kubernetes-security-job-hostpid title: Pod should not run with hostPID impact: 80 - mql: | - k8s.job { - podSpec['hostPID'] != true - } + mql: k8s.job.podSpec['hostPID'] != true docs: desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. audit: | @@ -3804,10 +3569,7 @@ queries: - uid: mondoo-kubernetes-security-replicaset-hostpid title: Pod should not run with hostPID impact: 80 - mql: | - k8s.replicaset { - podSpec['hostPID'] != true - } + mql: k8s.replicaset.podSpec['hostPID'] != true docs: desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
audit: | @@ -3841,9 +3603,7 @@ queries: title: Pod should not run with hostPID impact: 80 mql: | - k8s.daemonset { - podSpec['hostPID'] != true - } + k8s.daemonset.podSpec['hostPID'] != true docs: desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. audit: | @@ -3877,9 +3637,7 @@ queries: title: Pod should not run with hostIPC impact: 80 mql: | - k8s.pod { - podSpec['hostIPC'] != true - } + k8s.pod.podSpec['hostIPC'] != true docs: desc: | Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. @@ -3914,9 +3672,7 @@ queries: title: Pod should not run with hostIPC impact: 80 mql: | - k8s.cronjob { - podSpec['hostIPC'] != true - } + k8s.cronjob.podSpec['hostIPC'] != true docs: desc: | Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. @@ -3951,9 +3707,7 @@ queries: title: Pod should not run with hostIPC impact: 80 mql: | - k8s.statefulset { - podSpec['hostIPC'] != true - } + k8s.statefulset.podSpec['hostIPC'] != true docs: desc: | Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. @@ -3987,10 +3741,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-hostipc title: Pod should not run with hostIPC impact: 80 - mql: | - k8s.deployment { - podSpec['hostIPC'] != true - } + mql: k8s.deployment.podSpec['hostIPC'] != true docs: desc: | Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. @@ -4025,9 +3776,7 @@ queries: title: Pod should not run with hostIPC impact: 80 mql: | - k8s.job { - podSpec['hostIPC'] != true - } + k8s.job.podSpec['hostIPC'] != true docs: desc: | Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. 
@@ -4062,9 +3811,7 @@ queries: title: Pod should not run with hostIPC impact: 80 mql: | - k8s.replicaset { - podSpec['hostIPC'] != true - } + k8s.replicaset.podSpec['hostIPC'] != true docs: desc: | Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. @@ -4099,9 +3846,7 @@ queries: title: Pod should not run with hostIPC impact: 80 mql: | - k8s.daemonset { - podSpec['hostIPC'] != true - } + k8s.daemonset.podSpec['hostIPC'] != true docs: desc: | Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. @@ -4136,11 +3881,9 @@ queries: title: Pod should not run with the default service account impact: 30 mql: | - k8s.pod { - podSpec['serviceAccount'] == null || podSpec['serviceAccount'] == podSpec['serviceAccountName'] - podSpec['serviceAccountName'] != '' || podSpec['automountServiceAccountToken'] == false - podSpec['serviceAccountName'] != 'default' || podSpec['automountServiceAccountToken'] == false - } + k8s.pod.podSpec['serviceAccount'] == null || k8s.pod.podSpec['serviceAccount'] == k8s.pod.podSpec['serviceAccountName'] + k8s.pod.podSpec['serviceAccountName'] != '' || k8s.pod.podSpec['automountServiceAccountToken'] == false + k8s.pod.podSpec['serviceAccountName'] != 'default' || k8s.pod.podSpec['automountServiceAccountToken'] == false docs: desc: | Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. 
@@ -4185,11 +3928,9 @@ queries: title: Pod should not run with the default service account impact: 30 mql: | - k8s.cronjob { - podSpec['serviceAccount'] == null || podSpec['serviceAccount'] == podSpec['serviceAccountName'] - podSpec['serviceAccountName'] != '' || podSpec['automountServiceAccountToken'] == false - podSpec['serviceAccountName'] != 'default' || podSpec['automountServiceAccountToken'] == false - } + k8s.cronjob.podSpec['serviceAccount'] == null || k8s.cronjob.podSpec['serviceAccount'] == k8s.cronjob.podSpec['serviceAccountName'] + k8s.cronjob.podSpec['serviceAccountName'] != '' || k8s.cronjob.podSpec['automountServiceAccountToken'] == false + k8s.cronjob.podSpec['serviceAccountName'] != 'default' || k8s.cronjob.podSpec['automountServiceAccountToken'] == false docs: desc: | Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. @@ -4234,11 +3975,9 @@ queries: title: Pod should not run with the default service account impact: 30 mql: | - k8s.statefulset { - podSpec['serviceAccount'] == null || podSpec['serviceAccount'] == podSpec['serviceAccountName'] - podSpec['serviceAccountName'] != '' || podSpec['automountServiceAccountToken'] == false - podSpec['serviceAccountName'] != 'default' || podSpec['automountServiceAccountToken'] == false - } + k8s.statefulset.podSpec['serviceAccount'] == null || k8s.statefulset.podSpec['serviceAccount'] == k8s.statefulset.podSpec['serviceAccountName'] + k8s.statefulset.podSpec['serviceAccountName'] != '' || k8s.statefulset.podSpec['automountServiceAccountToken'] == false + k8s.statefulset.podSpec['serviceAccountName'] != 'default' || k8s.statefulset.podSpec['automountServiceAccountToken'] == false docs: desc: | Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. 
@@ -4283,11 +4022,9 @@ queries: title: Pod should not run with the default service account impact: 30 mql: | - k8s.deployment { - podSpec['serviceAccount'] == null || podSpec['serviceAccount'] == podSpec['serviceAccountName'] - podSpec['serviceAccountName'] != '' || podSpec['automountServiceAccountToken'] == false - podSpec['serviceAccountName'] != 'default' || podSpec['automountServiceAccountToken'] == false - } + k8s.deployment.podSpec['serviceAccount'] == null || k8s.deployment.podSpec['serviceAccount'] == k8s.deployment.podSpec['serviceAccountName'] + k8s.deployment.podSpec['serviceAccountName'] != '' || k8s.deployment.podSpec['automountServiceAccountToken'] == false + k8s.deployment.podSpec['serviceAccountName'] != 'default' || k8s.deployment.podSpec['automountServiceAccountToken'] == false docs: desc: | Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. @@ -4332,11 +4069,9 @@ queries: title: Pod should not run with the default service account impact: 30 mql: | - k8s.job { - podSpec['serviceAccount'] == null || podSpec['serviceAccount'] == podSpec['serviceAccountName'] - podSpec['serviceAccountName'] != '' || podSpec['automountServiceAccountToken'] == false - podSpec['serviceAccountName'] != 'default' || podSpec['automountServiceAccountToken'] == false - } + k8s.job.podSpec['serviceAccount'] == null || k8s.job.podSpec['serviceAccount'] == k8s.job.podSpec['serviceAccountName'] + k8s.job.podSpec['serviceAccountName'] != '' || k8s.job.podSpec['automountServiceAccountToken'] == false + k8s.job.podSpec['serviceAccountName'] != 'default' || k8s.job.podSpec['automountServiceAccountToken'] == false docs: desc: | Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. 
@@ -4381,11 +4116,9 @@ queries: title: Pod should not run with the default service account impact: 30 mql: | - k8s.replicaset { - podSpec['serviceAccount'] == null || podSpec['serviceAccount'] == podSpec['serviceAccountName'] - podSpec['serviceAccountName'] != '' || podSpec['automountServiceAccountToken'] == false - podSpec['serviceAccountName'] != 'default' || podSpec['automountServiceAccountToken'] == false - } + k8s.replicaset.podSpec['serviceAccount'] == null || k8s.replicaset.podSpec['serviceAccount'] == k8s.replicaset.podSpec['serviceAccountName'] + k8s.replicaset.podSpec['serviceAccountName'] != '' || k8s.replicaset.podSpec['automountServiceAccountToken'] == false + k8s.replicaset.podSpec['serviceAccountName'] != 'default' || k8s.replicaset.podSpec['automountServiceAccountToken'] == false docs: desc: | Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. @@ -4430,11 +4163,9 @@ queries: title: Pod should not run with the default service account impact: 30 mql: | - k8s.daemonset { - podSpec['serviceAccount'] == null || podSpec['serviceAccount'] == podSpec['serviceAccountName'] - podSpec['serviceAccountName'] != '' || podSpec['automountServiceAccountToken'] == false - podSpec['serviceAccountName'] != 'default' || podSpec['automountServiceAccountToken'] == false - } + k8s.daemonset.podSpec['serviceAccount'] == null || k8s.daemonset.podSpec['serviceAccount'] == k8s.daemonset.podSpec['serviceAccountName'] + k8s.daemonset.podSpec['serviceAccountName'] != '' || k8s.daemonset.podSpec['automountServiceAccountToken'] == false + k8s.daemonset.podSpec['serviceAccountName'] != 'default' || k8s.daemonset.podSpec['automountServiceAccountToken'] == false docs: desc: | Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts. 
@@ -4479,23 +4210,9 @@ queries: title: Container image pull should be consistent impact: 60 mql: | - k8s.pod { - ephemeralContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - initContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - containers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - } + k8s.pod.ephemeralContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.pod.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.pod.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) docs: desc: | It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). @@ -4533,18 +4250,8 @@ queries: title: Container image pull should be consistent impact: 60 mql: | - k8s.cronjob { - initContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - containers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - } + k8s.cronjob.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.cronjob.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) docs: desc: | It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. 
To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). @@ -4582,18 +4289,8 @@ queries: title: Container image pull should be consistent impact: 60 mql: | - k8s.statefulset { - initContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - containers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - } + k8s.statefulset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.statefulset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) docs: desc: | It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). @@ -4631,18 +4328,8 @@ queries: title: Container image pull should be consistent impact: 60 mql: | - k8s.deployment { - initContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - containers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - } + k8s.deployment.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.deployment.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) docs: desc: | It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. 
To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). @@ -4680,18 +4367,8 @@ queries: title: Container image pull should be consistent impact: 60 mql: | - k8s.job { - initContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - containers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - } + k8s.job.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.job.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) docs: desc: | It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). @@ -4729,18 +4406,8 @@ queries: title: Container image pull should be consistent impact: 60 mql: | - k8s.replicaset { - initContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - containers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - } + k8s.replicaset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.replicaset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) docs: desc: | It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. 
To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). @@ -4778,18 +4445,8 @@ queries: title: Container image pull should be consistent impact: 60 mql: | - k8s.daemonset { - initContainers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - containers { - imagePullPolicy == 'Always' - correctImage = image != /:latest/ && image.contains(':') == true - correctImage == true - } - } + k8s.daemonset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.daemonset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) docs: desc: | It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). @@ -4827,14 +4484,8 @@ queries: title: Container should have a CPU limit impact: 20 mql: | - k8s.pod { - initContainers { - resources['limits']['cpu'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.pod.initContainers.all( resources['limits']['cpu'] != null ) + k8s.pod.containers.all( resources['limits']['cpu'] != null ) docs: desc: | Kubernetes Pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
@@ -4875,14 +4526,8 @@ queries: title: Container should have a CPU limit impact: 20 mql: | - k8s.cronjob { - initContainers { - resources['limits']['cpu'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.cronjob.initContainers.all( resources['limits']['cpu'] != null ) + k8s.cronjob.containers.all( resources['limits']['cpu'] != null ) docs: desc: | Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -4923,14 +4568,8 @@ queries: title: Container should have a CPU limit impact: 20 mql: | - k8s.statefulset { - initContainers { - resources['limits']['cpu'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.statefulset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.statefulset.containers.all( resources['limits']['cpu'] != null ) docs: desc: | Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -4971,14 +4610,8 @@ queries: title: Container should have a CPU limit impact: 20 mql: | - k8s.deployment { - initContainers { - resources['limits']['cpu'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.deployment.initContainers.all( resources['limits']['cpu'] != null ) + k8s.deployment.containers.all( resources['limits']['cpu'] != null ) docs: desc: | Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
@@ -5019,14 +4652,8 @@ queries: title: Container should have a CPU limit impact: 20 mql: | - k8s.job { - initContainers { - resources['limits']['cpu'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.job.initContainers.all( resources['limits']['cpu'] != null ) + k8s.job.containers.all( resources['limits']['cpu'] != null ) docs: desc: | Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -5067,14 +4694,8 @@ queries: title: Container should have a CPU limit impact: 20 mql: | - k8s.replicaset { - initContainers { - resources['limits']['cpu'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.replicaset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.replicaset.containers.all( resources['limits']['cpu'] != null ) docs: desc: | Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -5115,14 +4736,8 @@ queries: title: Container should have a CPU limit impact: 20 mql: | - k8s.daemonset { - initContainers { - resources['limits']['cpu'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.daemonset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.daemonset.containers.all( resources['limits']['cpu'] != null ) docs: desc: | Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
@@ -5163,14 +4778,8 @@ queries: title: Container should have a memory limit impact: 20 mql: | - k8s.pod { - initContainers { - resources['limits']['memory'] != null - } - containers { - resources['limits']['memory'] != null - } - } + k8s.pod.initContainers.all( resources['limits']['memory'] != null ) + k8s.pod.containers.all( resources['limits']['memory'] != null ) docs: desc: | Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -5208,14 +4817,8 @@ queries: title: Container should have a memory limit impact: 20 mql: | - k8s.cronjob { - initContainers { - resources['limits']['memory'] != null - } - containers { - resources['limits']['memory'] != null - } - } + k8s.cronjob.initContainers.all( resources['limits']['memory'] != null ) + k8s.cronjob.containers.all( resources['limits']['memory'] != null ) docs: desc: | Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -5253,14 +4856,8 @@ queries: title: Container should have a memory limit impact: 20 mql: | - k8s.statefulset { - initContainers { - resources['limits']['memory'] != null - } - containers { - resources['limits']['memory'] != null - } - } + k8s.statefulset.initContainers.all( resources['limits']['memory'] != null ) + k8s.statefulset.containers.all( resources['limits']['memory'] != null ) docs: desc: | Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
@@ -5298,14 +4895,8 @@ queries: title: Container should have a memory limit impact: 20 mql: | - k8s.deployment { - initContainers { - resources['limits']['memory'] != null - } - containers { - resources['limits']['cpu'] != null - } - } + k8s.deployment.initContainers.all( resources['limits']['memory'] != null ) + k8s.deployment.containers.all( resources['limits']['memory'] != null ) docs: desc: | Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -5343,14 +4934,8 @@ queries: title: Container should have a memory limit impact: 20 mql: | - k8s.job { - initContainers { - resources['limits']['memory'] != null - } - containers { - resources['limits']['memory'] != null - } - } + k8s.job.initContainers.all( resources['limits']['memory'] != null ) + k8s.job.containers.all( resources['limits']['memory'] != null ) docs: desc: | Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -5388,14 +4973,8 @@ queries: title: Container should have a memory limit impact: 20 mql: | - k8s.replicaset { - initContainers { - resources['limits']['memory'] != null - } - containers { - resources['limits']['memory'] != null - } - } + k8s.replicaset.initContainers.all( resources['limits']['memory'] != null ) + k8s.replicaset.containers.all( resources['limits']['memory'] != null ) docs: desc: | Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
@@ -5433,14 +5012,8 @@ queries: title: Container should have a memory limit impact: 20 mql: | - k8s.daemonset { - initContainers { - resources['limits']['memory'] != null - } - containers { - resources['limits']['memory'] != null - } - } + k8s.daemonset.initContainers.all( resources['limits']['memory'] != null ) + k8s.daemonset.containers.all( resources['limits']['memory'] != null ) docs: desc: | Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. @@ -5478,39 +5051,12 @@ queries: title: Pods should not run with NET_RAW capability impact: 80 mql: | - k8s.pod { - podSpec['containers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } - k8s.pod { - podSpec['ephemeralContainers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } - k8s.pod { - podSpec['initContainers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == 
/^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == /^NET_RAW$|^ALL$/ ) } ) docs: desc: | Pods should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. @@ -5560,17 +5106,8 @@ queries: title: DaemonSets should not run with NET_RAW capability impact: 80 mql: | - k8s.daemonset { - podSpec['containers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == /^NET_RAW$|^ALL$/ ) } ) docs: desc: | DaemonSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. 
@@ -5624,17 +5161,8 @@ queries: title: ReplicaSets should not run with NET_RAW capability impact: 80 mql: | - k8s.replicaset { - podSpec['containers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } + k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == /^NET_RAW$|^ALL$/ ) } ) docs: desc: | ReplicaSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. @@ -5687,18 +5215,7 @@ queries: - uid: mondoo-kubernetes-security-job-capability-net-raw title: Jobs should not run with NET_RAW capability impact: 80 - mql: | - k8s.job { - podSpec['containers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } + mql: k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) docs: desc: | Jobs should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. 
@@ -5752,17 +5269,8 @@ queries: title: Deployments should not run with NET_RAW capability impact: 80 mql: | - k8s.deployment { - podSpec['containers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } + k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == /^NET_RAW$|^ALL$/ ) } ) docs: desc: | Deployments should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. @@ -5816,17 +5324,8 @@ queries: title: StatefulSets should not run with NET_RAW capability impact: 80 mql: | - k8s.statefulset { - podSpec['containers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == /^NET_RAW$|^ALL$/ ) } ) docs: desc: | StatefulSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. 
@@ -5880,17 +5379,8 @@ queries: title: CronJobs should not run with NET_RAW capability impact: 80 mql: | - k8s.cronjob { - podSpec['containers'] { - _['securityContext']['capabilities'] != null - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "NET_RAW") - _['drop'] != null - _['drop'].any(_.upcase == "NET_RAW") || _['drop'].any(_.upcase == "ALL") - } - } - } + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^NET_RAW$|^ALL$/ ) } ) + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _ == /^NET_RAW$|^ALL$/ ) } ) docs: desc: | CronJobs should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. @@ -5948,42 +5438,9 @@ queries: title: Pods should not run with SYS_ADMIN capability impact: 80 mql: | - k8s.pod { - podSpec['containers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } - k8s.pod { - podSpec['initContainers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } - k8s.pod { - podSpec['ephemeralContainers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) 
} ) + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) docs: desc: | Pods should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. @@ -6016,18 +5473,7 @@ queries: title: DaemonSets should not run with SYS_ADMIN capability impact: 80 mql: | - k8s.daemonset { - podSpec['containers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) docs: desc: | DaemonSets should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. @@ -6062,18 +5508,7 @@ queries: title: ReplicaSets should not run with SYS_ADMIN capability impact: 80 mql: | - k8s.replicaset { - podSpec['containers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } + k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) docs: desc: | ReplicaSets should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. 
@@ -6108,18 +5543,7 @@ queries: title: Jobs should not run with SYS_ADMIN capability impact: 80 mql: | - k8s.job { - podSpec['containers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } + k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) docs: desc: | Jobs should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. @@ -6153,19 +5577,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-capability-sys-admin title: Deployments should not run with SYS_ADMIN capability impact: 80 - mql: | - k8s.deployment { - podSpec['containers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } + mql: k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) docs: desc: | Deployments should not run with SYS_ADMIN capability. It even allows containers not running as root to run certain tasks as if the user was root. The SYS_ADMIN capability enables a wide range of elevated system calls. 
@@ -6200,18 +5612,7 @@ queries: title: StatefulSets should not run with SYS_ADMIN capability impact: 80 mql: | - k8s.statefulset { - podSpec['containers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) docs: desc: | StatefulSets should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. @@ -6246,18 +5647,7 @@ queries: title: CronJobs should not run with SYS_ADMIN capability impact: 80 mql: | - k8s.cronjob { - podSpec['containers'] { - if( _['securityContext']['capabilities'] != null ) { - _['securityContext']['capabilities'] { - _['add'] == null || _['add'].none(_.upcase == "ALL") - _['add'] == null || _['add'].none(_.upcase == "SYS_ADMIN") - } - } else { - true - } - } - } + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'].none( _ == /^SYS_ADMIN$|^ALL$/ ) } ) docs: desc: | CronJobs should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. @@ -6294,18 +5684,8 @@ queries: title: Pods should not bind to a host port impact: 80 mql: | - k8s.pod.podSpec { - _['containers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } - k8s.pod.podSpec { - _['initContainers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } + k8s.pod.podSpec['containers'].all( _['ports'].none( _['hostPort'] ) ) + k8s.pod.podSpec['initContainers'].all( _['ports'].none( _['hostPort'] ) ) docs: desc: | Pods should not bind to the underlying host port. This allows bypassing certain network access control systems. 
@@ -6342,12 +5722,7 @@ queries: title: DaemonSets should not bind to a host port impact: 80 mql: | - k8s.daemonset.podSpec { - _['containers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } + k8s.daemonset.podSpec['containers'].all( _['ports'].none( _['hostPort'] ) ) docs: desc: | DaemonSets should not bind to the underlying host port. This allows bypassing certain network access control systems. @@ -6386,12 +5761,7 @@ queries: title: ReplicaSets should not bind to a host port impact: 80 mql: | - k8s.replicaset.podSpec { - _['containers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } + k8s.replicaset.podSpec['containers'].all( _['ports'].none( _['hostPort'] ) ) docs: desc: | ReplicaSets should not bind to the underlying host port. This allows bypassing certain network access control systems. @@ -6430,12 +5800,7 @@ queries: title: Jobs should not bind to a host port impact: 80 mql: | - k8s.job.podSpec { - _['containers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } + k8s.job.podSpec['containers'].all( _['ports'].none( _['hostPort'] ) ) docs: desc: | Jobs should not bind to the underlying host port. This allows bypassing certain network access control systems. @@ -6473,13 +5838,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-ports-hostport title: Deployments should not bind to a host port impact: 80 - mql: | - k8s.deployment.podSpec { - _['containers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } + mql: k8s.deployment.podSpec['containers'].all( _['ports'].none( _['hostPort'] ) ) docs: desc: | Deployments should not bind to the underlying host port. This allows bypassing certain network access control systems. 
@@ -6518,12 +5877,7 @@ queries: title: StatefulSets should not bind to a host port impact: 80 mql: | - k8s.statefulset.podSpec { - _['containers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } + k8s.statefulset.podSpec['containers'].all( _['ports'].none( _['hostPort'] ) ) docs: desc: | StatefulSets should not bind to the underlying host port. This allows bypassing certain network access control systems. @@ -6562,12 +5916,7 @@ queries: title: CronJobs should not bind to a host port impact: 80 mql: | - k8s.cronjob.podSpec { - _['containers'] { - _['name'] - _['ports'] == null || _['ports'].all(_['hostPort'] == null) - } - } + k8s.cronjob.podSpec['containers'].all( _['ports'].none( _['hostPort'] ) ) docs: desc: | CronJobs should not bind to the underlying host port. This allows bypassing certain network access control systems. @@ -6849,7 +6198,7 @@ queries: - uid: mondoo-kubernetes-security-deployment-hostpath-readonly title: Deployments should mount any host path volumes as read-only impact: 80 - mql: | + mql: | k8s.deployment.podSpec { hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) _['containers'] { @@ -7062,55 +6411,3 @@ queries: ```kubectl get pods -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.template.spec.containers[*].image"``` remediation: | Delete any pods that are running Kubernetes dashboard. 
- - uid: mondoo-kubernetes-security-gather-deployment-container - title: Gather all Deployments - mql: | - k8s.deployments { - name - } - - uid: mondoo-kubernetes-security-gather-daemonset-container - title: Gather all DaemonSets - mql: | - k8s.daemonsets { - name - } - - uid: mondoo-kubernetes-security-gather-statefulset-container - title: Gather all StatefulSets - mql: | - k8s.statefulsets { - name - } - - uid: mondoo-kubernetes-security-gather-job-container - title: Gather all Jobs - mql: | - k8s.jobs { - name - } - - uid: mondoo-kubernetes-security-gather-cronjob-container - title: Gather all CronJobs - mql: | - k8s.cronjobs { - name - } - - uid: mondoo-kubernetes-security-gather-pods-security-context - title: Gather all Pods with securityContext - mql: | - k8s.pods { - name - namespace - initContainers { - name - resources - securityContext - } - containers { - name - resources - livenessProbe - securityContext - } - ephemeralContainers { - name - securityContext - } - }