use DaemonSet for node scanning (#1134)
* feat: ignore dlv binary

* feat: use templated inventory and universal configmap; add daemonset node scanning style

* feat: create daemonset instead of deployments, cleanup old deployments

* feat: use `inventory_template` file instead of `inventory` to avoid cnspec treating template as config file

* scratching the surface of tests updates

* feat: remove deployments cleanup

* follow-up: remove deployments audit

* follow-up: remove deployments audit

* fix tests

Signed-off-by: Ivan Milchev <[email protected]>

* fix: exclude services from assets score check in the integration test

* fix: allow daemonsets crud in rbac

* cleanup

* fix: integration tests (except oom)

* follow-up on tests naming

* follow-up on tests naming

* fix: report node status and oom test

* fix: update cronjobs spec to match updated configmap

* docs: remove misleading comment (daemon set name)

* follow-up: update misleading or unclear naming, comments and messages in test

* follow-up: copy-pasted comment not matching the code

* bring back the audit conditions test

* remove redundant for loops in configmap-related unit-tests

* follow-up: correct misleading comment

* follow-up: correct var naming

* fix: delete daemonset on down and style change

* fix: delete deprecated config maps

---------

Signed-off-by: Ivan Milchev <[email protected]>
Co-authored-by: Ivan Milchev <[email protected]>
slntopp and imilchev authored Jun 21, 2024
1 parent 2126b82 commit 0cb897f
Showing 14 changed files with 281 additions and 503 deletions.
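For orientation before the diff: after this change, a single DaemonSet handles node scanning whenever the audit config requests it. A minimal Go sketch of selecting the new style through the `v1alpha2` API touched below — the `Enable` field and the import path are assumptions from context; only `Nodes.Style` and `NodeScanStyle_DaemonSet` appear in this diff:

```go
package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"go.mondoo.com/mondoo-operator/api/v1alpha2" // assumed module path
)

// newNodeScanConfig builds a MondooAuditConfig that asks the operator to scan
// nodes with one DaemonSet instead of one Deployment per node.
func newNodeScanConfig(namespace string) *v1alpha2.MondooAuditConfig {
	return &v1alpha2.MondooAuditConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "mondoo-client", Namespace: namespace},
		Spec: v1alpha2.MondooAuditConfigSpec{
			Nodes: v1alpha2.Nodes{
				Enable: true,                             // assumed field; not shown in this diff
				Style:  v1alpha2.NodeScanStyle_DaemonSet, // serialized as "daemonset"
			},
		},
	}
}
```

Per the `Reconcile` change below, configs still set to `deployment` are routed to the DaemonSet path as well, so existing specs migrate without edits.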
3 changes: 3 additions & 0 deletions .gitignore
@@ -11,6 +11,9 @@ testbin/*
# Test binary, build with `go test -c`
*.test

# Go dlv debugger binary
__debug_bin*

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

1 change: 1 addition & 0 deletions api/v1alpha2/mondooauditconfig_types.go
@@ -96,6 +96,7 @@ type NodeScanStyle string
const (
NodeScanStyle_CronJob NodeScanStyle = "cronjob"
NodeScanStyle_Deployment NodeScanStyle = "deployment"
NodeScanStyle_DaemonSet NodeScanStyle = "daemonset"
)

type Nodes struct {
13 changes: 7 additions & 6 deletions config/rbac/role.yaml
@@ -21,23 +21,24 @@ rules:
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- batch
2 changes: 1 addition & 1 deletion controllers/mondooauditconfig_controller.go
@@ -61,7 +61,7 @@ var MondooClientBuilder = mondooclient.NewClient
//+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondooauditconfigs/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondooauditconfigs/finalizers,verbs=update
//+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondoooperatorconfigs,verbs=get;watch;list
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=deployments;replicasets;daemonsets;statefulsets,verbs=get;list;watch
//+kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=deletecollection
196 changes: 68 additions & 128 deletions controllers/nodes/deployment_handler.go
@@ -38,8 +38,8 @@ func (n *DeploymentHandler) Reconcile(ctx context.Context) (ctrl.Result, error)
if err := n.syncCronJob(ctx); err != nil {
return ctrl.Result{}, err
}
} else if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment {
if err := n.syncDeployment(ctx); err != nil {
} else if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment || n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_DaemonSet {
if err := n.syncDaemonSet(ctx); err != nil {
return ctrl.Result{}, err
}
}
@@ -72,18 +72,24 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error {
return err
}

// Delete DaemonSet if it exists
ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, ds); err != nil {
logger.Error(err, "Failed to clean up node scanning DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
return err
}

// Create/update CronJobs for nodes
for _, node := range nodes.Items {
// Delete Deployment if it exists
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil {
logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name)
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapNameWithNode(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, cm); err != nil {
logger.Error(err, "Failed to clean up old ConfigMap for node scanning", "namespace", cm.Namespace, "name", cm.Name)
return err
}

updated, err := n.syncConfigMap(ctx, node, clusterUid)
updated, err := n.syncConfigMap(ctx, clusterUid)
if err != nil {
return err
}
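`k8s.DeleteIfExists` does the heavy lifting for all of this cleanup but sits outside the diff. A plausible sketch of such a helper, assuming a controller-runtime client — the actual helper in this repository may differ:

```go
package k8s

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// DeleteIfExists deletes obj and treats NotFound as success, which lets the
// reconcile loop above call it unconditionally and stay idempotent across runs.
func DeleteIfExists(ctx context.Context, kubeClient client.Client, obj client.Object) error {
	if err := kubeClient.Delete(ctx, obj); err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}
```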
@@ -157,7 +163,7 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error {
return nil
}

func (n *DeploymentHandler) syncDeployment(ctx context.Context) error {
func (n *DeploymentHandler) syncDaemonSet(ctx context.Context) error {
mondooClientImage, err := n.ContainerImageResolver.CnspecImage(
n.Mondoo.Spec.Scanner.Image.Name, n.Mondoo.Spec.Scanner.Image.Tag, n.MondooOperatorConfig.Spec.SkipContainerResolution)
if err != nil {
@@ -194,71 +200,66 @@ func (n *DeploymentHandler) syncDeployment(ctx context.Context) error {
return err
}

updated, err := n.syncConfigMap(ctx, node, clusterUid)
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapNameWithNode(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, cm); err != nil {
logger.Error(err, "Failed to clean up old ConfigMap for node scanning", "namespace", cm.Namespace, "name", cm.Name)
return err
}

updated, err := n.syncConfigMap(ctx, clusterUid)
if err != nil {
return err
}

if updated {
logger.Info(
"Inventory ConfigMap was just updated. The deployment will use the new config during the next scheduled run.",
"Inventory ConfigMap was just updated. The daemonset will use the new config during the next scheduled run.",
"namespace", n.Mondoo.Namespace,
"name", DeploymentName(n.Mondoo.Name, node.Name))
}

dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, dep, n.Mondoo, logger, func() error {
UpdateDeployment(dep, node, *n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig)
return nil
})
if err != nil {
return err
}

if op == controllerutil.OperationResultCreated {
err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger)
if err != nil {
logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name)
return err
if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment {
dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil {
logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name)
}
continue
}
}

// Delete dangling Deployments for nodes that have been deleted from the cluster.
if err := n.cleanupDeploymentsForDeletedNodes(ctx, *nodes); err != nil {
return err
}

// List the Deployments again after they have been synced.
deployments, err := n.getDeploymentsForAuditConfig(ctx)
ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}}
op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, ds, n.Mondoo, logger, func() error {
UpdateDaemonSet(ds, *n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig)
return nil
})
if err != nil {
return err
}

// Get Pods for these Deployments
pods := &corev1.PodList{}
if len(deployments) > 0 {
opts := &client.ListOptions{
Namespace: n.Mondoo.Namespace,
LabelSelector: labels.SelectorFromSet(NodeScanningLabels(*n.Mondoo)),
}
err = n.KubeClient.List(ctx, pods, opts)
if op == controllerutil.OperationResultCreated {
err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger)
if err != nil {
logger.Error(err, "Failed to list Pods for Node Scanning")
logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name)
return err
}
}

deploymentsDegraded := false
for _, d := range deployments {
if d.Status.ReadyReplicas < *d.Spec.Replicas {
deploymentsDegraded = true
break
}
if err := n.KubeClient.Get(ctx, client.ObjectKeyFromObject(ds), ds); err != nil {
logger.Error(err, "Failed to get DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
}

updateNodeConditions(n.Mondoo, deploymentsDegraded, pods)
// Get Pods for these Deployments
pods := &corev1.PodList{}
opts := &client.ListOptions{
Namespace: n.Mondoo.Namespace,
LabelSelector: labels.SelectorFromSet(NodeScanningLabels(*n.Mondoo)),
}
err = n.KubeClient.List(ctx, pods, opts)
if err != nil {
logger.Error(err, "Failed to list Pods for Node Scanning")
return err
}

updateNodeConditions(n.Mondoo, ds.Status.CurrentNumberScheduled < ds.Status.DesiredNumberScheduled, pods)

if err := n.syncGCCronjob(ctx, mondooOperatorImage, clusterUid); err != nil {
return err
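The degraded check above is the DaemonSet counterpart of the old per-Deployment `ReadyReplicas < Spec.Replicas` loop: one status read replaces listing every Deployment. Isolated as a sketch (the PR compares scheduled counts; `Status.NumberReady` would be a stricter alternative, not what this commit does):

```go
package nodes

import appsv1 "k8s.io/api/apps/v1"

// nodeScanningDegraded mirrors the condition used in syncDaemonSet: the
// DaemonSet is considered degraded while fewer pods are scheduled than
// there are nodes eligible to run one.
func nodeScanningDegraded(ds *appsv1.DaemonSet) bool {
	return ds.Status.CurrentNumberScheduled < ds.Status.DesiredNumberScheduled
}
```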
@@ -269,16 +270,16 @@ func (n *DeploymentHandler) syncDeployment(ctx context.Context) error {
// syncConfigMap syncs the inventory ConfigMap. Returns a boolean indicating whether the ConfigMap has been updated. It
// can only be "true", if the ConfigMap existed before this reconcile cycle and the inventory was different from the
// desired state.
func (n *DeploymentHandler) syncConfigMap(ctx context.Context, node corev1.Node, clusterUid string) (bool, error) {
func (n *DeploymentHandler) syncConfigMap(ctx context.Context, clusterUid string) (bool, error) {
integrationMrn, err := k8s.TryGetIntegrationMrnForAuditConfig(ctx, n.KubeClient, *n.Mondoo)
if err != nil {
logger.Error(err, "failed to retrieve IntegrationMRN")
return false, err
}

cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}}
op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, cm, n.Mondoo, logger, func() error {
return UpdateConfigMap(cm, node, integrationMrn, clusterUid, *n.Mondoo)
return UpdateConfigMap(cm, integrationMrn, clusterUid, *n.Mondoo)
})
if err != nil {
return false, err
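With the `node` parameter gone, one ConfigMap serves every pod of the DaemonSet. Per the commit message it carries an `inventory_template` rather than a ready inventory, so cnspec does not mistake the unrendered template for a config file; each pod presumably fills in its own node name before use. A hedged sketch — the key name comes from the commit message, everything else is assumption:

```go
package nodes

import corev1 "k8s.io/api/core/v1"

// setInventoryTemplate stores a node-agnostic inventory template in the shared
// ConfigMap. The rendering step (substituting the pod's node name, e.g. from a
// Downward API env var) is assumed to happen inside each DaemonSet pod and is
// not shown in this diff.
func setInventoryTemplate(cm *corev1.ConfigMap, tmpl string) {
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	// "inventory_template", not "inventory": avoids cnspec loading the raw
	// template as if it were a finished config file (see commit message).
	cm.Data["inventory_template"] = tmpl
}
```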
@@ -315,54 +316,6 @@ func (n *DeploymentHandler) cleanupCronJobsForDeletedNodes(ctx context.Context,
return err
}
logger.Info("Deleted CronJob", "namespace", c.Namespace, "name", c.Name)

configMap := &corev1.ConfigMap{}
configMap.Name = ConfigMapName(n.Mondoo.Name, c.Spec.JobTemplate.Spec.Template.Spec.NodeName)
configMap.Namespace = n.Mondoo.Namespace
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to delete ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}
}
return nil
}

// cleanupDeploymentsForDeletedNodes deletes dangling Deployments for nodes that have been deleted from the cluster.
func (n *DeploymentHandler) cleanupDeploymentsForDeletedNodes(ctx context.Context, currentNodes corev1.NodeList) error {
deployments, err := n.getDeploymentsForAuditConfig(ctx)
if err != nil {
return err
}

for _, d := range deployments {
// Check if the node for that Deployment is still present in the cluster.
found := false
for _, node := range currentNodes.Items {
if DeploymentName(n.Mondoo.Name, node.Name) == d.Name {
found = true
break
}
}

// If the node is still there, there is nothing to update.
if found {
continue
}

// If the node for the Deployment has been deleted from the cluster, the Deployment needs to be deleted.
if err := k8s.DeleteIfExists(ctx, n.KubeClient, &d); err != nil {
logger.Error(err, "Failed to deleted Deployment", "namespace", d.Namespace, "name", d.Name)
return err
}
logger.Info("Deleted Deployment", "namespace", d.Namespace, "name", d.Name)

configMap := &corev1.ConfigMap{}
configMap.Name = ConfigMapName(n.Mondoo.Name, d.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"])
configMap.Namespace = n.Mondoo.Namespace
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to delete ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}
}
return nil
}
@@ -389,19 +342,6 @@ func (n *DeploymentHandler) getCronJobsForAuditConfig(ctx context.Context) ([]ba
return cronJobs.Items, nil
}

func (n *DeploymentHandler) getDeploymentsForAuditConfig(ctx context.Context) ([]appsv1.Deployment, error) {
deps := &appsv1.DeploymentList{}
depLabels := NodeScanningLabels(*n.Mondoo)

// Lists only the Deployments in the namespace of the MondooAuditConfig and only the ones that exactly match our labels.
listOpts := &client.ListOptions{Namespace: n.Mondoo.Namespace, LabelSelector: labels.SelectorFromSet(depLabels)}
if err := n.KubeClient.List(ctx, deps, listOpts); err != nil {
logger.Error(err, "Failed to list Deployments in namespace", "namespace", n.Mondoo.Namespace)
return nil, err
}
return deps.Items, nil
}

func (n *DeploymentHandler) down(ctx context.Context) error {
nodes := &corev1.NodeList{}
if err := n.KubeClient.List(ctx, nodes); err != nil {
@@ -417,22 +357,22 @@ func (n *DeploymentHandler) down(ctx context.Context) error {
logger.Error(err, "Failed to clean up node scanning CronJob", "namespace", cronJob.Namespace, "name", cronJob.Name)
return err
}
}

dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil {
logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name)
return err
}
ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, ds); err != nil {
logger.Error(err, "Failed to clean up node scanning DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
return err
}

configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to clean up inventory ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to clean up inventory ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}

gcCronJob := &batchv1.CronJob{