diff --git a/.gitignore b/.gitignore index 476ac08a..f0bc782c 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,9 @@ testbin/* # Test binary, build with `go test -c` *.test +# Go dlv debugger binary +__debug_bin* + # Output of the go coverage tool, specifically when used with LiteIDE *.out diff --git a/api/v1alpha2/mondooauditconfig_types.go b/api/v1alpha2/mondooauditconfig_types.go index 8b6c2584..b0b80ce5 100644 --- a/api/v1alpha2/mondooauditconfig_types.go +++ b/api/v1alpha2/mondooauditconfig_types.go @@ -96,6 +96,7 @@ type NodeScanStyle string const ( NodeScanStyle_CronJob NodeScanStyle = "cronjob" NodeScanStyle_Deployment NodeScanStyle = "deployment" + NodeScanStyle_DaemonSet NodeScanStyle = "daemonset" ) type Nodes struct { diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 69fbd218..b0f1f5cb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -21,23 +21,24 @@ rules: resources: - daemonsets - deployments - - replicasets - - statefulsets verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - apps resources: + - daemonsets - deployments + - replicasets + - statefulsets verbs: - - create - - delete - get - list - - patch - - update - watch - apiGroups: - batch diff --git a/controllers/mondooauditconfig_controller.go b/controllers/mondooauditconfig_controller.go index b3c672a6..6d16e96e 100644 --- a/controllers/mondooauditconfig_controller.go +++ b/controllers/mondooauditconfig_controller.go @@ -61,7 +61,7 @@ var MondooClientBuilder = mondooclient.NewClient //+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondooauditconfigs/status,verbs=get;update;patch //+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondooauditconfigs/finalizers,verbs=update //+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondoooperatorconfigs,verbs=get;watch;list -//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=apps,resources=deployments;replicasets;daemonsets;statefulsets,verbs=get;list;watch //+kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=batch,resources=jobs,verbs=deletecollection diff --git a/controllers/nodes/deployment_handler.go b/controllers/nodes/deployment_handler.go index a8d7460c..18920df2 100644 --- a/controllers/nodes/deployment_handler.go +++ b/controllers/nodes/deployment_handler.go @@ -38,8 +38,8 @@ func (n *DeploymentHandler) Reconcile(ctx context.Context) (ctrl.Result, error) if err := n.syncCronJob(ctx); err != nil { return ctrl.Result{}, err } - } else if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment { - if err := n.syncDeployment(ctx); err != nil { + } else if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment || n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_DaemonSet { + if err := n.syncDaemonSet(ctx); err != nil { return ctrl.Result{}, err } } @@ -72,18 +72,24 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return err } + // Delete DaemonSet if it exists + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}, + } + if err := k8s.DeleteIfExists(ctx, n.KubeClient, ds); err != nil { + logger.Error(err, "Failed to clean up node scanning DaemonSet", "namespace", ds.Namespace, "name", ds.Name) + return err + } + // Create/update 
CronJobs for nodes for _, node := range nodes.Items { - // Delete Deployment if it exists - dep := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}, - } - if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil { - logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name) + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapNameWithNode(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}} + if err := k8s.DeleteIfExists(ctx, n.KubeClient, cm); err != nil { + logger.Error(err, "Failed to clean up old ConfigMap for node scanning", "namespace", cm.Namespace, "name", cm.Name) return err } - updated, err := n.syncConfigMap(ctx, node, clusterUid) + updated, err := n.syncConfigMap(ctx, clusterUid) if err != nil { return err } @@ -157,7 +163,7 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error { return nil } -func (n *DeploymentHandler) syncDeployment(ctx context.Context) error { +func (n *DeploymentHandler) syncDaemonSet(ctx context.Context) error { mondooClientImage, err := n.ContainerImageResolver.CnspecImage( n.Mondoo.Spec.Scanner.Image.Name, n.Mondoo.Spec.Scanner.Image.Tag, n.MondooOperatorConfig.Spec.SkipContainerResolution) if err != nil { @@ -194,71 +200,66 @@ func (n *DeploymentHandler) syncDeployment(ctx context.Context) error { return err } - updated, err := n.syncConfigMap(ctx, node, clusterUid) + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapNameWithNode(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}} + if err := k8s.DeleteIfExists(ctx, n.KubeClient, cm); err != nil { + logger.Error(err, "Failed to clean up old ConfigMap for node scanning", "namespace", cm.Namespace, "name", cm.Name) + return err + } + + updated, err := n.syncConfigMap(ctx, clusterUid) if err != nil { return err } if updated { logger.Info( - "Inventory ConfigMap was just updated. The deployment will use the new config during the next scheduled run.", + "Inventory ConfigMap was just updated. The daemonset will use the new config during the next scheduled run.", "namespace", n.Mondoo.Namespace, "name", DeploymentName(n.Mondoo.Name, node.Name)) } - dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}} - op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, dep, n.Mondoo, logger, func() error { - UpdateDeployment(dep, node, *n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig) - return nil - }) - if err != nil { - return err - } - - if op == controllerutil.OperationResultCreated { - err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger) - if err != nil { - logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name) - return err + if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment { + dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}} + if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil { + logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name) } - continue } } - // Delete dangling Deployments for nodes that have been deleted from the cluster. 
-    if err := n.cleanupDeploymentsForDeletedNodes(ctx, *nodes); err != nil {
-        return err
-    }
-
-    // List the Deployments again after they have been synced.
-    deployments, err := n.getDeploymentsForAuditConfig(ctx)
+    ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}}
+    op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, ds, n.Mondoo, logger, func() error {
+        UpdateDaemonSet(ds, *n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig)
+        return nil
+    })
     if err != nil {
         return err
     }
-    // Get Pods for these Deployments
-    pods := &corev1.PodList{}
-    if len(deployments) > 0 {
-        opts := &client.ListOptions{
-            Namespace: n.Mondoo.Namespace,
-            LabelSelector: labels.SelectorFromSet(NodeScanningLabels(*n.Mondoo)),
-        }
-        err = n.KubeClient.List(ctx, pods, opts)
+    if op == controllerutil.OperationResultCreated {
+        err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger)
         if err != nil {
-            logger.Error(err, "Failed to list Pods for Node Scanning")
+            logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name)
             return err
         }
     }
-    deploymentsDegraded := false
-    for _, d := range deployments {
-        if d.Status.ReadyReplicas < *d.Spec.Replicas {
-            deploymentsDegraded = true
-            break
-        }
+    if err := n.KubeClient.Get(ctx, client.ObjectKeyFromObject(ds), ds); err != nil {
+        logger.Error(err, "Failed to get DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
     }
-    updateNodeConditions(n.Mondoo, deploymentsDegraded, pods)
+    // Get Pods for the node scanning DaemonSet
+    pods := &corev1.PodList{}
+    opts := &client.ListOptions{
+        Namespace: n.Mondoo.Namespace,
+        LabelSelector: labels.SelectorFromSet(NodeScanningLabels(*n.Mondoo)),
+    }
+    err = n.KubeClient.List(ctx, pods, opts)
+    if err != nil {
+        logger.Error(err, "Failed to list Pods for Node Scanning")
+        return err
+    }
+
+    updateNodeConditions(n.Mondoo, ds.Status.CurrentNumberScheduled < ds.Status.DesiredNumberScheduled, pods)

     if err := n.syncGCCronjob(ctx, mondooOperatorImage, clusterUid); err != nil {
         return err
@@ -269,16 +270,16 @@ func (n *DeploymentHandler) syncDeployment(ctx context.Context) error {
 // syncConfigMap syncs the inventory ConfigMap. Returns a boolean indicating whether the ConfigMap has been updated. It
 // can only be "true", if the ConfigMap existed before this reconcile cycle and the inventory was different from the
 // desired state.
-func (n *DeploymentHandler) syncConfigMap(ctx context.Context, node corev1.Node, clusterUid string) (bool, error) { +func (n *DeploymentHandler) syncConfigMap(ctx context.Context, clusterUid string) (bool, error) { integrationMrn, err := k8s.TryGetIntegrationMrnForAuditConfig(ctx, n.KubeClient, *n.Mondoo) if err != nil { logger.Error(err, "failed to retrieve IntegrationMRN") return false, err } - cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}} + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}} op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, cm, n.Mondoo, logger, func() error { - return UpdateConfigMap(cm, node, integrationMrn, clusterUid, *n.Mondoo) + return UpdateConfigMap(cm, integrationMrn, clusterUid, *n.Mondoo) }) if err != nil { return false, err @@ -315,54 +316,6 @@ func (n *DeploymentHandler) cleanupCronJobsForDeletedNodes(ctx context.Context, return err } logger.Info("Deleted CronJob", "namespace", c.Namespace, "name", c.Name) - - configMap := &corev1.ConfigMap{} - configMap.Name = ConfigMapName(n.Mondoo.Name, c.Spec.JobTemplate.Spec.Template.Spec.NodeName) - configMap.Namespace = n.Mondoo.Namespace - if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil { - logger.Error(err, "Failed to delete ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name) - return err - } - } - return nil -} - -// cleanupDeploymentsForDeletedNodes deletes dangling Deployments for nodes that have been deleted from the cluster. -func (n *DeploymentHandler) cleanupDeploymentsForDeletedNodes(ctx context.Context, currentNodes corev1.NodeList) error { - deployments, err := n.getDeploymentsForAuditConfig(ctx) - if err != nil { - return err - } - - for _, d := range deployments { - // Check if the node for that Deployment is still present in the cluster. - found := false - for _, node := range currentNodes.Items { - if DeploymentName(n.Mondoo.Name, node.Name) == d.Name { - found = true - break - } - } - - // If the node is still there, there is nothing to update. - if found { - continue - } - - // If the node for the Deployment has been deleted from the cluster, the Deployment needs to be deleted. - if err := k8s.DeleteIfExists(ctx, n.KubeClient, &d); err != nil { - logger.Error(err, "Failed to deleted Deployment", "namespace", d.Namespace, "name", d.Name) - return err - } - logger.Info("Deleted Deployment", "namespace", d.Namespace, "name", d.Name) - - configMap := &corev1.ConfigMap{} - configMap.Name = ConfigMapName(n.Mondoo.Name, d.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"]) - configMap.Namespace = n.Mondoo.Namespace - if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil { - logger.Error(err, "Failed to delete ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name) - return err - } } return nil } @@ -389,19 +342,6 @@ func (n *DeploymentHandler) getCronJobsForAuditConfig(ctx context.Context) ([]ba return cronJobs.Items, nil } -func (n *DeploymentHandler) getDeploymentsForAuditConfig(ctx context.Context) ([]appsv1.Deployment, error) { - deps := &appsv1.DeploymentList{} - depLabels := NodeScanningLabels(*n.Mondoo) - - // Lists only the Deployments in the namespace of the MondooAuditConfig and only the ones that exactly match our labels. 
- listOpts := &client.ListOptions{Namespace: n.Mondoo.Namespace, LabelSelector: labels.SelectorFromSet(depLabels)} - if err := n.KubeClient.List(ctx, deps, listOpts); err != nil { - logger.Error(err, "Failed to list Deployments in namespace", "namespace", n.Mondoo.Namespace) - return nil, err - } - return deps.Items, nil -} - func (n *DeploymentHandler) down(ctx context.Context) error { nodes := &corev1.NodeList{} if err := n.KubeClient.List(ctx, nodes); err != nil { @@ -417,22 +357,22 @@ func (n *DeploymentHandler) down(ctx context.Context) error { logger.Error(err, "Failed to clean up node scanning CronJob", "namespace", cronJob.Namespace, "name", cronJob.Name) return err } + } - dep := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}, - } - if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil { - logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name) - return err - } + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}, + } + if err := k8s.DeleteIfExists(ctx, n.KubeClient, ds); err != nil { + logger.Error(err, "Failed to clean up node scanning DaemonSet", "namespace", ds.Namespace, "name", ds.Name) + return err + } - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}, - } - if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil { - logger.Error(err, "Failed to clean up inventory ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name) - return err - } + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}, + } + if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil { + logger.Error(err, "Failed to clean up inventory ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name) + return err } gcCronJob := &batchv1.CronJob{ diff --git a/controllers/nodes/deployment_handler_test.go b/controllers/nodes/deployment_handler_test.go index f4e99d75..b0a1ac67 100644 --- a/controllers/nodes/deployment_handler_test.go +++ b/controllers/nodes/deployment_handler_test.go @@ -65,19 +65,14 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateConfigMap() { s.NoError(err) s.True(result.IsZero()) - nodes := &corev1.NodeList{} - s.NoError(d.KubeClient.List(s.ctx, nodes)) - - for _, node := range nodes.Items { - cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(s.auditConfig.Name, node.Name), Namespace: s.auditConfig.Namespace, - }} - s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) + cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace, + }} + s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, node, "", testClusterUID, s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) - } + cfgMapExpected := cfgMap.DeepCopy() + s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, s.auditConfig)) + s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) } func (s *DeploymentHandlerSuite) TestReconcile_CreateConfigMapWithIntegrationMRN() { @@ -109,19 +104,14 @@ func (s 
*DeploymentHandlerSuite) TestReconcile_CreateConfigMapWithIntegrationMRN s.NoError(err) s.True(result.IsZero()) - nodes := &corev1.NodeList{} - s.NoError(d.KubeClient.List(s.ctx, nodes)) - - for _, node := range nodes.Items { - cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(s.auditConfig.Name, node.Name), Namespace: s.auditConfig.Namespace, - }} - s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) + cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace, + }} + s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, node, testIntegrationMRN, testClusterUID, s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) - } + cfgMapExpected := cfgMap.DeepCopy() + s.Require().NoError(UpdateConfigMap(cfgMapExpected, testIntegrationMRN, testClusterUID, s.auditConfig)) + s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) } func (s *DeploymentHandlerSuite) TestReconcile_UpdateConfigMap() { @@ -133,29 +123,25 @@ func (s *DeploymentHandlerSuite) TestReconcile_UpdateConfigMap() { nodes := &corev1.NodeList{} s.NoError(d.KubeClient.List(s.ctx, nodes)) - for _, node := range nodes.Items { - cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(s.auditConfig.Name, node.Name), Namespace: s.auditConfig.Namespace, - }} - s.Require().NoError(UpdateConfigMap(cfgMap, node, "", testClusterUID, s.auditConfig)) - cfgMap.Data["inventory"] = "" - s.NoError(d.KubeClient.Create(s.ctx, cfgMap)) - } + cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace, + }} + s.Require().NoError(UpdateConfigMap(cfgMap, "", testClusterUID, s.auditConfig)) + cfgMap.Data["inventory"] = "" + s.NoError(d.KubeClient.Create(s.ctx, cfgMap)) result, err := d.Reconcile(s.ctx) s.NoError(err) s.True(result.IsZero()) - for _, node := range nodes.Items { - cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(s.auditConfig.Name, node.Name), Namespace: s.auditConfig.Namespace, - }} - s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) + cfgMap = &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ + Name: ConfigMapName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace, + }} + s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) - cfgMapExpected := cfgMap.DeepCopy() - s.Require().NoError(UpdateConfigMap(cfgMapExpected, node, "", testClusterUID, s.auditConfig)) - s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) - } + cfgMapExpected := cfgMap.DeepCopy() + s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, s.auditConfig)) + s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap)) } func (s *DeploymentHandlerSuite) TestReconcile_CronJob_CleanConfigMapsForDeletedNodes() { @@ -186,19 +172,19 @@ func (s *DeploymentHandlerSuite) TestReconcile_CronJob_CleanConfigMapsForDeleted s.Equal(1, len(configMaps.Items)) cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{ - Name: ConfigMapName(s.auditConfig.Name, nodes.Items[0].Name), Namespace: s.auditConfig.Namespace, + Name: ConfigMapName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace, }} s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap)) cfgMapExpected := 
cfgMap.DeepCopy()
-    s.Require().NoError(UpdateConfigMap(cfgMapExpected, nodes.Items[0], "", testClusterUID, s.auditConfig))
+    s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, s.auditConfig))
     s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap))
 }

 func (s *DeploymentHandlerSuite) TestReconcile_Deployment_CleanConfigMapsForDeletedNodes() {
     s.seedNodes()
     d := s.createDeploymentHandler()
-    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     mondooAuditConfig := &s.auditConfig
     s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
@@ -224,12 +210,12 @@ func (s *DeploymentHandlerSuite) TestReconcile_Deployment_CleanConfigMapsForDele
     s.Equal(1, len(configMaps.Items))

     cfgMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
-        Name: ConfigMapName(s.auditConfig.Name, nodes.Items[0].Name), Namespace: s.auditConfig.Namespace,
+        Name: ConfigMapName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace,
     }}
     s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(cfgMap), cfgMap))

     cfgMapExpected := cfgMap.DeepCopy()
-    s.Require().NoError(UpdateConfigMap(cfgMapExpected, nodes.Items[0], "", testClusterUID, s.auditConfig))
+    s.Require().NoError(UpdateConfigMap(cfgMapExpected, "", testClusterUID, s.auditConfig))
     s.True(equality.Semantic.DeepEqual(cfgMapExpected, cfgMap))
 }
@@ -345,7 +331,7 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateCronJobs_Switch() {
         s.True(equality.Semantic.DeepEqual(cjExpected, cj))
     }

-    mondooAuditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    mondooAuditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     result, err = d.Reconcile(s.ctx)
     s.NoError(err)
     s.True(result.IsZero())
@@ -453,10 +439,10 @@ func (s *DeploymentHandlerSuite) TestReconcile_CleanCronJobsForDeletedNodes() {
     s.True(equality.Semantic.DeepEqual(cjExpected, cj))
 }

-func (s *DeploymentHandlerSuite) TestReconcile_CreateDeployments() {
+func (s *DeploymentHandlerSuite) TestReconcile_CreateDaemonSets() {
     s.seedNodes()
     d := s.createDeploymentHandler()
-    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     mondooAuditConfig := &s.auditConfig
     s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
@@ -471,17 +457,15 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateDeployments() {
         s.auditConfig.Spec.Scanner.Image.Name, s.auditConfig.Spec.Scanner.Image.Tag, false)
     s.NoError(err)

-    for _, n := range nodes.Items {
-        dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(s.auditConfig.Name, n.Name), Namespace: s.auditConfig.Namespace}}
-        s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(dep), dep))
+    ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}}
+    s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(ds), ds))

-        depExpected := dep.DeepCopy()
-        UpdateDeployment(depExpected, n, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{})
-        // Make sure the env vars for both are sorted
-        utils.SortEnvVars(depExpected.Spec.Template.Spec.Containers[0].Env)
-        utils.SortEnvVars(dep.Spec.Template.Spec.Containers[0].Env)
-        s.True(equality.Semantic.DeepEqual(depExpected, dep))
-    }
+    dsExpected :=
ds.DeepCopy()
+    UpdateDaemonSet(dsExpected, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{})
+    // Make sure the env vars for both are sorted
+    utils.SortEnvVars(dsExpected.Spec.Template.Spec.Containers[0].Env)
+    utils.SortEnvVars(ds.Spec.Template.Spec.Containers[0].Env)
+    s.True(equality.Semantic.DeepEqual(dsExpected, ds))

     operatorImage, err := s.containerImageResolver.MondooOperatorImage(s.ctx, "", "", false)
     s.NoError(err)
@@ -495,10 +479,10 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateDeployments() {
     s.True(equality.Semantic.DeepEqual(gcCjExpected, gcCj))
 }

-func (s *DeploymentHandlerSuite) TestReconcile_CreateDeployments_Switch() {
+func (s *DeploymentHandlerSuite) TestReconcile_CreateDaemonSets_Switch() {
     s.seedNodes()
     d := s.createDeploymentHandler()
-    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     mondooAuditConfig := &s.auditConfig
     s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
@@ -513,14 +497,12 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateDeployments_Switch() {
         s.auditConfig.Spec.Scanner.Image.Name, s.auditConfig.Spec.Scanner.Image.Tag, false)
     s.NoError(err)

-    for _, n := range nodes.Items {
-        dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(s.auditConfig.Name, n.Name), Namespace: s.auditConfig.Namespace}}
-        s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(dep), dep))
+    ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}}
+    s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(ds), ds))

-        depExpected := dep.DeepCopy()
-        UpdateDeployment(depExpected, n, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{})
-        s.True(equality.Semantic.DeepEqual(depExpected, dep))
-    }
+    dsExpected := ds.DeepCopy()
+    UpdateDaemonSet(dsExpected, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{})
+    s.True(equality.Semantic.DeepEqual(dsExpected, ds))

     mondooAuditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_CronJob
     result, err = d.Reconcile(s.ctx)
@@ -548,10 +530,10 @@ func (s *DeploymentHandlerSuite) TestReconcile_CreateDeployments_Switch() {
     s.True(equality.Semantic.DeepEqual(gcCjExpected, gcCj))
 }

-func (s *DeploymentHandlerSuite) TestReconcile_UpdateDeployments() {
+func (s *DeploymentHandlerSuite) TestReconcile_UpdateDaemonSets() {
     s.seedNodes()
     d := s.createDeploymentHandler()
-    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     mondooAuditConfig := &s.auditConfig
     s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
@@ -562,69 +544,22 @@ func (s *DeploymentHandlerSuite) TestReconcile_UpdateDeployments() {
         s.auditConfig.Spec.Scanner.Image.Name, s.auditConfig.Spec.Scanner.Image.Tag, false)
     s.NoError(err)

-    // Make sure a deployment exists for one of the nodes
-    dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(s.auditConfig.Name, nodes.Items[1].Name), Namespace: s.auditConfig.Namespace}}
-    UpdateDeployment(dep, nodes.Items[1], s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{})
-    dep.Spec.Template.Spec.Containers[0].Command = []string{"test-command"}
-    s.NoError(d.KubeClient.Create(s.ctx, dep))
+    // Make sure a daemonset exists
+    ds := &appsv1.DaemonSet{ObjectMeta:
metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}} + UpdateDaemonSet(ds, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}) + ds.Spec.Template.Spec.Containers[0].Command = []string{"test-command"} + s.NoError(d.KubeClient.Create(s.ctx, ds)) result, err := d.Reconcile(s.ctx) s.NoError(err) s.True(result.IsZero()) - for _, n := range nodes.Items { - dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(s.auditConfig.Name, n.Name), Namespace: s.auditConfig.Namespace}} - s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(dep), dep)) + ds = &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}} + s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(ds), ds)) - depExpected := dep.DeepCopy() - UpdateDeployment(depExpected, n, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}) - s.True(equality.Semantic.DeepEqual(depExpected, dep)) - } -} - -func (s *DeploymentHandlerSuite) TestReconcile_CleanDeploymentsForDeletedNodes() { - s.seedNodes() - d := s.createDeploymentHandler() - s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment - mondooAuditConfig := &s.auditConfig - s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig)) - - // Reconcile to create the initial cron jobs - result, err := d.Reconcile(s.ctx) - s.NoError(err) - s.True(result.IsZero()) - - nodes := &corev1.NodeList{} - s.NoError(d.KubeClient.List(s.ctx, nodes)) - - // Delete one node - s.NoError(d.KubeClient.Delete(s.ctx, &nodes.Items[1])) - - // Reconcile again to delete the cron job for the deleted node - result, err = d.Reconcile(s.ctx) - s.NoError(err) - s.True(result.IsZero()) - - image, err := s.containerImageResolver.CnspecImage( - s.auditConfig.Spec.Scanner.Image.Name, s.auditConfig.Spec.Scanner.Image.Tag, false) - s.NoError(err) - - listOpts := &client.ListOptions{ - Namespace: s.auditConfig.Namespace, - LabelSelector: labels.SelectorFromSet(NodeScanningLabels(s.auditConfig)), - } - deployments := &appsv1.DeploymentList{} - s.NoError(d.KubeClient.List(s.ctx, deployments, listOpts)) - - s.Equal(1, len(deployments.Items)) - - dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(s.auditConfig.Name, nodes.Items[0].Name), Namespace: s.auditConfig.Namespace}} - UpdateDeployment(dep, nodes.Items[0], s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}) - s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(dep), dep)) - - depExpected := dep.DeepCopy() - UpdateDeployment(depExpected, nodes.Items[0], s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}) - s.True(equality.Semantic.DeepEqual(depExpected, dep)) + depExpected := ds.DeepCopy() + UpdateDaemonSet(depExpected, s.auditConfig, false, image, v1alpha2.MondooOperatorConfig{}) + s.True(equality.Semantic.DeepEqual(depExpected, ds)) } func (s *DeploymentHandlerSuite) TestReconcile_CronJob_NodeScanningStatus() { @@ -701,79 +636,6 @@ func (s *DeploymentHandlerSuite) TestReconcile_CronJob_NodeScanningStatus() { s.Equal(corev1.ConditionFalse, condition.Status) } -func (s *DeploymentHandlerSuite) TestReconcile_Deployment_NodeScanningStatus() { - s.seedNodes() - d := s.createDeploymentHandler() - s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment - mondooAuditConfig := &s.auditConfig - s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig)) - - // Reconcile to create all resources - result, err := d.Reconcile(s.ctx) - s.NoError(err) - 
s.True(result.IsZero())
-
-    // Verify the node scanning status is set to available
-    s.Equal(1, len(d.Mondoo.Status.Conditions))
-    condition := d.Mondoo.Status.Conditions[0]
-    s.Equal("Node Scanning is unavailable", condition.Message)
-    s.Equal("NodeScanningUnavailable", condition.Reason)
-    s.Equal(corev1.ConditionTrue, condition.Status)
-
-    listOpts := &client.ListOptions{
-        Namespace: s.auditConfig.Namespace,
-        LabelSelector: labels.SelectorFromSet(NodeScanningLabels(s.auditConfig)),
-    }
-    deployments := &appsv1.DeploymentList{}
-    s.NoError(d.KubeClient.List(s.ctx, deployments, listOpts))
-
-    // Make sure all deployments are ready
-    deployments.Items[0].Status.ReadyReplicas = 1
-    s.NoError(d.KubeClient.Status().Update(s.ctx, &deployments.Items[0]))
-    deployments.Items[1].Status.ReadyReplicas = 1
-    s.NoError(d.KubeClient.Status().Update(s.ctx, &deployments.Items[1]))
-
-    // Reconcile to update the audit config status
-    result, err = d.Reconcile(s.ctx)
-    s.NoError(err)
-    s.True(result.IsZero())
-
-    // Verify the node scanning status is set to unavailable
-    condition = d.Mondoo.Status.Conditions[0]
-    s.Equal("Node Scanning is available", condition.Message)
-    s.Equal("NodeScanningAvailable", condition.Reason)
-    s.Equal(corev1.ConditionFalse, condition.Status)
-
-    // // Make a deployment fail again
-    s.NoError(d.KubeClient.List(s.ctx, deployments, listOpts))
-    deployments.Items[0].Status.ReadyReplicas = 0
-    s.NoError(d.KubeClient.Status().Update(s.ctx, &deployments.Items[0]))
-
-    // Reconcile to update the audit config status
-    result, err = d.Reconcile(s.ctx)
-    s.NoError(err)
-    s.True(result.IsZero())
-
-    // Verify the node scanning status is set to available
-    condition = d.Mondoo.Status.Conditions[0]
-    s.Equal("Node Scanning is unavailable", condition.Message)
-    s.Equal("NodeScanningUnavailable", condition.Reason)
-    s.Equal(corev1.ConditionTrue, condition.Status)
-
-    d.Mondoo.Spec.Nodes.Enable = false
-
-    // Reconcile to update the audit config status
-    result, err = d.Reconcile(s.ctx)
-    s.NoError(err)
-    s.True(result.IsZero())
-
-    // Verify the node scanning status is set to disabled
-    condition = d.Mondoo.Status.Conditions[0]
-    s.Equal("Node Scanning is disabled", condition.Message)
-    s.Equal("NodeScanningDisabled", condition.Reason)
-    s.Equal(corev1.ConditionFalse, condition.Status)
-}
-
 func (s *DeploymentHandlerSuite) TestReconcile_NodeScanningOOMStatus() {
     s.seedNodes()
     d := s.createDeploymentHandler()
@@ -927,7 +789,7 @@ func (s *DeploymentHandlerSuite) TestReconcile_CronJob_CustomSchedule() {
 func (s *DeploymentHandlerSuite) TestReconcile_Deployment_CustomInterval() {
     s.seedNodes()
     d := s.createDeploymentHandler()
-    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    s.auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     mondooAuditConfig := &s.auditConfig
     s.NoError(d.KubeClient.Create(s.ctx, mondooAuditConfig))
@@ -940,11 +802,11 @@ func (s *DeploymentHandlerSuite) TestReconcile_Deployment_CustomInterval() {
     nodes := &corev1.NodeList{}
     s.NoError(d.KubeClient.List(s.ctx, nodes))

-    dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(s.auditConfig.Name, nodes.Items[0].Name), Namespace: s.auditConfig.Namespace}}
-    s.NoError(d.KubeClient.Get(s.ctx, client.ObjectKeyFromObject(dep), dep))
+    ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(s.auditConfig.Name), Namespace: s.auditConfig.Namespace}}
+    s.NoError(d.KubeClient.Get(s.ctx,
client.ObjectKeyFromObject(ds), ds)) - s.Contains(dep.Spec.Template.Spec.Containers[0].Command, "--timer") - s.Contains(dep.Spec.Template.Spec.Containers[0].Command, fmt.Sprintf("%d", s.auditConfig.Spec.Nodes.IntervalTimer)) + s.Contains(ds.Spec.Template.Spec.Containers[0].Command, "--timer") + s.Contains(ds.Spec.Template.Spec.Containers[0].Command, fmt.Sprintf("%d", s.auditConfig.Spec.Nodes.IntervalTimer)) } func (s *DeploymentHandlerSuite) createDeploymentHandler() DeploymentHandler { diff --git a/controllers/nodes/resources.go b/controllers/nodes/resources.go index 4eb2da7b..f48feb77 100644 --- a/controllers/nodes/resources.go +++ b/controllers/nodes/resources.go @@ -28,10 +28,12 @@ import ( ) const ( - CronJobNameBase = "-node-" - DeploymentNameBase = "-node-" - GarbageCollectCronJobNameBase = "-node-gc" - InventoryConfigMapBase = "-node-inventory-" + CronJobNameBase = "-node-" + DeploymentNameBase = "-node-" + DaemonSetNameBase = "-node" + GarbageCollectCronJobNameBase = "-node-gc" + InventoryConfigMapBase = "-node-inventory" + InventoryConfigMapWithNodeBase = "-node-inventory-" ignoreQueryAnnotationPrefix = "policies.k8s.mondoo.com/" @@ -43,7 +45,7 @@ func UpdateCronJob(cj *batchv1.CronJob, image string, node corev1.Node, m *v1alp cmd := []string{ "cnspec", "scan", "local", "--config", "/etc/opt/mondoo/mondoo.yml", - "--inventory-file", "/etc/opt/mondoo/inventory.yml", + "--inventory-template", "/etc/opt/mondoo/inventory_template.yml", "--score-threshold", "0", } @@ -122,6 +124,10 @@ func UpdateCronJob(cj *batchv1.CronJob, image string, node corev1.Node, m *v1alp Name: "MONDOO_AUTO_UPDATE", Value: "false", }, + { + Name: "NODE_NAME", + Value: node.Name, + }, }, m.Spec.Nodes.Env), TerminationMessagePath: "/dev/termination-log", TerminationMessagePolicy: corev1.TerminationMessageReadFile, @@ -143,10 +149,10 @@ func UpdateCronJob(cj *batchv1.CronJob, image string, node corev1.Node, m *v1alp Sources: []corev1.VolumeProjection{ { ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name, node.Name)}, + LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name)}, Items: []corev1.KeyToPath{{ Key: "inventory", - Path: "mondoo/inventory.yml", + Path: "mondoo/inventory_template.yml", }}, }, }, @@ -172,9 +178,8 @@ func UpdateCronJob(cj *batchv1.CronJob, image string, node corev1.Node, m *v1alp } } -func UpdateDeployment( - dep *appsv1.Deployment, - node corev1.Node, +func UpdateDaemonSet( + ds *appsv1.DaemonSet, m v1alpha2.MondooAuditConfig, isOpenshift bool, image string, @@ -184,36 +189,31 @@ func UpdateDeployment( cmd := []string{ "cnspec", "serve", "--config", "/etc/opt/mondoo/mondoo.yml", - "--inventory-file", "/etc/opt/mondoo/inventory.yml", + "--inventory-template", "/etc/opt/mondoo/inventory_template.yml", "--timer", fmt.Sprintf("%d", m.Spec.Nodes.IntervalTimer), } if cfg.Spec.HttpProxy != nil { cmd = append(cmd, []string{"--api-proxy", *cfg.Spec.HttpProxy}...) 
} - dep.Labels = labels - if dep.Annotations == nil { - dep.Annotations = map[string]string{} + ds.Labels = labels + if ds.Annotations == nil { + ds.Annotations = map[string]string{} } - dep.Annotations[ignoreQueryAnnotationPrefix+"mondoo-kubernetes-security-deployment-runasnonroot"] = ignoreAnnotationValue - dep.Spec.Replicas = ptr.To(int32(1)) - dep.Spec.Selector = &metav1.LabelSelector{ + ds.Annotations[ignoreQueryAnnotationPrefix+"mondoo-kubernetes-security-deployment-runasnonroot"] = ignoreAnnotationValue + ds.Spec.Selector = &metav1.LabelSelector{ MatchLabels: labels, } - dep.Spec.Template.Labels = labels - if dep.Spec.Template.Annotations == nil { - dep.Spec.Template.Annotations = map[string]string{} - } - dep.Spec.Template.Annotations[ignoreQueryAnnotationPrefix+"mondoo-kubernetes-security-pod-runasnonroot"] = ignoreAnnotationValue - dep.Spec.Template.Spec.PriorityClassName = m.Spec.Nodes.PriorityClassName - dep.Spec.Template.Spec.NodeSelector = map[string]string{ - "kubernetes.io/hostname": node.Name, + ds.Spec.Template.Labels = labels + if ds.Spec.Template.Annotations == nil { + ds.Spec.Template.Annotations = map[string]string{} } - dep.Spec.Template.Spec.Tolerations = k8s.TaintsToTolerations(node.Spec.Taints) + ds.Spec.Template.Annotations[ignoreQueryAnnotationPrefix+"mondoo-kubernetes-security-pod-runasnonroot"] = ignoreAnnotationValue + ds.Spec.Template.Spec.PriorityClassName = m.Spec.Nodes.PriorityClassName // The node scanning does not use the Kubernetes API at all, therefore the service account token // should not be mounted at all. - dep.Spec.Template.Spec.AutomountServiceAccountToken = ptr.To(false) - dep.Spec.Template.Spec.Containers = []corev1.Container{ + ds.Spec.Template.Spec.AutomountServiceAccountToken = ptr.To(false) + ds.Spec.Template.Spec.Containers = []corev1.Container{ { Image: image, Name: "cnspec", @@ -265,10 +265,18 @@ func UpdateDeployment( Name: "MONDOO_AUTO_UPDATE", Value: "false", }, + { + Name: "NODE_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "spec.nodeName", + }, + }, + }, }, m.Spec.Nodes.Env), }, } - dep.Spec.Template.Spec.Volumes = []corev1.Volume{ + ds.Spec.Template.Spec.Volumes = []corev1.Volume{ { Name: "root", VolumeSource: corev1.VolumeSource{ @@ -283,10 +291,10 @@ func UpdateDeployment( Sources: []corev1.VolumeProjection{ { ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name, node.Name)}, + LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName(m.Name)}, Items: []corev1.KeyToPath{{ Key: "inventory", - Path: "mondoo/inventory.yml", + Path: "mondoo/inventory_template.yml", }}, }, }, @@ -398,8 +406,8 @@ func UpdateGarbageCollectCronJob(cj *batchv1.CronJob, image, clusterUid string, } } -func UpdateConfigMap(cm *corev1.ConfigMap, node corev1.Node, integrationMRN, clusterUID string, m v1alpha2.MondooAuditConfig) error { - inv, err := Inventory(node, integrationMRN, clusterUID, m) +func UpdateConfigMap(cm *corev1.ConfigMap, integrationMRN, clusterUID string, m v1alpha2.MondooAuditConfig) error { + inv, err := Inventory(integrationMRN, clusterUID, m) if err != nil { return err } @@ -423,16 +431,24 @@ func DeploymentName(prefix, suffix string) string { return fmt.Sprintf("%s%s", base, NodeNameOrHash(k8s.ResourceNameMaxLength-len(base), suffix)) } +func DaemonSetName(prefix string) string { + return fmt.Sprintf("%s%s", prefix, DaemonSetNameBase) +} + func GarbageCollectCronJobName(prefix string) string { return 
fmt.Sprintf("%s%s", prefix, GarbageCollectCronJobNameBase) } -func ConfigMapName(prefix, nodeName string) string { - base := fmt.Sprintf("%s%s", prefix, InventoryConfigMapBase) +func ConfigMapName(prefix string) string { + return fmt.Sprintf("%s%s", prefix, InventoryConfigMapBase) +} + +func ConfigMapNameWithNode(prefix, nodeName string) string { + base := fmt.Sprintf("%s%s", prefix, InventoryConfigMapWithNodeBase) return fmt.Sprintf("%s%s", base, NodeNameOrHash(k8s.ResourceNameMaxLength-len(base), nodeName)) } -func Inventory(node corev1.Node, integrationMRN, clusterUID string, m v1alpha2.MondooAuditConfig) (string, error) { +func Inventory(integrationMRN, clusterUID string, m v1alpha2.MondooAuditConfig) (string, error) { inv := &inventory.Inventory{ Metadata: &inventory.ObjectMeta{ Name: "mondoo-node-inventory", @@ -444,12 +460,12 @@ func Inventory(node corev1.Node, integrationMRN, clusterUID string, m v1alpha2.M Assets: []*inventory.Asset{ { Id: "host", - Name: node.Name, + Name: `{{ getenv "NODE_NAME" }}`, Connections: []*inventory.Config{ { Type: "filesystem", Host: "/mnt/host", - PlatformId: fmt.Sprintf("//platformid.api.mondoo.app/runtime/k8s/uid/%s/node/%s", clusterUID, node.UID), + PlatformId: fmt.Sprintf(`{{ printf "//platformid.api.mondoo.app/runtime/k8s/uid/%%s/node/%%s" "%s" (getenv "NODE_NAME")}}`, clusterUID), }, }, Labels: map[string]string{ diff --git a/controllers/nodes/resources_test.go b/controllers/nodes/resources_test.go index 4bbcd939..e9ec4e8b 100644 --- a/controllers/nodes/resources_test.go +++ b/controllers/nodes/resources_test.go @@ -64,40 +64,6 @@ func TestGarbageCollectCronJobName(t *testing.T) { assert.Equal(t, fmt.Sprintf("%s%s", prefix, GarbageCollectCronJobNameBase), GarbageCollectCronJobName(prefix)) } -func TestConfigMapName(t *testing.T) { - prefix := "mondoo-client" - tests := []struct { - name string - data func() (suffix, expected string) - }{ - { - name: "should be prefix+base+suffix when shorter than 52 chars", - data: func() (suffix, expected string) { - base := fmt.Sprintf("%s%s", prefix, InventoryConfigMapBase) - suffix = utils.RandString(k8s.ResourceNameMaxLength - len(base)) - return suffix, fmt.Sprintf("%s%s", base, suffix) - }, - }, - { - name: "should be prefix+base+hash when longer than 52 chars", - data: func() (suffix, expected string) { - base := fmt.Sprintf("%s%s", prefix, InventoryConfigMapBase) - suffix = utils.RandString(53 - len(base)) - - hash := fmt.Sprintf("%x", sha256.Sum256([]byte(suffix))) - return suffix, fmt.Sprintf("%s%s", base, hash[:k8s.ResourceNameMaxLength-len(base)]) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - suffix, expected := test.data() - assert.Equal(t, expected, ConfigMapName(prefix, suffix)) - }) - } -} - func TestResources(t *testing.T) { tests := []struct { name string @@ -183,18 +149,15 @@ func TestCronJob_Privileged(t *testing.T) { } func TestInventory(t *testing.T) { - randName := utils.RandString(10) auditConfig := v1alpha2.MondooAuditConfig{ObjectMeta: metav1.ObjectMeta{Name: "mondoo-client"}} - inventory, err := Inventory(corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: randName}}, "", testClusterUID, auditConfig) + inventory, err := Inventory("", testClusterUID, auditConfig) assert.NoError(t, err, "unexpected error generating inventory") - assert.Contains(t, inventory, randName) assert.NotContains(t, inventory, constants.MondooAssetsIntegrationLabel) const integrationMRN = "//test-MRN" - inventory, err = Inventory(corev1.Node{ObjectMeta: 
metav1.ObjectMeta{Name: randName}}, integrationMRN, testClusterUID, auditConfig) + inventory, err = Inventory(integrationMRN, testClusterUID, auditConfig) assert.NoError(t, err, "unexpected error generating inventory") - assert.Contains(t, inventory, randName) assert.Contains(t, inventory, constants.MondooAssetsIntegrationLabel) assert.Contains(t, inventory, integrationMRN) } diff --git a/tests/framework/utils/asset.go b/tests/framework/utils/asset.go index 2993e129..3c37adee 100644 --- a/tests/framework/utils/asset.go +++ b/tests/framework/utils/asset.go @@ -14,10 +14,10 @@ import ( v1 "k8s.io/api/core/v1" ) -func ExcludeClusterAsset(as []assets.AssetWithScore) []assets.AssetWithScore { +func ExcludeNonDetermenisticAssets(as []assets.AssetWithScore) []assets.AssetWithScore { var newAssets []assets.AssetWithScore for _, asset := range as { - if asset.AssetType != "k8s.cluster" { + if asset.AssetType != "k8s.cluster" && asset.AssetType != "k8s.service" { newAssets = append(newAssets, asset) } } diff --git a/tests/framework/utils/k8s_helper.go b/tests/framework/utils/k8s_helper.go index 82edf969..ef9663e8 100644 --- a/tests/framework/utils/k8s_helper.go +++ b/tests/framework/utils/k8s_helper.go @@ -391,6 +391,22 @@ func (k8sh *K8sHelper) UpdateDeploymentWithRetries(ctx context.Context, listOpts }) } +func (k8sh *K8sHelper) UpdateDaemonSetWithRetries(ctx context.Context, key types.NamespacedName, update func(*appsv1.DaemonSet)) error { + ds := &appsv1.DaemonSet{} + return k8sh.ExecuteWithRetries(func() (bool, error) { + if err := k8sh.Clientset.Get(ctx, key, ds); err != nil { + return false, err + } + + // update the daemonset + update(ds) + if err := k8sh.Clientset.Update(ctx, ds); err != nil { + return false, nil // retry + } + return true, nil + }) +} + func (k8sh *K8sHelper) ExecuteWithRetries(f func() (bool, error)) error { for i := 0; i < RetryLoop; i++ { success, err := f() diff --git a/tests/integration/audit_config_base_suite.go b/tests/integration/audit_config_base_suite.go index b851de40..71928dbf 100644 --- a/tests/integration/audit_config_base_suite.go +++ b/tests/integration/audit_config_base_suite.go @@ -220,10 +220,20 @@ func (s *AuditConfigBaseSuite) testMondooAuditConfigKubernetesResources(auditCon zap.S().Info("number of assets from upstream: ", len(assets)) // TODO: the cluster name is non-deterministic currently so we cannot test for it - assetsExceptCluster := utils.ExcludeClusterAsset(assets) - s.Equalf(len(assets)-1, len(assetsExceptCluster), "Cluster asset was sent upstream.") + nonDetermenisticAssets := utils.ExcludeNonDetermenisticAssets(assets) - assetNames := utils.AssetNames(assetsExceptCluster) + // TODO: this number should exclude services and the cluster asset + srvs := &corev1.ServiceList{} + err = s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { + if err := s.testCluster.K8sHelper.Clientset.List(s.ctx, srvs); err != nil { + return false, nil + } + return true, nil + }) + s.NoError(err, "Failed to list Kubernetes Services") + s.Equalf(len(assets)-1-len(srvs.Items), len(nonDetermenisticAssets), "Cluster and/or Services assets were sent upstream.") + + assetNames := utils.AssetNames(nonDetermenisticAssets) s.ElementsMatchf(workloadNames, assetNames, "Workloads were not sent upstream.") s.AssetsNotUnscored(assets) @@ -436,7 +446,7 @@ func (s *AuditConfigBaseSuite) testMondooAuditConfigNodesCronjobs(auditConfig mo s.Equal("ACTIVE", status) } -func (s *AuditConfigBaseSuite) testMondooAuditConfigNodesDeployments(auditConfig 
mondoov2.MondooAuditConfig) { +func (s *AuditConfigBaseSuite) testMondooAuditConfigNodesDaemonSets(auditConfig mondoov2.MondooAuditConfig) { s.auditConfig = auditConfig // Disable container image resolution to be able to run the k8s resources scan CronJob with a local image. @@ -450,39 +460,20 @@ func (s *AuditConfigBaseSuite) testMondooAuditConfigNodesDeployments(auditConfig s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created") - zap.S().Info("Verify the nodes scanning deployments are created.") - - deployments := &appsv1.DeploymentList{} - lbls := nodes.NodeScanningLabels(auditConfig) - - // List only the Deployments in the namespace of the MondooAuditConfig and only the ones that exactly match our labels. - listOpts := &client.ListOptions{Namespace: auditConfig.Namespace, LabelSelector: labels.SelectorFromSet(lbls)} + zap.S().Info("Verify the nodes scanning daemonset is created.") nodeList := &corev1.NodeList{} s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, nodeList)) - // Verify the amount of Deployments created is equal to the amount of nodes + // Verify DaemonSet is created + ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: nodes.DaemonSetName(auditConfig.Name), Namespace: auditConfig.Namespace}} err := s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) { - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, listOpts)) - if len(nodeList.Items) == len(deployments.Items) { - return true, nil + if err := s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(ds), ds); err != nil { + return false, nil } - return false, nil + return true, nil }) - s.NoErrorf( - err, - "The amount of node scanning Deployments is not equal to the amount of cluster nodes. expected: %d; actual: %d", - len(nodeList.Items), len(deployments.Items)) - - for _, d := range deployments.Items { - found := false - for _, n := range nodeList.Items { - if n.Name == d.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"] { - found = true - } - } - s.Truef(found, "Deployment %s/%s does not have a corresponding cluster node.", d.Namespace, d.Name) - } + s.NoError(err, "DaemonSet was not created.") // Verify the garbage collect cron job gcCronJobs := &batchv1.CronJobList{} @@ -550,11 +541,9 @@ func (s *AuditConfigBaseSuite) testMondooAuditConfigNodesDeployments(auditConfig s.NoError(err, "Failed to get status") s.Equal("ACTIVE", status) - // Verify that the node scanning deployments aren't constantly updating - s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, listOpts)) - for _, d := range deployments.Items { - s.Less(d.Generation, int64(10)) - } + // Verify that the node scanning daemonset isn't constantly updating + s.NoError(s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(ds), ds)) + s.Less(ds.Generation, int64(10)) } func (s *AuditConfigBaseSuite) testMondooAuditConfigAdmission(auditConfig mondoov2.MondooAuditConfig) { @@ -1253,7 +1242,7 @@ func (s *AuditConfigBaseSuite) AssetsNotUnscored(assets []assets.AssetWithScore) for _, asset := range assets { // We don't score scratch containers at the moment so they are always unscored. // We don't have policies for a cluster asset enabled at the moment so they are always unscored. 
-        if asset.Platform.Name != "scratch" && asset.Platform.Name != "k8s-cluster" && asset.Platform.Name != "k8s-namespace" {
+        if asset.Platform.Name != "scratch" && asset.Platform.Name != "k8s-cluster" && asset.Platform.Name != "k8s-namespace" && asset.Platform.Name != "k8s-service" {
             if asset.Grade == "U" || asset.Grade == "" {
                 zap.S().Infof("Asset %s has no score", asset.Name)
             }
diff --git a/tests/integration/audit_config_namespace_test.go b/tests/integration/audit_config_namespace_test.go
index 9bb0eff0..f17ea94e 100644
--- a/tests/integration/audit_config_namespace_test.go
+++ b/tests/integration/audit_config_namespace_test.go
@@ -102,12 +102,12 @@ func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Nodes_CronJobs() {
     s.testMondooAuditConfigNodesCronjobs(auditConfig)
 }

-func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Nodes_Deployments() {
+func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Nodes_DaemonSet() {
     auditConfig := utils.DefaultAuditConfigMinimal(s.ns.Name, false, false, true, false)
-    auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     auditConfig.Spec.Nodes.IntervalTimer = 1
     auditConfig.Spec.Scanner.ServiceAccountName = s.sa.Name
-    s.testMondooAuditConfigNodesDeployments(auditConfig)
+    s.testMondooAuditConfigNodesDaemonSets(auditConfig)
 }

 func (s *AuditConfigCustomNamespaceSuite) TestReconcile_Admission() {
diff --git a/tests/integration/audit_config_oom_test.go b/tests/integration/audit_config_oom_test.go
index cf6e8247..f7801578 100644
--- a/tests/integration/audit_config_oom_test.go
+++ b/tests/integration/audit_config_oom_test.go
@@ -18,6 +18,7 @@ import (
     batchv1 "k8s.io/api/batch/v1"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -277,67 +278,56 @@ func (s *AuditConfigOOMSuite) TestOOMNodeScan_CronJob() {
     s.Equal("ACTIVE", status)
 }

-func (s *AuditConfigOOMSuite) TestOOMNodeScan_Deployment() {
+func (s *AuditConfigOOMSuite) TestOOMNodeScan_DaemonSet() {
     auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, true, false)
-    auditConfig.Spec.Nodes.Style = mondoov2.NodeScanStyle_Deployment
+    auditConfig.Spec.Nodes.Style = mondoov2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     s.auditConfig = auditConfig

     auditConfig.Spec.Nodes.Resources.Limits = corev1.ResourceList{
-        corev1.ResourceMemory: resource.MustParse("10Mi"), // this should be low enough to trigger an OOMkilled
+        corev1.ResourceMemory: resource.MustParse("200Mi"),
     }

     // Disable container image resolution to be able to run the k8s resources scan CronJob with a local image.
     cleanup := s.disableContainerImageResolution()
     defer cleanup()

-    zap.S().Info("Create an audit config that enables only nodes scanning.
(with reduced memory limit)")
+    zap.S().Info("Create an audit config that enables only nodes scanning.")
     s.NoErrorf(
         s.testCluster.K8sHelper.Clientset.Create(s.ctx, &auditConfig),
         "Failed to create Mondoo audit config.")

     s.Require().True(s.testCluster.K8sHelper.WaitUntilMondooClientSecretExists(s.ctx, s.auditConfig.Namespace), "Mondoo SA not created")

-    deployments := &appsv1.DeploymentList{}
-    lbls := nodes.NodeScanningLabels(auditConfig)
-
-    // List only the Deployments in the namespace of the MondooAuditConfig and only the ones that exactly match our labels.
-    listOpts := &client.ListOptions{Namespace: auditConfig.Namespace, LabelSelector: labels.SelectorFromSet(lbls)}
-
-    nodeList := &corev1.NodeList{}
-    s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, nodeList))
+    ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: nodes.DaemonSetName(auditConfig.Name), Namespace: auditConfig.Namespace}}

-    // Verify the amount of Deployments created is equal to the amount of nodes
+    // Verify that DaemonSet was created
     err := s.testCluster.K8sHelper.ExecuteWithRetries(func() (bool, error) {
-        s.NoError(s.testCluster.K8sHelper.Clientset.List(s.ctx, deployments, listOpts))
-        if len(nodeList.Items) == len(deployments.Items) {
-            return true, nil
+        if err := s.testCluster.K8sHelper.Clientset.Get(s.ctx, client.ObjectKeyFromObject(ds), ds); err != nil {
+            return false, nil
         }
-        return false, nil
+        return true, nil
     })
-    s.NoErrorf(
-        err,
-        "The amount of node scanning Deployments is not equal to the amount of cluster nodes. expected: %d; actual: %d",
-        len(nodeList.Items), len(deployments.Items))
+    s.NoError(err, "DaemonSet was not created.")

-    // This will take some time, because:
-    // reconcile needs to happen
-    // a new replicaset should be created
-    // the first Pod tries to start and gets killed
-    // on the 2nd start we should get an OOMkilled status update
-    err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.NodeScanningDegraded, corev1.ConditionTrue, "OOM")
-    s.Require().NoError(err, "Failed to find degraded condition")
+    // Give the integration a chance to update
+    time.Sleep(20 * time.Second)

-    foundMondooAuditConfig, err := s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace)
-    s.NoError(err, "Failed to find MondooAuditConfig")
-    cond := mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.NodeScanningDegraded)
-    s.Require().NotNil(cond)
-    s.Containsf(cond.Message, "OOM", "Failed to find OOMKilled message in degraded condition")
-    s.Len(cond.AffectedPods, 1, "Failed to find only one pod in degraded condition")
+    status, err := s.integration.GetStatus(s.ctx)
+    s.NoError(err, "Failed to get status")
+    s.Equal("ACTIVE", status)
+
+    zap.S().Info("Decreasing memory limit to trigger an OOMKilled node scan.")
+    err = s.testCluster.K8sHelper.UpdateAuditConfigWithRetries(auditConfig.Name, auditConfig.Namespace, func(config *mondoov2.MondooAuditConfig) {
+        config.Spec.Nodes.Resources.Limits = corev1.ResourceList{
+            corev1.ResourceMemory: resource.MustParse("10Mi"), // this should be low enough to trigger an OOMkilled
+        }
+    })
+    s.Require().NoError(err)

     // Give the integration a chance to update
-    time.Sleep(2 * time.Second)
+    time.Sleep(20 * time.Second)

-    status, err := s.integration.GetStatus(s.ctx)
+    status, err = s.integration.GetStatus(s.ctx)
     s.NoError(err, "Failed to get status")
     s.Equal("ERROR", status)

@@ -346,7 +336,6 @@ func (s *AuditConfigOOMSuite) TestOOMNodeScan_Deployment() {
config.Spec.Nodes.Resources.Limits = corev1.ResourceList{
             corev1.ResourceMemory: resource.MustParse("200Mi"), // this should be enough to get the ScanAPI running again
         }
-        foundMondooAuditConfig.Spec.Nodes.IntervalTimer = 1
     })
     s.Require().NoError(err)
@@ -355,15 +344,13 @@ func (s *AuditConfigOOMSuite) TestOOMNodeScan_Deployment() {
     err = s.testCluster.K8sHelper.CheckForDegradedCondition(&auditConfig, mondoov2.NodeScanningDegraded, corev1.ConditionFalse, "")
     s.Require().NoError(err, "Failed to find degraded condition")

-    foundMondooAuditConfig, err = s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace)
+    foundMondooAuditConfig, err := s.testCluster.K8sHelper.GetMondooAuditConfigFromCluster(auditConfig.Name, auditConfig.Namespace)
     s.NoError(err, "Failed to find MondooAuditConfig")
-    cond = mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.ScanAPIDegraded)
+    cond := mondoo.FindMondooAuditConditions(foundMondooAuditConfig.Status.Conditions, mondoov2.ScanAPIDegraded)
     s.Require().NotNil(cond)
     s.NotContains(cond.Message, "OOM", "Found OOMKilled message in condition")
-    s.Len(cond.AffectedPods, 0, "Found a pod in condition")
-
-    // Give the integration a chance to update
-    time.Sleep(2 * time.Second)
+    s.Len(cond.AffectedPods, 0, "Found a pod in condition")

     status, err = s.integration.GetStatus(s.ctx)
     s.NoError(err, "Failed to get status")
diff --git a/tests/integration/audit_config_test.go b/tests/integration/audit_config_test.go
index 1e86b5d5..7b832e1b 100644
--- a/tests/integration/audit_config_test.go
+++ b/tests/integration/audit_config_test.go
@@ -46,11 +46,11 @@ func (s *AuditConfigSuite) TestReconcile_Nodes_CronJobs() {
     s.testMondooAuditConfigNodesCronjobs(auditConfig)
 }

-func (s *AuditConfigSuite) TestReconcile_Nodes_Deployments() {
+func (s *AuditConfigSuite) TestReconcile_Nodes_DaemonSet() {
     auditConfig := utils.DefaultAuditConfigMinimal(s.testCluster.Settings.Namespace, false, false, true, false)
-    auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment
+    auditConfig.Spec.Nodes.Style = v1alpha2.NodeScanStyle_Deployment // TODO: Change to DaemonSet (no effect on reconcile logic)
     auditConfig.Spec.Nodes.IntervalTimer = 1
-    s.testMondooAuditConfigNodesDeployments(auditConfig)
+    s.testMondooAuditConfigNodesDaemonSets(auditConfig)
 }

 func (s *AuditConfigSuite) TestReconcile_AdmissionPermissive() {