use DaemonSet for node scanning #1134

Merged: 27 commits, Jun 21, 2024

Commits
c063bd3  feat: ignore dlv binary (slntopp, Jun 19, 2024)
aafe1da  feat: use templated inventory and universal configmap; add daemonset … (slntopp, Jun 19, 2024)
1096c00  feat: create daemonset instead of deployments, cleanup old deployments (slntopp, Jun 19, 2024)
3faf494  feat: use `inventory_template` file instead of `inventory` to avoid c… (slntopp, Jun 19, 2024)
4eb0fac  scratching the surface of tests updates (slntopp, Jun 19, 2024)
1638d31  feat: remove deployments cleanup (slntopp, Jun 19, 2024)
e39c66e  follow-up: remove deployments audit (slntopp, Jun 19, 2024)
3b167d0  follow-up: remove deployments audit (slntopp, Jun 20, 2024)
1c6e25f  fix tests (imilchev, Jun 20, 2024)
0ff41c3  Merge remote-tracking branch 'origin/ivan/fix-tests' into mik/node-sc… (slntopp, Jun 20, 2024)
1340f0e  fix: exclude services from assets score check in the integration test (slntopp, Jun 20, 2024)
7106117  fix: allow deamonsets crud in rbac (slntopp, Jun 20, 2024)
03e7d7f  cleanup (slntopp, Jun 20, 2024)
f14d8ca  fix: integration tests (except oom) (slntopp, Jun 20, 2024)
48f74b9  follow-up on tests naming (slntopp, Jun 20, 2024)
c64fd0b  follow-up on tests naming (slntopp, Jun 20, 2024)
9ce42fe  fix: report node status and oom test (slntopp, Jun 20, 2024)
3fb411a  fix: update cronjobs spec to match updated configmap (slntopp, Jun 20, 2024)
49621b0  docs: remove misleading comment (daemon set name) (slntopp, Jun 21, 2024)
3fb50ff  follow-up: update misleading or unclear naming, comments and messages… (slntopp, Jun 21, 2024)
805fc32  follow-up: copy-pasted comment not matching the code (slntopp, Jun 21, 2024)
85af9ef  bring back the audit conditions test (slntopp, Jun 21, 2024)
4cbf460  remove reduntant for loops in configmap related unit-tests (slntopp, Jun 21, 2024)
2b9d174  follow-up: correct misleading comment (slntopp, Jun 21, 2024)
bb3f86d  follow-up: correct var naming (slntopp, Jun 21, 2024)
1070320  fix: delete daemonset on down and style change (slntopp, Jun 21, 2024)
9dc1997  fix: delete deprecated config maps (slntopp, Jun 21, 2024)
3 changes: 3 additions & 0 deletions .gitignore
@@ -11,6 +11,9 @@ testbin/*
# Test binary, build with `go test -c`
*.test

# Go dlv debugger binary
__debug_bin*

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

1 change: 1 addition & 0 deletions api/v1alpha2/mondooauditconfig_types.go
@@ -96,6 +96,7 @@ type NodeScanStyle string
const (
NodeScanStyle_CronJob NodeScanStyle = "cronjob"
NodeScanStyle_Deployment NodeScanStyle = "deployment"
NodeScanStyle_DaemonSet NodeScanStyle = "daemonset"
)

type Nodes struct {
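
For orientation, the new constant is consumed via Spec.Nodes.Style (see the handler changes further down). Below is a minimal Go sketch of opting a MondooAuditConfig into the new mode; the import path is an assumption for illustration and is not part of this diff, and Spec.Nodes is assumed to be a value field, as the handler's usage suggests.

package main

import (
	"fmt"

	// Import path assumed for illustration; use the module's real api package.
	"go.mondoo.com/mondoo-operator/api/v1alpha2"
)

func main() {
	// Sketch only: switch node scanning to the DaemonSet style added in this PR.
	// All other MondooAuditConfig fields are left at their zero values.
	mac := v1alpha2.MondooAuditConfig{}
	mac.Spec.Nodes.Style = v1alpha2.NodeScanStyle_DaemonSet

	fmt.Println(mac.Spec.Nodes.Style) // prints "daemonset"
}
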
13 changes: 7 additions & 6 deletions config/rbac/role.yaml
@@ -21,23 +21,24 @@ rules:
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- batch
2 changes: 1 addition & 1 deletion controllers/mondooauditconfig_controller.go
@@ -61,7 +61,7 @@ var MondooClientBuilder = mondooclient.NewClient
//+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondooauditconfigs/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondooauditconfigs/finalizers,verbs=update
//+kubebuilder:rbac:groups=k8s.mondoo.com,resources=mondoooperatorconfigs,verbs=get;watch;list
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=deployments;replicasets;daemonsets;statefulsets,verbs=get;list;watch
//+kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=deletecollection
196 changes: 68 additions & 128 deletions controllers/nodes/deployment_handler.go
@@ -38,8 +38,8 @@ func (n *DeploymentHandler) Reconcile(ctx context.Context) (ctrl.Result, error)
if err := n.syncCronJob(ctx); err != nil {
return ctrl.Result{}, err
}
} else if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment {
if err := n.syncDeployment(ctx); err != nil {
} else if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment || n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_DaemonSet {
if err := n.syncDaemonSet(ctx); err != nil {
return ctrl.Result{}, err
}
}
@@ -72,18 +72,24 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error {
return err
}

// Delete DaemonSet if it exists
ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, ds); err != nil {
logger.Error(err, "Failed to clean up node scanning DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
return err
}

// Create/update CronJobs for nodes
for _, node := range nodes.Items {
// Delete Deployment if it exists
dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil {
logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name)
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapNameWithNode(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, cm); err != nil {
logger.Error(err, "Failed to clean up old ConfigMap for node scanning", "namespace", cm.Namespace, "name", cm.Name)
return err
}

updated, err := n.syncConfigMap(ctx, node, clusterUid)
updated, err := n.syncConfigMap(ctx, clusterUid)
if err != nil {
return err
}
@@ -157,7 +163,7 @@ func (n *DeploymentHandler) syncCronJob(ctx context.Context) error {
return nil
}

func (n *DeploymentHandler) syncDeployment(ctx context.Context) error {
func (n *DeploymentHandler) syncDaemonSet(ctx context.Context) error {
mondooClientImage, err := n.ContainerImageResolver.CnspecImage(
n.Mondoo.Spec.Scanner.Image.Name, n.Mondoo.Spec.Scanner.Image.Tag, n.MondooOperatorConfig.Spec.SkipContainerResolution)
if err != nil {
@@ -194,71 +200,66 @@ func (n *DeploymentHandler) syncDeployment(ctx context.Context) error {
return err
}

updated, err := n.syncConfigMap(ctx, node, clusterUid)
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapNameWithNode(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, cm); err != nil {
logger.Error(err, "Failed to clean up old ConfigMap for node scanning", "namespace", cm.Namespace, "name", cm.Name)
return err
}

updated, err := n.syncConfigMap(ctx, clusterUid)
if err != nil {
return err
}

if updated {
logger.Info(
"Inventory ConfigMap was just updated. The deployment will use the new config during the next scheduled run.",
"Inventory ConfigMap was just updated. The daemonset will use the new config during the next scheduled run.",
"namespace", n.Mondoo.Namespace,
"name", DeploymentName(n.Mondoo.Name, node.Name))
}

dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, dep, n.Mondoo, logger, func() error {
UpdateDeployment(dep, node, *n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig)
return nil
})
if err != nil {
return err
}

if op == controllerutil.OperationResultCreated {
err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger)
if err != nil {
logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name)
return err
if n.Mondoo.Spec.Nodes.Style == v1alpha2.NodeScanStyle_Deployment {
dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil {
logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name)
}
continue
}
}

// Delete dangling Deployments for nodes that have been deleted from the cluster.
if err := n.cleanupDeploymentsForDeletedNodes(ctx, *nodes); err != nil {
return err
}

// List the Deployments again after they have been synced.
deployments, err := n.getDeploymentsForAuditConfig(ctx)
ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}}
op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, ds, n.Mondoo, logger, func() error {
UpdateDaemonSet(ds, *n.Mondoo, n.IsOpenshift, mondooClientImage, *n.MondooOperatorConfig)
return nil
})
if err != nil {
return err
}

// Get Pods for these Deployments
pods := &corev1.PodList{}
if len(deployments) > 0 {
opts := &client.ListOptions{
Namespace: n.Mondoo.Namespace,
LabelSelector: labels.SelectorFromSet(NodeScanningLabels(*n.Mondoo)),
}
err = n.KubeClient.List(ctx, pods, opts)
if op == controllerutil.OperationResultCreated {
err = mondoo.UpdateMondooAuditConfig(ctx, n.KubeClient, n.Mondoo, logger)
if err != nil {
logger.Error(err, "Failed to list Pods for Node Scanning")
logger.Error(err, "Failed to update MondooAuditConfig", "namespace", n.Mondoo.Namespace, "name", n.Mondoo.Name)
return err
}
}

deploymentsDegraded := false
for _, d := range deployments {
if d.Status.ReadyReplicas < *d.Spec.Replicas {
deploymentsDegraded = true
break
}
if err := n.KubeClient.Get(ctx, client.ObjectKeyFromObject(ds), ds); err != nil {
logger.Error(err, "Failed to get DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
}

updateNodeConditions(n.Mondoo, deploymentsDegraded, pods)
// Get Pods for these Deployments
pods := &corev1.PodList{}
opts := &client.ListOptions{
Namespace: n.Mondoo.Namespace,
LabelSelector: labels.SelectorFromSet(NodeScanningLabels(*n.Mondoo)),
}
err = n.KubeClient.List(ctx, pods, opts)
if err != nil {
logger.Error(err, "Failed to list Pods for Node Scanning")
return err
}

updateNodeConditions(n.Mondoo, ds.Status.CurrentNumberScheduled < ds.Status.DesiredNumberScheduled, pods)

if err := n.syncGCCronjob(ctx, mondooOperatorImage, clusterUid); err != nil {
return err
@@ -269,16 +270,16 @@ func (n *DeploymentHandler) syncDeployment(ctx context.Context) error {
// syncConfigMap syncs the inventory ConfigMap. Returns a boolean indicating whether the ConfigMap has been updated. It
// can only be "true", if the ConfigMap existed before this reconcile cycle and the inventory was different from the
// desired state.
func (n *DeploymentHandler) syncConfigMap(ctx context.Context, node corev1.Node, clusterUid string) (bool, error) {
func (n *DeploymentHandler) syncConfigMap(ctx context.Context, clusterUid string) (bool, error) {
integrationMrn, err := k8s.TryGetIntegrationMrnForAuditConfig(ctx, n.KubeClient, *n.Mondoo)
if err != nil {
logger.Error(err, "failed to retrieve IntegrationMRN")
return false, err
}

cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace}}
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace}}
op, err := k8s.CreateOrUpdate(ctx, n.KubeClient, cm, n.Mondoo, logger, func() error {
return UpdateConfigMap(cm, node, integrationMrn, clusterUid, *n.Mondoo)
return UpdateConfigMap(cm, integrationMrn, clusterUid, *n.Mondoo)
})
if err != nil {
return false, err
@@ -315,54 +316,6 @@ func (n *DeploymentHandler) cleanupCronJobsForDeletedNodes(ctx context.Context,
return err
}
logger.Info("Deleted CronJob", "namespace", c.Namespace, "name", c.Name)

configMap := &corev1.ConfigMap{}
configMap.Name = ConfigMapName(n.Mondoo.Name, c.Spec.JobTemplate.Spec.Template.Spec.NodeName)
configMap.Namespace = n.Mondoo.Namespace
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to delete ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}
}
return nil
}

// cleanupDeploymentsForDeletedNodes deletes dangling Deployments for nodes that have been deleted from the cluster.
func (n *DeploymentHandler) cleanupDeploymentsForDeletedNodes(ctx context.Context, currentNodes corev1.NodeList) error {
deployments, err := n.getDeploymentsForAuditConfig(ctx)
if err != nil {
return err
}

for _, d := range deployments {
// Check if the node for that Deployment is still present in the cluster.
found := false
for _, node := range currentNodes.Items {
if DeploymentName(n.Mondoo.Name, node.Name) == d.Name {
found = true
break
}
}

// If the node is still there, there is nothing to update.
if found {
continue
}

// If the node for the Deployment has been deleted from the cluster, the Deployment needs to be deleted.
if err := k8s.DeleteIfExists(ctx, n.KubeClient, &d); err != nil {
logger.Error(err, "Failed to deleted Deployment", "namespace", d.Namespace, "name", d.Name)
return err
}
logger.Info("Deleted Deployment", "namespace", d.Namespace, "name", d.Name)

configMap := &corev1.ConfigMap{}
configMap.Name = ConfigMapName(n.Mondoo.Name, d.Spec.Template.Spec.NodeSelector["kubernetes.io/hostname"])
configMap.Namespace = n.Mondoo.Namespace
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to delete ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}
}
return nil
}
@@ -389,19 +342,6 @@ func (n *DeploymentHandler) getCronJobsForAuditConfig(ctx context.Context) ([]ba
return cronJobs.Items, nil
}

func (n *DeploymentHandler) getDeploymentsForAuditConfig(ctx context.Context) ([]appsv1.Deployment, error) {
deps := &appsv1.DeploymentList{}
depLabels := NodeScanningLabels(*n.Mondoo)

// Lists only the Deployments in the namespace of the MondooAuditConfig and only the ones that exactly match our labels.
listOpts := &client.ListOptions{Namespace: n.Mondoo.Namespace, LabelSelector: labels.SelectorFromSet(depLabels)}
if err := n.KubeClient.List(ctx, deps, listOpts); err != nil {
logger.Error(err, "Failed to list Deployments in namespace", "namespace", n.Mondoo.Namespace)
return nil, err
}
return deps.Items, nil
}

func (n *DeploymentHandler) down(ctx context.Context) error {
nodes := &corev1.NodeList{}
if err := n.KubeClient.List(ctx, nodes); err != nil {
@@ -417,22 +357,22 @@ func (n *DeploymentHandler) down(ctx context.Context) error {
logger.Error(err, "Failed to clean up node scanning CronJob", "namespace", cronJob.Namespace, "name", cronJob.Name)
return err
}
}

dep := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{Name: DeploymentName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, dep); err != nil {
logger.Error(err, "Failed to clean up node scanning Deployment", "namespace", dep.Namespace, "name", dep.Name)
return err
}
ds := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{Name: DaemonSetName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, ds); err != nil {
logger.Error(err, "Failed to clean up node scanning DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
return err
}

configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name, node.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to clean up inventory ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}
configMap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: ConfigMapName(n.Mondoo.Name), Namespace: n.Mondoo.Namespace},
}
if err := k8s.DeleteIfExists(ctx, n.KubeClient, configMap); err != nil {
logger.Error(err, "Failed to clean up inventory ConfigMap", "namespace", configMap.Namespace, "name", configMap.Name)
return err
}

gcCronJob := &batchv1.CronJob{
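
The DaemonSet health check in syncDaemonSet replaces the old per-node Deployment replica comparison with the DaemonSet's own scheduling counters. Below is a stand-alone Go illustration of that condition; the helper name is ours and not part of the PR.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// isNodeScanningDegraded mirrors the condition passed to updateNodeConditions in
// syncDaemonSet: scanning counts as degraded while fewer pods are scheduled than
// the DaemonSet wants, e.g. right after new nodes join the cluster.
func isNodeScanningDegraded(ds *appsv1.DaemonSet) bool {
	return ds.Status.CurrentNumberScheduled < ds.Status.DesiredNumberScheduled
}

func main() {
	ds := &appsv1.DaemonSet{}
	ds.Status.DesiredNumberScheduled = 3
	ds.Status.CurrentNumberScheduled = 2
	fmt.Println(isNodeScanningDegraded(ds)) // true: one node is not yet covered
}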