
Commit

change NodeDrainPodFilters to metav1.LabelSelector and function to match labels
Kun483 committed Aug 6, 2024
1 parent a37df6a commit 77991f2
Showing 2 changed files with 18 additions and 6 deletions.
7 changes: 2 additions & 5 deletions api/v1beta1/machine_types.go
@@ -19,7 +19,6 @@ package v1beta1
 import (
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    kubedrain "k8s.io/kubectl/pkg/drain"
     capierrors "sigs.k8s.io/cluster-api/errors"
 )

@@ -122,11 +121,9 @@ type MachineSpec struct {
     // +optional
     NodeDeletionTimeout *metav1.Duration `json:"nodeDeletionTimeout,omitempty"`

-    // NodeDrainPodFilters are applied as AdditionalFilters after base drain filters to
-    // exclude pods using custom logic. Any filter that returns PodDeleteStatus
-    // with Delete == false will immediately stop execution of further filters.
+    // NodeDrainPodFilters allows specifying a label selector for pods to be excluded during node drain.
     // +optional
-    NodeDrainPodFilters []kubedrain.PodFilter `json:"nodeDrainPodFilters,omitempty"`
+    NodeDrainPodFilters *metav1.LabelSelector `json:"nodeDrainPodFilters,omitempty"`
 }

 // ANCHOR_END: MachineSpec
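For context, and not part of the commit itself: after this change a Machine opts pods out of node drain with an ordinary Kubernetes label selector instead of a slice of filter functions. A minimal sketch of setting the new field, assuming the conventional clusterv1 alias for this repository's api/v1beta1 package; the label key and value are made up for illustration:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

func main() {
    // A Machine whose node drain should leave pods labeled
    // app=critical-agent in place.
    machine := clusterv1.Machine{
        Spec: clusterv1.MachineSpec{
            NodeDrainPodFilters: &metav1.LabelSelector{
                MatchLabels: map[string]string{"app": "critical-agent"},
            },
        },
    }
    fmt.Println(machine.Spec.NodeDrainPodFilters.MatchLabels)
}

How the selector is evaluated during drain is decided by the controller change below.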
17 changes: 16 additions & 1 deletion internal/controllers/machine/machine_controller.go
@@ -26,6 +26,7 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
     kerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -621,7 +622,9 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster,
         }},
         // SPECTRO: Even if the node is reachable, we wait 30 minutes for drain completion else move ahead
         SkipWaitForDeleteTimeoutSeconds: 60 * 30, // 30 minutes
-        AdditionalFilters: m.Spec.NodeDrainPodFilters,
+        AdditionalFilters: []kubedrain.PodFilter{
+            SkipFuncGenerator(m.Spec.NodeDrainPodFilters),
+        },
     }
     if noderefutil.IsNodeUnreachable(node) {
         // When the node is unreachable and some pods are not evicted for as long as this timeout, we ignore them.
@@ -644,6 +647,18 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster,
     return ctrl.Result{}, nil
 }

+func SkipFuncGenerator(labelSelector *metav1.LabelSelector) func(pod corev1.Pod) kubedrain.PodDeleteStatus {
+    return func(pod corev1.Pod) kubedrain.PodDeleteStatus {
+        if pod.Labels == nil {
+            return kubedrain.MakePodDeleteStatusOkay()
+        }
+        if labels.Equals(labelSelector.MatchLabels, pod.ObjectMeta.Labels) {
+            return kubedrain.MakePodDeleteStatusSkip()
+        }
+        return kubedrain.MakePodDeleteStatusOkay()
+    }
+}

 // shouldWaitForNodeVolumes returns true if node status still have volumes attached
 // pod deletion and volume detach happen asynchronously, so pod could be deleted before volume detached from the node
 // this could cause issue for some storage provisioner, for example, vsphere-volume this is problematic
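Two properties of SkipFuncGenerator as committed are worth noting. First, labels.Equals compares the selector's MatchLabels map and the pod's label map for exact equality, so a pod carrying even one label beyond the selector is still drained, and MatchExpressions are never consulted. Second, labelSelector is dereferenced without a nil check, so a labeled pod panics the filter whenever NodeDrainPodFilters is unset. A nil-safe variant that applies standard label-selector semantics could look like the following sketch; skipFuncFromSelector and the drainutil package name are hypothetical, not part of this commit:

package drainutil

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    kubedrain "k8s.io/kubectl/pkg/drain"
)

// skipFuncFromSelector returns a drain filter that skips any pod matching
// labelSelector under standard selector semantics: MatchLabels is a subset
// match rather than exact equality, and MatchExpressions are honored.
func skipFuncFromSelector(labelSelector *metav1.LabelSelector) kubedrain.PodFilter {
    return func(pod corev1.Pod) kubedrain.PodDeleteStatus {
        if labelSelector == nil {
            // No filter configured: drain the pod as usual.
            return kubedrain.MakePodDeleteStatusOkay()
        }
        selector, err := metav1.LabelSelectorAsSelector(labelSelector)
        if err != nil {
            // Fail open on an invalid selector and drain the pod.
            return kubedrain.MakePodDeleteStatusOkay()
        }
        if selector.Matches(labels.Set(pod.Labels)) {
            return kubedrain.MakePodDeleteStatusSkip()
        }
        return kubedrain.MakePodDeleteStatusOkay()
    }
}

Wired into drainNode, the AdditionalFilters entry would then read skipFuncFromSelector(m.Spec.NodeDrainPodFilters). One caveat of standard semantics: an empty but non-nil selector matches every pod, so a caller wanting no filtering must leave the field nil.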
