diff --git a/controllers/sriovnetworknodepolicy_controller.go b/controllers/sriovnetworknodepolicy_controller.go
index 32126ad4..a71f1ad3 100644
--- a/controllers/sriovnetworknodepolicy_controller.go
+++ b/controllers/sriovnetworknodepolicy_controller.go
@@ -144,10 +144,11 @@ func (r *SriovNetworkNodePolicyReconciler) Reconcile(ctx context.Context, req ct
 	// Sort the policies with priority, higher priority ones is applied later
 	sort.Sort(sriovnetworkv1.ByPriority(policyList.Items))
+	// TODO not yet verified - wangyudong
 	// Sync SriovNetworkNodeState objects
-	if err = r.syncAllSriovNetworkNodeStates(defaultPolicy, policyList, nodeList); err != nil {
+	/*if err = r.syncAllSriovNetworkNodeStates(defaultPolicy, policyList, nodeList); err != nil {
 		return reconcile.Result{}, err
-	}
+	}*/

 	if os.Getenv("SRIOV_DEVICE_PLUGIN_IMAGE") != "" {
 		// Sync Sriov device plugin ConfigMap object
@@ -160,6 +161,10 @@ func (r *SriovNetworkNodePolicyReconciler) Reconcile(ctx context.Context, req ct
 		}
 	}

+	// Sync SriovNetworkNodeState objects
+	if err = r.syncAllSriovNetworkNodeStates(defaultPolicy, policyList, nodeList); err != nil {
+		return reconcile.Result{}, err
+	}

 	// All was successful. Request that this be re-triggered after ResyncPeriod,
 	// so we can reconcile state again.
@@ -743,6 +748,11 @@ func (r *SriovNetworkNodePolicyReconciler) renderDevicePluginConfigData(pl *srio
 	nodeState := &sriovnetworkv1.SriovNetworkNodeState{}
 	err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: node.Name}, nodeState)
 	if err == nil {
+		// TODO not yet verified - 王玉东 (wangyudong)
+		// Loop through interfaces status to find a match for NetworkID or NetworkTag
+		/*if len(nodeState.Status.Interfaces) == 0 {
+			return rcl, fmt.Errorf("node state %s doesn't contain interfaces data", nodeState.Name)
+		}*/
 		// Loop through interfaces status to find a match for NetworkID or NetworkTag
 		for _, intf := range nodeState.Status.Interfaces {
 			if sriovnetworkv1.NetFilterMatch(p.Spec.NicSelector.NetFilter, intf.NetFilter) {
diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go
index c4132a22..5f6579f8 100644
--- a/pkg/daemon/daemon.go
+++ b/pkg/daemon/daemon.go
@@ -205,6 +205,8 @@ func (dn *Daemon) tryCreateUdevRuleWrapper() error {

 // Run the config daemon
 func (dn *Daemon) Run(stopCh <-chan struct{}, exitCh <-chan error) error {
+	glog.V(0).Infof("Run(): node: %s", dn.name)
+
 	if utils.ClusterType == utils.ClusterTypeOpenshift {
 		glog.V(0).Infof("Run(): start daemon. openshiftFlavor: %s", dn.openshiftContext.OpenshiftFlavor)
 	} else {
@@ -382,14 +384,24 @@ func (dn *Daemon) nodeUpdateHandler(old, new interface{}) {
 	dn.node = node.DeepCopy()
 	nodes, err := dn.nodeLister.List(labels.Everything())
 	if err != nil {
+		glog.Errorf("nodeUpdateHandler(): failed to list nodes: %v", err)
 		return
 	}
-	for _, node := range nodes {
-		if node.GetName() != dn.name && (node.Annotations[annoKey] == annoDraining || node.Annotations[annoKey] == annoMcpPaused) {
+	for _, otherNode := range nodes {
+		if otherNode.GetName() == dn.name {
+			continue
+		}
+		drainingAnnotationValue := otherNode.Annotations[annoKey]
+		if drainingAnnotationValue == annoDraining || drainingAnnotationValue == annoMcpPaused {
+			glog.V(2).Infof("nodeUpdateHandler(): node is not drainable as [%s] has [%s == %s]", otherNode.Name, annoKey, drainingAnnotationValue)
 			dn.drainable = false
 			return
 		}
 	}
+	if !dn.drainable {
+		glog.V(2).Infof("nodeUpdateHandler(): node is now drainable")
+	}
+	dn.drainable = true
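For reference, the `nodeUpdateHandler` hunk above boils down to the check sketched below. This is a minimal, standalone approximation rather than the operator's actual code: `nodeInfo`, `drainable`, and the annotation constant values are assumptions made for illustration only (the real daemon iterates `corev1.Node` objects from a lister and stores the result in `dn.drainable`).

```go
package main

import "fmt"

// Assumed annotation key/values; the real constants live in pkg/daemon and may differ.
const (
	annoKey       = "sriovnetwork.openshift.io/state"
	annoDraining  = "Draining"
	annoMcpPaused = "Draining_MCP_Paused"
)

// nodeInfo is a stand-in for corev1.Node, carrying only what the check needs.
type nodeInfo struct {
	Name        string
	Annotations map[string]string
}

// drainable mirrors the refactored loop: a node may start draining only when
// no *other* node is currently draining or has its MachineConfigPool paused.
func drainable(self string, nodes []nodeInfo) bool {
	for _, n := range nodes {
		if n.Name == self {
			continue // our own annotation never blocks us
		}
		if v := n.Annotations[annoKey]; v == annoDraining || v == annoMcpPaused {
			return false // early exit, matching the diff's return after dn.drainable = false
		}
	}
	return true
}

func main() {
	peers := []nodeInfo{
		{Name: "worker-0", Annotations: map[string]string{annoKey: annoDraining}},
		{Name: "worker-1", Annotations: map[string]string{}},
	}
	fmt.Println(drainable("worker-1", peers)) // false: worker-0 is draining
}
```

Splitting the original compound condition into a `continue` for the local node plus a named `drainingAnnotationValue` is what makes the blocking peer easy to log, which the added `glog.V(2)` lines in the diff rely on.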