forked from openshift/network-metrics-daemon
-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.go
89 lines (74 loc) · 2.67 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
package main
import (
"context"
"flag"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"github.com/openshift/network-metrics-daemon/pkg/controller"
"github.com/openshift/network-metrics-daemon/pkg/podmetrics"
"github.com/openshift/network-metrics-daemon/pkg/signals"
)
// build is the git version of this program. It is set at link time via
// -ldflags in the makefile; "develop" is the fallback for ad-hoc builds.
var build = "develop"
// main wires together the network-metrics daemon: it parses flags, builds a
// Kubernetes client, starts a pod informer restricted to the local node, and
// runs the metrics controller until a shutdown signal arrives.
func main() {
	klog.InitFlags(nil)

	var config struct {
		kubeconfig     string
		masterURL      string
		metricsAddress string
		currentNode    string
	}

	flag.StringVar(&config.kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&config.masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&config.metricsAddress, "metrics-listen-address", ":9091", "metrics server listen address.")
	flag.StringVar(&config.currentNode, "node-name", "", "the node the daemon is running on.")
	flag.Parse()

	if config.currentNode == "" {
		// Fatal, not Fatalf: the message contains no format verbs.
		klog.Fatal("--node-name required parameter not set")
	}

	klog.Info("Version:", build)
	klog.Info("Starting with config", config)

	// Set up signals so we handle the first shutdown signal gracefully.
	stopCh := signals.SetupSignalHandler()

	cfg, err := clientcmd.BuildConfigFromFlags(config.masterURL, config.kubeconfig)
	if err != nil {
		klog.Fatalf("Error building kubeconfig: %s", err.Error())
	}

	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		klog.Fatalf("Error building kubernetes clientset: %s", err.Error())
	}

	// Only watch pods scheduled on this node.
	fieldSelector := fmt.Sprintf("spec.nodeName=%s", config.currentNode)

	informer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				// Preserve the options supplied by the informer
				// (ResourceVersion, limits, …) and only narrow the
				// field selector. Discarding them would force a full
				// re-list on every watch restart.
				options.FieldSelector = fieldSelector
				return kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(context.Background(), options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				// Same as above: keep ResourceVersion/TimeoutSeconds/
				// AllowWatchBookmarks so watches resume incrementally.
				options.FieldSelector = fieldSelector
				return kubeClient.CoreV1().Pods(metav1.NamespaceAll).Watch(context.Background(), options)
			},
		},
		&v1.Pod{},
		time.Second*30, // resync period
		cache.Indexers{},
	)

	ctrl := controller.New(kubeClient, informer, config.currentNode)

	go informer.Run(stopCh)

	podmetrics.Serve(config.metricsAddress, stopCh)

	if err = ctrl.Run(2, stopCh); err != nil {
		klog.Fatalf("Error running controller: %s", err.Error())
	}
}