diff --git a/cmd/policy-assistant/pkg/kube/kubernetes.go b/cmd/policy-assistant/pkg/kube/kubernetes.go index 523a1279..a28f7dfa 100644 --- a/cmd/policy-assistant/pkg/kube/kubernetes.go +++ b/cmd/policy-assistant/pkg/kube/kubernetes.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -112,6 +113,43 @@ func (k *Kubernetes) CreateNetworkPolicy(policy *networkingv1.NetworkPolicy) (*n return createdPolicy, errors.Wrapf(err, "unable to create network policy %s/%s", policy.Namespace, policy.Name) } +func (k *Kubernetes) GetDeploymentsInNamespace(namespace string) ([]appsv1.Deployment, error) { + deploymentList, err := k.ClientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "unable to get deployments in namespace %s", namespace) + } + return deploymentList.Items, nil +} + +func (k *Kubernetes) GetDaemonSetsInNamespace(namespace string) ([]appsv1.DaemonSet, error) { + daemonSetList, err := k.ClientSet.AppsV1().DaemonSets(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "unable to get daemonSets in namespace %s", namespace) + } + return daemonSetList.Items, nil +} + +func (k *Kubernetes) GetStatefulSetsInNamespace(namespace string) ([]appsv1.StatefulSet, error) { + statefulSetList, err := k.ClientSet.AppsV1().StatefulSets(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "unable to get StatefulSets in namespace %s", namespace) + } + return statefulSetList.Items, nil +} + +func (k *Kubernetes) GetReplicaSetsInNamespace(namespace string) ([]appsv1.ReplicaSet, error) { + replicaSetList, err := k.ClientSet.AppsV1().ReplicaSets(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, 
errors.Wrapf(err, "unable to get ReplicaSets in namespace %s", namespace) + } + return replicaSetList.Items, nil +} + +func (k *Kubernetes) GetReplicaSet(namespace string, name string) (*appsv1.ReplicaSet, error) { + replicaSet, err := k.ClientSet.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + return replicaSet, errors.Wrapf(err, "unable to get replicaSet %s/%s", namespace, name) +} + func (k *Kubernetes) GetService(namespace string, name string) (*v1.Service, error) { service, err := k.ClientSet.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) return service, errors.Wrapf(err, "unable to get service %s/%s", namespace, name) diff --git a/cmd/policy-assistant/pkg/matcher/traffic.go b/cmd/policy-assistant/pkg/matcher/traffic.go index a763ca6e..02f4ea87 100644 --- a/cmd/policy-assistant/pkg/matcher/traffic.go +++ b/cmd/policy-assistant/pkg/matcher/traffic.go @@ -5,7 +5,10 @@ import ( "strings" "github.com/mattfenwick/collections/pkg/slice" + "github.com/mattfenwick/cyclonus/pkg/kube" + "github.com/mattfenwick/cyclonus/pkg/utils" "github.com/olekukonko/tablewriter" + "github.com/sirupsen/logrus" "golang.org/x/exp/maps" v1 "k8s.io/api/core/v1" ) @@ -57,7 +60,8 @@ func labelsToString(labels map[string]string) string { type TrafficPeer struct { Internal *InternalPeer - IP string + // IP external to cluster + IP string } func (p *TrafficPeer) Namespace() string { @@ -71,10 +75,268 @@ func (p *TrafficPeer) IsExternal() bool { return p.Internal == nil } +func (p *TrafficPeer) Translate() TrafficPeer { + //Translates kubernetes workload types to TrafficPeers. 
+ var podsNetworking []*PodNetworking + var podLabels map[string]string + var namespaceLabels map[string]string + var workloadOwner string + var workloadKind string + var internalPeer InternalPeer + workloadOwnerExists := false + workloadMetadata := strings.Split(strings.ToLower(p.Internal.Workload), "/") + if len(workloadMetadata) != 3 || (workloadMetadata[0] == "" || workloadMetadata[1] == "" || workloadMetadata[2] == "") || (workloadMetadata[1] != "daemonset" && workloadMetadata[1] != "statefulset" && workloadMetadata[1] != "replicaset" && workloadMetadata[1] != "deployment" && workloadMetadata[1] != "pod") { + logrus.Fatalf("Bad Workload structure: Types supported are pod, replicaset, deployment, daemonset, statefulset, and 3 fields are required with this structure, //") + } + kubeClient, err := kube.NewKubernetesForContext("") + utils.DoOrDie(err) + ns, err := kubeClient.GetNamespace(workloadMetadata[0]) + utils.DoOrDie(err) + kubePods, err := kube.GetPodsInNamespaces(kubeClient, []string{workloadMetadata[0]}) + if err != nil { + logrus.Fatalf("unable to read pods from kube, ns '%s': %+v", workloadMetadata[0], err) + } + for _, pod := range kubePods { + if workloadMetadata[1] == "deployment" && pod.OwnerReferences != nil && pod.OwnerReferences[0].Kind == "ReplicaSet" { + kubeReplicaSets, err := kubeClient.GetReplicaSet(workloadMetadata[0], pod.OwnerReferences[0].Name) + if err != nil { + logrus.Fatalf("unable to read Replicaset from kube, rs '%s': %+v", pod.OwnerReferences[0].Name, err) + } + if kubeReplicaSets.OwnerReferences != nil { + workloadOwner = kubeReplicaSets.OwnerReferences[0].Name + workloadKind = "deployment" + } + + } else if (workloadMetadata[1] == "daemonset" || workloadMetadata[1] == "statefulset" || workloadMetadata[1] == "replicaset") && pod.OwnerReferences != nil { + workloadOwner = pod.OwnerReferences[0].Name + workloadKind = pod.OwnerReferences[0].Kind + } else if workloadMetadata[1] == "pod" { + workloadOwner = pod.Name + workloadKind = 
"pod" + } + if strings.ToLower(workloadOwner) == workloadMetadata[2] && strings.ToLower(workloadKind) == workloadMetadata[1] { + podLabels = pod.Labels + namespaceLabels = ns.Labels + podNetworking := PodNetworking{ + IP: pod.Status.PodIP, + } + podsNetworking = append(podsNetworking, &podNetworking) + workloadOwnerExists = true + + } + } + + if !workloadOwnerExists { + logrus.Infof("workload not found on the cluster") + internalPeer = InternalPeer{ + Workload: "", + } + } else { + internalPeer = InternalPeer{ + Workload: p.Internal.Workload, + PodLabels: podLabels, + NamespaceLabels: namespaceLabels, + Namespace: workloadMetadata[0], + Pods: podsNetworking, + } + } + + TranslatedPeer := TrafficPeer{ + Internal: &internalPeer, + } + return TranslatedPeer +} + +func DeploymentsToTrafficPeers() []TrafficPeer { + //Translates all pods associated with deployments to TrafficPeers. + var deploymentPeers []TrafficPeer + kubeClient, err := kube.NewKubernetesForContext("") + utils.DoOrDie(err) + kubeNamespaces, err := kubeClient.GetAllNamespaces() + if err != nil { + logrus.Fatalf("unable to read namespaces from kube: %+v", err) + } + + for _, namespace := range kubeNamespaces.Items { + kubeDeployments, err := kubeClient.GetDeploymentsInNamespace(namespace.Name) + if err != nil { + logrus.Fatalf("unable to read deployments from kube, ns '%s': %+v", namespace.Name, err) + } + for _, deployment := range kubeDeployments { + tmpInternalPeer := InternalPeer{ + Workload: namespace.Name + "/deployment/" + deployment.Name, + } + tmpPeer := TrafficPeer{ + Internal: &tmpInternalPeer, + } + tmpPeerTranslated := tmpPeer.Translate() + if tmpPeerTranslated.Internal.Workload != "" { + deploymentPeers = append(deploymentPeers, tmpPeerTranslated) + } + + } + + } + + return deploymentPeers +} + +func DaemonSetsToTrafficPeers() []TrafficPeer { + //Translates all pods associated with daemonSets to TrafficPeers. 
+ var daemonSetPeers []TrafficPeer + kubeClient, err := kube.NewKubernetesForContext("") + utils.DoOrDie(err) + kubeNamespaces, err := kubeClient.GetAllNamespaces() + if err != nil { + logrus.Fatalf("unable to read namespaces from kube: %+v", err) + } + + for _, namespace := range kubeNamespaces.Items { + kubeDaemonSets, err := kubeClient.GetDaemonSetsInNamespace(namespace.Name) + if err != nil { + logrus.Fatalf("unable to read daemonSets from kube, ns '%s': %+v", namespace.Name, err) + } + for _, daemonSet := range kubeDaemonSets { + tmpInternalPeer := InternalPeer{ + Workload: namespace.Name + "/daemonset/" + daemonSet.Name, + } + tmpPeer := TrafficPeer{ + Internal: &tmpInternalPeer, + } + tmpPeerTranslated := tmpPeer.Translate() + if tmpPeerTranslated.Internal.Workload != "" { + daemonSetPeers = append(daemonSetPeers, tmpPeerTranslated) + } + } + + } + + return daemonSetPeers +} + +func StatefulSetsToTrafficPeers() []TrafficPeer { + //Translates all pods associated with statefulSets to TrafficPeers. 
+ var statefulSetPeers []TrafficPeer + kubeClient, err := kube.NewKubernetesForContext("") + utils.DoOrDie(err) + kubeNamespaces, err := kubeClient.GetAllNamespaces() + if err != nil { + logrus.Fatalf("unable to read namespaces from kube: %+v", err) + } + + for _, namespace := range kubeNamespaces.Items { + kubeStatefulSets, err := kubeClient.GetStatefulSetsInNamespace(namespace.Name) + if err != nil { + logrus.Fatalf("unable to read statefulSets from kube, ns '%s': %+v", namespace.Name, err) + } + for _, statefulSet := range kubeStatefulSets { + tmpInternalPeer := InternalPeer{ + Workload: namespace.Name + "/statefulset/" + statefulSet.Name, + } + tmpPeer := TrafficPeer{ + Internal: &tmpInternalPeer, + } + tmpPeerTranslated := tmpPeer.Translate() + if tmpPeerTranslated.Internal.Workload != "" { + statefulSetPeers = append(statefulSetPeers, tmpPeerTranslated) + } + } + + } + + return statefulSetPeers +} + +func ReplicaSetsToTrafficPeers() []TrafficPeer { + //Translates all pods associated with replicaSets that are not associated with deployments to TrafficPeers. 
+ var replicaSetPeers []TrafficPeer + kubeClient, err := kube.NewKubernetesForContext("") + utils.DoOrDie(err) + kubeNamespaces, err := kubeClient.GetAllNamespaces() + if err != nil { + logrus.Fatalf("unable to read namespaces from kube: %+v", err) + } + + for _, namespace := range kubeNamespaces.Items { + kubeReplicaSets, err := kubeClient.GetReplicaSetsInNamespace(namespace.Name) + if err != nil { + logrus.Fatalf("unable to read replicaSets from kube, ns '%s': %+v", namespace.Name, err) + } + + for _, replicaSet := range kubeReplicaSets { + if replicaSet.OwnerReferences != nil { + continue + } else { + tmpInternalPeer := InternalPeer{ + Workload: namespace.Name + "/replicaset/" + replicaSet.Name, + } + tmpPeer := TrafficPeer{ + Internal: &tmpInternalPeer, + } + tmpPeerTranslated := tmpPeer.Translate() + if tmpPeerTranslated.Internal.Workload != "" { + replicaSetPeers = append(replicaSetPeers, tmpPeerTranslated) + } + + } + } + + } + + return replicaSetPeers +} + +func PodsToTrafficPeers() []TrafficPeer { + //Translates all pods that are not associated with other workload types (deployment, replicaSet, daemonSet, statefulSet.) to TrafficPeers. 
+ var podPeers []TrafficPeer + kubeClient, err := kube.NewKubernetesForContext("") + utils.DoOrDie(err) + kubeNamespaces, err := kubeClient.GetAllNamespaces() + if err != nil { + logrus.Fatalf("unable to read namespaces from kube: %+v", err) + } + + for _, namespace := range kubeNamespaces.Items { + kubePods, err := kube.GetPodsInNamespaces(kubeClient, []string{namespace.Name}) + if err != nil { + logrus.Fatalf("unable to read pods from kube, ns '%s': %+v", namespace.Name, err) + } + for _, pod := range kubePods { + if pod.OwnerReferences != nil { + continue + } else { + tmpInternalPeer := InternalPeer{ + Workload: namespace.Name + "/pod/" + pod.Name, + } + tmpPeer := TrafficPeer{ + Internal: &tmpInternalPeer, + } + tmpPeerTranslated := tmpPeer.Translate() + if tmpPeerTranslated.Internal.Workload != "" { + podPeers = append(podPeers, tmpPeerTranslated) + } + } + } + + } + + return podPeers +} + +// Internal to cluster type InternalPeer struct { + // optional: if set, will override remaining values with information from cluster + Workload string PodLabels map[string]string NamespaceLabels map[string]string Namespace string - NodeLabels map[string]string - Node string + // optional + Pods []*PodNetworking +} + +type PodNetworking struct { + IP string + // don't worry about populating below fields right now + IsHostNetworking bool + NodeLabels []string } diff --git a/cmd/policy-assistant/test/workloadTraffic/getDaemonSetsOutput.json b/cmd/policy-assistant/test/workloadTraffic/getDaemonSetsOutput.json new file mode 100644 index 00000000..93932df2 --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/getDaemonSetsOutput.json @@ -0,0 +1,259 @@ +[ + { + "Internal": { + "Workload": "kube-system/daemonset/ama-metrics-node", + "PodLabels": { + "controller-revision-hash": "689d5b466", + "dsName": "ama-metrics-node", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "2" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", 
+ "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.139", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.251", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/daemonset/azure-ip-masq-agent", + "PodLabels": { + "controller-revision-hash": "7744f77c8f", + "k8s-app": "azure-ip-masq-agent", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "2", + "tier": "node" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/daemonset/azure-npm", + "PodLabels": { + "controller-revision-hash": "5dfd889fcb", + "k8s-app": "azure-npm", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "2" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/daemonset/cloud-node-manager", + "PodLabels": { + "controller-revision-hash": "646949ff7d", + "k8s-app": 
"cloud-node-manager", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "3" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/daemonset/csi-azuredisk-node", + "PodLabels": { + "app": "csi-azuredisk-node", + "controller-revision-hash": "6f968bcb6c", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "6" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/daemonset/csi-azurefile-node", + "PodLabels": { + "app": "csi-azurefile-node", + "controller-revision-hash": "f6fdcd6d8", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "3" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + 
"IP": "" + }, + { + "Internal": { + "Workload": "kube-system/daemonset/fluentd-elasticsearch", + "PodLabels": { + "controller-revision-hash": "77d78b9d8f", + "name": "fluentd-elasticsearch", + "pod-template-generation": "1" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.137", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.204", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/daemonset/kube-proxy", + "PodLabels": { + "component": "kube-proxy", + "controller-revision-hash": "86dd799df9", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "4", + "tier": "node" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + } + ] \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/getDeploymentsOutput.json b/cmd/policy-assistant/test/workloadTraffic/getDeploymentsOutput.json new file mode 100644 index 00000000..d7fa22aa --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/getDeploymentsOutput.json @@ -0,0 +1,369 @@ +[ + { + "Internal": { + "Workload": "default/deployment/nginx-deployment2", + "PodLabels": { + "role": "myrole" + }, + "NamespaceLabels": { + "kubernetes.io/metadata.name": "default", + "labelname2": "value2" + }, + "Namespace": 
"default", + "Pods": [ + { + "IP": "10.224.0.118", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.123", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.226", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/deployment/ama-metrics", + "PodLabels": { + "kubernetes.azure.com/managedby": "aks", + "pod-template-hash": "b88bf8d7", + "rsName": "ama-metrics" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.49", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/deployment/ama-metrics-ksm", + "PodLabels": { + "controller-revision-hash": "689d5b466", + "dsName": "ama-metrics-node", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "2" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.102", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.139", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.251", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/deployment/ama-metrics-operator-targets", + "PodLabels": { + "controller-revision-hash": "646949ff7d", + "k8s-app": "cloud-node-manager", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "3" + }, + "NamespaceLabels": { + 
"addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.39", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/deployment/coredns", + "PodLabels": { + "k8s-app": "kube-dns", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "pod-template-hash": "6695469449", + "version": "v20" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.188", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.185", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/deployment/coredns-autoscaler", + "PodLabels": { + "controller-revision-hash": "77d78b9d8f", + "name": "fluentd-elasticsearch", + "pod-template-generation": "1" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + 
"kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.192", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.137", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.204", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/deployment/konnectivity-agent", + "PodLabels": { + "component": "kube-proxy", + "controller-revision-hash": "86dd799df9", + "kubernetes.azure.com/managedby": "aks", + "pod-template-generation": "4", + "tier": "node" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.148", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.169", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.10", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.199", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "kube-system/deployment/metrics-server", + "PodLabels": { + "k8s-app": "metrics-server", + "kubernetes.azure.com/managedby": "aks", + "pod-template-hash": "64f4bf9984" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + 
"kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.155", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.108", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "nginx/deployment/nginx-deployment", + "PodLabels": { + "app": "nginx", + "pod-template-hash": "86dcfdf4c6" + }, + "NamespaceLabels": { + "kubernetes.io/metadata.name": "nginx", + "labelname3": "value3" + }, + "Namespace": "nginx", + "Pods": [ + { + "IP": "10.224.0.165", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.146", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "nginx2/deployment/nginx-deployment", + "PodLabels": { + "app": "nginx", + "pod-template-hash": "86dcfdf4c6" + }, + "NamespaceLabels": { + "kubernetes.io/metadata.name": "nginx2" + }, + "Namespace": "nginx2", + "Pods": [ + { + "IP": "10.224.0.107", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.150", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + } + ] \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/getPodsOutput.json b/cmd/policy-assistant/test/workloadTraffic/getPodsOutput.json new file mode 100644 index 00000000..5f92f093 --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/getPodsOutput.json @@ -0,0 +1,23 @@ +[ + { + "Internal": { + "Workload": "default/pod/static-web", + "PodLabels": { + "role": "myrole" + }, + "NamespaceLabels": { + "kubernetes.io/metadata.name": "default", + "labelname2": "value2" + }, + "Namespace": "default", + "Pods": [ + { + "IP": "10.224.0.226", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + } + ] \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/getReplicaSetsOutput.json 
b/cmd/policy-assistant/test/workloadTraffic/getReplicaSetsOutput.json new file mode 100644 index 00000000..8c3e1c52 --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/getReplicaSetsOutput.json @@ -0,0 +1,33 @@ +[ + { + "Internal": { + "Workload": "default/replicaset/frontend", + "PodLabels": { + "tier": "frontend" + }, + "NamespaceLabels": { + "kubernetes.io/metadata.name": "default", + "labelname2": "value2" + }, + "Namespace": "default", + "Pods": [ + { + "IP": "10.224.0.40", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.113", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.55", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + } + ] \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/getStatefulSetsOutput.json b/cmd/policy-assistant/test/workloadTraffic/getStatefulSetsOutput.json new file mode 100644 index 00000000..ef2163f8 --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/getStatefulSetsOutput.json @@ -0,0 +1,35 @@ +[ + { + "Internal": { + "Workload": "teststs/statefulset/web", + "PodLabels": { + "app": "nginx", + "apps.kubernetes.io/pod-index": "2", + "controller-revision-hash": "web-79dc58f667", + "statefulset.kubernetes.io/pod-name": "web-2" + }, + "NamespaceLabels": { + "kubernetes.io/metadata.name": "teststs" + }, + "Namespace": "teststs", + "Pods": [ + { + "IP": "10.224.0.147", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.58", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.117", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + } + ] \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/workload-traffic-output.json b/cmd/policy-assistant/test/workloadTraffic/workload-traffic-output.json new file mode 100644 index 00000000..21f936aa --- /dev/null +++ 
b/cmd/policy-assistant/test/workloadTraffic/workload-traffic-output.json @@ -0,0 +1,62 @@ +[ + { + "Internal": { + "Workload": "kube-system/daemonset/fluentd-elasticsearch", + "PodLabels": { + "controller-revision-hash": "77d78b9d8f", + "name": "fluentd-elasticsearch", + "pod-template-generation": "1" + }, + "NamespaceLabels": { + "addonmanager.kubernetes.io/mode": "Reconcile", + "control-plane": "true", + "kubernetes.azure.com/managedby": "aks", + "kubernetes.io/cluster-service": "true", + "kubernetes.io/metadata.name": "kube-system" + }, + "Namespace": "kube-system", + "Pods": [ + { + "IP": "10.224.0.246", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.192", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + }, + { + "Internal": { + "Workload": "default/deployment/nginx-deployment2", + "PodLabels": { + "app": "nginx", + "app2": "tmp", + "pod-template-hash": "5c496748cb" + }, + "NamespaceLabels": { + "kubernetes.io/metadata.name": "default", + "labelname2": "value2" + }, + "Namespace": "default", + "Pods": [ + { + "IP": "10.224.0.53", + "IsHostNetworking": false, + "NodeLabels": null + }, + { + "IP": "10.224.0.55", + "IsHostNetworking": false, + "NodeLabels": null + } + ] + }, + "IP": "" + } + +] \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/workload-traffic.json b/cmd/policy-assistant/test/workloadTraffic/workload-traffic.json new file mode 100644 index 00000000..4efa5541 --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/workload-traffic.json @@ -0,0 +1,30 @@ +[ + { + "Source": { + "Internal": { + "Workload": "kube-system/daemonset/fluentd-elasticsearch" + } + + }, + "Destination": { + "IP": "8.8.8.8" + }, + "Protocol": "TCP", + "ResolvedPort": 80, + "ResolvedPortName": "serve-80-tcp" + }, + { + "Source": { + "Internal": { + "Workload": "default/deployment/nginx-deployment" + } + + }, + "Destination": { + "IP": "8.8.8.8" + }, + "Protocol": "TCP", + 
"ResolvedPort": 80, + "ResolvedPortName": "serve-80-tcp" + } +] \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/workloads/daemonSet.yaml b/cmd/policy-assistant/test/workloadTraffic/workloads/daemonSet.yaml new file mode 100644 index 00000000..1650ecce --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/workloads/daemonSet.yaml @@ -0,0 +1,45 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd-elasticsearch + namespace: kube-system + labels: + k8s-app: fluentd-logging +spec: + selector: + matchLabels: + name: fluentd-elasticsearch + template: + metadata: + labels: + name: fluentd-elasticsearch + spec: + tolerations: + # these tolerations are to have the daemonset runnable on control plane nodes + # remove them if your control plane nodes should not run pods + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + containers: + - name: fluentd-elasticsearch + image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlog + mountPath: /var/log + # it may be desirable to set a high priority class to ensure that a DaemonSet Pod + # preempts running Pods + # priorityClassName: important + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log diff --git a/cmd/policy-assistant/test/workloadTraffic/workloads/deployment.yaml b/cmd/policy-assistant/test/workloadTraffic/workloads/deployment.yaml new file mode 100644 index 00000000..8a3cc1c6 --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/workloads/deployment.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment2 + labels: + app: nginx + app2: tmp +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + app2: tmp + template: + metadata: + labels: + app: nginx + 
app2: tmp + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/cmd/policy-assistant/test/workloadTraffic/workloads/pod.yaml b/cmd/policy-assistant/test/workloadTraffic/workloads/pod.yaml new file mode 100644 index 00000000..1d208d90 --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/workloads/pod.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: static-web + labels: + role: myrole +spec: + containers: + - name: web + image: nginx + ports: + - name: web + containerPort: 80 + protocol: TCP \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/workloads/replicaSet.yaml b/cmd/policy-assistant/test/workloadTraffic/workloads/replicaSet.yaml new file mode 100644 index 00000000..6c9cbf5f --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/workloads/replicaSet.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # modify replicas according to your case + replicas: 3 + selector: + matchLabels: + tier: frontend + template: + metadata: + labels: + tier: frontend + spec: + containers: + - name: php-redis + image: us-docker.pkg.dev/google-samples/containers/gke/gb-frontend:v5 \ No newline at end of file diff --git a/cmd/policy-assistant/test/workloadTraffic/workloads/statefulSet.yaml b/cmd/policy-assistant/test/workloadTraffic/workloads/statefulSet.yaml new file mode 100644 index 00000000..a9b60b0f --- /dev/null +++ b/cmd/policy-assistant/test/workloadTraffic/workloads/statefulSet.yaml @@ -0,0 +1,48 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: web +spec: + selector: + matchLabels: + app: nginx # must match .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # defaults
to 1 + template: + metadata: + labels: + app: nginx # must match .spec.selector.matchLabels + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: registry.k8s.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi \ No newline at end of file