diff --git a/pkg/clustermanager/cluster_manager.go b/pkg/clustermanager/cluster_manager.go index a0ec614c7b5d..e3b80d93f561 100644 --- a/pkg/clustermanager/cluster_manager.go +++ b/pkg/clustermanager/cluster_manager.go @@ -100,6 +100,7 @@ type CAPIClient interface { GetWorkloadKubeconfig(ctx context.Context, clusterName string, cluster *types.Cluster) ([]byte, error) } +// AwsIamAuth provides the operations to install and configure AWS IAM Authenticator on EKS-A clusters. type AwsIamAuth interface { CreateAndInstallAWSIAMAuthCASecret(ctx context.Context, managementCluster *types.Cluster, workloadClusterName string) error InstallAWSIAMAuth(ctx context.Context, management, workload *types.Cluster, spec *cluster.Spec) error diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml index 28667b62e5e8..53fc0b433a44 100644 --- a/pkg/providers/nutanix/config/cp-template.yaml +++ b/pkg/providers/nutanix/config/cp-template.yaml @@ -647,7 +647,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": [{{ range $i, $ip := .ccmIgnoredNodeIPs }}{{ if $i }}, {{ end }}"{{ $ip }}"{{ end }}] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go index 8a52b0ecd093..b60b0d664472 100644 --- a/pkg/providers/nutanix/template.go +++ b/pkg/providers/nutanix/template.go @@ -176,9 +176,12 @@ func buildTemplateMapCP( failureDomains := generateNutanixFailureDomains(datacenterSpec.FailureDomains) + ccmIgnoredNodeIPs := generateCcmIgnoredNodeIPsList(clusterSpec) + values := map[string]interface{}{ "auditPolicy": auditPolicy, "apiServerExtraArgs": apiServerExtraArgs.ToPartialYaml(), + "ccmIgnoredNodeIPs": ccmIgnoredNodeIPs, "cloudProviderImage": versionsBundle.Nutanix.CloudProvider.VersionedImage(), "clusterName": clusterSpec.Cluster.Name, "controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host, @@ -519,3 +522,9 @@ func generateNutanixFailureDomains(eksNutanixFailureDomains []v1alpha1.NutanixDa } return failureDomains } + +// generateCcmIgnoredNodeIPsList returns the node IPs the Nutanix CCM should ignore; it always includes the control plane endpoint host. +func generateCcmIgnoredNodeIPsList(clusterSpec *cluster.Spec) []string { + ignoredIPs := []string{clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host} + + return ignoredIPs +} diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go index 40a59ae613d7..af9cdd5d6a61 100644 --- a/pkg/providers/nutanix/template_test.go +++ b/pkg/providers/nutanix/template_test.go @@ -726,6 +726,34 @@ func TestTemplateBuilderFailureDomains(t *testing.T) { } } +func TestTemplateBuilderCcmExcludeNodeIPs(t *testing.T) { + for _, tc := range []struct { + Input string + Output string + }{ + { + Input: "testdata/eksa-cluster-ccm-exclude-node-ips.yaml", + Output: "testdata/expected_cluster_ccm_exclude_node_ips.yaml", + }, + } { + clusterSpec := test.NewFullClusterSpec(t, tc.Input) + + machineCfg := clusterSpec.NutanixMachineConfig(clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name) + + t.Setenv(constants.EksaNutanixUsernameKey, "admin") + t.Setenv(constants.EksaNutanixPasswordKey, "password") + creds := GetCredsFromEnv() + + bldr := NewNutanixTemplateBuilder(&clusterSpec.NutanixDatacenter.Spec, &machineCfg.Spec, nil, + map[string]anywherev1.NutanixMachineConfigSpec{}, creds, time.Now) + + cpSpec, err := bldr.GenerateCAPISpecControlPlane(clusterSpec) + assert.NoError(t, err) + assert.NotNil(t, cpSpec) + test.AssertContentToFile(t, string(cpSpec), tc.Output) + } +} + func minimalNutanixConfigSpec(t *testing.T)
(*anywherev1.NutanixDatacenterConfig, *anywherev1.NutanixMachineConfig, map[string]anywherev1.NutanixMachineConfigSpec) { dcConf := &anywherev1.NutanixDatacenterConfig{} err := yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf) diff --git a/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_domain_name.yaml index fa51039a4ff0..b469c2f8b1f5 100644 --- a/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_domain_name.yaml +++ b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_domain_name.yaml @@ -9,7 +9,7 @@ spec: name: test count: 1 endpoint: - host: test + host: 10.199.199.1 certSANs: ["foo.bar"] machineGroupRef: name: test diff --git a/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_ip.yaml b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_ip.yaml index ab6483f57288..4d619ffc76d1 100644 --- a/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_ip.yaml +++ b/pkg/providers/nutanix/testdata/cluster_api_server_cert_san_ip.yaml @@ -9,7 +9,7 @@ spec: name: test count: 1 endpoint: - host: test + host: 10.199.199.1 certSANs: ["11.11.11.11"] machineGroupRef: name: test diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption.yaml index 04d49efe46f7..e65647d9999a 100644 --- a/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption.yaml +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption.yaml @@ -9,7 +9,7 @@ spec: name: test count: 1 endpoint: - host: test + host: 10.199.199.1 machineGroupRef: name: test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption_1_29.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption_1_29.yaml index 211d33c52648..c5569ff7f38a 100644 --- a/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption_1_29.yaml +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_etcd_encryption_1_29.yaml @@ -9,7 +9,7 @@ spec: name: test count: 1 endpoint: - host: test + host: 10.199.199.1 machineGroupRef: name: test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml index c5750e15cfb5..2e682b13d041 100644 --- a/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_failure_domains.yaml @@ -9,7 +9,7 @@ spec: name: test count: 1 endpoint: - host: test + host: 10.199.199.1 machineGroupRef: name: test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_with_invalid_trust_bundle.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_with_invalid_trust_bundle.yaml index 76b62e35c6d3..9eee11e2a8ac 100644 --- a/pkg/providers/nutanix/testdata/cluster_nutanix_with_invalid_trust_bundle.yaml +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_with_invalid_trust_bundle.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml index d2a0d595d843..3db1283da63b 100644 --- a/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml +++ 
b/pkg/providers/nutanix/testdata/cluster_nutanix_with_trust_bundle.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml index 6ee3145e2d0f..5446d7a33a2b 100644 --- a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_cp.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml index 9f6520bc4b1d..b51d1f83871b 100644 --- a/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml +++ b/pkg/providers/nutanix/testdata/cluster_nutanix_with_upgrade_strategy_md.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips.yaml new file mode 100644 index 000000000000..27073728ea8b --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips.yaml @@ -0,0 +1,15 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + ccmExcludeNodeIPs: + - 10.0.0.1 + - 10.0.0.0/24 + - 10.0.0.10-10.0.0.30 diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_cidr.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_cidr.yaml new file mode 100644 index 000000000000..b39f08e1cef6 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_cidr.yaml @@ -0,0 +1,16 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + ccmExcludeNodeIPs: + - 10.0.0.1 + - 10.0.0.0/24 + - 10.0.0.10-10.0.0.30 + - 10.100.0.0//16 diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip.yaml new file mode 100644 index 000000000000..9254b7e1eff4 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip.yaml @@ -0,0 +1,16 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + ccmExcludeNodeIPs: + - 10.0.0.1 + - 10.0.0.0/24 + - 10.0.0.10-10.0.0.30 + - 244.244.01 diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range1.yaml 
b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range1.yaml new file mode 100644 index 000000000000..606bf924f122 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range1.yaml @@ -0,0 +1,16 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + ccmExcludeNodeIPs: + - 10.0.0.1 + - 10.0.0.0/24 + - 10.0.0.10-10.0.0.30 + - 10.100.0.10-10.100.10.10-10.100.20.30 diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range2.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range2.yaml new file mode 100644 index 000000000000..62c9fcbd1da9 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range2.yaml @@ -0,0 +1,16 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + ccmExcludeNodeIPs: + - 10.0.0.1 + - 10.0.0.0/24 + - 10.0.0.10-10.0.0.30 + - 192.179.1.1-10.1.1.1 diff --git a/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range3.yaml b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range3.yaml new file mode 100644 index 000000000000..71df1fc77698 --- /dev/null +++ b/pkg/providers/nutanix/testdata/datacenterConfig_with_ccm_exclude_node_ips_invalid_ip_range3.yaml @@ -0,0 +1,17 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + ccmExcludeNodeIPs: + - 10.0.0.1 + - 10.0.0.0/24 + - 10.0.0.10-10.0.0.30 + - 10.0.10.0-::1 + diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-additional-categories.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-additional-categories.yaml index 4e1fb6b13066..918fa62c45a1 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-additional-categories.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-additional-categories.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-autoscaler.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-autoscaler.yaml index 54ce8300c926..e9daf3759d31 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-autoscaler.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-autoscaler.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-ccm-exclude-node-ips.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-ccm-exclude-node-ips.yaml new file mode 100644 index 000000000000..9d3206347103 --- /dev/null +++ b/pkg/providers/nutanix/testdata/eksa-cluster-ccm-exclude-node-ips.yaml @@ -0,0 +1,73 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: eksa-unit-test + namespace: 
default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: eksa-unit-test + count: 3 + endpoint: + host: 10.199.199.1 + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + workerNodeGroupConfigurations: + - count: 4 + name: eksa-unit-test + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: eksa-unit-test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + ccmExcludeNodeIPs: + - "127.100.200.101" + - "10.10.10.10-10.10.10.13" + - "10.123.0.0/29" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image-1-19" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-k8s-1-20.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-k8s-1-20.yaml index d3874dcc99b2..1df3211dcae2 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-k8s-1-20.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-k8s-1-20.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test-cp count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-with-optional.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-with-optional.yaml index d52a906d99ea..65fa0a222fa5 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-with-optional.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd-with-optional.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test-cp count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd.yaml index 2787ac45a6e6..ec764fb18bbe 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-external-etcd.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test-cp count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-gpus.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-gpus.yaml new file mode 100644 index 000000000000..bbb0f5fb5683 --- /dev/null +++ b/pkg/providers/nutanix/testdata/eksa-cluster-gpus.yaml @@ -0,0 +1,75 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: eksa-unit-test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: eksa-unit-test + count: 3 + endpoint: + host: 10.199.199.1 + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + workerNodeGroupConfigurations: + - count: 4 + name:
eksa-unit-test + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: eksa-unit-test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + gpus: + - type: deviceID + deviceID: 8757 + - type: name + name: "Ampere 40" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-iamauth.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-iamauth.yaml index 03b45ec93b0f..7e3945195f96 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-iamauth.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-iamauth.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml index 18b4ec1eabbf..906fd3d4945c 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-kc.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml index 8857498edc4c..ea45e6349fed 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-pc.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml index ffa66ec8d7fb..0018f21a95e2 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-invalid-pe-cluster-random-name.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-irsa.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-irsa.yaml index 3b767cfad8ca..a4dd7d2252c7 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-irsa.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-irsa.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git 
a/pkg/providers/nutanix/testdata/eksa-cluster-multi-worker-fds.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-multi-worker-fds.yaml new file mode 100644 index 000000000000..bc9fefaca947 --- /dev/null +++ b/pkg/providers/nutanix/testdata/eksa-cluster-multi-worker-fds.yaml @@ -0,0 +1,177 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: eksa-unit-test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: eksa-unit-test + count: 3 + endpoint: + host: 10.199.199.1 + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + workerNodeGroupConfigurations: + - count: 4 + name: eksa-unit-test-1 + machineGroupRef: + name: eksa-unit-test-1 + kind: NutanixMachineConfig + - count: 3 + name: eksa-unit-test-2 + machineGroupRef: + name: eksa-unit-test-2 + kind: NutanixMachineConfig + - count: 2 + name: eksa-unit-test-3 + machineGroupRef: + name: eksa-unit-test-3 + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: eksa-unit-test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + failureDomains: + - name: "pe1" + workerMachineGroups: + - eksa-unit-test-1 + - eksa-unit-test-2 + - eksa-unit-test-3 + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + workerMachineGroups: + - eksa-unit-test-1 + - eksa-unit-test-2 + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test-1 + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test-2 + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 2 + memorySize: 16Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test-3 + namespace: default +spec: + vcpusPerSocket: 2 + vcpuSockets: 4 + memorySize: 4Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + 
name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml index b00a7215efe3..cbef7966dff6 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-multiple-machineconfigs.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test-cp kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-multiple-worker-md.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-multiple-worker-md.yaml index aaf7d81714e9..80b592399234 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-multiple-worker-md.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-multiple-worker-md.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-no-credentialref.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-no-credentialref.yaml index 3652765e81b0..fe6de7fb0394 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-no-credentialref.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-no-credentialref.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-node-taints-labels.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-node-taints-labels.yaml index 4535beebe12d..9393677961b5 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-node-taints-labels.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-node-taints-labels.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-oidc.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-oidc.yaml index d060ab86f39e..bc2f9ea8d268 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-oidc.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-oidc.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-project.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-project.yaml index 563a6c4215b7..0d0dd6c612d1 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-project.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-project.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-proxy.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-proxy.yaml index 86c2739e5d82..7b2c0af13533 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-proxy.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-proxy.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: 
eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-registry-mirror.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-registry-mirror.yaml index 50a52170bb5e..965f17559cb1 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-registry-mirror.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-registry-mirror.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-worker-fds.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-worker-fds.yaml new file mode 100644 index 000000000000..f6a13a154909 --- /dev/null +++ b/pkg/providers/nutanix/testdata/eksa-cluster-worker-fds.yaml @@ -0,0 +1,89 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: eksa-unit-test + namespace: default +spec: + kubernetesVersion: "1.19" + controlPlaneConfiguration: + name: eksa-unit-test + count: 3 + endpoint: + host: 10.199.199.1 + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + workerNodeGroupConfigurations: + - count: 4 + name: eksa-unit-test + machineGroupRef: + name: eksa-unit-test + kind: NutanixMachineConfig + datacenterRef: + kind: NutanixDatacenterConfig + name: eksa-unit-test + clusterNetwork: + cni: "cilium" + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixDatacenterConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + endpoint: "prism.nutanix.com" + port: 9440 + credentialRef: + kind: Secret + name: "nutanix-credentials" + failureDomains: + - name: "pe1" + workerMachineGroups: + - eksa-unit-test + cluster: + type: name + name: "prism-cluster" + subnets: + - type: uuid + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + - name: "pe2" + workerMachineGroups: + - eksa-unit-test + cluster: + type: uuid + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: name + name: "prism-subnet" +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: NutanixMachineConfig +metadata: + name: eksa-unit-test + namespace: default +spec: + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + image: + type: "name" + name: "prism-image" + cluster: + type: "name" + name: "prism-cluster" + subnet: + type: "name" + name: "prism-subnet" + systemDiskSize: 40Gi + osFamily: "ubuntu" + users: + - name: "mySshUsername" + sshAuthorizedKeys: + - "mySshAuthorizedKey" +--- diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-worker-version.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-worker-version.yaml index 37721376966f..d6bc3872b185 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster-worker-version.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster-worker-version.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/eksa-cluster.json b/pkg/providers/nutanix/testdata/eksa-cluster.json index 65ef0da10fb4..70b517417837 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster.json +++ b/pkg/providers/nutanix/testdata/eksa-cluster.json @@ -11,7 +11,7 @@ "name": "eksa-unit-test", "count": 3, "endpoint": { - "host": "test-ip" + "host": "10.199.199.1" }, "machineGroupRef": { "name": "eksa-unit-test", diff --git a/pkg/providers/nutanix/testdata/eksa-cluster.yaml 
b/pkg/providers/nutanix/testdata/eksa-cluster.yaml index 742b30d482f8..bd6eb91ff1e7 100644 --- a/pkg/providers/nutanix/testdata/eksa-cluster.yaml +++ b/pkg/providers/nutanix/testdata/eksa-cluster.yaml @@ -9,7 +9,7 @@ spec: name: eksa-unit-test count: 3 endpoint: - host: test-ip + host: 10.199.199.1 machineGroupRef: name: eksa-unit-test kind: NutanixMachineConfig diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml index c7b78abe1447..be8a747e627d 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_additional_trust_bundle.yaml @@ -37,7 +37,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -132,7 +132,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -458,7 +458,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml index e07e818237ea..c393421b295b 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_domain_name.yaml @@ -13,7 +13,7 @@ spec: name: "capx-test" kind: Secret controlPlaneEndpoint: - host: "test" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -109,7 +109,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -401,7 +401,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml index be3eead5c104..2d3e8df162ca 100644 --- a/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml +++ b/pkg/providers/nutanix/testdata/expected_cluster_api_server_cert_san_ip.yaml @@ -13,7 +13,7 @@ spec: name: "capx-test" kind: Secret controlPlaneEndpoint: - host: "test" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -109,7 +109,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -401,7 +401,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_cluster_ccm_exclude_node_ips.yaml b/pkg/providers/nutanix/testdata/expected_cluster_ccm_exclude_node_ips.yaml new file mode 100644 index 000000000000..f620f2421a02 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_cluster_ccm_exclude_node_ips.yaml @@ -0,0 +1,612 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" 
+spec: + failureDomains: [] + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-eksa-unit-test" + kind: Secret + controlPlaneEndpoint: + host: "10.199.199.1" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "eksa-unit-test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + replicas: 3 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "10.199.199.1" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - 
"RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image-1-19" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eksa-unit-test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + 
"insecure": false, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + }, + "ignoredNodeIPs": ["10.199.199.1"] + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: eksa-unit-test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: 
"eksa-unit-test" + resources: + - kind: ConfigMap + name: eksa-unit-test-nutanix-ccm + - kind: Secret + name: eksa-unit-test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_cp.yaml b/pkg/providers/nutanix/testdata/expected_cp.yaml index 81569fb33035..0c170c59f943 100644 --- a/pkg/providers/nutanix/testdata/expected_cp.yaml +++ b/pkg/providers/nutanix/testdata/expected_cp.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -117,7 +117,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -408,7 +408,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml b/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml index f291088809f1..2285c023c26f 100644 --- a/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_additional_categories.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -108,7 +108,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -405,7 +405,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml index 833a2ec9d811..6efa6b4f7901 100644 --- a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption.yaml @@ -13,7 +13,7 @@ spec: name: "capx-test" kind: Secret controlPlaneEndpoint: - host: "test" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -158,7 +158,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -450,7 +450,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml index 9e212a7a23c2..87d6f892fcc3 100644 --- a/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_etcd_encryption_1_29.yaml @@ -13,7 
+13,7 @@ spec: name: "capx-test" kind: Secret controlPlaneEndpoint: - host: "test" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -159,7 +159,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -480,7 +480,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml index fe8fbcd9ccc6..d2be84079854 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -115,7 +115,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -462,7 +462,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml index d60332022f26..46422494ba36 100644 --- a/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_external_etcd_with_optional.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -115,7 +115,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -476,7 +476,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml index b3ff855aa819..0c1287304ea5 100644 --- a/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_failure_domains.yaml @@ -33,7 +33,7 @@ spec: name: "capx-test" kind: Secret controlPlaneEndpoint: - host: "test" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -128,7 +128,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -420,7 +420,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_gpus.yaml b/pkg/providers/nutanix/testdata/expected_results_gpus.yaml new file mode 100644 index 000000000000..643d5a1b5251 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_gpus.yaml @@ -0,0 +1,612 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "eksa-unit-test" + namespace: 
"eksa-system" +spec: + failureDomains: [] + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-eksa-unit-test" + kind: Secret + controlPlaneEndpoint: + host: "10.199.199.1" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "eksa-unit-test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + replicas: 3 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "10.199.199.1" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + 
omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get responses can be large; skip them.
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eksa-unit-test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": 
false, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + }, + "ignoredNodeIPs": ["10.199.199.1"] + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: eksa-unit-test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: 
"eksa-unit-test" + resources: + - kind: ConfigMap + name: eksa-unit-test-nutanix-ccm + - kind: Secret + name: eksa-unit-test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml b/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml index cded83bf575d..5afa2d4812b6 100644 --- a/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_iamauth.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -117,7 +117,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -444,7 +444,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_irsa.yaml b/pkg/providers/nutanix/testdata/expected_results_irsa.yaml index 5f80eb7c2b1f..3d3bad6bed6a 100644 --- a/pkg/providers/nutanix/testdata/expected_results_irsa.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_irsa.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -109,7 +109,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -401,7 +401,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds.yaml b/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds.yaml new file mode 100644 index 000000000000..430a79306cf8 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_multi_worker_fds.yaml @@ -0,0 +1,628 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + failureDomains: + - name: "pe1" + cluster: + type: "name" + name: "prism-cluster" + subnets: + - type: "uuid" + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + controlPlane: true + - name: "pe2" + cluster: + type: "uuid" + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: "name" + name: "prism-subnet" + controlPlane: true + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-eksa-unit-test" + kind: Secret + controlPlaneEndpoint: + host: "10.199.199.1" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: 
[10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "eksa-unit-test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + replicas: 3 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "10.199.199.1" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get responses can be large; skip them.
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eksa-unit-test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": 
false, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + }, + "ignoredNodeIPs": ["10.199.199.1"] + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: eksa-unit-test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: 
"eksa-unit-test" + resources: + - kind: ConfigMap + name: eksa-unit-test-nutanix-ccm + - kind: Secret + name: eksa-unit-test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml index 6e440f19f2b0..4a6b20890f00 100644 --- a/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_node_taints_labels.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -108,7 +108,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -410,7 +410,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_oidc.yaml b/pkg/providers/nutanix/testdata/expected_results_oidc.yaml index 0bafba10a28d..53da1ed37f2a 100644 --- a/pkg/providers/nutanix/testdata/expected_results_oidc.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_oidc.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -110,7 +110,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -402,7 +402,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_project.yaml b/pkg/providers/nutanix/testdata/expected_results_project.yaml index f42167b0b87e..b9c660caf7da 100644 --- a/pkg/providers/nutanix/testdata/expected_results_project.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_project.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -108,7 +108,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -404,7 +404,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_proxy.yaml b/pkg/providers/nutanix/testdata/expected_results_proxy.yaml index ce933ceca1ce..030375af59db 100644 --- a/pkg/providers/nutanix/testdata/expected_results_proxy.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_proxy.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: 
Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -108,7 +108,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -154,7 +154,7 @@ spec: [Service] Environment="HTTP_PROXY=proxy.nutanix.com:8888" Environment="HTTPS_PROXY=proxy.nutanix.com:8888" - Environment="NO_PROXY=192.168.0.0/16,10.96.0.0/12,noproxy1.nutanix.com,noproxy2.nutanix.com,noproxy3.nutanix.com,localhost,127.0.0.1,.svc,prism.nutanix.com,test-ip" + Environment="NO_PROXY=192.168.0.0/16,10.96.0.0/12,noproxy1.nutanix.com,noproxy2.nutanix.com,noproxy3.nutanix.com,localhost,127.0.0.1,.svc,prism.nutanix.com,10.199.199.1" owner: root:root path: /etc/systemd/system/containerd.service.d/http-proxy.conf - content: | @@ -409,7 +409,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml b/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml index 167041328fe5..39c92faa6af3 100644 --- a/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_proxy_md.yaml @@ -84,7 +84,7 @@ spec: [Service] Environment="HTTP_PROXY=proxy.nutanix.com:8888" Environment="HTTPS_PROXY=proxy.nutanix.com:8888" - Environment="NO_PROXY=192.168.0.0/16,10.96.0.0/12,noproxy1.nutanix.com,noproxy2.nutanix.com,noproxy3.nutanix.com,localhost,127.0.0.1,.svc,prism.nutanix.com,test-ip" + Environment="NO_PROXY=192.168.0.0/16,10.96.0.0/12,noproxy1.nutanix.com,noproxy2.nutanix.com,noproxy3.nutanix.com,localhost,127.0.0.1,.svc,prism.nutanix.com,10.199.199.1" owner: root:root path: /etc/systemd/system/containerd.service.d/http-proxy.conf diff --git a/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml b/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml index 97c32981ec50..bf2ecf3e3e9d 100644 --- a/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml +++ b/pkg/providers/nutanix/testdata/expected_results_registry_mirror.yaml @@ -13,7 +13,7 @@ spec: name: "capx-eksa-unit-test" kind: Secret controlPlaneEndpoint: - host: "test-ip" + host: "10.199.199.1" port: 6443 --- apiVersion: cluster.x-k8s.io/v1beta1 @@ -108,7 +108,7 @@ spec: - name: vip_arp value: "true" - name: address - value: "test-ip" + value: "10.199.199.1" - name: port value: "6443" - name: vip_cidr @@ -450,7 +450,8 @@ data: "enableCustomLabeling": false, "topologyDiscovery": { "type": "Prism" - } + }, + "ignoredNodeIPs": ["10.199.199.1"] } --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/providers/nutanix/testdata/expected_results_worker_fds.yaml b/pkg/providers/nutanix/testdata/expected_results_worker_fds.yaml new file mode 100644 index 000000000000..430a79306cf8 --- /dev/null +++ b/pkg/providers/nutanix/testdata/expected_results_worker_fds.yaml @@ -0,0 +1,628 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixCluster +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + failureDomains: + - name: "pe1" + cluster: + type: "name" + name: "prism-cluster" + subnets: + - type: "uuid" + uuid: "2d166190-7759-4dc6-b835-923262d6b497" + controlPlane: true + - name: "pe2" + cluster: + type: "uuid" + uuid: "4d69ca7d-022f-49d1-a454-74535993bda4" + subnets: + - type: "name" + name: "prism-subnet" + controlPlane: 
true + prismCentral: + address: "prism.nutanix.com" + port: 9440 + insecure: false + credentialRef: + name: "capx-eksa-unit-test" + kind: Secret + controlPlaneEndpoint: + host: "10.199.199.1" + port: 6443 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "eksa-unit-test" + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + clusterNetwork: + services: + cidrBlocks: [10.96.0.0/12] + pods: + cidrBlocks: [192.168.0.0/16] + serviceDomain: "cluster.local" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "eksa-unit-test" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixCluster + name: "eksa-unit-test" +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "eksa-unit-test" + namespace: "eksa-system" +spec: + replicas: 3 + version: "v1.19.8-eks-1-19-4" + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: NutanixMachineTemplate + name: "" + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: "public.ecr.aws/eks-distro/kubernetes" + apiServer: + certSANs: + - localhost + - 127.0.0.1 + - 0.0.0.0 + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + enable-hostpath-provisioner: "true" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + etcd: + local: + imageRepository: public.ecr.aws/eks-distro/etcd-io + imageTag: v3.4.14-eks-1-19-4 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - name: kube-vip + image: + imagePullPolicy: IfNotPresent + args: + - manager + env: + - name: vip_arp + value: "true" + - name: address + value: "10.199.199.1" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: svc_enable + value: "false" + - name: lb_enable + value: "false" + securityContext: + capabilities: + add: + - NET_ADMIN + - SYS_TIME + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + resources: {} + hostNetwork: true + volumes: + - name: kubeconfig + hostPath: + type: FileOrCreate + path: /etc/kubernetes/admin.conf + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The 
following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get responses can be large; skip them.
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd + # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726 + #cgroup-driver: cgroupfs + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: "{{ ds.meta_data.hostname }}" + users: + - name: "mySshUsername" + lockPassword: false + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - "mySshAuthorizedKey" + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts + postKubeadmCommands: + - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc + useExperimentalRetryJoin: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: NutanixMachineTemplate +metadata: + name: "" + namespace: "eksa-system" +spec: + template: + spec: + providerID: "nutanix://eksa-unit-test-m1" + vcpusPerSocket: 1 + vcpuSockets: 4 + memorySize: 8Gi + systemDiskSize: 40Gi + image: + type: name + name: "prism-image" + + cluster: + type: name + name: "prism-cluster" + subnet: + - type: name + name: "prism-subnet" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: eksa-unit-test-nutanix-ccm + namespace: "eksa-system" +data: + nutanix-ccm.yaml: | + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: nutanix-config + namespace: kube-system + data: + nutanix_config.json: |- + { + "prismCentral": { + "address": "prism.nutanix.com", + "port": 9440, + "insecure": 
false, + "credentialRef": { + "kind": "secret", + "name": "nutanix-creds", + "namespace": "kube-system" + } + }, + "enableCustomLabeling": false, + "topologyDiscovery": { + "type": "Prism" + }, + "ignoredNodeIPs": ["10.199.199.1"] + } + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + --- + kind: ClusterRoleBinding + apiVersion: rbac.authorization.k8s.io/v1 + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + name: nutanix-cloud-controller-manager + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + strategy: + type: Recreate + template: + metadata: + labels: + k8s-app: nutanix-cloud-controller-manager + spec: + hostNetwork: true + priorityClassName: system-cluster-critical + nodeSelector: + node-role.kubernetes.io/control-plane: "" + serviceAccountName: cloud-controller-manager + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + k8s-app: nutanix-cloud-controller-manager + topologyKey: kubernetes.io/hostname + dnsPolicy: Default + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + containers: + - image: "" + imagePullPolicy: IfNotPresent + name: nutanix-cloud-controller-manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - "--leader-elect=true" + - "--cloud-config=/etc/cloud/nutanix_config.json" + resources: + requests: + cpu: 100m + memory: 50Mi + volumeMounts: + - mountPath: /etc/cloud + name: nutanix-config-volume + readOnly: true + volumes: + - name: nutanix-config-volume + configMap: + name: nutanix-config +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: eksa-unit-test-nutanix-ccm-crs + namespace: "eksa-system" +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: 
"eksa-unit-test" + resources: + - kind: ConfigMap + name: eksa-unit-test-nutanix-ccm + - kind: Secret + name: eksa-unit-test-nutanix-ccm-secret + strategy: Reconcile +--- +apiVersion: v1 +kind: Secret +metadata: + name: "eksa-unit-test-nutanix-ccm-secret" + namespace: "eksa-system" +stringData: + nutanix-ccm-secret.yaml: | + apiVersion: v1 + kind: Secret + metadata: + name: nutanix-creds + namespace: kube-system + stringData: + credentials: |- + [ + { + "type": "basic_auth", + "data": { + "prismCentral": { + "username": "admin", + "password": "password" + }, + "prismElements": null + } + } + ] +type: addons.cluster.x-k8s.io/resource-set diff --git a/pkg/providers/nutanix/validator.go b/pkg/providers/nutanix/validator.go index 827d91b20652..59f325a93fd1 100644 --- a/pkg/providers/nutanix/validator.go +++ b/pkg/providers/nutanix/validator.go @@ -3,6 +3,7 @@ package nutanix import ( "context" "fmt" + "net" "net/http" "regexp" "strconv" @@ -49,6 +50,15 @@ func NewValidator(clientCache *ClientCache, certValidator crypto.TlsValidator, h } } +func (v *Validator) validateControlPlaneIP(ip string) error { + // check if controlPlaneEndpointIp is valid + parsedIP := net.ParseIP(ip) + if parsedIP == nil { + return fmt.Errorf("cluster controlPlaneConfiguration.Endpoint.Host is invalid: %s", ip) + } + return nil +} + // ValidateClusterSpec validates the cluster spec. func (v *Validator) ValidateClusterSpec(ctx context.Context, spec *cluster.Spec, creds credentials.BasicAuthCredential) error { logger.Info("ValidateClusterSpec for Nutanix datacenter", "NutanixDatacenter", spec.NutanixDatacenter.Name) @@ -61,6 +71,10 @@ func (v *Validator) ValidateClusterSpec(ctx context.Context, spec *cluster.Spec, return err } + if err := v.validateControlPlaneIP(spec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host); err != nil { + return err + } + for _, conf := range spec.NutanixMachineConfigs { if err := v.ValidateMachineConfig(ctx, client, conf); err != nil { return fmt.Errorf("failed to validate machine config: %v", err)