diff --git a/magnum_capi_helm/driver.py b/magnum_capi_helm/driver.py
index 336ed95..8f5e40a 100644
--- a/magnum_capi_helm/driver.py
+++ b/magnum_capi_helm/driver.py
@@ -472,6 +472,16 @@ def _label(self, cluster, key, default):
         # NOTE(johngarbutt): filtering untrusted user input
         return re.sub(r"[^a-zA-Z0-9\.\-\/ ]+", "", raw)
 
+    def _get_label_bool(self, cluster, label, default):
+        cluster_label = self._label(cluster, label, "")
+        if not cluster_label:
+            return default
+        if default:
+            # Default is on, so return for any value except "false"
+            return cluster_label != "false"
+        # Default is False, so only "true" responds with True
+        return cluster_label == "true"
+
     def _get_chart_version(self, cluster):
         version = cluster.cluster_template.labels.get(
             "capi_helm_chart_version",
@@ -519,15 +529,16 @@ def _get_dns_nameservers(self, cluster):
         return None
 
     def _get_monitoring_enabled(self, cluster):
-        mon_label = self._label(cluster, "monitoring_enabled", "")
-        # NOTE(mkjpryor) default of, like heat driver,
-        # as requires cinder and takes a while
-        return mon_label == "true"
+        # NOTE(mkjpryor) default off, like heat driver,
+        # as requires cinder and takes a while
+        return self._get_label_bool(cluster, "monitoring_enabled", False)
 
     def _get_kube_dash_enabled(self, cluster):
-        kube_dash_label = self._label(cluster, "kube_dashboard_enabled", "")
-        # NOTE(mkjpryor) default on, like the heat driver
-        return kube_dash_label != "false"
+        # NOTE(mkjpryor) default on, like the heat driver
+        return self._get_label_bool(cluster, "kube_dashboard_enabled", True)
+
+    def _get_autoheal_enabled(self, cluster):
+        return self._get_label_bool(cluster, "auto_healing_enabled", True)
 
     def _get_fixed_network_id(self, context, cluster):
         network = cluster.fixed_network
@@ -550,6 +561,8 @@ def _update_helm_release(self, context, cluster, nodegroups=None):
         network_id = self._get_fixed_network_id(context, cluster)
         subnet_id = neutron.get_fixed_subnet_id(context, cluster.fixed_subnet)
 
+        kubenetwork_pod_cidr = "172.16.0.0/13"
+        kubenetwork_services_cidr = "172.24.0.0/13"
 
         values = {
             "kubernetesVersion": kube_version,
@@ -578,9 +591,29 @@ def _update_helm_release(self, context, cluster, nodegroups=None):
                     ),
                 },
             },
+            "kubeNetwork": {
+                "pods": {
+                    "cidrBlocks": [
+                        kubenetwork_pod_cidr,
+                    ],
+                },
+                "services": {
+                    "cidrBlocks": [
+                        kubenetwork_services_cidr,
+                    ],
+                },
+            },
             "controlPlane": {
                 "machineFlavor": cluster.master_flavor_id,
                 "machineCount": cluster.master_count,
+                "healthCheck": {
+                    "enabled": self._get_autoheal_enabled(cluster),
+                },
+            },
+            "nodeGroupDefaults": {
+                "healthCheck": {
+                    "enabled": self._get_autoheal_enabled(cluster),
+                },
             },
             "nodeGroups": [
                 {
@@ -604,23 +637,45 @@ def _update_helm_release(self, context, cluster, nodegroups=None):
             },
         }
 
+        # Add boot disk details, if defined in config file.
+        # Helm chart defaults to ephemeral disks, if unset.
+        if CONF.cinder.default_boot_volume_type:
+            disk_details = {
+                "controlPlane": {
+                    "machineRootVolume": {
+                        "volumeType": CONF.cinder.default_boot_volume_type,
+                        "diskSize": CONF.cinder.default_boot_volume_size or "",
+                    }
+                },
+                "nodeGroupDefaults": {
+                    "machineRootVolume": {
+                        "volumeType": CONF.cinder.default_boot_volume_type,
+                        "diskSize": CONF.cinder.default_boot_volume_size or "",
+                    }
+                },
+            }
+            values = helm.mergeconcat(values, disk_details)
+
         # Sometimes you need to add an extra network
         # for things like Cinder CSI CephFS Native
         extra_network_name = self._label(cluster, "extra_network_name", "")
         if extra_network_name:
-            values["nodeGroupDefaults"] = {
-                "machineNetworking": {
-                    "ports": [
-                        {},
-                        {
-                            "network": {
-                                "name": extra_network_name,
+            network_values = {
+                "nodeGroupDefaults": {
+                    "machineNetworking": {
+                        "ports": [
+                            {},
+                            {
+                                "network": {
+                                    "name": extra_network_name,
+                                },
+                                "securityGroups": [],
                             },
-                            "securityGroups": [],
-                        },
-                    ]
+                        ]
+                    }
                 }
             }
+            values = helm.mergeconcat(values, network_values)
 
         self._helm_client.install_or_upgrade(
             self._get_chart_release_name(cluster),
diff --git a/magnum_capi_helm/tests/test_driver.py b/magnum_capi_helm/tests/test_driver.py
index e47862b..d99e42b 100644
--- a/magnum_capi_helm/tests/test_driver.py
+++ b/magnum_capi_helm/tests/test_driver.py
@@ -1138,6 +1138,18 @@ def test_create_cluster(
                 },
                 "dnsNameservers": ["8.8.1.1"],
             },
+            "kubeNetwork": {
+                "pods": {
+                    "cidrBlocks": [
+                        "172.16.0.0/13",
+                    ],
+                },
+                "services": {
+                    "cidrBlocks": [
+                        "172.24.0.0/13",
+                    ],
+                },
+            },
             "apiServer": {
                 "enableLoadBalancer": True,
                 "loadBalancerProvider": "amphora",
@@ -1145,6 +1157,7 @@ def test_create_cluster(
             "controlPlane": {
                 "machineFlavor": "flavor_small",
                 "machineCount": 3,
+                "healthCheck": {"enabled": True},
             },
             "addons": {
                 "monitoring": {"enabled": False},
@@ -1158,6 +1171,9 @@ def test_create_cluster(
                     "machineCount": 3,
                 }
             ],
+            "nodeGroupDefaults": {
+                "healthCheck": {"enabled": True},
+            },
             "machineSSHKeyName": "kp1",
         },
         repo=CONF.capi_helm.helm_chart_repo,
@@ -1211,6 +1227,18 @@ def test_create_cluster_no_dns(
                 },
                 "dnsNameservers": None,
             },
+            "kubeNetwork": {
+                "pods": {
+                    "cidrBlocks": [
+                        "172.16.0.0/13",
+                    ],
+                },
+                "services": {
+                    "cidrBlocks": [
+                        "172.24.0.0/13",
+                    ],
+                },
+            },
             "apiServer": {
                 "enableLoadBalancer": True,
                 "loadBalancerProvider": "amphora",
@@ -1218,6 +1246,7 @@ def test_create_cluster_no_dns(
             "controlPlane": {
                 "machineFlavor": "flavor_small",
                 "machineCount": 3,
+                "healthCheck": {"enabled": True},
             },
             "addons": {
                 "monitoring": {"enabled": False},
@@ -1232,6 +1261,7 @@ def test_create_cluster_no_dns(
                 }
             ],
             "nodeGroupDefaults": {
+                "healthCheck": {"enabled": True},
                 "machineNetworking": {
                     "ports": [
                         {},
@@ -1292,6 +1322,18 @@ def test_create_cluster_no_keypair(
                 },
                 "dnsNameservers": ["8.8.1.1"],
             },
+            "kubeNetwork": {
+                "pods": {
+                    "cidrBlocks": [
+                        "172.16.0.0/13",
+                    ],
+                },
+                "services": {
+                    "cidrBlocks": [
+                        "172.24.0.0/13",
+                    ],
+                },
+            },
             "apiServer": {
                 "enableLoadBalancer": True,
                 "loadBalancerProvider": "amphora",
@@ -1299,6 +1341,7 @@ def test_create_cluster_no_keypair(
             "controlPlane": {
                 "machineFlavor": "flavor_small",
                 "machineCount": 3,
+                "healthCheck": {"enabled": True},
             },
             "addons": {
                 "monitoring": {"enabled": False},
@@ -1312,6 +1355,9 @@ def test_create_cluster_no_keypair(
                     "machineCount": 3,
                 }
            ],
+            "nodeGroupDefaults": {
+                "healthCheck": {"enabled": True},
+            },
             "machineSSHKeyName": None,
         },
         repo=CONF.capi_helm.helm_chart_repo,
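Reviewer sketch, not part of the patch: a minimal standalone illustration of the truth table implemented by the new _get_label_bool() helper, with the cluster/label lookup factored out so it can be run directly. The free function name resolve_bool_label below is hypothetical; in the driver the raw string comes from self._label(cluster, label, "").

def resolve_bool_label(raw_value, default):
    # Mirrors _get_label_bool() once the label string is in hand:
    # an unset (or fully filtered-out) label keeps the chart default.
    if not raw_value:
        return default
    if default:
        # Default-on flags are only disabled by an explicit "false".
        return raw_value != "false"
    # Default-off flags are only enabled by an explicit "true".
    return raw_value == "true"

# Labels wired up to this helper in the change above:
#   monitoring_enabled      defaults to False
#   kube_dashboard_enabled  defaults to True
#   auto_healing_enabled    defaults to True
assert resolve_bool_label("", False) is False        # unset keeps default off
assert resolve_bool_label("true", False) is True     # explicit opt-in
assert resolve_bool_label("", True) is True          # unset keeps default on
assert resolve_bool_label("false", True) is False    # explicit opt-out
assert resolve_bool_label("anything", True) is True  # any other value keeps it on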