diff --git a/cmd/clusterctl/client/tree/discovery.go b/cmd/clusterctl/client/tree/discovery.go
index 76405bfe089e..1904b2030a23 100644
--- a/cmd/clusterctl/client/tree/discovery.go
+++ b/cmd/clusterctl/client/tree/discovery.go
@@ -19,11 +19,13 @@ package tree
 import (
 	"context"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
 )
@@ -76,9 +79,9 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
 	}

 	// Adds control plane
-	controlPLane, err := external.Get(ctx, c, cluster.Spec.ControlPlaneRef, cluster.Namespace)
+	controlPlane, err := external.Get(ctx, c, cluster.Spec.ControlPlaneRef, cluster.Namespace)
 	if err == nil {
-		tree.Add(cluster, controlPLane, ObjectMetaName("ControlPlane"), GroupingObject(true))
+		tree.Add(cluster, controlPlane, ObjectMetaName("ControlPlane"), GroupingObject(true))
 	}

 	// Adds control plane machines.
@@ -96,6 +99,7 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
 			tree.Add(m, machineInfra, ObjectMetaName("MachineInfrastructure"), NoEcho(true))
 		}

+		// Note: this reference doesn't exist on MachinePool Machines.
 		if machineBootstrap, err := external.Get(ctx, c, m.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil {
 			tree.Add(m, machineBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true))
 		}
@@ -105,10 +109,15 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
 	controlPlaneMachines := selectControlPlaneMachines(machinesList)
 	for i := range controlPlaneMachines {
 		cp := controlPlaneMachines[i]
-		addMachineFunc(controlPLane, cp)
+		addMachineFunc(controlPlane, cp)
 	}

-	if len(machinesList.Items) == len(controlPlaneMachines) {
+	machinePoolList, err := getMachinePoolsInCluster(ctx, c, cluster.Namespace, cluster.Name)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(machinesList.Items) == len(controlPlaneMachines) && len(machinePoolList.Items) == 0 {
 		return tree, nil
 	}
@@ -151,6 +160,11 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
 		}
 	}

+	err = addMachinePoolsToObjectTree(ctx, c, cluster, workers, machinePoolList, machinesList, tree, addMachineFunc)
+	if err != nil {
+		return nil, err
+	}
+
 	// Handles orphan machines.
 	if len(machineMap) < len(machinesList.Items) {
 		other := VirtualObject(cluster.Namespace, "OtherGroup", "Other")
@@ -168,6 +182,28 @@ func Discovery(ctx context.Context, c client.Client, namespace, name string, opt
 	return tree, nil
 }

+func addMachinePoolsToObjectTree(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, workers *unstructured.Unstructured, machinePoolList *expv1.MachinePoolList, machinesList *clusterv1.MachineList, tree *ObjectTree, addMachineFunc func(parent client.Object, m *clusterv1.Machine)) error {
+	for i := range machinePoolList.Items {
+		mp := &machinePoolList.Items[i]
+		tree.Add(workers, mp, GroupingObject(true))
+
+		if machinePoolInfra, err := external.Get(ctx, c, &mp.Spec.Template.Spec.InfrastructureRef, cluster.Namespace); err == nil {
+			tree.Add(mp, machinePoolInfra, ObjectMetaName("MachinePoolInfrastructure"), NoEcho(true))
+		}
+
+		// TODO: should this BootstrapConfig go under the MachinePool or individual Machine?
+		if machinePoolBootstrap, err := external.Get(ctx, c, mp.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Namespace); err == nil {
+			tree.Add(mp, machinePoolBootstrap, ObjectMetaName("BootstrapConfig"), NoEcho(true))
+		}
+
+		machines := selectMachinesControlledBy(machinesList, mp)
+		for _, w := range machines {
+			addMachineFunc(mp, w)
+		}
+	}
+
+	return nil
+}
+
 func getMachinesInCluster(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachineList, error) {
 	if name == "" {
 		return nil, nil
@@ -198,6 +234,21 @@ func getMachineDeploymentsInCluster(ctx context.Context, c client.Client, namesp
 	return machineDeploymentList, nil
 }

+func getMachinePoolsInCluster(ctx context.Context, c client.Client, namespace, name string) (*expv1.MachinePoolList, error) {
+	if name == "" {
+		return nil, nil
+	}
+
+	machinePoolList := &expv1.MachinePoolList{}
+	labels := map[string]string{clusterv1.ClusterLabelName: name}
+
+	if err := c.List(ctx, machinePoolList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
+		return nil, err
+	}
+
+	return machinePoolList, nil
+}
+
 func getMachineSetsInCluster(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachineSetList, error) {
 	if name == "" {
 		return nil, nil
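`selectMachinesControlledBy` is called by `addMachinePoolsToObjectTree` above but is not part of this diff; a minimal sketch of such a helper, assuming it matches Machines by their controller owner reference:

```go
package tree

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// selectMachinesControlledBy returns the Machines whose controller owner
// reference points at the given object (here, a MachinePool).
func selectMachinesControlledBy(machineList *clusterv1.MachineList, controller client.Object) []*clusterv1.Machine {
	machines := []*clusterv1.Machine{}
	for i := range machineList.Items {
		m := &machineList.Items[i]
		if metav1.IsControlledBy(m, controller) {
			machines = append(machines, m)
		}
	}
	return machines
}
```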
diff --git a/cmd/clusterctl/internal/scheme/scheme.go b/cmd/clusterctl/internal/scheme/scheme.go
index 4ecaec819772..531a3755f674 100644
--- a/cmd/clusterctl/internal/scheme/scheme.go
+++ b/cmd/clusterctl/internal/scheme/scheme.go
@@ -28,6 +28,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 	addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 )
@@ -44,4 +45,5 @@ func init() {
 	_ = admissionregistration.AddToScheme(Scheme)
 	_ = admissionregistrationv1beta1.AddToScheme(Scheme)
 	_ = addonsv1.AddToScheme(Scheme)
+	_ = expv1.AddToScheme(Scheme)
 }
diff --git a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml
index abc9350babbb..e9cbd697fcb3 100644
--- a/config/crd/bases/cluster.x-k8s.io_machinepools.yaml
+++ b/config/crd/bases/cluster.x-k8s.io_machinepools.yaml
@@ -1017,6 +1017,73 @@ spec:
                 items:
                   type: string
                 type: array
+              infrastructureRefList:
+                description: InfrastructureRefList are the infrastructure references
+                  of machine instances, populated by the provider. This field must
+                  contain the infrastructure references of all instances in the machine
+                  pool. If this list is populated by the provider, `ProviderIDList`
+                  should be left empty.
+                items:
+                  description: 'ObjectReference contains enough information to let
+                    you inspect or modify the referred object. --- New uses of this
+                    type are discouraged because of difficulty describing its usage
+                    when embedded in APIs. 1. Ignored fields. It includes many fields
+                    which are not generally honored. For instance, ResourceVersion
+                    and FieldPath are both very rarely valid in actual usage. 2. Invalid
+                    usage help. It is impossible to add specific help for individual
+                    usage. In most embedded usages, there are particular restrictions
+                    like, "must refer only to types A and B" or "UID not honored"
+                    or "name must be restricted". Those cannot be well described when
+                    embedded. 3. Inconsistent validation. Because the usages are
+                    different, the validation rules are different by usage, which
+                    makes it hard for users to predict what will happen. 4. The fields
+                    are both imprecise and overly precise. Kind is not a precise
+                    mapping to a URL. This can produce ambiguity during interpretation
+                    and require a REST mapping. In most cases, the dependency is
+                    on the group,resource tuple and the version of the actual struct
+                    is irrelevant. 5. We cannot easily change it. Because this type
+                    is embedded in many locations, updates to this type will affect
+                    numerous schemas. Don''t make new APIs embed an underspecified
+                    API type they do not control. Instead of using this type, create
+                    a locally provided and used type that is well-focused on your
+                    reference. For example, ServiceReferences for admission registration:
+                    https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
+                    .'
+                  properties:
+                    apiVersion:
+                      description: API version of the referent.
+                      type: string
+                    fieldPath:
+                      description: 'If referring to a piece of an object instead of
+                        an entire object, this string should contain a valid JSON/Go
+                        field access statement, such as desiredState.manifest.containers[2].
+                        For example, if the object reference is to a container within
+                        a pod, this would take on a value like: "spec.containers{name}"
+                        (where "name" refers to the name of the container that triggered
+                        the event) or if no container name is specified "spec.containers[2]"
+                        (container with index 2 in this pod). This syntax is chosen
+                        only to have some well-defined way of referencing a part of
+                        an object. TODO: this design is not final and this field is
+                        subject to change in the future.'
+                      type: string
+                    kind:
+                      description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                      type: string
+                    name:
+                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                      type: string
+                    namespace:
+                      description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                      type: string
+                    resourceVersion:
+                      description: 'Specific resourceVersion to which this reference
+                        is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                      type: string
+                    uid:
+                      description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                      type: string
+                  type: object
+                type: array
               minReadySeconds:
                 description: Minimum number of seconds for which a newly created machine
                   instances should be ready. Defaults to 0 (machine instance will
@@ -1024,10 +1091,11 @@ spec:
                 format: int32
                 type: integer
               providerIDList:
-                description: ProviderIDList are the identification IDs of machine
-                  instances provided by the provider. This field must match the provider
-                  IDs as seen on the node objects corresponding to a machine pool's
-                  machine instances.
+                description: ProviderIDList are the identifiers of machine instances
+                  populated by the provider. This field must match the provider IDs
+                  as seen on the node objects corresponding to a machine pool's machine
+                  instances. If this list is populated by the provider, `InfrastructureRefList`
+                  should be left empty.
                 items:
                   type: string
                 type: array
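The CRD text above declares `infrastructureRefList` and `providerIDList` mutually exclusive, but nothing in this change enforces that. A MachinePool webhook check along these lines would close the gap (a sketch; the method name is an assumption, not part of this diff):

```go
package v1beta1

import (
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validateInfrastructureRefList rejects specs that populate both
// InfrastructureRefList and ProviderIDList, per the documented contract.
func (m *MachinePool) validateInfrastructureRefList() field.ErrorList {
	var allErrs field.ErrorList
	if len(m.Spec.InfrastructureRefList) > 0 && len(m.Spec.ProviderIDList) > 0 {
		allErrs = append(allErrs, field.Forbidden(
			field.NewPath("spec", "infrastructureRefList"),
			"infrastructureRefList and providerIDList are mutually exclusive; populate only one",
		))
	}
	return allErrs
}
```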
diff --git a/docker-cluster.yaml b/docker-cluster.yaml
new file mode 100644
index 000000000000..892e0764afe4
--- /dev/null
+++ b/docker-cluster.yaml
@@ -0,0 +1,116 @@
+# Creates a cluster with one control-plane node and one worker node
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: my-cluster
+  namespace: default
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks: ["10.96.0.0/12"]
+    pods:
+      cidrBlocks: ["192.168.0.0/16"]
+    serviceDomain: cluster.local
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: controlplane
+    namespace: default
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: DockerCluster
+    name: my-cluster
+    namespace: default
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: controlplane
+  namespace: default
+spec:
+  replicas: 1
+  version: v1.22.4
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: DockerMachineTemplate
+      name: controlplane
+      namespace: default
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      controllerManager:
+        extraArgs:
+          enable-hostpath-provisioner: "true"
+    initConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
+          # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
+          cgroup-driver: cgroupfs
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+    preKubeadmCommands:
+      - sysctl -w fs.inotify.max_user_watches=1048576
+      - sysctl -w fs.inotify.max_user_instances=512
+      - sysctl -w vm.max_map_count=524288
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerCluster
+metadata:
+  name: my-cluster
+  namespace: default
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachineTemplate
+metadata:
+  name: controlplane
+  namespace: default
+spec:
+  template:
+    spec: {}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachinePool
+metadata:
+  name: worker-mp-0
+  namespace: default
+  annotations:
+    cluster.k8s.io/cluster-api-autoscaler-node-group-min-size: "1"
+    cluster.k8s.io/cluster-api-autoscaler-node-group-max-size: "10"
+spec:
+  clusterName: my-cluster
+  replicas: 1
+  template:
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfig
+          name: worker-mp-0-config
+          namespace: default
+      clusterName: my-cluster
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: DockerMachinePool
+        name: worker-dmp-0
+        namespace: default
+      version: v1.22.4
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: DockerMachinePool
+metadata:
+  name: worker-dmp-0
+  namespace: default
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfig
+metadata:
+  name: worker-mp-0-config
+  namespace: default
+spec:
+  joinConfiguration:
+    nodeRegistration:
+      kubeletExtraArgs:
+        # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
+        # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
+        cgroup-driver: cgroupfs
+        eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
diff --git a/exp/api/v1alpha3/conversion.go b/exp/api/v1alpha3/conversion.go
index 47cedb064ccf..f4240d26babc 100644
--- a/exp/api/v1alpha3/conversion.go
+++ b/exp/api/v1alpha3/conversion.go
@@ -18,6 +18,8 @@ package v1alpha3
 import (
 	apimachineryconversion "k8s.io/apimachinery/pkg/conversion"
+	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/conversion"
@@ -57,26 +59,55 @@ func Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(in *expv1.MachinePool,
 	return nil
 }

+// ConvertTo converts a v1alpha3 MachinePool to the hub version.
 func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*expv1.MachinePool)
+	if err := Convert_v1alpha3_MachinePool_To_v1beta1_MachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &expv1.MachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	// Restore list of infrastructure references.
+	dst.Spec.InfrastructureRefList = restored.Spec.InfrastructureRefList

-	return Convert_v1alpha3_MachinePool_To_v1beta1_MachinePool(src, dst, nil)
+	return nil
 }

+// ConvertFrom converts a hub version MachinePool to v1alpha3.
 func (dst *MachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*expv1.MachinePool)
+	if err := Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion.
+	if err := utilconversion.MarshalData(src, dst); err != nil {
+		return err
+	}

-	return Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(src, dst, nil)
+	return nil
 }

+// ConvertTo converts a v1alpha3 MachinePoolList to the hub version.
 func (src *MachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*expv1.MachinePoolList)

 	return Convert_v1alpha3_MachinePoolList_To_v1beta1_MachinePoolList(src, dst, nil)
 }

+// ConvertFrom converts a hub version MachinePoolList to v1alpha3.
 func (dst *MachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*expv1.MachinePoolList)

 	return Convert_v1beta1_MachinePoolList_To_v1alpha3_MachinePoolList(src, dst, nil)
 }
+
+// Convert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec converts the hub MachinePoolSpec to v1alpha3.
+// InfrastructureRefList has no v1alpha3 equivalent; it is dropped here and preserved via annotation data.
+func Convert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(in *expv1.MachinePoolSpec, out *MachinePoolSpec, s apimachineryconversion.Scope) error {
+	return autoConvert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(in, out, s)
+}
diff --git a/exp/api/v1alpha3/zz_generated.conversion.go b/exp/api/v1alpha3/zz_generated.conversion.go
index d152713ecaf9..8bb63a787780 100644
--- a/exp/api/v1alpha3/zz_generated.conversion.go
+++ b/exp/api/v1alpha3/zz_generated.conversion.go
@@ -50,11 +50,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.MachinePoolSpec)(nil), (*MachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(a.(*v1beta1.MachinePoolSpec), b.(*MachinePoolSpec), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddGeneratedConversionFunc((*MachinePoolStatus)(nil), (*v1beta1.MachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha3_MachinePoolStatus_To_v1beta1_MachinePoolStatus(a.(*MachinePoolStatus), b.(*v1beta1.MachinePoolStatus), scope)
 	}); err != nil {
@@ -75,6 +70,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*v1beta1.MachinePoolSpec)(nil), (*MachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(a.(*v1beta1.MachinePoolSpec), b.(*MachinePoolSpec), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddConversionFunc((*v1beta1.MachinePool)(nil), (*MachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(a.(*v1beta1.MachinePool), b.(*MachinePool), scope)
 	}); err != nil {
@@ -167,16 +167,12 @@ func autoConvert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(in *v1beta1
 		return err
 	}
 	out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds))
+	// WARNING: in.InfrastructureRefList requires manual conversion: does not exist in peer-type
 	out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
 	out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains))
 	return nil
 }

-// Convert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec is an autogenerated conversion function.
-func Convert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(in *v1beta1.MachinePoolSpec, out *MachinePoolSpec, s conversion.Scope) error {
-	return autoConvert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(in, out, s)
-}
-
 func autoConvert_v1alpha3_MachinePoolStatus_To_v1beta1_MachinePoolStatus(in *MachinePoolStatus, out *v1beta1.MachinePoolStatus, s conversion.Scope) error {
 	out.NodeRefs = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.NodeRefs))
 	out.Replicas = in.Replicas
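The ConvertTo/ConvertFrom pair above uses Cluster API's annotation-based round-trip: `MarshalData` stores the hub object in an annotation on down-conversion, and `UnmarshalData` restores it on up-conversion. A test-style sketch of the round-trip (values illustrative):

```go
package v1alpha3

import (
	"testing"

	corev1 "k8s.io/api/core/v1"

	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// TestMachinePoolRoundTrip checks that InfrastructureRefList survives a
// v1beta1 -> v1alpha3 -> v1beta1 round-trip via the conversion annotation.
func TestMachinePoolRoundTrip(t *testing.T) {
	hub := &expv1.MachinePool{}
	hub.Name = "mp-0"
	hub.Spec.InfrastructureRefList = []corev1.ObjectReference{
		{Kind: "DockerMachinePoolMachine", Name: "worker-0"},
	}

	spoke := &MachinePool{}
	if err := spoke.ConvertFrom(hub); err != nil { // down-convert; stashes hub data in an annotation
		t.Fatal(err)
	}

	restored := &expv1.MachinePool{}
	if err := spoke.ConvertTo(restored); err != nil { // up-convert; restores the annotation data
		t.Fatal(err)
	}
	if len(restored.Spec.InfrastructureRefList) != 1 {
		t.Fatalf("expected InfrastructureRefList to be restored, got %v", restored.Spec.InfrastructureRefList)
	}
}
```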
diff --git a/exp/api/v1alpha4/conversion.go b/exp/api/v1alpha4/conversion.go
index 609d15cff4b5..7efc73d74cef 100644
--- a/exp/api/v1alpha4/conversion.go
+++ b/exp/api/v1alpha4/conversion.go
@@ -17,31 +17,63 @@ limitations under the License.
 package v1alpha4

 import (
+	apimachineryconversion "k8s.io/apimachinery/pkg/conversion"
+	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 	"sigs.k8s.io/controller-runtime/pkg/conversion"

 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 )

+// ConvertTo converts a v1alpha4 MachinePool to the hub version.
 func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*expv1.MachinePool)
+	if err := Convert_v1alpha4_MachinePool_To_v1beta1_MachinePool(src, dst, nil); err != nil {
+		return err
+	}

-	return Convert_v1alpha4_MachinePool_To_v1beta1_MachinePool(src, dst, nil)
+	// Manually restore data.
+	restored := &expv1.MachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	// Restore list of infrastructure references.
+	dst.Spec.InfrastructureRefList = restored.Spec.InfrastructureRefList
+
+	return nil
 }

+// ConvertFrom converts a hub version MachinePool to v1alpha4.
 func (dst *MachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*expv1.MachinePool)
+	if err := Convert_v1beta1_MachinePool_To_v1alpha4_MachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion.
+	if err := utilconversion.MarshalData(src, dst); err != nil {
+		return err
+	}

-	return Convert_v1beta1_MachinePool_To_v1alpha4_MachinePool(src, dst, nil)
+	return nil
 }

+// ConvertTo converts a v1alpha4 MachinePoolList to the hub version.
 func (src *MachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*expv1.MachinePoolList)

 	return Convert_v1alpha4_MachinePoolList_To_v1beta1_MachinePoolList(src, dst, nil)
 }

+// ConvertFrom converts a hub version MachinePoolList to v1alpha4.
 func (dst *MachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*expv1.MachinePoolList)

 	return Convert_v1beta1_MachinePoolList_To_v1alpha4_MachinePoolList(src, dst, nil)
 }
+
+// Convert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec converts the hub MachinePoolSpec to v1alpha4.
+// InfrastructureRefList has no v1alpha4 equivalent; it is dropped here and preserved via annotation data.
+func Convert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(in *expv1.MachinePoolSpec, out *MachinePoolSpec, s apimachineryconversion.Scope) error {
+	return autoConvert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(in, out, s)
+}
diff --git a/exp/api/v1alpha4/zz_generated.conversion.go b/exp/api/v1alpha4/zz_generated.conversion.go
index 3609aa05f248..d5e53862087c 100644
--- a/exp/api/v1alpha4/zz_generated.conversion.go
+++ b/exp/api/v1alpha4/zz_generated.conversion.go
@@ -65,11 +65,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.MachinePoolSpec)(nil), (*MachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(a.(*v1beta1.MachinePoolSpec), b.(*MachinePoolSpec), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddGeneratedConversionFunc((*MachinePoolStatus)(nil), (*v1beta1.MachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1alpha4_MachinePoolStatus_To_v1beta1_MachinePoolStatus(a.(*MachinePoolStatus), b.(*v1beta1.MachinePoolStatus), scope)
 	}); err != nil {
@@ -80,6 +75,11 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*v1beta1.MachinePoolSpec)(nil), (*MachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(a.(*v1beta1.MachinePoolSpec), b.(*MachinePoolSpec), scope)
+	}); err != nil {
+		return err
+	}
 	return nil
 }
@@ -181,16 +181,12 @@ func autoConvert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(in *v1beta1
 		return err
 	}
 	out.MinReadySeconds = (*int32)(unsafe.Pointer(in.MinReadySeconds))
+	// WARNING: in.InfrastructureRefList requires manual conversion: does not exist in peer-type
 	out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
 	out.FailureDomains = *(*[]string)(unsafe.Pointer(&in.FailureDomains))
 	return nil
 }

-// Convert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec is an autogenerated conversion function.
-func Convert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(in *v1beta1.MachinePoolSpec, out *MachinePoolSpec, s conversion.Scope) error {
-	return autoConvert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(in, out, s)
-}
-
 func autoConvert_v1alpha4_MachinePoolStatus_To_v1beta1_MachinePoolStatus(in *MachinePoolStatus, out *v1beta1.MachinePoolStatus, s conversion.Scope) error {
 	out.NodeRefs = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.NodeRefs))
 	out.Replicas = in.Replicas
diff --git a/exp/api/v1beta1/machinepool_types.go b/exp/api/v1beta1/machinepool_types.go
index 36bf0341b427..faed2263f45c 100644
--- a/exp/api/v1beta1/machinepool_types.go
+++ b/exp/api/v1beta1/machinepool_types.go
@@ -52,8 +52,15 @@ type MachinePoolSpec struct {
 	// +optional
 	MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`

-	// ProviderIDList are the identification IDs of machine instances provided by the provider.
+	// InfrastructureRefList are the infrastructure references of machine instances, populated by the provider.
+	// This field must contain the infrastructure references of all instances in the machine pool.
+	// If this list is populated by the provider, `ProviderIDList` should be left empty.
+	// +optional
+	InfrastructureRefList []corev1.ObjectReference `json:"infrastructureRefList,omitempty"`
+
+	// ProviderIDList are the identifiers of machine instances populated by the provider.
 	// This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances.
+	// If this list is populated by the provider, `InfrastructureRefList` should be left empty.
 	// +optional
 	ProviderIDList []string `json:"providerIDList,omitempty"`
diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go
index 164680e08999..846a64cf2e51 100644
--- a/exp/api/v1beta1/zz_generated.deepcopy.go
+++ b/exp/api/v1beta1/zz_generated.deepcopy.go
@@ -101,6 +101,11 @@ func (in *MachinePoolSpec) DeepCopyInto(out *MachinePoolSpec) {
 		*out = new(int32)
 		**out = **in
 	}
+	if in.InfrastructureRefList != nil {
+		in, out := &in.InfrastructureRefList, &out.InfrastructureRefList
+		*out = make([]v1.ObjectReference, len(*in))
+		copy(*out, *in)
+	}
 	if in.ProviderIDList != nil {
 		in, out := &in.ProviderIDList, &out.ProviderIDList
 		*out = make([]string, len(*in))
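The controller change that follows reads `spec.infrastructureRefList` from the provider's unstructured object. A minimal sketch of that contract, exercising `util.UnstructuredUnmarshalField` against a hand-built object (values illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"sigs.k8s.io/cluster-api/util"
)

func main() {
	// An infrastructure provider object, as the MachinePool controller sees it.
	infraConfig := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
		"kind":       "DockerMachinePool",
		"spec": map[string]interface{}{
			"infrastructureRefList": []interface{}{
				map[string]interface{}{
					"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
					"kind":       "DockerMachinePoolMachine",
					"name":       "worker-0",
				},
			},
		},
	}}

	var infraRefList []corev1.ObjectReference
	if err := util.UnstructuredUnmarshalField(infraConfig, &infraRefList, "spec", "infrastructureRefList"); err != nil {
		panic(err)
	}
	fmt.Println(infraRefList[0].Name) // worker-0
}
```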
diff --git a/exp/internal/controllers/machinepool_controller_phases.go b/exp/internal/controllers/machinepool_controller_phases.go
index 4f8921842402..7b06c820116a 100644
--- a/exp/internal/controllers/machinepool_controller_phases.go
+++ b/exp/internal/controllers/machinepool_controller_phases.go
@@ -25,9 +25,12 @@ import (
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/source"
@@ -272,15 +275,100 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu
 		return ctrl.Result{RequeueAfter: externalReadyWait}, nil
 	}

+	// A provider can report its instances in two ways:
+	// - providerIDList is backward-compatible but does not support MachinePool Machines.
+	// - infrastructureRefList is preferred and is used to create MachinePool Machines.
+
+	var infraRefList []corev1.ObjectReference
+	// Get Spec.InfrastructureRefList from the infrastructure provider.
+	infraRefErr := util.UnstructuredUnmarshalField(infraConfig, &infraRefList, "spec", "infrastructureRefList")
+
 	var providerIDList []string
 	// Get Spec.ProviderIDList from the infrastructure provider.
-	if err := util.UnstructuredUnmarshalField(infraConfig, &providerIDList, "spec", "providerIDList"); err != nil {
-		return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve data from infrastructure provider for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
-	} else if len(providerIDList) == 0 {
-		log.Info("Retrieved empty Spec.ProviderIDList from infrastructure provider")
+	providerIDErr := util.UnstructuredUnmarshalField(infraConfig, &providerIDList, "spec", "providerIDList")
+
+	if infraRefErr != nil && providerIDErr != nil && mp.Spec.Replicas != nil && *mp.Spec.Replicas > 0 {
+		log.Error(infraRefErr, "failed to retrieve infrastructureRefList from infrastructure provider")
+		log.Error(providerIDErr, "failed to retrieve providerIDList from infrastructure provider")
+	}
+
+	// Note: infrastructureRefList and providerIDList are meant to be mutually exclusive;
+	// a validation webhook could reject objects that populate both.
+	if len(infraRefList) == 0 && len(providerIDList) == 0 {
+		log.Info("Retrieved empty infrastructureRefList and empty providerIDList from infrastructure provider")
 		return ctrl.Result{RequeueAfter: externalReadyWait}, nil
 	}

+	// If infrastructure refs were provided, create Machines if needed.
+	for _, ir := range infraRefList {
+		if err := r.Client.Get(ctx, types.NamespacedName{Namespace: mp.Namespace, Name: ir.Name}, &clusterv1.Machine{}); err == nil {
+			continue
+		}
+		// TODO: the provider ID format is Docker-specific here; it should come from the provider.
+		providerID := "docker:////" + ir.Name
+		machine := &clusterv1.Machine{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      ir.Name,
+				Namespace: mp.Namespace,
+				Labels:    mp.Labels,
+			},
+			Spec: clusterv1.MachineSpec{
+				InfrastructureRef: ir,
+				ClusterName:       mp.Spec.ClusterName,
+				Bootstrap: clusterv1.Bootstrap{
+					DataSecretName: mp.Spec.Template.Spec.Bootstrap.DataSecretName,
+				},
+				ProviderID: &providerID,
+				Version:    mp.Spec.Template.Spec.Version,
+			},
+		}
+
+		// Set the ownerRef of the Machine to the MachinePool.
+		if err := controllerutil.SetControllerReference(mp, machine, r.Client.Scheme()); err != nil {
+			return ctrl.Result{}, err
+		}
+
+		// Create the Machine.
+		log.Info("Creating Machine", "machine", machine.Name)
+		if err := r.Client.Create(ctx, machine); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	// Delete Machines owned by the MachinePool that are not in its infrastructureRefList.
+	machineList := &clusterv1.MachineList{}
+	if err := r.Client.List(ctx, machineList, client.MatchingLabels(mp.Labels), client.InNamespace(mp.Namespace)); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to list Machines")
+	}
+	for i := range machineList.Items {
+		machine := &machineList.Items[i]
+
+		owned := false
+		for _, ref := range machine.OwnerReferences {
+			if ref.UID == mp.UID {
+				owned = true
+				break
+			}
+		}
+		if !owned {
+			continue
+		}
+
+		hasInfraRef := false
+		for _, ir := range infraRefList {
+			if machine.Name == ir.Name {
+				hasInfraRef = true
+				break
+			}
+		}
+		if !hasInfraRef {
+			log.Info("Deleting orphaned Machine", "machine", machine.Name)
+			if err := r.Client.Delete(ctx, machine); err != nil && !apierrors.IsNotFound(err) {
+				return ctrl.Result{}, errors.Wrapf(err, "failed to delete orphaned Machine %q", machine.Name)
+			}
+		}
+	}
+
 	// Get and set Status.Replicas from the infrastructure provider.
 	err = util.UnstructuredUnmarshalField(infraConfig, &mp.Status.Replicas, "status", "replicas")
 	if err != nil {
@@ -292,7 +380,8 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu
 		return ctrl.Result{RequeueAfter: externalReadyWait}, nil
 	}

-	if !reflect.DeepEqual(mp.Spec.ProviderIDList, providerIDList) {
+	if !reflect.DeepEqual(mp.Spec.InfrastructureRefList, infraRefList) || !reflect.DeepEqual(mp.Spec.ProviderIDList, providerIDList) {
+		mp.Spec.InfrastructureRefList = infraRefList
 		mp.Spec.ProviderIDList = providerIDList
 		mp.Status.ReadyReplicas = 0
 		mp.Status.AvailableReplicas = 0
diff --git a/hack/tools/tilt-prepare/main.go b/hack/tools/tilt-prepare/main.go
index 1dfee9eba407..7cbe08e6d765 100644
--- a/hack/tools/tilt-prepare/main.go
+++ b/hack/tools/tilt-prepare/main.go
@@ -341,7 +341,7 @@ func certManagerTask() taskFunction {
 			cluster := cluster.New(cluster.Kubeconfig{}, config)

 			if err := cluster.CertManager().EnsureInstalled(); err != nil {
-				errCh <- errors.Wrapf(err, "[%s] failed to install cert-manger", prefix)
+				errCh <- errors.Wrapf(err, "[%s] failed to install cert-manager", prefix)
 			}
 		}
 	}
diff --git a/prep-docker-cluster.sh b/prep-docker-cluster.sh
new file mode 100755
index 000000000000..08de83732eac
--- /dev/null
+++ b/prep-docker-cluster.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+CLUSTER_NAME=${1:-my-cluster}
+
+./bin/clusterctl get kubeconfig "$CLUSTER_NAME" > "$CLUSTER_NAME.kubeconfig"
+
+# Point the kubeconfig at the exposed port of the load balancer, rather than the inaccessible container IP.
+sed -i -e "s/server:.*/server: https:\/\/$(docker port "$CLUSTER_NAME-lb" 6443/tcp | sed "s/0.0.0.0/127.0.0.1/")/g" "./$CLUSTER_NAME.kubeconfig"
+
+# Skip TLS verification, because the certificate is not signed for 127.0.0.1.
+sed -i -e "s/certificate-authority-data:.*/insecure-skip-tls-verify: true/g" "./$CLUSTER_NAME.kubeconfig"
+
+# Add a CNI solution for the cluster.
+kubectl --kubeconfig="./$CLUSTER_NAME.kubeconfig" apply -f https://docs.projectcalico.org/v3.21/manifests/calico.yaml
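The create/delete logic added to `reconcileInfrastructure` above boils down to one decision per Machine; factoring it into a pure helper would make it unit-testable. A sketch (the helper name is assumed, not part of this diff):

```go
package controllers

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// isOrphanedMachine reports whether a Machine owned by the MachinePool
// (matched by owner reference UID) no longer appears in the pool's
// infrastructureRefList and should therefore be deleted.
func isOrphanedMachine(machine *clusterv1.Machine, poolUID types.UID, infraRefList []corev1.ObjectReference) bool {
	owned := false
	for _, ref := range machine.OwnerReferences {
		if ref.UID == poolUID {
			owned = true
			break
		}
	}
	if !owned {
		return false
	}
	for _, ir := range infraRefList {
		if machine.Name == ir.Name {
			return false
		}
	}
	return true
}
```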
diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepoolmachines.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepoolmachines.yaml
new file mode 100644
index 000000000000..40d1667cdbd5
--- /dev/null
+++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepoolmachines.yaml
@@ -0,0 +1,185 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.8.0
+  creationTimestamp: null
+  name: dockermachinepoolmachines.infrastructure.cluster.x-k8s.io
+spec:
+  group: infrastructure.cluster.x-k8s.io
+  names:
+    categories:
+    - cluster-api
+    kind: DockerMachinePoolMachine
+    listKind: DockerMachinePoolMachineList
+    plural: dockermachinepoolmachines
+    shortNames:
+    - dmpm
+    singular: dockermachinepoolmachine
+  scope: Namespaced
+  versions:
+  - additionalPrinterColumns:
+    - description: Flag indicating infrastructure is successfully provisioned
+      jsonPath: .status.ready
+      name: Ready
+      type: string
+    - description: Cluster
+      jsonPath: .metadata.labels['cluster\.x-k8s\.io/cluster-name']
+      name: Cluster
+      type: string
+    - description: Time duration since creation of DockerMachinePoolMachine
+      jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1beta1
+    schema:
+      openAPIV3Schema:
+        description: DockerMachinePoolMachine is the Schema for the dockermachinepoolmachines
+          API.
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: DockerMachinePoolMachineSpec defines the desired state of
+              DockerMachinePoolMachine.
+            properties:
+              bootstrapped:
+                description: Bootstrapped is true when the kubeadm bootstrapping
+                  has been run against this machine.
+                type: boolean
+              customImage:
+                description: CustomImage allows customizing the container image that
+                  is used for running the machine.
+                type: string
+              extraMounts:
+                description: ExtraMounts describes additional mount points for the
+                  node container. These may be used to bind a hostPath.
+                items:
+                  description: Mount specifies a host volume to mount into a container.
+                    This is a simplified version of kind v1alpha4.Mount types.
+                  properties:
+                    containerPath:
+                      description: Path of the mount within the container.
+                      type: string
+                    hostPath:
+                      description: Path of the mount on the host. If the hostPath
+                        doesn't exist, then runtimes should report error. If the hostpath
+                        is a symbolic link, runtimes should follow the symlink and
+                        mount the real destination to container.
+                      type: string
+                    readOnly:
+                      description: If set, the mount is read-only.
+                      type: boolean
+                  type: object
+                type: array
+              preLoadImages:
+                description: PreLoadImages allows pre-loading images in a newly created
+                  machine. This can be used to speed up tests by avoiding, e.g., downloading
+                  CNI images on all the containers.
+                items:
+                  type: string
+                type: array
+              providerID:
+                description: ProviderID will be the container name in ProviderID format
+                  (docker:////)
+                type: string
+            type: object
+          status:
+            description: DockerMachinePoolMachineStatus defines the observed state
+              of DockerMachinePoolMachine.
+            properties:
+              addresses:
+                description: Addresses contains the associated addresses for the docker
+                  machine.
+                items:
+                  description: MachineAddress contains information for the node's
+                    address.
+                  properties:
+                    address:
+                      description: The machine address.
+                      type: string
+                    type:
+                      description: Machine address type, one of Hostname, ExternalIP
+                        or InternalIP.
+                      type: string
+                  required:
+                  - address
+                  - type
+                  type: object
+                type: array
+              conditions:
+                description: Conditions defines current service state of the DockerMachinePoolMachine.
+                items:
+                  description: Condition defines an observation of a Cluster API resource
+                    operational state.
+                  properties:
+                    lastTransitionTime:
+                      description: Last time the condition transitioned from one status
+                        to another. This should be when the underlying condition changed.
+                        If that is not known, then using the time when the API field
+                        changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: A human readable message indicating details about
+                        the transition. This field may be empty.
+                      type: string
+                    reason:
+                      description: The reason for the condition's last transition
+                        in CamelCase. The specific API may choose whether or not this
+                        field is considered a guaranteed API. This field may not be
+                        empty.
+                      type: string
+                    severity:
+                      description: Severity provides an explicit classification of
+                        Reason code, so the users or machines can immediately understand
+                        the current situation and act accordingly. The Severity field
+                        MUST be set only when Status=False.
+                      type: string
+                    status:
+                      description: Status of the condition, one of True, False, Unknown.
+                      type: string
+                    type:
+                      description: Type of condition in CamelCase or in foo.example.com/CamelCase.
+                        Many .condition.type values are consistent across resources
+                        like Available, but because arbitrary conditions can be useful
+                        (see .node.status.conditions), the ability to deconflict is
+                        important.
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              loadBalancerConfigured:
+                description: LoadBalancerConfigured denotes that the machine has been
+                  added to the load balancer.
+                type: boolean
+              ready:
+                description: Ready denotes that the machine (docker container) is
+                  ready.
+                type: boolean
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml
index 038d3dc7c9e7..c6fdf0dad6bf 100644
--- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml
+++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml
@@ -405,6 +405,72 @@ spec:
         spec:
           description: DockerMachinePoolSpec defines the desired state of DockerMachinePool.
           properties:
+            infrastructureRefList:
+              description: InfrastructureRefList are the infrastructure references
+                of machine instances, populated by the provider. This field must
+                contain the infrastructure references of all instances in the machine
+                pool.
+              items:
+                description: 'ObjectReference contains enough information to let
+                  you inspect or modify the referred object. --- New uses of this
+                  type are discouraged because of difficulty describing its usage
+                  when embedded in APIs. 1. Ignored fields. It includes many fields
+                  which are not generally honored. For instance, ResourceVersion
+                  and FieldPath are both very rarely valid in actual usage. 2. Invalid
+                  usage help. It is impossible to add specific help for individual
+                  usage. In most embedded usages, there are particular restrictions
+                  like, "must refer only to types A and B" or "UID not honored"
+                  or "name must be restricted". Those cannot be well described when
+                  embedded. 3. Inconsistent validation. Because the usages are
+                  different, the validation rules are different by usage, which
+                  makes it hard for users to predict what will happen. 4. The fields
+                  are both imprecise and overly precise. Kind is not a precise
+                  mapping to a URL. This can produce ambiguity during interpretation
+                  and require a REST mapping. In most cases, the dependency is
+                  on the group,resource tuple and the version of the actual struct
+                  is irrelevant. 5. We cannot easily change it. Because this type
+                  is embedded in many locations, updates to this type will affect
+                  numerous schemas. Don''t make new APIs embed an underspecified
+                  API type they do not control. Instead of using this type, create
+                  a locally provided and used type that is well-focused on your
+                  reference. For example, ServiceReferences for admission registration:
+                  https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
+                  .'
+                properties:
+                  apiVersion:
+                    description: API version of the referent.
+                    type: string
+                  fieldPath:
+                    description: 'If referring to a piece of an object instead of
+                      an entire object, this string should contain a valid JSON/Go
+                      field access statement, such as desiredState.manifest.containers[2].
+                      For example, if the object reference is to a container within
+                      a pod, this would take on a value like: "spec.containers{name}"
+                      (where "name" refers to the name of the container that triggered
+                      the event) or if no container name is specified "spec.containers[2]"
+                      (container with index 2 in this pod). This syntax is chosen
+                      only to have some well-defined way of referencing a part of
+                      an object. TODO: this design is not final and this field is
+                      subject to change in the future.'
+                    type: string
+                  kind:
+                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                    type: string
+                  name:
+                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                    type: string
+                  namespace:
+                    description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                    type: string
+                  resourceVersion:
+                    description: 'Specific resourceVersion to which this reference
+                      is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                    type: string
+                  uid:
+                    description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                    type: string
+                type: object
+              type: array
             providerID:
               description: ProviderID is the identification ID of the Machine Pool
               type: string
diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml
index 427d52d4bed6..3b970b5aabba 100644
--- a/test/infrastructure/docker/config/crd/kustomization.yaml
+++ b/test/infrastructure/docker/config/crd/kustomization.yaml
@@ -13,6 +13,7 @@ resources:
 - bases/infrastructure.cluster.x-k8s.io_dockerclusters.yaml
 - bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml
 - bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml
+- bases/infrastructure.cluster.x-k8s.io_dockermachinepoolmachines.yaml
 - bases/infrastructure.cluster.x-k8s.io_dockerclustertemplates.yaml
 # +kubebuilder:scaffold:crdkustomizeresource
diff --git a/test/infrastructure/docker/config/rbac/role.yaml b/test/infrastructure/docker/config/rbac/role.yaml
index 8eead72ea2bc..30818aebf97c 100644
--- a/test/infrastructure/docker/config/rbac/role.yaml
+++ b/test/infrastructure/docker/config/rbac/role.yaml
@@ -52,6 +52,27 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - dockermachinepoolmachines
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - infrastructure.cluster.x-k8s.io
+  resources:
+  - dockermachinepoolmachines/finalizers
+  - dockermachinepoolmachines/status
+  verbs:
+  - get
+  - patch
+  - update
 - apiGroups:
   - infrastructure.cluster.x-k8s.io
   resources:
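The role additions above are generated output; in a kubebuilder-based project they would normally originate from RBAC markers on the reconciler, roughly like this (assumed markers, not lines from this diff):

```go
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepoolmachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepoolmachines/status;dockermachinepoolmachines/finalizers,verbs=get;update;patch
```

Running `controller-gen` then regenerates `config/rbac/role.yaml` with the rules shown above.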
diff --git a/test/infrastructure/docker/exp/api/v1alpha3/conversion.go b/test/infrastructure/docker/exp/api/v1alpha3/conversion.go
index 3eeb15387b8b..5eb3e357b4ae 100644
--- a/test/infrastructure/docker/exp/api/v1alpha3/conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha3/conversion.go
@@ -19,19 +19,52 @@ package v1alpha3
 import (
 	"sigs.k8s.io/controller-runtime/pkg/conversion"

+	convert "k8s.io/apimachinery/pkg/conversion"
 	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
+	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 )

+// ConvertTo converts a v1alpha3 DockerMachinePool to the hub version.
 func (src *DockerMachinePool) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*infraexpv1.DockerMachinePool)
-	return Convert_v1alpha3_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1alpha3_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &infraexpv1.DockerMachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	// Restore list of infrastructure references.
+	dst.Spec.InfrastructureRefList = restored.Spec.InfrastructureRefList
+
+	return nil
 }

+// ConvertFrom converts a hub version DockerMachinePool to v1alpha3.
 func (dst *DockerMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*infraexpv1.DockerMachinePool)
-	return Convert_v1beta1_DockerMachinePool_To_v1alpha3_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1beta1_DockerMachinePool_To_v1alpha3_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion.
+	if err := utilconversion.MarshalData(src, dst); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec converts the hub DockerMachinePoolSpec
+// to v1alpha3. InfrastructureRefList has no v1alpha3 equivalent; it is dropped here and preserved via annotation data.
+func Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec(in *infraexpv1.DockerMachinePoolSpec, out *DockerMachinePoolSpec, s convert.Scope) error {
+	return autoConvert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec(in, out, s)
+}
+
+// Convert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec converts v1alpha3 DockerMachinePoolSpec
+// to the hub version.
+func Convert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in *DockerMachinePoolSpec, out *infraexpv1.DockerMachinePoolSpec, s convert.Scope) error {
+	return autoConvert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in, out, s)
 }

 func (src *DockerMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
diff --git a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go
index 311ad6ada301..db62552b67eb 100644
--- a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go
@@ -80,23 +80,23 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*DockerMachinePoolSpec)(nil), (*v1beta1.DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(a.(*DockerMachinePoolSpec), b.(*v1beta1.DockerMachinePoolSpec), scope)
+	if err := s.AddGeneratedConversionFunc((*DockerMachinePoolStatus)(nil), (*v1beta1.DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha3_DockerMachinePoolStatus_To_v1beta1_DockerMachinePoolStatus(a.(*DockerMachinePoolStatus), b.(*v1beta1.DockerMachinePoolStatus), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolSpec)(nil), (*DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec(a.(*v1beta1.DockerMachinePoolSpec), b.(*DockerMachinePoolSpec), scope)
+	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*DockerMachinePoolStatus)(nil), (*v1beta1.DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha3_DockerMachinePoolStatus_To_v1beta1_DockerMachinePoolStatus(a.(*DockerMachinePoolStatus), b.(*v1beta1.DockerMachinePoolStatus), scope)
+	if err := s.AddConversionFunc((*DockerMachinePoolSpec)(nil), (*v1beta1.DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(a.(*DockerMachinePoolSpec), b.(*v1beta1.DockerMachinePoolSpec), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope)
+	if err := s.AddConversionFunc((*v1beta1.DockerMachinePoolSpec)(nil), (*DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec(a.(*v1beta1.DockerMachinePoolSpec), b.(*DockerMachinePoolSpec), scope)
 	}); err != nil {
 		return err
 	}
@@ -260,25 +260,16 @@ func autoConvert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec
 	return nil
 }

-// Convert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in *DockerMachinePoolSpec, out *v1beta1.DockerMachinePoolSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha3_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in, out, s)
-}
-
 func autoConvert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec(in *v1beta1.DockerMachinePoolSpec, out *DockerMachinePoolSpec, s conversion.Scope) error {
 	if err := Convert_v1beta1_DockerMachinePoolMachineTemplate_To_v1alpha3_DockerMachinePoolMachineTemplate(&in.Template, &out.Template, s); err != nil {
 		return err
 	}
 	out.ProviderID = in.ProviderID
 	out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
+	// WARNING: in.InfrastructureRefList requires manual conversion: does not exist in peer-type
 	return nil
 }

-// Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec(in *v1beta1.DockerMachinePoolSpec, out *DockerMachinePoolSpec, s conversion.Scope) error {
-	return autoConvert_v1beta1_DockerMachinePoolSpec_To_v1alpha3_DockerMachinePoolSpec(in, out, s)
-}
-
 func autoConvert_v1alpha3_DockerMachinePoolStatus_To_v1beta1_DockerMachinePoolStatus(in *DockerMachinePoolStatus, out *v1beta1.DockerMachinePoolStatus, s conversion.Scope) error {
 	out.Ready = in.Ready
 	out.Replicas = in.Replicas
diff --git a/test/infrastructure/docker/exp/api/v1alpha4/conversion.go b/test/infrastructure/docker/exp/api/v1alpha4/conversion.go
index acc200d9e51a..360ec9915429 100644
--- a/test/infrastructure/docker/exp/api/v1alpha4/conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha4/conversion.go
@@ -19,19 +19,52 @@ package v1alpha4
 import (
 	"sigs.k8s.io/controller-runtime/pkg/conversion"

+	convert "k8s.io/apimachinery/pkg/conversion"
 	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
+	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 )

+// ConvertTo converts a v1alpha4 DockerMachinePool to the hub version.
 func (src *DockerMachinePool) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*infraexpv1.DockerMachinePool)
-	return Convert_v1alpha4_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1alpha4_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &infraexpv1.DockerMachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	// Restore list of infrastructure references.
+	dst.Spec.InfrastructureRefList = restored.Spec.InfrastructureRefList
+
+	return nil
 }

+// ConvertFrom converts a hub version DockerMachinePool to v1alpha4.
 func (dst *DockerMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*infraexpv1.DockerMachinePool)
-	return Convert_v1beta1_DockerMachinePool_To_v1alpha4_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1beta1_DockerMachinePool_To_v1alpha4_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion.
+	if err := utilconversion.MarshalData(src, dst); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec converts the hub DockerMachinePoolSpec
+// to v1alpha4. InfrastructureRefList has no v1alpha4 equivalent; it is dropped here and preserved via annotation data.
+func Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec(in *infraexpv1.DockerMachinePoolSpec, out *DockerMachinePoolSpec, s convert.Scope) error {
+	return autoConvert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec(in, out, s)
+}
+
+// Convert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec converts v1alpha4 DockerMachinePoolSpec
+// to the hub version.
+func Convert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in *DockerMachinePoolSpec, out *infraexpv1.DockerMachinePoolSpec, s convert.Scope) error {
+	return autoConvert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in, out, s)
 }

 func (src *DockerMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
diff --git a/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go b/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
index 4f302088997d..10f4a644e64f 100644
--- a/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
@@ -80,23 +80,23 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*DockerMachinePoolSpec)(nil), (*v1beta1.DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(a.(*DockerMachinePoolSpec), b.(*v1beta1.DockerMachinePoolSpec), scope)
+	if err := s.AddGeneratedConversionFunc((*DockerMachinePoolStatus)(nil), (*v1beta1.DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha4_DockerMachinePoolStatus_To_v1beta1_DockerMachinePoolStatus(a.(*DockerMachinePoolStatus), b.(*v1beta1.DockerMachinePoolStatus), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolSpec)(nil), (*DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec(a.(*v1beta1.DockerMachinePoolSpec), b.(*DockerMachinePoolSpec), scope)
+	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*DockerMachinePoolStatus)(nil), (*v1beta1.DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha4_DockerMachinePoolStatus_To_v1beta1_DockerMachinePoolStatus(a.(*DockerMachinePoolStatus), b.(*v1beta1.DockerMachinePoolStatus), scope)
+	if err := s.AddConversionFunc((*DockerMachinePoolSpec)(nil), (*v1beta1.DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(a.(*DockerMachinePoolSpec), b.(*v1beta1.DockerMachinePoolSpec), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope)
+	if err := s.AddConversionFunc((*v1beta1.DockerMachinePoolSpec)(nil), (*DockerMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec(a.(*v1beta1.DockerMachinePoolSpec), b.(*DockerMachinePoolSpec), scope)
 	}); err != nil {
 		return err
 	}
@@ -260,25 +260,16 @@ func autoConvert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec
 	return nil
 }

-// Convert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in *DockerMachinePoolSpec, out *v1beta1.DockerMachinePoolSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha4_DockerMachinePoolSpec_To_v1beta1_DockerMachinePoolSpec(in, out, s)
-}
-
 func autoConvert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec(in *v1beta1.DockerMachinePoolSpec, out *DockerMachinePoolSpec, s conversion.Scope) error {
 	if err := Convert_v1beta1_DockerMachinePoolMachineTemplate_To_v1alpha4_DockerMachinePoolMachineTemplate(&in.Template, &out.Template, s); err != nil {
 		return err
 	}
 	out.ProviderID = in.ProviderID
 	out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList))
+	// WARNING: in.InfrastructureRefList requires manual conversion: does not exist in peer-type
 	return nil
 }

-// Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec is an autogenerated conversion function.
-func Convert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec(in *v1beta1.DockerMachinePoolSpec, out *DockerMachinePoolSpec, s conversion.Scope) error {
-	return autoConvert_v1beta1_DockerMachinePoolSpec_To_v1alpha4_DockerMachinePoolSpec(in, out, s)
-}
-
 func autoConvert_v1alpha4_DockerMachinePoolStatus_To_v1beta1_DockerMachinePoolStatus(in *DockerMachinePoolStatus, out *v1beta1.DockerMachinePoolStatus, s conversion.Scope) error {
 	out.Ready = in.Ready
 	out.Replicas = in.Replicas
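The conversions above round-trip `DockerMachinePoolSpec.InfrastructureRefList` (the field itself is added in the types change just below). A sketch of how the Docker provider might populate it from its DockerMachinePoolMachines so the core MachinePool controller can create one Machine per instance (function name assumed, not part of this diff):

```go
package controllers

import (
	corev1 "k8s.io/api/core/v1"

	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
)

// publishInfrastructureRefs rewrites the pool's InfrastructureRefList with
// one reference per DockerMachinePoolMachine instance.
func publishInfrastructureRefs(pool *infraexpv1.DockerMachinePool, instances []infraexpv1.DockerMachinePoolMachine) {
	refs := make([]corev1.ObjectReference, 0, len(instances))
	for _, inst := range instances {
		refs = append(refs, corev1.ObjectReference{
			APIVersion: infraexpv1.GroupVersion.String(), // assumes the package's standard GroupVersion
			Kind:       "DockerMachinePoolMachine",
			Namespace:  inst.Namespace,
			Name:       inst.Name,
		})
	}
	pool.Spec.InfrastructureRefList = refs
}
```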
+	// +optional
+	InfrastructureRefList []corev1.ObjectReference `json:"infrastructureRefList,omitempty"`
 }
 
 // DockerMachinePoolStatus defines the observed state of DockerMachinePool.
diff --git a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepoolmachine_types.go b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepoolmachine_types.go
new file mode 100644
index 000000000000..0f805902686a
--- /dev/null
+++ b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepoolmachine_types.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+)
+
+const (
+	// MachinePoolMachineFinalizer is used to ensure deletion of dependencies (nodes, infra).
+	MachinePoolMachineFinalizer = "dockermachinepoolmachine.infrastructure.cluster.x-k8s.io"
+)
+
+type (
+
+	// DockerMachinePoolMachineSpec defines the desired state of DockerMachinePoolMachine.
+	DockerMachinePoolMachineSpec struct {
+		// ProviderID will be the container name in ProviderID format (docker:////<containername>)
+		// +optional
+		ProviderID *string `json:"providerID,omitempty"`
+
+		// CustomImage allows customizing the container image that is used for
+		// running the machine
+		// +optional
+		CustomImage string `json:"customImage,omitempty"`
+
+		// PreLoadImages allows pre-loading images in a newly created machine. This can be used to
+		// speed up tests by avoiding, e.g., downloading CNI images on all the containers.
+		// +optional
+		PreLoadImages []string `json:"preLoadImages,omitempty"`
+
+		// ExtraMounts describes additional mount points for the node container.
+		// These may be used to bind a hostPath.
+		// +optional
+		ExtraMounts []Mount `json:"extraMounts,omitempty"`
+
+		// Bootstrapped is true when the kubeadm bootstrapping has been run
+		// against this machine
+		// +optional
+		Bootstrapped bool `json:"bootstrapped,omitempty"`
+	}
+
+	// Mount specifies a host volume to mount into a container.
+	// This is a simplified version of kind v1alpha4.Mount types.
+	Mount struct {
+		// Path of the mount within the container.
+		ContainerPath string `json:"containerPath,omitempty"`
+
+		// Path of the mount on the host. If the hostPath doesn't exist, then runtimes
+		// should report an error. If the hostPath is a symbolic link, runtimes should
+		// follow the symlink and mount the real destination to the container.
+		HostPath string `json:"hostPath,omitempty"`
+
+		// If set, the mount is read-only.
+		// +optional
+		Readonly bool `json:"readOnly,omitempty"`
+	}
+
+	// DockerMachinePoolMachineStatus defines the observed state of DockerMachinePoolMachine.
+	DockerMachinePoolMachineStatus struct {
+		// Ready denotes that the machine (docker container) is ready
+		// +optional
+		Ready bool `json:"ready"`
+
+		// LoadBalancerConfigured denotes that the machine has been
+		// added to the load balancer
+		// +optional
+		LoadBalancerConfigured bool `json:"loadBalancerConfigured"`
+
+		// Addresses contains the associated addresses for the docker machine.
+		// +optional
+		Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`
+
+		// Conditions defines current service state of the DockerMachinePoolMachine.
+		// +optional
+		Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	}
+
+	// +kubebuilder:object:root=true
+	// +kubebuilder:subresource:status
+	// +kubebuilder:resource:path=dockermachinepoolmachines,scope=Namespaced,categories=cluster-api,shortName=dmpm
+	// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Flag indicating infrastructure is successfully provisioned"
+	// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels['cluster\\.x-k8s\\.io/cluster-name']",description="Cluster"
+	// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of DockerMachinePoolMachine"
+	// +kubebuilder:storageversion
+
+	// DockerMachinePoolMachine is the Schema for the dockermachinepoolmachines API.
+	DockerMachinePoolMachine struct {
+		metav1.TypeMeta   `json:",inline"`
+		metav1.ObjectMeta `json:"metadata,omitempty"`
+
+		Spec   DockerMachinePoolMachineSpec   `json:"spec,omitempty"`
+		Status DockerMachinePoolMachineStatus `json:"status,omitempty"`
+	}
+
+	// +kubebuilder:object:root=true
+
+	// DockerMachinePoolMachineList contains a list of DockerMachinePoolMachines.
+	DockerMachinePoolMachineList struct {
+		metav1.TypeMeta `json:",inline"`
+		metav1.ListMeta `json:"metadata,omitempty"`
+		Items           []DockerMachinePoolMachine `json:"items"`
+	}
+)
+
+// GetConditions returns the list of conditions for a DockerMachinePoolMachine API object.
+func (c *DockerMachinePoolMachine) GetConditions() clusterv1.Conditions {
+	return c.Status.Conditions
+}
+
+// SetConditions will set the given conditions on a DockerMachinePoolMachine object.
+func (c *DockerMachinePoolMachine) SetConditions(conditions clusterv1.Conditions) {
+	c.Status.Conditions = conditions
+}
+
+func init() {
+	SchemeBuilder.Register(&DockerMachinePoolMachine{}, &DockerMachinePoolMachineList{})
+}
diff --git a/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go b/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
index cf1e613a00d1..b1c8485e7d8e 100644
--- a/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
+++ b/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
@@ -22,6 +22,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 
 	cluster_apiapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	apiv1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
@@ -116,6 +117,122 @@ func (in *DockerMachinePoolList) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolMachine) DeepCopyInto(out *DockerMachinePoolMachine) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolMachine.
+func (in *DockerMachinePoolMachine) DeepCopy() *DockerMachinePoolMachine {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolMachine)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DockerMachinePoolMachine) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolMachineList) DeepCopyInto(out *DockerMachinePoolMachineList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DockerMachinePoolMachine, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolMachineList.
+func (in *DockerMachinePoolMachineList) DeepCopy() *DockerMachinePoolMachineList {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolMachineList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DockerMachinePoolMachineList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolMachineSpec) DeepCopyInto(out *DockerMachinePoolMachineSpec) {
+	*out = *in
+	if in.ProviderID != nil {
+		in, out := &in.ProviderID, &out.ProviderID
+		*out = new(string)
+		**out = **in
+	}
+	if in.PreLoadImages != nil {
+		in, out := &in.PreLoadImages, &out.PreLoadImages
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ExtraMounts != nil {
+		in, out := &in.ExtraMounts, &out.ExtraMounts
+		*out = make([]Mount, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolMachineSpec.
+func (in *DockerMachinePoolMachineSpec) DeepCopy() *DockerMachinePoolMachineSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolMachineSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerMachinePoolMachineStatus) DeepCopyInto(out *DockerMachinePoolMachineStatus) {
+	*out = *in
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]cluster_apiapiv1beta1.MachineAddress, len(*in))
+		copy(*out, *in)
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(cluster_apiapiv1beta1.Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolMachineStatus.
+func (in *DockerMachinePoolMachineStatus) DeepCopy() *DockerMachinePoolMachineStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(DockerMachinePoolMachineStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DockerMachinePoolMachineTemplate) DeepCopyInto(out *DockerMachinePoolMachineTemplate) {
 	*out = *in
@@ -150,6 +267,11 @@ func (in *DockerMachinePoolSpec) DeepCopyInto(out *DockerMachinePoolSpec) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.InfrastructureRefList != nil {
+		in, out := &in.InfrastructureRefList, &out.InfrastructureRefList
+		*out = make([]v1.ObjectReference, len(*in))
+		copy(*out, *in)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolSpec.
@@ -190,3 +312,18 @@ func (in *DockerMachinePoolStatus) DeepCopy() *DockerMachinePoolStatus {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Mount) DeepCopyInto(out *Mount) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount.
+func (in *Mount) DeepCopy() *Mount {
+	if in == nil {
+		return nil
+	}
+	out := new(Mount)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/test/infrastructure/docker/exp/controllers/alias.go b/test/infrastructure/docker/exp/controllers/alias.go
index 5abc40a9def2..8ed150a983e0 100644
--- a/test/infrastructure/docker/exp/controllers/alias.go
+++ b/test/infrastructure/docker/exp/controllers/alias.go
@@ -44,3 +44,19 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr
 		ContainerRuntime: r.ContainerRuntime,
 	}).SetupWithManager(ctx, mgr, options)
 }
+
+// DockerMachinePoolMachineReconciler reconciles a DockerMachinePoolMachine object.
+type DockerMachinePoolMachineReconciler struct {
+	Client           client.Client
+	Scheme           *runtime.Scheme
+	ContainerRuntime container.Runtime
+}
+
+// SetupWithManager will add watches for this controller.
+func (r *DockerMachinePoolMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	return (&dockermachinepoolcontrollers.DockerMachinePoolMachineReconciler{
+		Client:           r.Client,
+		Scheme:           r.Scheme,
+		ContainerRuntime: r.ContainerRuntime,
+	}).SetupWithManager(ctx, mgr, options)
+}
diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
index 4bcca23a49ec..c2b19dfa4fb3 100644
--- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
+++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
@@ -20,9 +20,12 @@ package controllers
 import (
 	"context"
 	"fmt"
+	"strings"
+	"time"
 
 	"github.com/pkg/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/utils/pointer"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -32,6 +35,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
+	corev1 "k8s.io/api/core/v1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	utilexp "sigs.k8s.io/cluster-api/exp/util"
@@ -39,6 +43,7 @@ import (
 	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/internal/docker"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -52,6 +57,8 @@ type DockerMachinePoolReconciler struct {
 
 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepools,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepools/status;dockermachinepools/finalizers,verbs=get;update;patch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepoolmachines,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepoolmachines/status;dockermachinepoolmachines/finalizers,verbs=get;update;patch
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
 // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
@@ -63,7 +70,7 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re
 	dockerMachinePool := &infraexpv1.DockerMachinePool{}
 	if err := r.Client.Get(ctx, req.NamespacedName, dockerMachinePool); err != nil {
 		if apierrors.IsNotFound(err) {
-			return ctrl.Result{}, nil
+			log.Info("MachinePool not found, returning")
+			return ctrl.Result{}, nil
 		}
 		return ctrl.Result{}, err
 	}
@@ -117,11 +124,14 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re
 	}
 
 	// Handle deleted machines
+	log.Info("Checking deletion timestamp", "deletionTimestamp", dockerMachinePool.DeletionTimestamp, "isZero", dockerMachinePool.ObjectMeta.DeletionTimestamp.IsZero())
 	if !dockerMachinePool.ObjectMeta.DeletionTimestamp.IsZero() {
 		return r.reconcileDelete(ctx, cluster, machinePool, dockerMachinePool)
 	}
 
 	// Handle non-deleted machines
+	log.Info("Reconciling MachinePool normally")
 	return r.reconcileNormal(ctx, cluster, machinePool, dockerMachinePool)
 }
@@ -190,13 +200,58 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 	}
 
 	// Derive info from Status.Instances
-	dockerMachinePool.Spec.ProviderIDList = []string{}
-	for _, instance := range dockerMachinePool.Status.Instances {
-		if instance.ProviderID != nil && instance.Ready {
-			dockerMachinePool.Spec.ProviderIDList = append(dockerMachinePool.Spec.ProviderIDList, *instance.ProviderID)
+	providerIDList := make([]string, 0, len(dockerMachinePool.Status.Instances))
+	infraRefList := make([]corev1.ObjectReference, 0, len(dockerMachinePool.Status.Instances))
+	for _, instance := range dockerMachinePool.Status.Instances {
+		if instance.ProviderID != nil {
+			name := strings.TrimPrefix(*instance.ProviderID, "docker:////")
+			infraRefList = append(infraRefList, corev1.ObjectReference{
+				Kind:       "DockerMachinePoolMachine",
+				Name:       name,
+				Namespace:  dockerMachinePool.Namespace,
+				APIVersion: infraexpv1.GroupVersion.String(),
+			})
+			if instance.Ready {
+				providerIDList = append(providerIDList, *instance.ProviderID)
+			}
+
+			// Look up the DockerMachinePoolMachine object.
+			dmpm := &infraexpv1.DockerMachinePoolMachine{}
+			if err := r.Client.Get(ctx, client.ObjectKey{Name: name, Namespace: dockerMachinePool.Namespace}, dmpm); err != nil {
+				// TODO: check the error to see that it was a 404?
+				// Create the DockerMachinePoolMachine object if needed.
+				dmpm = &infraexpv1.DockerMachinePoolMachine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      name,
+						Namespace: dockerMachinePool.Namespace,
+					},
+					Spec: infraexpv1.DockerMachinePoolMachineSpec{
+						ProviderID: instance.ProviderID,
+					},
+				}
+				// Find the corresponding Machine and set it as the OwnerReference
+				m := &clusterv1.Machine{}
+				if err := r.Client.Get(ctx, client.ObjectKey{Name: name, Namespace: dockerMachinePool.Namespace}, m); err != nil {
+					continue
+				}
+				if err := controllerutil.SetControllerReference(m, dmpm, r.Client.Scheme()); err != nil {
+					return ctrl.Result{}, errors.Wrap(err, "failed to set controller reference on DockerMachinePoolMachine")
+				}
+				log.Info("Creating DockerMachinePoolMachine", "name", dmpm.Name, "namespace", dmpm.Namespace)
+				if err := r.Client.Create(ctx, dmpm); err != nil {
+					return ctrl.Result{}, errors.Wrap(err, "failed to create DockerMachinePoolMachine")
+				}
+			}
+			// Update the DockerMachinePoolMachine object's status.
+			if instance.Ready {
+				dmpm.Status.Ready = true
+				if err := r.Client.Status().Update(ctx, dmpm); err != nil {
+					return ctrl.Result{}, errors.Wrap(err, "failed to update DockerMachinePoolMachine status")
+				}
+			}
 		}
 	}
+	dockerMachinePool.Spec.ProviderIDList = providerIDList
+	dockerMachinePool.Spec.InfrastructureRefList = infraRefList
+
 	dockerMachinePool.Status.Replicas = int32(len(dockerMachinePool.Status.Instances))
 
 	if dockerMachinePool.Spec.ProviderID == "" {
@@ -207,6 +262,14 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 	}
 
 	dockerMachinePool.Status.Ready = len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas)
+	if dockerMachinePool.Status.Ready {
+		conditions.MarkTrue(dockerMachinePool, expv1.ReplicasReadyCondition)
+	} else {
+		conditions.MarkFalse(dockerMachinePool, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")
+		// TODO: is this requeue necessary?
+		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
+	}
+
 	return ctrl.Result{}, nil
 }
@@ -215,11 +278,18 @@ func getDockerMachinePoolProviderID(clusterName, dockerMachinePoolName string) s
 }
 
 func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infraexpv1.DockerMachinePool) error {
-	// TODO: add conditions
+	conditions.SetSummary(dockerMachinePool,
+		conditions.WithConditions(
+			expv1.ReplicasReadyCondition,
+		),
+	)
 
-	// Patch the object, ignoring conflicts on the conditions owned by this controller.
 	return patchHelper.Patch(
 		ctx,
 		dockerMachinePool,
+		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+			clusterv1.ReadyCondition,
+			expv1.ReplicasReadyCondition,
+		}},
 	)
 }
diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepoolmachine_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepoolmachine_controller.go
new file mode 100644
index 000000000000..6056b9081e2e
--- /dev/null
+++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepoolmachine_controller.go
@@ -0,0 +1,481 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
+	"sigs.k8s.io/cluster-api/test/infrastructure/container"
+	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
+	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
+	"sigs.k8s.io/cluster-api/test/infrastructure/docker/internal/docker"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
+	"sigs.k8s.io/cluster-api/util/conditions"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/predicates"
+)
+
+// DockerMachinePoolMachineReconciler reconciles a DockerMachinePoolMachine object.
+type DockerMachinePoolMachineReconciler struct {
+	client.Client
+	Scheme           *runtime.Scheme
+	ContainerRuntime container.Runtime
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepoolmachines,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=dockermachinepoolmachines/status;dockermachinepoolmachines/finalizers,verbs=get;update;patch
+// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;machines,verbs=get;list;watch
+// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
+
+// Reconcile handles DockerMachinePoolMachine events.
+func (r *DockerMachinePoolMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) {
+	log := ctrl.LoggerFrom(ctx)
+	ctx = container.RuntimeInto(ctx, r.ContainerRuntime)
+
+	log.Info("Reconciling DockerMachinePoolMachine")
+
+	// Fetch the DockerMachinePoolMachine instance.
+	dockerMachinePoolMachine := &infraexpv1.DockerMachinePoolMachine{}
+	if err := r.Client.Get(ctx, req.NamespacedName, dockerMachinePoolMachine); err != nil {
+		if apierrors.IsNotFound(err) {
+			log.Info("DockerMachinePoolMachine not found, returning", "DMPM", req.NamespacedName)
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{}, err
+	}
+
+	// Fetch the owner Machine.
+	machine, err := util.GetOwnerMachine(ctx, r.Client, dockerMachinePoolMachine.ObjectMeta)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	if machine == nil {
+		log.Info("Waiting for Machine Controller to set OwnerRef on DockerMachinePoolMachine")
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("machine", machine.Name)
+
+	// Fetch the Cluster.
+	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
+	if err != nil {
+		log.Info("DockerMachinePoolMachine owner Machine is missing cluster label or cluster does not exist")
+		return ctrl.Result{}, err
+	}
+	if cluster == nil {
+		log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: <name of cluster>", clusterv1.ClusterLabelName))
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("cluster", cluster.Name)
+
+	// Return early if the object or Cluster is paused.
+	if annotations.IsPaused(cluster, dockerMachinePoolMachine) {
+		log.Info("Reconciliation is paused for this object")
+		return ctrl.Result{}, nil
+	}
+
+	// Fetch the Docker Cluster.
+	dockerCluster := &infrav1.DockerCluster{}
+	dockerClusterName := client.ObjectKey{
+		Namespace: dockerMachinePoolMachine.Namespace,
+		Name:      cluster.Spec.InfrastructureRef.Name,
+	}
+	if err := r.Client.Get(ctx, dockerClusterName, dockerCluster); err != nil {
+		log.Info("DockerCluster is not available yet")
+		return ctrl.Result{}, nil
+	}
+
+	log = log.WithValues("docker-cluster", dockerCluster.Name)
+
+	// Initialize the patch helper
+	patchHelper, err := patch.NewHelper(dockerMachinePoolMachine, r.Client)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	// Always attempt to Patch the DockerMachinePoolMachine object and status after each reconciliation.
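+	// The deferred patch persists spec, status, and condition changes made on any return path below.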
+	defer func() {
+		if err := patchDockerMachine(ctx, patchHelper, dockerMachinePoolMachine); err != nil {
+			log.Error(err, "failed to patch DockerMachinePoolMachine")
+			if rerr == nil {
+				rerr = err
+			}
+		}
+	}()
+
+	// Add the finalizer first if it does not exist, to avoid a race condition between init and delete
+	if !controllerutil.ContainsFinalizer(dockerMachinePoolMachine, infrav1.MachineFinalizer) {
+		controllerutil.AddFinalizer(dockerMachinePoolMachine, infrav1.MachineFinalizer)
+		log.Info("Adding finalizer to the DockerMachinePoolMachine, returning", "dmpm", dockerMachinePoolMachine.Name)
+		return ctrl.Result{}, nil
+	}
+
+	// Check if the infrastructure is ready, otherwise return and wait for the cluster object to be updated
+	if !cluster.Status.InfrastructureReady {
+		log.Info("Waiting for DockerCluster Controller to create cluster infrastructure")
+		conditions.MarkFalse(dockerMachinePoolMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
+		return ctrl.Result{}, nil
+	}
+
+	// Create a helper for managing the docker container hosting the machine.
+	externalMachine, err := docker.NewMachine(ctx, cluster, machine.Name, dockerMachinePoolMachine.Spec.CustomImage, nil)
+	if err != nil {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalMachine")
+	}
+
+	// Create a helper for managing a docker container hosting the loadbalancer.
+	// NB. the machine controller has to manage the cluster load balancer because the current implementation of the
+	// docker load balancer does not support auto-discovery of control plane nodes, so CAPD should take care of
+	// updating the cluster load balancer configuration when control plane machines are added/removed
+	externalLoadBalancer, err := docker.NewLoadBalancer(ctx, cluster, dockerCluster)
+	if err != nil {
+		return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalLoadBalancer")
+	}
+
+	// Handle deleted machines
+	log.Info("Checking deletion timestamp", "machine", machine.Name, "deletionTimestamp", dockerMachinePoolMachine.DeletionTimestamp)
+	if !dockerMachinePoolMachine.ObjectMeta.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(ctx, machine, dockerMachinePoolMachine, externalMachine, externalLoadBalancer)
+	}
+
+	// Handle non-deleted machines
+	log.Info("Reconciling DockerMachinePoolMachine normally")
+	return r.reconcileNormal(ctx, cluster, machine, dockerMachinePoolMachine, externalMachine, externalLoadBalancer)
+}
+
+func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMachine *infraexpv1.DockerMachinePoolMachine) error {
+	// Always update the readyCondition by summarizing the state of other conditions.
+	// A step counter is added to represent progress during the provisioning process
+	// (the step counter is hidden during the deletion process).
+	conditions.SetSummary(dockerMachine,
+		conditions.WithConditions(
+			infrav1.ContainerProvisionedCondition,
+			infrav1.BootstrapExecSucceededCondition,
+		),
+		conditions.WithStepCounterIf(dockerMachine.ObjectMeta.DeletionTimestamp.IsZero() && dockerMachine.Spec.ProviderID == nil),
+	)
+
+	// Patch the object, ignoring conflicts on the conditions owned by this controller.
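+	// WithOwnedConditions makes this controller authoritative for the listed condition types,
+	// so its values win over concurrent updates from other clients.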
+	return patchHelper.Patch(
+		ctx,
+		dockerMachine,
+		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+			clusterv1.ReadyCondition,
+			infrav1.ContainerProvisionedCondition,
+			infrav1.BootstrapExecSucceededCondition,
+		}},
+	)
+}
+
+func (r *DockerMachinePoolMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, dockerMachine *infraexpv1.DockerMachinePoolMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (res ctrl.Result, retErr error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	// if the machine is already provisioned, return
+	if dockerMachine.Spec.ProviderID != nil {
+		// ensure ready state is set.
+		// This is required after move, because status is not moved to the target cluster.
+		dockerMachine.Status.Ready = true
+
+		if externalMachine.Exists() {
+			conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition)
+			// Setting machine address is required after move, because status.Address field is not retained during move.
+			if err := setMachineAddress(ctx, dockerMachine, externalMachine); err != nil {
+				return ctrl.Result{}, errors.Wrap(err, "failed to set the machine address")
+			}
+		} else {
+			conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.ContainerDeletedReason, clusterv1.ConditionSeverityError, fmt.Sprintf("Container %s does not exist anymore", externalMachine.Name()))
+		}
+		return ctrl.Result{}, nil
+	}
+
+	// Make sure bootstrap data is available and populated.
+	if machine.Spec.Bootstrap.DataSecretName == nil {
+		if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
+			log.Info("Waiting for the control plane to be initialized")
+			conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+			return ctrl.Result{}, nil
+		}
+
+		log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
+		conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
+		return ctrl.Result{}, nil
+	}
+
+	// // Create the docker container hosting the machine
+	// role := constants.WorkerNodeRoleValue
+	// if util.IsControlPlaneMachine(machine) {
+	// 	role = constants.ControlPlaneNodeRoleValue
+	// }
+
+	// // Create the machine if not existing yet
+	// if !externalMachine.Exists() {
+	// 	if err := externalMachine.Create(ctx, role, machine.Spec.Version, dockerMachine.Spec.ExtraMounts); err != nil {
+	// 		return ctrl.Result{}, errors.Wrap(err, "failed to create worker DockerMachine")
+	// 	}
+	// }
+
+	// Preload images into the container
+	if len(dockerMachine.Spec.PreLoadImages) > 0 {
+		if err := externalMachine.PreloadLoadImages(ctx, dockerMachine.Spec.PreLoadImages); err != nil {
+			return ctrl.Result{}, errors.Wrap(err, "failed to pre-load images into the DockerMachine")
+		}
+	}
+
+	// if the machine is a control plane, update the load balancer configuration
+	// we should only do this once, as reconfiguration more or less ensures
+	// node ref setting fails
+	if util.IsControlPlaneMachine(machine) && !dockerMachine.Status.LoadBalancerConfigured {
+		if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil {
+			return ctrl.Result{}, errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration")
+		}
+		dockerMachine.Status.LoadBalancerConfigured = true
+	}
+
+	// Update the ContainerProvisionedCondition condition
+	// NOTE: it is required to create the patch helper before this change, otherwise it won't surface if
+	// we issue a patch down in the code (because if we create the patch helper after this point, ContainerProvisionedCondition=True exists on both before and after).
+	patchHelper, err := patch.NewHelper(dockerMachine, r.Client)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition)
+
+	// At this stage, we are ready for bootstrap. However, if the BootstrapExecSucceededCondition is missing we add it and
+	// issue a patch so the user can see the change of state before the bootstrap actually starts.
+	// NOTE: usually controllers should not rely on status they are setting, but on the observed state; however
+	// in this case we are doing this because we explicitly want to give feedback to users.
+	if !conditions.Has(dockerMachine, infrav1.BootstrapExecSucceededCondition) {
+		conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrappingReason, clusterv1.ConditionSeverityInfo, "")
+		if err := patchDockerMachine(ctx, patchHelper, dockerMachine); err != nil {
+			return ctrl.Result{}, errors.Wrap(err, "failed to patch DockerMachine")
+		}
+	}
+
+	// Run the bootstrap scripts only if the machine isn't bootstrapped yet
+	if !dockerMachine.Spec.Bootstrapped {
+		bootstrapData, format, err := r.getBootstrapData(ctx, machine)
+		if err != nil {
+			log.Error(err, "failed to get bootstrap data")
+			return ctrl.Result{}, err
+		}
+
+		timeoutctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
+		defer cancel()
+		// Run the bootstrap script. Simulates cloud-init/Ignition.
+		if err := externalMachine.ExecBootstrap(timeoutctx, bootstrapData, format); err != nil {
+			conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
+			return ctrl.Result{}, errors.Wrap(err, "failed to exec DockerMachine bootstrap")
+		}
+		// Check for bootstrap success
+		if err := externalMachine.CheckForBootstrapSuccess(timeoutctx); err != nil {
+			conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
+			return ctrl.Result{}, errors.Wrap(err, "failed to check for existence of bootstrap success file at /run/cluster-api/bootstrap-success.complete")
+		}
+
+		dockerMachine.Spec.Bootstrapped = true
+	}
+
+	// Update the BootstrapExecSucceededCondition condition
+	conditions.MarkTrue(dockerMachine, infrav1.BootstrapExecSucceededCondition)
+
+	if err := setMachineAddress(ctx, dockerMachine, externalMachine); err != nil {
+		log.Error(err, "failed to set the machine address")
+		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	}
+
+	// Usually a cloud provider will do this, but there is no docker-cloud provider.
+	// Requeue if there is an error, as this likely reflects momentary load balancer
+	// state changes during control plane provisioning.
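+	// A ContainerNotRunningError is surfaced as a failure; any other error is retried
+	// after five seconds.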
+	if err := externalMachine.SetNodeProviderID(ctx); err != nil {
+		if errors.As(err, &docker.ContainerNotRunningError{}) {
+			return ctrl.Result{}, errors.Wrap(err, "failed to patch the Kubernetes node with the machine providerID")
+		}
+		log.Error(err, "failed to patch the Kubernetes node with the machine providerID")
+		return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
+	}
+	// Set ProviderID so the Cluster API Machine Controller can pull it
+	providerID := externalMachine.ProviderID()
+	dockerMachine.Spec.ProviderID = &providerID
+	dockerMachine.Status.Ready = true
+	conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition)
+
+	return ctrl.Result{}, nil
+}
+
+func (r *DockerMachinePoolMachineReconciler) reconcileDelete(ctx context.Context, machine *clusterv1.Machine, dockerMachine *infraexpv1.DockerMachinePoolMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (ctrl.Result, error) {
+	log := ctrl.LoggerFrom(ctx)
+
+	log.Info("reconciling delete for", "machine", machine.Name, "dockerMachine", dockerMachine.Name)
+
+	// Set the ContainerProvisionedCondition reporting delete is started, and issue a patch in order to make
+	// this visible to the users.
+	// NB. The operation in docker is fast, so there is the chance the user will not notice the status change;
+	// nevertheless we are issuing a patch so we can test a pattern that will be used by other providers as well
+	patchHelper, err := patch.NewHelper(dockerMachine, r.Client)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+	conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+	if err := patchDockerMachine(ctx, patchHelper, dockerMachine); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to patch DockerMachinePoolMachine")
+	}
+
+	// delete the machine
+	log.Info("deleting DockerMachinePoolMachine", "machine", machine.Name, "dockerMachine", dockerMachine.Name)
+	if err := externalMachine.Delete(ctx); err != nil {
+		return ctrl.Result{}, errors.Wrap(err, "failed to delete DockerMachinePoolMachine")
+	}
+
+	// if the deleted machine is a control-plane node, remove it from the load balancer configuration
+	if util.IsControlPlaneMachine(machine) {
+		if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil {
+			return ctrl.Result{}, errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration")
+		}
+	}
+
+	// Machine is deleted so remove the finalizer.
+	controllerutil.RemoveFinalizer(dockerMachine, infrav1.MachineFinalizer)
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager will add watches for this controller.
+func (r *DockerMachinePoolMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
+	clusterToDockerMachinePoolMachines, err := util.ClusterToObjectsMapper(mgr.GetClient(), &infraexpv1.DockerMachinePoolMachineList{}, mgr.GetScheme())
+	if err != nil {
+		return err
+	}
+
+	c, err := ctrl.NewControllerManagedBy(mgr).
+		For(&infraexpv1.DockerMachinePoolMachine{}).
+		WithOptions(options).
+		WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))).
+		Watches(
+			&source.Kind{Type: &clusterv1.Machine{}},
+			handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("DockerMachinePoolMachine"))),
+		).
+		Watches(
+			&source.Kind{Type: &infrav1.DockerCluster{}},
+			handler.EnqueueRequestsFromMapFunc(r.DockerClusterToDockerMachinePoolMachines),
+		).
+		Build(r)
+	if err != nil {
+		return err
+	}
+	return c.Watch(
+		&source.Kind{Type: &clusterv1.Cluster{}},
+		handler.EnqueueRequestsFromMapFunc(clusterToDockerMachinePoolMachines),
+		predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)),
+	)
+}
+
+// DockerClusterToDockerMachinePoolMachines is a handler.ToRequestsFunc to be used to enqueue
+// requests for reconciliation of DockerMachinePoolMachines.
+func (r *DockerMachinePoolMachineReconciler) DockerClusterToDockerMachinePoolMachines(o client.Object) []ctrl.Request {
+	result := []ctrl.Request{}
+	c, ok := o.(*infrav1.DockerCluster)
+	if !ok {
+		panic(fmt.Sprintf("Expected a DockerCluster but got a %T", o))
+	}
+
+	cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
+	switch {
+	case apierrors.IsNotFound(err) || cluster == nil:
+		return result
+	case err != nil:
+		return result
+	}
+
+	labels := map[string]string{clusterv1.ClusterLabelName: cluster.Name}
+	machineList := &clusterv1.MachineList{}
+	if err := r.Client.List(context.TODO(), machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
+		return nil
+	}
+	for _, m := range machineList.Items {
+		if m.Spec.InfrastructureRef.Name == "" {
+			continue
+		}
+		name := client.ObjectKey{Namespace: m.Namespace, Name: m.Name}
+		result = append(result, ctrl.Request{NamespacedName: name})
+	}
+
+	return result
+}
+
+func (r *DockerMachinePoolMachineReconciler) getBootstrapData(ctx context.Context, machine *clusterv1.Machine) (string, bootstrapv1.Format, error) {
+	if machine.Spec.Bootstrap.DataSecretName == nil {
+		return "", "", errors.New("error retrieving bootstrap data: linked Machine's bootstrap.dataSecretName is nil")
+	}
+
+	s := &corev1.Secret{}
+	key := client.ObjectKey{Namespace: machine.GetNamespace(), Name: *machine.Spec.Bootstrap.DataSecretName}
+	if err := r.Client.Get(ctx, key, s); err != nil {
+		return "", "", errors.Wrapf(err, "failed to retrieve bootstrap data secret for DockerMachinePoolMachine %s/%s", machine.GetNamespace(), machine.GetName())
+	}
+
+	value, ok := s.Data["value"]
+	if !ok {
+		return "", "", errors.New("error retrieving bootstrap data: secret value key is missing")
+	}
+
+	format := s.Data["format"]
+	if string(format) == "" {
+		format = []byte(bootstrapv1.CloudConfig)
+	}
+
+	return base64.StdEncoding.EncodeToString(value), bootstrapv1.Format(format), nil
+}
+
+// setMachineAddress gets the address from the container corresponding to a docker node and sets it on the DockerMachinePoolMachine object.
+func setMachineAddress(ctx context.Context, dockerMachinePoolMachine *infraexpv1.DockerMachinePoolMachine, externalMachine *docker.Machine) error {
+	machineAddress, err := externalMachine.Address(ctx)
+	if err != nil {
+		return err
+	}
+
+	dockerMachinePoolMachine.Status.Addresses = []clusterv1.MachineAddress{
+		{
+			Type:    clusterv1.MachineHostName,
+			Address: externalMachine.ContainerName(),
+		},
+		{
+			Type:    clusterv1.MachineInternalIP,
+			Address: machineAddress,
+		},
+		{
+			Type:    clusterv1.MachineExternalIP,
+			Address: machineAddress,
+		},
+	}
+	return nil
+}
diff --git a/test/infrastructure/docker/main.go b/test/infrastructure/docker/main.go
index 1d95ea7b0442..20bbbd22cc00 100644
--- a/test/infrastructure/docker/main.go
+++ b/test/infrastructure/docker/main.go
@@ -203,6 +203,15 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 			setupLog.Error(err, "unable to create controller", "controller", "DockerMachinePool")
 			os.Exit(1)
 		}
+		if err := (&expcontrollers.DockerMachinePoolMachineReconciler{
+			Client:           mgr.GetClient(),
+			ContainerRuntime: runtimeClient,
+		}).SetupWithManager(ctx, mgr, controller.Options{
+			MaxConcurrentReconciles: concurrency,
+		}); err != nil {
+			setupLog.Error(err, "unable to create controller", "controller", "DockerMachinePoolMachine")
+			os.Exit(1)
+		}
 	}
 }
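For illustration only, and not part of the diff above: a minimal, self-contained sketch of the providerID-to-reference mapping the DockerMachinePool controller relies on when creating DockerMachinePoolMachines and their infrastructure references. The provider ID and namespace values here are hypothetical.

package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// containerNameFromProviderID mirrors the strings.TrimPrefix call in reconcileNormal:
// CAPD provider IDs have the form "docker:////<containername>".
func containerNameFromProviderID(providerID string) string {
	return strings.TrimPrefix(providerID, "docker:////")
}

func main() {
	providerID := "docker:////my-pool-worker-0" // hypothetical provider ID
	ref := corev1.ObjectReference{
		Kind:       "DockerMachinePoolMachine",
		Name:       containerNameFromProviderID(providerID),
		Namespace:  "default", // hypothetical namespace
		APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
	}
	fmt.Printf("%s/%s (%s, %s)\n", ref.Namespace, ref.Name, ref.Kind, ref.APIVersion)
}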