diff --git a/lib/backend/model/resource.go b/lib/backend/model/resource.go index 975bf7931..cd1a2f742 100644 --- a/lib/backend/model/resource.go +++ b/lib/backend/model/resource.go @@ -254,3 +254,7 @@ func (options ResourceListOptions) defaultPathRoot() string { } return k + "/" + options.Name } + +func (options ResourceListOptions) String() string { + return options.Kind +} diff --git a/lib/backend/syncersv1/bgpsyncer/bgpsyncer_e2e_test.go b/lib/backend/syncersv1/bgpsyncer/bgpsyncer_e2e_test.go index e06257115..82a55c0cc 100644 --- a/lib/backend/syncersv1/bgpsyncer/bgpsyncer_e2e_test.go +++ b/lib/backend/syncersv1/bgpsyncer/bgpsyncer_e2e_test.go @@ -204,10 +204,11 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl ) Expect(err).NotTo(HaveOccurred()) // The pool will add as single entry ( +1 ) + poolKeyV1 := model.IPPoolKey{CIDR: net.MustParseCIDR("192.124.0.0/21")} expectedCacheSize += 1 syncTester.ExpectCacheSize(expectedCacheSize) syncTester.ExpectData(model.KVPair{ - Key: model.IPPoolKey{CIDR: net.MustParseCIDR("192.124.0.0/21")}, + Key: poolKeyV1, Value: &model.IPPool{ CIDR: poolCIDRNet, IPIPInterface: "tunl0", @@ -293,9 +294,10 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl // For non-kubernetes, check that we can allocate an IP address and get a syncer update // for the allocation block. 
+	var blockAffinityKeyV1 model.BlockAffinityKey if config.Spec.DatastoreType != apiconfig.Kubernetes { By("Allocating an IP address and checking that we get an allocation block") - _, _, err = c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{ + ips1, _, err := c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{ Num4: 1, Hostname: "127.0.0.1", }) @@ -306,16 +308,45 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl expectedCacheSize += 1 syncTester.ExpectCacheSize(expectedCacheSize) current := syncTester.GetCacheEntries() - found := false for _, kvp := range current { if kab, ok := kvp.Key.(model.BlockAffinityKey); ok { if kab.Host == "127.0.0.1" && poolCIDRNet.Contains(kab.CIDR.IP) { - found = true + blockAffinityKeyV1 = kab break } } } - Expect(found).To(BeTrue(), "Did not find affinity block in sync data") + Expect(blockAffinityKeyV1).NotTo(Equal(model.BlockAffinityKey{}), "Did not find affinity block in sync data") + + By("Allocating an IP address on a different host and checking for no updates") + // The syncer only monitors affine blocks for one host, so IP allocations for a different + // host should not result in updates. + ips2, _, err := c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{ + Num4: 1, + Hostname: "not-this-host", + }) + Expect(err).NotTo(HaveOccurred()) + syncTester.ExpectCacheSize(expectedCacheSize) + + By("Releasing the IP addresses and checking for no updates") + // Releasing IPs should leave the affine blocks assigned, so releasing the IPs + // should result in no updates. + _, err = c.IPAM().ReleaseIPs(ctx, ips1) + Expect(err).NotTo(HaveOccurred()) + _, err = c.IPAM().ReleaseIPs(ctx, ips2) + Expect(err).NotTo(HaveOccurred()) + syncTester.ExpectCacheSize(expectedCacheSize) + + By("Deleting the IPPool and checking for pool and affine block deletion") + // Deleting the pool will also release all affine blocks associated with the pool. 
+ _, err = c.IPPools().Delete(ctx, "mypool", options.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // The pool and the affine block for 127.0.0.1 should have deletion events. + expectedCacheSize -= 2 + syncTester.ExpectCacheSize(expectedCacheSize) + syncTester.ExpectNoData(blockAffinityKeyV1) + syncTester.ExpectNoData(poolKeyV1) } By("Starting a new syncer and verifying that all current entries are returned before sync status") @@ -329,7 +360,7 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl // step. syncTester.ExpectStatusUpdate(api.WaitForDatastore) syncTester.ExpectStatusUpdate(api.ResyncInProgress) - syncTester.ExpectCacheSize(len(current)) + syncTester.ExpectCacheSize(expectedCacheSize) for _, e := range current { if config.Spec.DatastoreType == apiconfig.Kubernetes { // Don't check revisions for K8s since the node data gets updated constantly. diff --git a/lib/backend/syncersv1/felixsyncer/felixsyncer_e2e_test.go b/lib/backend/syncersv1/felixsyncer/felixsyncer_e2e_test.go index 46afea10e..866b89665 100644 --- a/lib/backend/syncersv1/felixsyncer/felixsyncer_e2e_test.go +++ b/lib/backend/syncersv1/felixsyncer/felixsyncer_e2e_test.go @@ -182,7 +182,7 @@ var _ = testutils.E2eDatastoreDescribe("Felix syncer tests", testutils.Datastore Revision: pool.ResourceVersion, }) syncTester.ExpectData(model.KVPair{ - Key: model.GlobalConfigKey{"IpInIpEnabled"}, + Key: model.GlobalConfigKey{"IpInIpEnabled"}, Value: "true", }) diff --git a/lib/clientv2/ippool.go b/lib/clientv2/ippool.go index bcdc0fc86..691d4fe3f 100644 --- a/lib/clientv2/ippool.go +++ b/lib/clientv2/ippool.go @@ -16,14 +16,17 @@ package clientv2 import ( "context" + "fmt" "net" + "time" + + log "github.com/sirupsen/logrus" apiv2 "github.com/projectcalico/libcalico-go/lib/apis/v2" cerrors "github.com/projectcalico/libcalico-go/lib/errors" cnet "github.com/projectcalico/libcalico-go/lib/net" "github.com/projectcalico/libcalico-go/lib/options" 
"github.com/projectcalico/libcalico-go/lib/watch" - log "github.com/sirupsen/logrus" ) // IPPoolInterface has methods to work with IPPool resources. @@ -45,7 +48,7 @@ type ipPools struct { // representation of the IPPool, and an error, if there is any. func (r ipPools) Create(ctx context.Context, res *apiv2.IPPool, opts options.SetOptions) (*apiv2.IPPool, error) { // Validate the IPPool before creating the resource. - if err := r.validateAndSetDefaults(res); err != nil { + if err := r.validateAndSetDefaults(ctx, res, nil); err != nil { return nil, err } @@ -67,14 +70,20 @@ func (r ipPools) Create(ctx context.Context, res *apiv2.IPPool, opts options.Set // Update takes the representation of a IPPool and updates it. Returns the stored // representation of the IPPool, and an error, if there is any. func (r ipPools) Update(ctx context.Context, res *apiv2.IPPool, opts options.SetOptions) (*apiv2.IPPool, error) { + // Get the existing settings, so that we can validate the CIDR has not changed. + old, err := r.Get(ctx, res.Name, options.GetOptions{}) + if err != nil { + return nil, err + } + // Validate the IPPool updating the resource. - if err := r.validateAndSetDefaults(res); err != nil { + if err := r.validateAndSetDefaults(ctx, res, old); err != nil { return nil, err } // Enable IPIP globally if required. Do this before the Update so if it fails the user // can retry the same command. - err := r.maybeEnableIPIP(ctx, res) + err = r.maybeEnableIPIP(ctx, res) if err != nil { return nil, err } @@ -88,6 +97,65 @@ func (r ipPools) Update(ctx context.Context, res *apiv2.IPPool, opts options.Set // Delete takes name of the IPPool and deletes it. Returns an error if one occurs. func (r ipPools) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv2.IPPool, error) { + // Deleting a pool requires a little care because of existing endpoints + // using IP addresses allocated in the pool. 
We do the deletion in + // the following steps: + // - disable the pool so no more IPs are assigned from it + // - remove all affinities associated with the pool + // - delete the pool + + // Get the pool so that we can find the CIDR associated with it. + pool, err := r.Get(ctx, name, options.GetOptions{}) + if err != nil { + return nil, err + } + + logCxt := log.WithFields(log.Fields{ + "CIDR": pool.Spec.CIDR, + "Name": name, + }) + + // If the pool is active, set the disabled flag to ensure we stop allocating from this pool. + if !pool.Spec.Disabled { + logCxt.Info("Disabling pool to release affinities") + pool.Spec.Disabled = true + + // If the Delete has been called with a ResourceVersion then use that to perform the + // update - that way we'll catch update conflicts (we could actually check here, but + // the most likely scenario is there isn't one - so just pass it through and let the + // Update handle any conflicts). + if opts.ResourceVersion != "" { + pool.ResourceVersion = opts.ResourceVersion + } + if _, err := r.Update(ctx, pool, options.SetOptions{}); err != nil { + return nil, err + } + + // Reset the resource version before the actual delete since the version of that resource + // will now have been updated. + opts.ResourceVersion = "" + } + + // Release affinities associated with this pool. We do this even if the pool was disabled + // (since it may have been enabled at one time, and if there are no affine blocks created + // then this will be a no-op). We've already validated the CIDR so we know it will parse. + if _, cidrNet, err := cnet.ParseCIDR(pool.Spec.CIDR); err == nil { + logCxt.Info("Releasing pool affinities") + + // Pause for a short period before releasing the affinities - this gives any in-progress + // allocations an opportunity to finish. + time.Sleep(500 * time.Millisecond) + err = r.client.IPAM().ReleasePoolAffinities(ctx, *cidrNet) + + // Depending on the datastore, IPAM may not be supported. 
If we get a not supported + // error, then continue. Any other error, fail. + if _, ok := err.(cerrors.ErrorOperationNotSupported); !ok && err != nil { + return nil, err + } + } + + // And finally, delete the pool. + logCxt.Info("Deleting pool") out, err := r.client.resources.Delete(ctx, opts, apiv2.KindIPPool, noNamespace, name) if out != nil { return out.(*apiv2.IPPool), err @@ -120,12 +188,14 @@ func (r ipPools) Watch(ctx context.Context, opts options.ListOptions) (watch.Int return r.client.resources.Watch(ctx, opts, apiv2.KindIPPool) } -// validateAndSetDefaults validates IPPool fields. -func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error { +// validateAndSetDefaults validates IPPool fields and sets default values that are +// not assigned. +// The old pool will be unassigned for a Create. +func (r ipPools) validateAndSetDefaults(ctx context.Context, new, old *apiv2.IPPool) error { errFields := []cerrors.ErroredField{} // Spec.CIDR field must not be empty. - if pool.Spec.CIDR == "" { + if new.Spec.CIDR == "" { return cerrors.ErrorValidation{ ErroredFields: []cerrors.ErroredField{{ Name: "IPPool.Spec.CIDR", @@ -135,32 +205,77 @@ func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error { } // Make sure the CIDR is parsable. - ipAddr, cidr, err := cnet.ParseCIDR(pool.Spec.CIDR) + ipAddr, cidr, err := cnet.ParseCIDR(new.Spec.CIDR) if err != nil { return cerrors.ErrorValidation{ ErroredFields: []cerrors.ErroredField{{ Name: "IPPool.Spec.CIDR", Reason: "IPPool CIDR must be a valid subnet", + Value: new.Spec.CIDR, }}, } } + // Normalize the CIDR before persisting. + new.Spec.CIDR = cidr.String() + + // If there was a previous pool then this must be an Update, validate that the + // CIDR has not changed. Since we are using normalized CIDRs we can just do a + // simple string comparison. 
+ if old != nil && old.Spec.CIDR != new.Spec.CIDR { + errFields = append(errFields, cerrors.ErroredField{ + Name: "IPPool.Spec.CIDR", + Reason: "IPPool CIDR cannot be modified", + Value: new.Spec.CIDR, + }) + } + + // If there was no previous pool then this must be a Create. Check that the CIDR + // does not overlap with any other pool CIDRs. + if old == nil { + allPools, err := r.List(ctx, options.ListOptions{}) + if err != nil { + return err + } + + for _, otherPool := range allPools.Items { + // It's possible that Create is called for a pre-existing pool, so skip our own + // pool and let the generic processing handle the pre-existing resource error case. + if otherPool.Name == new.Name { + continue + } + _, otherCIDR, err := cnet.ParseCIDR(otherPool.Spec.CIDR) + if err != nil { + log.WithField("Name", otherPool.Name).WithError(err).Error("IPPool is configured with an invalid CIDR") + continue + } + if otherCIDR.IsNetOverlap(cidr.IPNet) { + errFields = append(errFields, cerrors.ErroredField{ + Name: "IPPool.Spec.CIDR", + Reason: fmt.Sprintf("IPPool(%s) CIDR overlaps with IPPool(%s) CIDR %s", new.Name, otherPool.Name, otherPool.Spec.CIDR), + Value: new.Spec.CIDR, + }) + } + } + } + // Make sure IPIPMode is defaulted to "Never". - if len(pool.Spec.IPIPMode) == 0 { - pool.Spec.IPIPMode = apiv2.IPIPModeNever + if len(new.Spec.IPIPMode) == 0 { + new.Spec.IPIPMode = apiv2.IPIPModeNever } // IPIP cannot be enabled for IPv6. - if cidr.Version() == 6 && pool.Spec.IPIPMode != apiv2.IPIPModeNever { + if cidr.Version() == 6 && new.Spec.IPIPMode != apiv2.IPIPModeNever { errFields = append(errFields, cerrors.ErroredField{ - Name: "IPPool.Spec.IPIP.Mode", + Name: "IPPool.Spec.IPIPMode", Reason: "IPIP is not supported on an IPv6 IP pool", + Value: new.Spec.IPIPMode, }) } // The Calico IPAM places restrictions on the minimum IP pool size. If // the ippool is enabled, check that the pool is at least the minimum size. 
- if !pool.Spec.Disabled { + if !new.Spec.Disabled { ones, bits := cidr.Mask.Size() log.Debugf("Pool CIDR: %s, num bits: %d", cidr.String(), bits-ones) if bits-ones < 6 { @@ -168,22 +283,25 @@ func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error { errFields = append(errFields, cerrors.ErroredField{ Name: "IPPool.Spec.CIDR", Reason: "IPv4 pool size is too small (min /26) for use with Calico IPAM", + Value: new.Spec.CIDR, }) } else { errFields = append(errFields, cerrors.ErroredField{ Name: "IPPool.Spec.CIDR", Reason: "IPv6 pool size is too small (min /122) for use with Calico IPAM", + Value: new.Spec.CIDR, }) } } } // The Calico CIDR should be strictly masked - log.Debugf("IPPool CIDR: %s, Masked IP: %d", pool.Spec.CIDR, cidr.IP) + log.Debugf("IPPool CIDR: %s, Masked IP: %d", new.Spec.CIDR, cidr.IP) if cidr.IP.String() != ipAddr.String() { errFields = append(errFields, cerrors.ErroredField{ Name: "IPPool.Spec.CIDR", - Reason: "IP pool CIDR is not strictly masked", + Reason: "IPPool CIDR is not strictly masked", + Value: new.Spec.CIDR, }) } @@ -202,14 +320,16 @@ func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error { if cidr.Version() == 4 && cidr.IsNetOverlap(ipv4LinkLocalNet) { errFields = append(errFields, cerrors.ErroredField{ Name: "IPPool.Spec.CIDR", - Reason: "IP pool range overlaps with IPv4 Link Local range 169.254.0.0/16", + Reason: "IPPool CIDR overlaps with IPv4 Link Local range 169.254.0.0/16", + Value: new.Spec.CIDR, }) } if cidr.Version() == 6 && cidr.IsNetOverlap(ipv6LinkLocalNet) { errFields = append(errFields, cerrors.ErroredField{ Name: "IPPool.Spec.CIDR", - Reason: "IP pool range overlaps with IPv6 Link Local range fe80::/10", + Reason: "IPPool CIDR overlaps with IPv6 Link Local range fe80::/10", + Value: new.Spec.CIDR, }) } diff --git a/lib/clientv2/ippool_e2e_test.go b/lib/clientv2/ippool_e2e_test.go index 23c9bd196..8023674d8 100644 --- a/lib/clientv2/ippool_e2e_test.go +++ b/lib/clientv2/ippool_e2e_test.go @@ -43,13 
+43,23 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f CIDR: "1.2.3.0/24", IPIPMode: apiv2.IPIPModeAlways, } + spec1_2 := apiv2.IPPoolSpec{ + CIDR: "1.2.3.0/24", + NATOutgoing: true, + IPIPMode: apiv2.IPIPModeNever, + } spec2 := apiv2.IPPoolSpec{ - CIDR: "2001::0/120", + CIDR: "2001::/120", + NATOutgoing: true, + IPIPMode: apiv2.IPIPModeNever, + } + spec2_1 := apiv2.IPPoolSpec{ + CIDR: "2001::/120", IPIPMode: apiv2.IPIPModeNever, } DescribeTable("IPPool e2e CRUD tests", - func(name1, name2 string, spec1, spec2 apiv2.IPPoolSpec) { + func(name1, name2 string, spec1, spec1_2, spec2 apiv2.IPPoolSpec) { c, err := clientv2.New(config) Expect(err).NotTo(HaveOccurred()) @@ -130,11 +140,11 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f testutils.ExpectResource(&outList.Items[0], apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec1) testutils.ExpectResource(&outList.Items[1], apiv2.KindIPPool, testutils.ExpectNoNamespace, name2, spec2) - By("Updating IPPool name1 with spec2") - res1.Spec = spec2 + By("Updating IPPool name1 with spec1_2") + res1.Spec = spec1_2 res1, outError = c.IPPools().Update(ctx, res1, options.SetOptions{}) Expect(outError).NotTo(HaveOccurred()) - testutils.ExpectResource(res1, apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec2) + testutils.ExpectResource(res1, apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec1_2) // Track the version of the updated name1 data. 
rv1_2 := res1.ResourceVersion @@ -161,10 +171,10 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f Expect(res.ResourceVersion).To(Equal(rv1_1)) } - By("Getting IPPool (name1) with the updated resource version and comparing the output against spec2") + By("Getting IPPool (name1) with the updated resource version and comparing the output against spec1_2") res, outError = c.IPPools().Get(ctx, name1, options.GetOptions{ResourceVersion: rv1_2}) Expect(outError).NotTo(HaveOccurred()) - testutils.ExpectResource(res, apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec2) + testutils.ExpectResource(res, apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec1_2) Expect(res.ResourceVersion).To(Equal(rv1_2)) if config.Spec.DatastoreType != apiconfig.Kubernetes { @@ -175,11 +185,11 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f testutils.ExpectResource(&outList.Items[0], apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec1) } - By("Listing IPPools with the latest resource version and checking for two results with name1/spec2 and name2/spec2") + By("Listing IPPools with the latest resource version and checking for two results with name1/spec1_2 and name2/spec2") outList, outError = c.IPPools().List(ctx, options.ListOptions{}) Expect(outError).NotTo(HaveOccurred()) Expect(outList.Items).To(HaveLen(2)) - testutils.ExpectResource(&outList.Items[0], apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec2) + testutils.ExpectResource(&outList.Items[0], apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec1_2) testutils.ExpectResource(&outList.Items[1], apiv2.KindIPPool, testutils.ExpectNoNamespace, name2, spec2) if config.Spec.DatastoreType != apiconfig.Kubernetes { @@ -192,7 +202,9 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f By("Deleting IPPool (name1) with the new resource version") dres, outError := c.IPPools().Delete(ctx, name1, 
options.DeleteOptions{ResourceVersion: rv1_2}) Expect(outError).NotTo(HaveOccurred()) - testutils.ExpectResource(dres, apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec2) + // The pool will first be disabled, so tweak the Disabled field before doing the comparison. + spec1_2.Disabled = true + testutils.ExpectResource(dres, apiv2.KindIPPool, testutils.ExpectNoNamespace, name1, spec1_2) if config.Spec.DatastoreType != apiconfig.Kubernetes { By("Updating IPPool name2 with a 2s TTL and waiting for the entry to be deleted") @@ -222,9 +234,11 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f } if config.Spec.DatastoreType == apiconfig.Kubernetes { - By("Attempting to deleting IPPool (name2) again") + // The pool will first be disabled, so tweak the Disabled field before doing the comparison. + By("Deleting IPPool (name2)") dres, outError = c.IPPools().Delete(ctx, name2, options.DeleteOptions{}) Expect(outError).NotTo(HaveOccurred()) + spec2.Disabled = true testutils.ExpectResource(dres, apiv2.KindIPPool, testutils.ExpectNoNamespace, name2, spec2) } @@ -245,7 +259,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f }, // Test 1: Pass two fully populated IPPoolSpecs and expect the series of operations to succeed. 
- Entry("Two fully populated IPPoolSpecs", name1, name2, spec1, spec2), + Entry("Two fully populated IPPoolSpecs", name1, name2, spec1, spec1_2, spec2), ) Describe("IPPool watch functionality", func() { @@ -257,7 +271,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f Expect(err).NotTo(HaveOccurred()) be.Clean() - By("Listing IPPools with the latest resource version and checking for two results with name1/spec2 and name2/spec2") + By("Listing IPPools with no resource version and checking for no results") outList, outError := c.IPPools().List(ctx, options.ListOptions{}) Expect(outError).NotTo(HaveOccurred()) Expect(outList.Items).To(HaveLen(0)) @@ -272,6 +286,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f }, options.SetOptions{}, ) + Expect(err).NotTo(HaveOccurred()) rev1 := outRes1.ResourceVersion By("Configuring a IPPool name2/spec2 and storing the response") @@ -283,6 +298,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f }, options.SetOptions{}, ) + Expect(err).NotTo(HaveOccurred()) By("Starting a watcher from revision rev1 - this should skip the first creation") w, err := c.IPPools().Watch(ctx, options.ListOptions{ResourceVersion: rev1}) @@ -291,19 +307,24 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f defer testWatcher1.Stop() By("Deleting res1") - _, err = c.IPPools().Delete(ctx, name1, options.DeleteOptions{}) + outRes3, err := c.IPPools().Delete(ctx, name1, options.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) - By("Checking for two events, create res2 and delete res1") + By("Checking for three events, create res2 and disable and delete res1") testWatcher1.ExpectEvents(apiv2.KindIPPool, []watch.Event{ { Type: watch.Added, Object: outRes2, }, { - Type: watch.Deleted, + Type: watch.Modified, + Object: outRes3, Previous: outRes1, }, + { + Type: watch.Deleted, + Previous: outRes3, + }, }) testWatcher1.Stop() @@ 
-314,11 +335,11 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f defer testWatcher2.Stop() By("Modifying res2") - outRes3, err := c.IPPools().Update( + outRes4, err := c.IPPools().Update( ctx, &apiv2.IPPool{ ObjectMeta: outRes2.ObjectMeta, - Spec: spec1, + Spec: spec2_1, }, options.SetOptions{}, ) @@ -333,13 +354,18 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f Object: outRes2, }, { - Type: watch.Deleted, + Type: watch.Modified, + Object: outRes3, Previous: outRes1, }, + { + Type: watch.Deleted, + Previous: outRes3, + }, { Type: watch.Modified, Previous: outRes2, - Object: outRes3, + Object: outRes4, }, }) testWatcher2.Stop() @@ -357,9 +383,14 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f Object: outRes1, }, { - Type: watch.Deleted, + Type: watch.Modified, + Object: outRes3, Previous: outRes1, }, + { + Type: watch.Deleted, + Previous: outRes3, + }, }) testWatcher2_1.Stop() } @@ -372,7 +403,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f testWatcher3.ExpectEvents(apiv2.KindIPPool, []watch.Event{ { Type: watch.Added, - Object: outRes3, + Object: outRes4, }, }) testWatcher3.Stop() @@ -399,20 +430,7 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f }, { Type: watch.Added, - Object: outRes3, - }, - }) - - By("Cleaning the datastore and expecting deletion events for each configured resource (tests prefix deletes results in individual events for each key)") - be.Clean() - testWatcher4.ExpectEvents(apiv2.KindIPPool, []watch.Event{ - { - Type: watch.Deleted, - Previous: outRes1, - }, - { - Type: watch.Deleted, - Previous: outRes3, + Object: outRes4, }, }) testWatcher4.Stop() @@ -559,4 +577,90 @@ var _ = testutils.E2eDatastoreDescribe("IPPool tests", testutils.DatastoreAll, f Expect(*enabled).To(BeFalse()) }) }) + + Describe("Verify pool CIDR validation", func() { + var err error + var c 
clientv2.Interface + + BeforeEach(func() { + c, err = clientv2.New(config) + Expect(err).NotTo(HaveOccurred()) + + be, err := backend.NewClient(config) + Expect(err).NotTo(HaveOccurred()) + be.Clean() + }) + + It("should prevent the CIDR being changed on an update", func() { + By("Creating a pool") + pool, err := c.IPPools().Create(ctx, &apiv2.IPPool{ + ObjectMeta: metav1.ObjectMeta{Name: "ippool1"}, + Spec: apiv2.IPPoolSpec{ + CIDR: "1.2.3.0/24", + }, + }, options.SetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Attempting to change the CIDR") + pool.Spec.CIDR = "1.2.4.0/24" + _, err = c.IPPools().Update(ctx, pool, options.SetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeAssignableToTypeOf(errors.ErrorValidation{})) + Expect(err.Error()).To(ContainSubstring("IPPool CIDR cannot be modified")) + }) + + It("should prevent the creation of a pool with an identical or overlapping CIDR", func() { + By("Creating a pool") + _, err := c.IPPools().Create(ctx, &apiv2.IPPool{ + ObjectMeta: metav1.ObjectMeta{Name: "ippool1"}, + Spec: apiv2.IPPoolSpec{ + CIDR: "1.2.3.0/24", + }, + }, options.SetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Attempting to create the same pool and checking for the correct error") + _, err = c.IPPools().Create(ctx, &apiv2.IPPool{ + ObjectMeta: metav1.ObjectMeta{Name: "ippool1"}, + Spec: apiv2.IPPoolSpec{ + CIDR: "1.2.3.0/24", + }, + }, options.SetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeAssignableToTypeOf(errors.ErrorResourceAlreadyExists{})) + + By("Attempting to create a pool with the same CIDR") + _, err = c.IPPools().Create(ctx, &apiv2.IPPool{ + ObjectMeta: metav1.ObjectMeta{Name: "ippool2"}, + Spec: apiv2.IPPoolSpec{ + CIDR: "1.2.3.0/24", + }, + }, options.SetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeAssignableToTypeOf(errors.ErrorValidation{})) + Expect(err.Error()).To(ContainSubstring("IPPool(ippool2) CIDR overlaps with IPPool(ippool1) CIDR 1.2.3.0/24")) + + 
By("Attempting to create a pool with a larger overlapping CIDR") + _, err = c.IPPools().Create(ctx, &apiv2.IPPool{ + ObjectMeta: metav1.ObjectMeta{Name: "ippool3"}, + Spec: apiv2.IPPoolSpec{ + CIDR: "1.2.0.0/16", + }, + }, options.SetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeAssignableToTypeOf(errors.ErrorValidation{})) + Expect(err.Error()).To(ContainSubstring("IPPool(ippool3) CIDR overlaps with IPPool(ippool1) CIDR 1.2.3.0/24")) + + By("Attempting to create a pool with a smaller overlapping CIDR") + _, err = c.IPPools().Create(ctx, &apiv2.IPPool{ + ObjectMeta: metav1.ObjectMeta{Name: "ippool4"}, + Spec: apiv2.IPPoolSpec{ + CIDR: "1.2.3.128/25", + }, + }, options.SetOptions{}) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeAssignableToTypeOf(errors.ErrorValidation{})) + Expect(err.Error()).To(ContainSubstring("IPPool(ippool4) CIDR overlaps with IPPool(ippool1) CIDR 1.2.3.0/24")) + }) + }) }) diff --git a/lib/errors/errors.go b/lib/errors/errors.go index 405a3a85e..985442624 100644 --- a/lib/errors/errors.go +++ b/lib/errors/errors.go @@ -80,28 +80,29 @@ type ErroredField struct { Reason string } +func (e ErroredField) String() string { + var fieldString string + if e.Value == nil { + fieldString = e.Name + } else { + fieldString = fmt.Sprintf("%s = '%v'", e.Name, e.Value) + } + if e.Reason != "" { + fieldString = fmt.Sprintf("%s (%s)", fieldString, e.Reason) + } + return fieldString +} + func (e ErrorValidation) Error() string { if len(e.ErroredFields) == 0 { return "unknown validation error" } else if len(e.ErroredFields) == 1 { f := e.ErroredFields[0] - if f.Reason == "" { - return fmt.Sprintf("error with field %s = '%v'", - f.Name, f.Value) - } else { - return fmt.Sprintf("error with field %s = '%v' (%s)", - f.Name, f.Value, f.Reason) - } + return fmt.Sprintf("error with field %s", f) } else { s := "error with the following fields:\n" for _, f := range e.ErroredFields { - if f.Reason == "" { - s = s + fmt.Sprintf("- %s = '%v'\n", - 
f.Name, f.Value) - } else { - s = s + fmt.Sprintf("- %s = '%v' (%s)\n", - f.Name, f.Value, f.Reason) - } + s = s + fmt.Sprintf("- %s\n", f) } return s }