Skip to content

Commit

Permalink
Merge pull request #633 from robbrockbank/handle-pool-deletion
Browse files Browse the repository at this point in the history
Release pool affinities, check for overlapping pools and check CIDR cannot be altered
  • Loading branch information
robbrockbank authored Oct 27, 2017
2 parents 05786be + 33c143c commit dae320c
Show file tree
Hide file tree
Showing 6 changed files with 334 additions and 74 deletions.
4 changes: 4 additions & 0 deletions lib/backend/model/resource.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,3 +254,7 @@ func (options ResourceListOptions) defaultPathRoot() string {
}
return k + "/" + options.Name
}

// String returns the resource Kind stored in the list options. The
// `String() string` signature satisfies fmt.Stringer, so a
// ResourceListOptions value formats as its Kind when printed.
// NOTE(review): only Kind is included — the Name/path handled by
// defaultPathRoot above is intentionally omitted here; confirm that is
// the desired log/display granularity.
func (options ResourceListOptions) String() string {
	return options.Kind
}
43 changes: 37 additions & 6 deletions lib/backend/syncersv1/bgpsyncer/bgpsyncer_e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -204,10 +204,11 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl
)
Expect(err).NotTo(HaveOccurred())
// The pool will add as single entry ( +1 )
poolKeyV1 := model.IPPoolKey{CIDR: net.MustParseCIDR("192.124.0.0/21")}
expectedCacheSize += 1
syncTester.ExpectCacheSize(expectedCacheSize)
syncTester.ExpectData(model.KVPair{
Key: model.IPPoolKey{CIDR: net.MustParseCIDR("192.124.0.0/21")},
Key: poolKeyV1,
Value: &model.IPPool{
CIDR: poolCIDRNet,
IPIPInterface: "tunl0",
Expand Down Expand Up @@ -293,9 +294,10 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl

// For non-kubernetes, check that we can allocate an IP address and get a syncer update
// for the allocation block.
var blockAffinityKeyV1 model.BlockAffinityKey
if config.Spec.DatastoreType != apiconfig.Kubernetes {
By("Allocating an IP address and checking that we get an allocation block")
_, _, err = c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{
ips1, _, err := c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{
Num4: 1,
Hostname: "127.0.0.1",
})
Expand All @@ -306,16 +308,45 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl
expectedCacheSize += 1
syncTester.ExpectCacheSize(expectedCacheSize)
current := syncTester.GetCacheEntries()
found := false
for _, kvp := range current {
if kab, ok := kvp.Key.(model.BlockAffinityKey); ok {
if kab.Host == "127.0.0.1" && poolCIDRNet.Contains(kab.CIDR.IP) {
found = true
blockAffinityKeyV1 = kab
break
}
}
}
Expect(found).To(BeTrue(), "Did not find affinity block in sync data")
Expect(blockAffinityKeyV1).NotTo(BeNil(), "Did not find affinity block in sync data")

By("Allocating an IP address on a different host and checking for no updates")
// The syncer only monitors affine blocks for one host, so IP allocations for a different
// host should not result in updates.
ips2, _, err := c.IPAM().AutoAssign(ctx, ipam.AutoAssignArgs{
Num4: 1,
Hostname: "not-this-host",
})
Expect(err).NotTo(HaveOccurred())
syncTester.ExpectCacheSize(expectedCacheSize)

By("Releasing the IP addresses and checking for no updates")
// Releasing IPs should leave the affine blocks assigned, so releasing the IPs
// should result in no updates.
_, err = c.IPAM().ReleaseIPs(ctx, ips1)
Expect(err).NotTo(HaveOccurred())
_, err = c.IPAM().ReleaseIPs(ctx, ips2)
Expect(err).NotTo(HaveOccurred())
syncTester.ExpectCacheSize(expectedCacheSize)

By("Deleting the IPPool and checking for pool and affine block deletion")
// Deleting the pool will also release all affine blocks associated with the pool.
_, err = c.IPPools().Delete(ctx, "mypool", options.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())

// The pool and the affine block for 127.0.0.1 should have deletion events.
expectedCacheSize -= 2
syncTester.ExpectCacheSize(expectedCacheSize)
syncTester.ExpectNoData(blockAffinityKeyV1)
syncTester.ExpectNoData(poolKeyV1)
}

By("Starting a new syncer and verifying that all current entries are returned before sync status")
Expand All @@ -329,7 +360,7 @@ var _ = testutils.E2eDatastoreDescribe("BGP syncer tests", testutils.DatastoreAl
// step.
syncTester.ExpectStatusUpdate(api.WaitForDatastore)
syncTester.ExpectStatusUpdate(api.ResyncInProgress)
syncTester.ExpectCacheSize(len(current))
syncTester.ExpectCacheSize(expectedCacheSize)
for _, e := range current {
if config.Spec.DatastoreType == apiconfig.Kubernetes {
// Don't check revisions for K8s since the node data gets updated constantly.
Expand Down
2 changes: 1 addition & 1 deletion lib/backend/syncersv1/felixsyncer/felixsyncer_e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ var _ = testutils.E2eDatastoreDescribe("Felix syncer tests", testutils.Datastore
Revision: pool.ResourceVersion,
})
syncTester.ExpectData(model.KVPair{
Key: model.GlobalConfigKey{"IpInIpEnabled"},
Key: model.GlobalConfigKey{"IpInIpEnabled"},
Value: "true",
})

Expand Down
154 changes: 137 additions & 17 deletions lib/clientv2/ippool.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,17 @@ package clientv2

import (
"context"
"fmt"
"net"
"time"

log "github.com/sirupsen/logrus"

apiv2 "github.com/projectcalico/libcalico-go/lib/apis/v2"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
cnet "github.com/projectcalico/libcalico-go/lib/net"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/libcalico-go/lib/watch"
log "github.com/sirupsen/logrus"
)

// IPPoolInterface has methods to work with IPPool resources.
Expand All @@ -45,7 +48,7 @@ type ipPools struct {
// representation of the IPPool, and an error, if there is any.
func (r ipPools) Create(ctx context.Context, res *apiv2.IPPool, opts options.SetOptions) (*apiv2.IPPool, error) {
// Validate the IPPool before creating the resource.
if err := r.validateAndSetDefaults(res); err != nil {
if err := r.validateAndSetDefaults(ctx, res, nil); err != nil {
return nil, err
}

Expand All @@ -67,14 +70,20 @@ func (r ipPools) Create(ctx context.Context, res *apiv2.IPPool, opts options.Set
// Update takes the representation of a IPPool and updates it. Returns the stored
// representation of the IPPool, and an error, if there is any.
func (r ipPools) Update(ctx context.Context, res *apiv2.IPPool, opts options.SetOptions) (*apiv2.IPPool, error) {
// Get the existing settings, so that we can validate the CIDR has not changed.
old, err := r.Get(ctx, res.Name, options.GetOptions{})
if err != nil {
return nil, err
}

// Validate the IPPool updating the resource.
if err := r.validateAndSetDefaults(res); err != nil {
if err := r.validateAndSetDefaults(ctx, res, old); err != nil {
return nil, err
}

// Enable IPIP globally if required. Do this before the Update so if it fails the user
// can retry the same command.
err := r.maybeEnableIPIP(ctx, res)
err = r.maybeEnableIPIP(ctx, res)
if err != nil {
return nil, err
}
Expand All @@ -88,6 +97,65 @@ func (r ipPools) Update(ctx context.Context, res *apiv2.IPPool, opts options.Set

// Delete takes name of the IPPool and deletes it. Returns an error if one occurs.
func (r ipPools) Delete(ctx context.Context, name string, opts options.DeleteOptions) (*apiv2.IPPool, error) {
// Deleting a pool requires a little care because of existing endpoints
// using IP addresses allocated in the pool. We do the deletion in
// the following steps:
// - disable the pool so no more IPs are assigned from it
// - remove all affinities associated with the pool
// - delete the pool

// Get the pool so that we can find the CIDR associated with it.
pool, err := r.Get(ctx, name, options.GetOptions{})
if err != nil {
return nil, err
}

logCxt := log.WithFields(log.Fields{
"CIDR": pool.Spec.CIDR,
"Name": name,
})

// If the pool is active, set the disabled flag to ensure we stop allocating from this pool.
if !pool.Spec.Disabled {
logCxt.Info("Disabling pool to release affinities")
pool.Spec.Disabled = true

// If the Delete has been called with a ResourceVersion then use that to perform the
// update - that way we'll catch update conflicts (we could actually check here, but
// the most likely scenario is there isn't one - so just pass it through and let the
// Update handle any conflicts).
if opts.ResourceVersion != "" {
pool.ResourceVersion = opts.ResourceVersion
}
if _, err := r.Update(ctx, pool, options.SetOptions{}); err != nil {
return nil, err
}

// Reset the resource version before the actual delete since the version of that resource
// will now have been updated.
opts.ResourceVersion = ""
}

// Release affinities associated with this pool. We do this even if the pool was disabled
// (since it may have been enabled at one time, and if there are no affine blocks created
// then this will be a no-op). We've already validated the CIDR so we know it will parse.
if _, cidrNet, err := cnet.ParseCIDR(pool.Spec.CIDR); err == nil {
logCxt.Info("Releasing pool affinities")

// Pause for a short period before releasing the affinities - this gives any in-progress
// allocations an opportunity to finish.
time.Sleep(500 * time.Millisecond)
err = r.client.IPAM().ReleasePoolAffinities(ctx, *cidrNet)

// Depending on the datastore, IPAM may not be supported. If we get a not supported
// error, then continue. Any other error, fail.
if _, ok := err.(cerrors.ErrorOperationNotSupported); !ok && err != nil {
return nil, err
}
}

// And finally, delete the pool.
logCxt.Info("Deleting pool")
out, err := r.client.resources.Delete(ctx, opts, apiv2.KindIPPool, noNamespace, name)
if out != nil {
return out.(*apiv2.IPPool), err
Expand Down Expand Up @@ -120,12 +188,14 @@ func (r ipPools) Watch(ctx context.Context, opts options.ListOptions) (watch.Int
return r.client.resources.Watch(ctx, opts, apiv2.KindIPPool)
}

// validateAndSetDefaults validates IPPool fields.
func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error {
// validateAndSetDefaults validates IPPool fields and sets default values that are
// not assigned.
// The old pool will be unassigned for a Create.
func (r ipPools) validateAndSetDefaults(ctx context.Context, new, old *apiv2.IPPool) error {
errFields := []cerrors.ErroredField{}

// Spec.CIDR field must not be empty.
if pool.Spec.CIDR == "" {
if new.Spec.CIDR == "" {
return cerrors.ErrorValidation{
ErroredFields: []cerrors.ErroredField{{
Name: "IPPool.Spec.CIDR",
Expand All @@ -135,55 +205,103 @@ func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error {
}

// Make sure the CIDR is parsable.
ipAddr, cidr, err := cnet.ParseCIDR(pool.Spec.CIDR)
ipAddr, cidr, err := cnet.ParseCIDR(new.Spec.CIDR)
if err != nil {
return cerrors.ErrorValidation{
ErroredFields: []cerrors.ErroredField{{
Name: "IPPool.Spec.CIDR",
Reason: "IPPool CIDR must be a valid subnet",
Value: new.Spec.CIDR,
}},
}
}

// Normalize the CIDR before persisting.
new.Spec.CIDR = cidr.String()

// If there was a previous pool then this must be an Update, validate that the
// CIDR has not changed. Since we are using normalized CIDRs we can just do a
// simple string comparison.
if old != nil && old.Spec.CIDR != new.Spec.CIDR {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.CIDR",
Reason: "IPPool CIDR cannot be modified",
Value: new.Spec.CIDR,
})
}

// If there was no previous pool then this must be a Create. Check that the CIDR
// does not overlap with any other pool CIDRs.
if old == nil {
allPools, err := r.List(ctx, options.ListOptions{})
if err != nil {
return err
}

for _, otherPool := range allPools.Items {
// It's possible that Create is called for a pre-existing pool, so skip our own
// pool and let the generic processing handle the pre-existing resource error case.
if otherPool.Name == new.Name {
continue
}
_, otherCIDR, err := cnet.ParseCIDR(otherPool.Spec.CIDR)
if err != nil {
log.WithField("Name", otherPool.Name).WithError(err).Error("IPPool is configured with an invalid CIDR")
continue
}
if otherCIDR.IsNetOverlap(cidr.IPNet) {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.CIDR",
Reason: fmt.Sprintf("IPPool(%s) CIDR overlaps with IPPool(%s) CIDR %s", new.Name, otherPool.Name, otherPool.Spec.CIDR),
Value: new.Spec.CIDR,
})
}
}
}

// Make sure IPIPMode is defaulted to "Never".
if len(pool.Spec.IPIPMode) == 0 {
pool.Spec.IPIPMode = apiv2.IPIPModeNever
if len(new.Spec.IPIPMode) == 0 {
new.Spec.IPIPMode = apiv2.IPIPModeNever
}

// IPIP cannot be enabled for IPv6.
if cidr.Version() == 6 && pool.Spec.IPIPMode != apiv2.IPIPModeNever {
if cidr.Version() == 6 && new.Spec.IPIPMode != apiv2.IPIPModeNever {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.IPIP.Mode",
Name: "IPPool.Spec.IPIPMode",
Reason: "IPIP is not supported on an IPv6 IP pool",
Value: new.Spec.IPIPMode,
})
}

// The Calico IPAM places restrictions on the minimum IP pool size. If
// the ippool is enabled, check that the pool is at least the minimum size.
if !pool.Spec.Disabled {
if !new.Spec.Disabled {
ones, bits := cidr.Mask.Size()
log.Debugf("Pool CIDR: %s, num bits: %d", cidr.String(), bits-ones)
if bits-ones < 6 {
if cidr.Version() == 4 {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.CIDR",
Reason: "IPv4 pool size is too small (min /26) for use with Calico IPAM",
Value: new.Spec.CIDR,
})
} else {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.CIDR",
Reason: "IPv6 pool size is too small (min /122) for use with Calico IPAM",
Value: new.Spec.CIDR,
})
}
}
}

// The Calico CIDR should be strictly masked
log.Debugf("IPPool CIDR: %s, Masked IP: %d", pool.Spec.CIDR, cidr.IP)
log.Debugf("IPPool CIDR: %s, Masked IP: %d", new.Spec.CIDR, cidr.IP)
if cidr.IP.String() != ipAddr.String() {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.CIDR",
Reason: "IP pool CIDR is not strictly masked",
Reason: "IPPool CIDR is not strictly masked",
Value: new.Spec.CIDR,
})
}

Expand All @@ -202,14 +320,16 @@ func (_ ipPools) validateAndSetDefaults(pool *apiv2.IPPool) error {
if cidr.Version() == 4 && cidr.IsNetOverlap(ipv4LinkLocalNet) {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.CIDR",
Reason: "IP pool range overlaps with IPv4 Link Local range 169.254.0.0/16",
Reason: "IPPool CIDR overlaps with IPv4 Link Local range 169.254.0.0/16",
Value: new.Spec.CIDR,
})
}

if cidr.Version() == 6 && cidr.IsNetOverlap(ipv6LinkLocalNet) {
errFields = append(errFields, cerrors.ErroredField{
Name: "IPPool.Spec.CIDR",
Reason: "IP pool range overlaps with IPv6 Link Local range fe80::/10",
Reason: "IPPool CIDR overlaps with IPv6 Link Local range fe80::/10",
Value: new.Spec.CIDR,
})
}

Expand Down
Loading

0 comments on commit dae320c

Please sign in to comment.