diff --git a/Makefile b/Makefile index 6c39ed3..416360e 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ GOMOD := $(GO) mod GOARCH := $(shell $(GO) env GOARCH) GOBUILD = CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) $(GO) build GOTEST := $(GO) test -GOPKGS := $$($(GO) list ./...| grep -vE "vendor" | grep -vE "cmd" |grep -vE "test" |grep -v 'apis/networking'| grep -v 'generated') +GOPKGS := $$($(GO) list ./...| grep "pkg" |grep -v "vendor" | grep -v "cmd" |grep -v "test" | grep -v 'api' |grep -v "generated" | grep -v 'pkg/bce' | grep -v config | grep -v metric | grep -v rpc | grep -v version | grep -v wrapper | grep -v util) GOGCFLAGS := -gcflags=all="-trimpath=$(GOPATH)" -asmflags=all="-trimpath=$(GOPATH)" GOLDFLAGS := -ldflags '-s -w' GO_PACKAGE := github.com/baidubce/baiducloud-cce-cni-driver @@ -23,7 +23,7 @@ COVFUNC := $(HOMEDIR)/covfunc.txt # coverage profile information for each funct COVHTML := $(HOMEDIR)/covhtml.html # HTML representation of coverage profile # versions -VERSION := v1.5.4 +VERSION := v1.6.12 FELIX_VERSION := v3.5.8 K8S_VERSION := 1.18.9 @@ -68,7 +68,7 @@ gomod: set-env outdir: mkdir -p $(OUTDIR)/cni-bin # Compile all cni plug-ins -cni_target := eni-ipam ipvlan macvlan bandwidth ptp sysctl unnumbered-ptp crossvpc-eni rdma +cni_target := eni-ipam ipvlan macvlan bandwidth ptp sysctl unnumbered-ptp crossvpc-eni rdma eri $(cni_target): fmt outdir @echo "===> Building cni $@ <===" $(GOBUILD) $(GOLDFLAGS) $(GOGCFLAGS) -o $(HOMEDIR)/$@ $(HOMEDIR)/cni/$@ @@ -90,7 +90,7 @@ build: compile # make test, test your code test: prepare test-case test-case: - $(GOTEST) -v -cover $(GOPKGS) + $(GOTEST) -v -cover -parallel 16 $(GOPKGS) debian-iptables-image: @echo "===> Building debian iptables base image <===" diff --git a/build/images/cce-cni/Dockerfile b/build/images/cce-cni/Dockerfile index ffb7668..5c0862c 100644 --- a/build/images/cce-cni/Dockerfile +++ b/build/images/cce-cni/Dockerfile @@ -12,6 +12,7 @@ COPY output/cni-bin/eni-ipam /eni-ipam COPY 
output/cni-bin/sysctl /sysctl COPY output/cni-bin/crossvpc-eni /crossvpc-eni COPY output/cni-bin/rdma /rdma +COPY output/cni-bin/eri /eri # install cce ipam binary COPY output/cce-ipam /bin/cce-ipam diff --git a/build/images/cce-cni/entrypoint.sh b/build/images/cce-cni/entrypoint.sh index 819a09b..f0d7c2a 100755 --- a/build/images/cce-cni/entrypoint.sh +++ b/build/images/cce-cni/entrypoint.sh @@ -2,7 +2,7 @@ set -u -e CNI_BINARY_DIR=/opt/cni/bin/ -CNI_PLUGIN_LIST="bridge unnumbered-ptp ipvlan macvlan bandwidth loopback host-local ptp eni-ipam sysctl portmap crossvpc-eni rdma" +CNI_PLUGIN_LIST="bridge unnumbered-ptp ipvlan macvlan bandwidth loopback host-local ptp eni-ipam sysctl portmap crossvpc-eni rdma eri" # mv cni binary to dest for PLUGIN in $CNI_PLUGIN_LIST diff --git a/build/images/cni-base/entrypoint.sh b/build/images/cni-base/entrypoint.sh index 1e0fcf8..85d301a 100755 --- a/build/images/cni-base/entrypoint.sh +++ b/build/images/cni-base/entrypoint.sh @@ -2,7 +2,7 @@ set -u -e CNI_BINARY_DIR=/opt/cni/bin/ -CNI_PLUGIN_LIST="bridge unnumbered-ptp ipvlan macvlan bandwidth loopback host-local ptp eni-ipam sysctl portmap crossvpc-eni rdma" +CNI_PLUGIN_LIST="bridge unnumbered-ptp ipvlan macvlan bandwidth loopback host-local ptp eni-ipam sysctl portmap crossvpc-eni rdma eri" # mv cni binary to dest for PLUGIN in $CNI_PLUGIN_LIST diff --git a/cni/crossvpc-eni/main.go b/cni/crossvpc-eni/main.go index 5aeaec6..1417c78 100644 --- a/cni/crossvpc-eni/main.go +++ b/cni/crossvpc-eni/main.go @@ -34,6 +34,7 @@ import ( "github.com/containernetworking/plugins/pkg/ns" bv "github.com/containernetworking/plugins/pkg/utils/buildversion" "github.com/vishvananda/netlink" + "k8s.io/apimachinery/pkg/util/wait" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/cni" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/rpc" @@ -163,6 +164,7 @@ func (p *crossVpcEniPlugin) cmdAdd(args *skel.CmdArgs) error { ipam = NewCrossVpcEniIPAM(p.grpc, p.rpc) netns ns.NetNS err error + linkErr error 
) log.Infof(ctx, "====> CmdAdd Begins <====") @@ -215,23 +217,24 @@ func (p *crossVpcEniPlugin) cmdAdd(args *skel.CmdArgs) error { } defer netns.Close() - // ENI 插入虚机后 driver 有时候需要 10s 才能识别到网卡设备,这里重试延长设备等待时间 - const ( - linkReadyRetryCount = 6 - ) - - for i := 0; i < linkReadyRetryCount; i++ { - target, err = p.netutil.GetLinkByMacAddress(resp.GetCrossVPCENI().Mac) - if err != nil { - log.Errorf(ctx, "retry: host netns: %v", err) + // the kernel would take some time to detect eni insertion. + err = wait.ExponentialBackoff(wait.Backoff{ + Duration: time.Millisecond * 500, + Factor: 1, + Steps: 6, + }, func() (done bool, err error) { + target, linkErr = p.netutil.GetLinkByMacAddress(resp.GetCrossVPCENI().Mac) + if linkErr != nil { + log.Warningf(ctx, "host netns: %v", linkErr) } else { - break + log.Infof(ctx, "host netns: found link %v with mac address %v", target.Attrs().Name, resp.GetCrossVPCENI().Mac) + return true, nil } - time.Sleep(time.Second * 2) - } - if err != nil { - return fmt.Errorf("host netns: %v", err) + return false, nil + }) + if err != nil && err == wait.ErrWaitTimeout { + return fmt.Errorf("host netns: %v", linkErr) } if err = p.nlink.LinkSetNsFd(target, int(netns.Fd())); err != nil { diff --git a/cni/crossvpc-eni/main_test.go b/cni/crossvpc-eni/main_test.go index aab9c76..aa8844a 100644 --- a/cni/crossvpc-eni/main_test.go +++ b/cni/crossvpc-eni/main_test.go @@ -16,7 +16,7 @@ package main import ( - "net" + "fmt" "syscall" "testing" @@ -96,6 +96,12 @@ var ( }` envArgs = `IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=busybox;K8S_POD_INFRA_CONTAINER_ID=xxxxx` + + tenantEni = &netlink.Bond{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eni0", + }, + } ) func setupEnv(ctrl *gomock.Controller) ( @@ -167,7 +173,7 @@ func Test_crossVpcEniPlugin_cmdAdd(t *testing.T) { rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient), cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil), 
ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil), - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), netns.EXPECT().Fd().Return(uintptr(10)), nlink.EXPECT().LinkSetNsFd(gomock.Any(), gomock.Any()).Return(nil), netns.EXPECT().Do(gomock.Any()).Return(nil), @@ -200,7 +206,7 @@ func Test_crossVpcEniPlugin_cmdAdd(t *testing.T) { wantErr: false, }, { - name: "Chained Plugin 正常流程,等待网卡就绪重试", + name: "Main plugin 正常流程", fields: func() fields { ctrl := gomock.NewController(t) nlink, ns, ipam, ip, types, netutil, rpc, grpc, _ := setupEnv(ctrl) @@ -223,8 +229,7 @@ func Test_crossVpcEniPlugin_cmdAdd(t *testing.T) { rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient), cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil), ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil), - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, net.ErrClosed), - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), netns.EXPECT().Fd().Return(uintptr(10)), nlink.EXPECT().LinkSetNsFd(gomock.Any(), gomock.Any()).Return(nil), netns.EXPECT().Do(gomock.Any()).Return(nil), @@ -251,41 +256,27 @@ func Test_crossVpcEniPlugin_cmdAdd(t *testing.T) { IfName: "eth0", Args: envArgs, Path: "/opt/cin/bin", - StdinData: []byte(stdinData), + StdinData: []byte(stdinDataMainPlugin), }, }, wantErr: false, }, { - name: "Main plugin 正常流程", + name: "分配 ENI 失败流程", fields: func() fields { ctrl := gomock.NewController(t) nlink, ns, ipam, ip, types, netutil, rpc, grpc, _ := setupEnv(ctrl) cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) - netns := mocknetns.NewMockNetNS(ctrl) + // netns := mocknetns.NewMockNetNS(ctrl) allocReply := rpcdef.AllocateIPReply{ - IsSuccess: true, - NetworkInfo: &rpcdef.AllocateIPReply_CrossVPCENI{ - CrossVPCENI: 
&rpcdef.CrossVPCENIReply{ - IP: "10.10.10.10", - Mac: "ff:ff:ff:ff:ff:ff", - VPCCIDR: "10.0.0.0/8", - }, - }, + IsSuccess: false, } gomock.InOrder( grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient), cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil), - ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil), - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, nil), - netns.EXPECT().Fd().Return(uintptr(10)), - nlink.EXPECT().LinkSetNsFd(gomock.Any(), gomock.Any()).Return(nil), - netns.EXPECT().Do(gomock.Any()).Return(nil), - types.EXPECT().PrintResult(gomock.Any(), gomock.Any()).Return(nil), - netns.EXPECT().Close().Return(nil), ) return fields{ @@ -307,27 +298,28 @@ func Test_crossVpcEniPlugin_cmdAdd(t *testing.T) { IfName: "eth0", Args: envArgs, Path: "/opt/cin/bin", - StdinData: []byte(stdinDataMainPlugin), + StdinData: []byte(stdinData), }, }, - wantErr: false, + wantErr: true, }, { - name: "分配 ENI 失败流程", + name: "Pod 无需申请 ENI ", fields: func() fields { ctrl := gomock.NewController(t) nlink, ns, ipam, ip, types, netutil, rpc, grpc, _ := setupEnv(ctrl) cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) // netns := mocknetns.NewMockNetNS(ctrl) - allocReply := rpcdef.AllocateIPReply{ - IsSuccess: false, + allocReply := &rpcdef.AllocateIPReply{ + IsSuccess: true, } gomock.InOrder( grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient), - cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil), + cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(allocReply, nil), + types.EXPECT().PrintResult(gomock.Any(), gomock.Any()).Return(nil), ) return fields{ @@ -352,25 +344,39 @@ func Test_crossVpcEniPlugin_cmdAdd(t *testing.T) { StdinData: 
[]byte(stdinData), }, }, - wantErr: true, + wantErr: false, }, { - name: "Pod 无需申请 ENI ", + name: "Main plugin ,查找 eni 初次失败", fields: func() fields { ctrl := gomock.NewController(t) nlink, ns, ipam, ip, types, netutil, rpc, grpc, _ := setupEnv(ctrl) cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) - // netns := mocknetns.NewMockNetNS(ctrl) - allocReply := &rpcdef.AllocateIPReply{ + netns := mocknetns.NewMockNetNS(ctrl) + allocReply := rpcdef.AllocateIPReply{ IsSuccess: true, + NetworkInfo: &rpcdef.AllocateIPReply_CrossVPCENI{ + CrossVPCENI: &rpcdef.CrossVPCENIReply{ + IP: "10.10.10.10", + Mac: "ff:ff:ff:ff:ff:ff", + VPCCIDR: "10.0.0.0/8", + }, + }, } gomock.InOrder( grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient), - cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(allocReply, nil), + cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil), + ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, fmt.Errorf("link with mac ff:ff:ff:ff:ff:ff not found")), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), + netns.EXPECT().Fd().Return(uintptr(10)), + nlink.EXPECT().LinkSetNsFd(gomock.Any(), gomock.Any()).Return(nil), + netns.EXPECT().Do(gomock.Any()).Return(nil), types.EXPECT().PrintResult(gomock.Any(), gomock.Any()).Return(nil), + netns.EXPECT().Close().Return(nil), ) return fields{ @@ -392,11 +398,68 @@ func Test_crossVpcEniPlugin_cmdAdd(t *testing.T) { IfName: "eth0", Args: envArgs, Path: "/opt/cin/bin", - StdinData: []byte(stdinData), + StdinData: []byte(stdinDataMainPlugin), }, }, wantErr: false, }, + { + name: "Main plugin ,查找 eni 一直失败", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, _ := setupEnv(ctrl) + + cniBackendClient := 
mockcbclient.NewMockCNIBackendClient(ctrl) + netns := mocknetns.NewMockNetNS(ctrl) + allocReply := rpcdef.AllocateIPReply{ + IsSuccess: true, + NetworkInfo: &rpcdef.AllocateIPReply_CrossVPCENI{ + CrossVPCENI: &rpcdef.CrossVPCENIReply{ + IP: "10.10.10.10", + Mac: "ff:ff:ff:ff:ff:ff", + VPCCIDR: "10.0.0.0/8", + }, + }, + } + + gomock.InOrder( + grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), + rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient), + cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil), + ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, fmt.Errorf("link with mac ff:ff:ff:ff:ff:ff not found")), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, fmt.Errorf("link with mac ff:ff:ff:ff:ff:ff not found")), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, fmt.Errorf("link with mac ff:ff:ff:ff:ff:ff not found")), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, fmt.Errorf("link with mac ff:ff:ff:ff:ff:ff not found")), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, fmt.Errorf("link with mac ff:ff:ff:ff:ff:ff not found")), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(nil, fmt.Errorf("link with mac ff:ff:ff:ff:ff:ff not found")), + netns.EXPECT().Close().Return(nil), + ) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinDataMainPlugin), + }, + }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -451,7 +514,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, 
sysctl := setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil), nlink.EXPECT().AddrAdd(gomock.Any(), gomock.Any()).Return(nil), nlink.EXPECT().LinkByName("eth0").Return(&netlink.Veth{}, nil), @@ -501,7 +564,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkSetDown(gomock.Any()).Return(nil), nlink.EXPECT().LinkSetName(gomock.Any(), gomock.Any()).Return(nil), nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil), @@ -553,7 +616,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Veth{}, nil), nlink.EXPECT().LinkDel(gomock.Any()).Return(nil), nlink.EXPECT().LinkSetDown(gomock.Any()).Return(nil), @@ -607,7 +670,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkSetDown(gomock.Any()).Return(nil), nlink.EXPECT().LinkSetName(gomock.Any(), gomock.Any()).Return(nil), nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil), @@ -661,7 +724,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := 
setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkSetDown(gomock.Any()).Return(nil), nlink.EXPECT().LinkSetName(gomock.Any(), gomock.Any()).Return(nil), nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil), @@ -716,7 +779,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkSetDown(gomock.Any()).Return(nil), nlink.EXPECT().LinkSetName(gomock.Any(), gomock.Any()).Return(nil), nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil), @@ -773,7 +836,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Veth{}, nil), nlink.EXPECT().LinkDel(gomock.Any()).Return(nil), nlink.EXPECT().LinkSetDown(gomock.Any()).Return(nil), @@ -829,7 +892,7 @@ func Test_crossVpcEniPlugin_setupEni(t *testing.T) { nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) gomock.InOrder( - netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(&netlink.Bond{}, nil), + netutil.EXPECT().GetLinkByMacAddress(gomock.Any()).Return(tenantEni, nil), nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil), nlink.EXPECT().AddrAdd(gomock.Any(), gomock.Any()).Return(nil), nlink.EXPECT().LinkByName("eth0").Return(&netlink.Veth{}, nil), diff --git a/cni/eni-ipam/bcc.go b/cni/eni-ipam/bcc.go index b8da191..e57c580 100644 --- a/cni/eni-ipam/bcc.go +++ b/cni/eni-ipam/bcc.go @@ 
-66,7 +66,8 @@ func (client *bccENIMultiIP) SetupNetwork( return err } - log.Infof(ctx, "pod (%v %v) with IP %v is subject to interface: %v", client.namespace, client.name, allocRespNetworkInfo.IP, eniIntf.Attrs().Name) + log.Infof(ctx, "pod (%v %v) with IP %v is subject to interface: %v", client.namespace, client.name, + allocRespNetworkInfo.IP, eniIntf.Attrs().Name) ethIndex, err := getENIInterfaceIndex(ipamConf.ENILinkPrefix, eniIntf) if err != nil { @@ -107,6 +108,12 @@ func (client *bccENIMultiIP) SetupNetwork( _ = client.delScopeLinkRoute(eniIntf) } + // check whether eni is working + eniErr := client.validateEni(eniIntf, rtTable) + if eniErr != nil { + return fmt.Errorf("eni isn't working: %s", eniErr) + } + return nil } @@ -227,3 +234,45 @@ func (client *bccENIMultiIP) delToOrFromContainerRule(isToContainer bool, addr * return nil } + +func (client *bccENIMultiIP) validateEni(eniIntf netlink.Link, rtTable int) error { + if err := client.validateEniRoute(eniIntf, rtTable); err != nil { + return err + } + + if err := client.validateEniIP(eniIntf); err != nil { + return err + } + + return nil +} + +func (client *bccENIMultiIP) validateEniRoute(eniIntf netlink.Link, rtTable int) error { + filter := &netlink.Route{ + LinkIndex: eniIntf.Attrs().Index, + Table: rtTable, + } + routes, listErr := client.netlink.RouteListFiltered(netlink.FAMILY_V4, filter, + netlink.RT_FILTER_TABLE|netlink.RT_FILTER_OIF) + if listErr != nil { + return fmt.Errorf("failed to list route of eni %s, linkIndex %d, table %d, err: %s", + eniIntf.Attrs().Name, filter.LinkIndex, filter.Table, listErr) + } + + if len(routes) == 0 { + return fmt.Errorf("route table %d of eni %s not found", rtTable, eniIntf.Attrs().Name) + } + return nil +} + +func (client *bccENIMultiIP) validateEniIP(eniIntf netlink.Link) error { + eniAddressList, addrErr := client.netlink.AddrList(eniIntf, netlink.FAMILY_V4) + if addrErr != nil { + return fmt.Errorf("failed to list addr of eni %s, err: %s", 
eniIntf.Attrs().Name, addrErr) + } + + if len(eniAddressList) == 0 { + return fmt.Errorf("failed to get address of eni %s", eniIntf.Attrs().Name) + } + return nil +} diff --git a/cni/eri/eri.go b/cni/eri/eri.go new file mode 100644 index 0000000..c1edeaf --- /dev/null +++ b/cni/eri/eri.go @@ -0,0 +1,812 @@ +/* + * Copyright (c) 2021 Baidu, Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * + */ + +package main + +import ( + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "net" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/cni" + log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" + + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/keymutex" + networkutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/network" + typeswrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/cnitypes" + grpcwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/grpc" + ipwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ip" + ipamwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ipam" + netlinkwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/netlink" + nswrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ns" + rpcwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/rpc" + sysctlwrapper 
"github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/sysctl" + "github.com/containernetworking/cni/pkg/skel" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/containernetworking/plugins/pkg/ip" + "github.com/containernetworking/plugins/pkg/ns" + bv "github.com/containernetworking/plugins/pkg/utils/buildversion" + "github.com/vishvananda/netlink" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + utilexec "k8s.io/utils/exec" +) + +const ( + logFile = "/var/log/cce/cni-rdma.log" + rtStartIdx = 100 + fileLock = "/var/run/cni-rdma.lock" + roceDevicePrefix = "roce" + resourceName = "rdma" + defaultKubeConfig = "/etc/cni/net.d/cce-cni.d/cce-cni.kubeconfig" + rpFilterSysctlTemplate = "net.ipv4.conf.%s.rp_filter" + arpIgnoreSysctlTemplate = "net.ipv4.conf.%s.arp_ignore" +) + +var buildConfigFromFlags = clientcmd.BuildConfigFromFlags +var k8sClientSet = func(c *rest.Config) (kubernetes.Interface, error) { + clientSet, err := kubernetes.NewForConfig(c) + return clientSet, err +} + +type NetConf struct { + types.NetConf + Mode string `json:"mode"` + KubeConfig string `json:"kubeconfig"` + Mask int `json:"mask"` + InstanceType string `json:"instanceType"` + IPAM *IPAMConf `json:"ipam,omitempty"` +} + +type IPAMConf struct { + Endpoint string `json:"endpoint"` +} + +func init() { + // this ensures that main runs only on main thread (thread group leader). 
+ // since namespace ops (unshare, setns) are done for a single thread, we + // must ensure that the goroutine does not jump from OS thread to thread + runtime.LockOSThread() +} + +func loadConf(bytes []byte) (*NetConf, string, error) { + n := &NetConf{} + if err := json.Unmarshal(bytes, n); err != nil { + return nil, "", fmt.Errorf("failed to load netconf: %v", err) + } + + if n.IPAM == nil { + return nil, "", fmt.Errorf("IPAM config missing 'ipam' key") + } + + if n.KubeConfig == "" { + n.KubeConfig = defaultKubeConfig + } + if n.IPAM.Endpoint == "" { + return nil, "", fmt.Errorf("ipam endpoint is empty") + } + + if n.Mask <= 0 { + n.Mask = 24 + } + + return n, n.CNIVersion, nil +} + +type eriPlugin struct { + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface +} + +func newERIPlugin() *eriPlugin { + return &eriPlugin{ + nlink: netlinkwrapper.New(), + ns: nswrapper.New(), + ipam: ipamwrapper.New(), + ip: ipwrapper.New(), + types: typeswrapper.New(), + netutil: networkutil.New(), + rpc: rpcwrapper.New(), + grpc: grpcwrapper.New(), + exec: utilexec.New(), + sysctl: sysctlwrapper.New(), + } +} + +func (p *eriPlugin) cmdAdd(args *skel.CmdArgs) error { + ctx := log.NewContext() + log.Infof(ctx, "====> CmdAdd Begins <====") + defer log.Infof(ctx, "====> CmdAdd Ends <====") + defer log.Flush() + ipam := NewRoceIPAM(p.grpc, p.rpc) + + n, cniVersion, err := loadConf(args.StdinData) + if err != nil { + return err + } + netns, err := p.ns.GetNS(args.Netns) + if err != nil { + return fmt.Errorf("failed to open netns %q: %v", netns, err) + } + defer netns.Close() + result := ¤t.Result{CNIVersion: cniVersion} + + roceDevs, err := p.getAllRoceDevices() + if err != nil || len(roceDevs) <= 0 { + log.Infof(ctx, "not found roce devices err: %s", 
err.Error()) + return types.PrintResult(result, cniVersion) + } + + log.Infof(ctx, "roce devs:%v", roceDevs) + + k8sArgs, err := p.loadK8SArgs(args.Args) + if err != nil { + return err + } + want, err := wantRoce(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME), n) + if err != nil { + log.Errorf(ctx, "check want roce failed: %s", err.Error()) + } + + if !want { + log.Infof(ctx, "pod: %s,donot want roce", string(k8sArgs.K8S_POD_NAME)) + return types.PrintResult(result, n.CNIVersion) + } + + l, err := keymutex.GrabFileLock(fileLock) + if err != nil { + log.Errorf(ctx, "grad file lock error: %s", err.Error()) + return fmt.Errorf("grad file lock error: %s", err.Error()) + } + defer l.Close() + + for idx, devName := range roceDevs { + roceDevName := fmt.Sprintf("%s%d", roceDevicePrefix, idx+1) + err = p.setupIPvlan(ctx, n, devName, roceDevName, netns, k8sArgs, ipam) + if err != nil { + return err + } + } + + return types.PrintResult(result, cniVersion) +} + +func (p *eriPlugin) cmdDel(args *skel.CmdArgs) error { + ctx := log.NewContext() + log.Infof(ctx, "====> Rdma CNI <====") + log.Infof(ctx, "====> CmdDel Begins <====") + defer log.Infof(ctx, "====> CmdDel Ends <====") + log.Infof(ctx, "[cmdDel]: containerID: %v, netns: %v, ifName: %v, args: %v, path: %v", args.ContainerID, args.Netns, args.IfName, args.Args, args.Path) + log.Infof(ctx, "[cmdDel]: stdinData: %v", string(args.StdinData)) + if args.Netns == "" { + return nil + } + + // There is a netns so try to clean up. Delete can be called multiple times + // so don't return an error if the device is already removed. 
+ err := p.ns.WithNetNSPath(args.Netns, func(_ ns.NetNS) error { + return p.delAllIPVlanDevices() + }) + + if err != nil { + log.Errorf(ctx, "delete macvlan device failed:%s", err.Error()) + return err + } + + n, _, err := loadConf(args.StdinData) + if err != nil { + return err + } + + k8sArgs, err := p.loadK8SArgs(args.Args) + if err != nil { + return err + } + + ipamClient := NewRoceIPAM(p.grpc, p.rpc) + resp, err := ipamClient.ReleaseIP(ctx, k8sArgs, n.IPAM.Endpoint, n.InstanceType) + if err != nil { + msg := fmt.Sprintf("failed to delete IP for pod (%v %v): %v", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME, err) + log.Error(ctx, msg) + return errors.New(msg) + } + + if !resp.IsSuccess { + msg := fmt.Sprintf("ipam server release IP error: %v", resp.ErrMsg) + log.Error(ctx, msg) + return errors.New(msg) + } + log.Infof(ctx, "release for pod(%v %v) successfully", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME) + + return err + +} + +func (p *eriPlugin) delAllIPVlanDevices() error { + devs, err := p.nlink.LinkList() + if err != nil { + return err + } + for _, dev := range devs { + if dev.Type() == "ipvlan" { + if err := p.ip.DelLinkByName(dev.Attrs().Name); err != nil { + if err != ip.ErrLinkNotFound { + return err + } + } + } + } + return nil +} + +func main() { + initFlags() + defer log.Flush() + + logDir := filepath.Dir(logFile) + if err := os.Mkdir(logDir, 0755); err != nil && !os.IsExist(err) { + fmt.Printf("mkdir %v failed: %v", logDir, err) + os.Exit(1) + } + + plugin := newERIPlugin() + if e := skel.PluginMainWithError(plugin.cmdAdd, plugin.cmdCheck, plugin.cmdDel, cni.PluginSupportedVersions, bv.BuildString("rdma")); e != nil { + log.Flush() + if err := e.Print(); err != nil { + log.Errorf(context.TODO(), "Error writing error JSON to stdout: %v", err) + } + os.Exit(1) + } + +} + +func (p *eriPlugin) cmdCheck(args *skel.CmdArgs) error { + return nil +} + +func (p *eriPlugin) loadK8SArgs(envArgs string) (*cni.K8SArgs, error) { + k8sArgs := cni.K8SArgs{} 
+ if envArgs != "" { + err := types.LoadArgs(envArgs, &k8sArgs) + if err != nil { + return nil, err + } + } + return &k8sArgs, nil +} + +func initFlags() { + log.InitFlags(nil) + flag.Set("logtostderr", "false") + flag.Set("log_file", logFile) + flag.Parse() +} + +func modeFromString(s string) (netlink.IPVlanMode, error) { + switch s { + case "", "l3": + return netlink.IPVLAN_MODE_L3, nil + case "l2": + return netlink.IPVLAN_MODE_L2, nil + case "l3s": + return netlink.IPVLAN_MODE_L3S, nil + default: + return 0, fmt.Errorf("unknown ipvlan mode: %q", s) + } +} + +func (p *eriPlugin) createIPvlan(conf *NetConf, master, ifName string, netns ns.NetNS) (*current.Interface, error) { + ipvlan := ¤t.Interface{} + + mode, err := modeFromString(conf.Mode) + if err != nil { + return nil, err + } + + m, err := p.nlink.LinkByName(master) + if err != nil { + return nil, fmt.Errorf("failed to lookup master %q: %v", master, err) + } + + // due to kernel bug we have to create with tmpName or it might + // collide with the name on the host and error out + tmpName, err := ip.RandomVethName() + if err != nil { + return nil, err + } + + iv := &netlink.IPVlan{ + LinkAttrs: netlink.LinkAttrs{ + // MTU: conf.MTU, + Name: tmpName, + ParentIndex: m.Attrs().Index, + Namespace: netlink.NsFd(int(netns.Fd())), + }, + Mode: mode, + } + + if err := p.nlink.LinkAdd(iv); err != nil { + return nil, fmt.Errorf("failed to create ipvlan for eri rdma: %v", err) + } + + err = netns.Do(func(_ ns.NetNS) error { + return p.setupIPvlanInterface(ipvlan, iv, tmpName, ifName, netns) + }) + if err != nil { + return nil, err + } + + return ipvlan, nil +} + +func (p *eriPlugin) setupIPvlanInterface(ipvlan *current.Interface, iv *netlink.IPVlan, tmpName, ifName string, netns ns.NetNS) error { + err := p.ip.RenameLink(tmpName, ifName) + if err != nil { + _ = p.nlink.LinkDel(iv) + return fmt.Errorf("failed to rename ipvlan to %q: %v", ifName, err) + } + ipvlan.Name = ifName + + // Re-fetch macvlan to get all 
properties/attributes + contIPvlan, err := p.nlink.LinkByName(ifName) + if err != nil { + return fmt.Errorf("failed to refetch ipvlan %q: %v", ifName, err) + } + ipvlan.Mac = contIPvlan.Attrs().HardwareAddr.String() + ipvlan.Sandbox = netns.Path() + + return nil +} + +func (p *eriPlugin) getAllRoceDevices() ([]string, error) { + cmd := p.exec.Command("sh", "-c", "ibdev2netdev | awk '{print $5}'") + out, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("run ibdev2netdev cmd failed with %s\n", err) + } + + if strings.Contains(string(out), "not found") { + return nil, fmt.Errorf("exec command error %s\n", string(out)) + } + + parts := strings.Split(strings.TrimSpace(string(out)), "\n") + roceDevs := make([]string, 0, 1) + + defaultRouteInterface, err := getDefaultRouteInterfaceName() + if err != nil { + return nil, err + } + + for _, devName := range parts { + if devName == defaultRouteInterface { + continue + } + + roceDevs = append(roceDevs, devName) + } + + return roceDevs, nil +} + +func getDefaultRouteInterfaceName() (string, error) { + routeToDstIP, err := netlink.RouteList(nil, netlink.FAMILY_ALL) + if err != nil { + return "", err + } + + for _, v := range routeToDstIP { + if v.Dst == nil { + l, err := netlink.LinkByIndex(v.LinkIndex) + if err != nil { + return "", err + } + return l.Attrs().Name, nil + } + } + + return "", fmt.Errorf("no default route interface found") +} + +func (p *eriPlugin) setupIPvlan(ctx context.Context, conf *NetConf, master, ifName string, netns ns.NetNS, k8sArgs *cni.K8SArgs, ipamClient *roceIPAM) error { + ipvlanInterface, err := p.createIPvlan(conf, master, ifName, netns) + if err != nil { + return err + } + log.Infof(ctx, "create ipvlan dev: %s successfully,master:%s", ifName, master) + + defer func() { + if err != nil { + err = netns.Do(func(_ ns.NetNS) error { + return ip.DelLinkByName(ifName) + }) + if err != nil { + log.Errorf(ctx, "delete link error in defer, device name: %s ,error:%s", ifName, 
err.Error()) + } + } + }() + + m, err := p.nlink.LinkByName(master) + if err != nil { + return fmt.Errorf("failed to lookup master %q: %v", master, err) + } + + masterMac := m.Attrs().HardwareAddr.String() + masterMask := p.getDeviceMask(conf, master) + + log.Infof(ctx, "master mac: %s,master mask: %v,for dev: %s", masterMac, masterMask, master) + + var allocIP *netlink.Addr + err = netns.Do(func(_ ns.NetNS) error { + allocIP, err = p.setupIPvlanNetworkInfo(ctx, conf, masterMac, masterMask, ifName, ipvlanInterface, k8sArgs, ipamClient) + return err + }) + + if err != nil { + return err + } + if allocIP != nil { + err = p.setUpHostVethRoute(ctx, m, *allocIP, netns) + if err != nil { + log.Errorf(ctx, "set up host veth error: %s for pod: %s", err.Error(), string(k8sArgs.K8S_POD_NAME)) + return fmt.Errorf("set up host veth error: %s", err.Error()) + } + } + + if err != nil { + return err + } + err = p.addRoute2IPVlanMaster(m, netns) + + return err +} + +func (p *eriPlugin) getDeviceMask(conf *NetConf, devName string) net.IPMask { + link, err := p.nlink.LinkByName(devName) + if err != nil { + return net.CIDRMask(conf.Mask, 32) + } + + addrList, err := p.nlink.AddrList(link, netlink.FAMILY_V4) + if err != nil { + return net.CIDRMask(conf.Mask, 32) + } + + for _, addr := range addrList { + return addr.IPNet.Mask + } + return net.CIDRMask(conf.Mask, 32) +} + +func (p *eriPlugin) setupIPvlanNetworkInfo(ctx context.Context, conf *NetConf, masterMac string, masterMask net.IPMask, ifName string, + ipvlanInterface *current.Interface, k8sArgs *cni.K8SArgs, ipamClient *roceIPAM) (*netlink.Addr, error) { + ipvlanInterfaceLink, err := p.nlink.LinkByName(ifName) + if err != nil { + return nil, fmt.Errorf("failed to find interface name %q: %v", ipvlanInterface.Name, err) + } + + if err := p.nlink.LinkSetUp(ipvlanInterfaceLink); err != nil { + return nil, fmt.Errorf("failed to set %q UP: %v", ifName, err) + } + name, namespace := string(k8sArgs.K8S_POD_NAME), 
string(k8sArgs.K8S_POD_NAMESPACE) + + resp, err := ipamClient.AllocIP(ctx, k8sArgs, conf.IPAM.Endpoint, masterMac, conf.InstanceType) + if err != nil { + log.Errorf(ctx, "failed to allocate IP: %v", err) + return nil, err + } + if !resp.IsSuccess { + msg := fmt.Sprintf("ipam server allocate IP error: %v", resp.ErrMsg) + log.Error(ctx, msg) + return nil, errors.New(msg) + } + + allocRespNetworkInfo := resp.GetENIMultiIP() + if allocRespNetworkInfo == nil { + err := errors.New(fmt.Sprintf("failed to allocate IP for pod (%v %v): NetworkInfo is nil", namespace, name)) + log.Errorf(ctx, err.Error()) + return nil, err + } + + log.Infof(ctx, "allocate IP %v, for pod(%v %v) successfully", allocRespNetworkInfo.IP, namespace, name) + + defer func() { + if err != nil { + _, err := ipamClient.ReleaseIP(ctx, k8sArgs, conf.IPAM.Endpoint, conf.InstanceType) + if err != nil { + log.Errorf(ctx, "rollback: failed to delete IP for pod (%v %v): %v", namespace, name, err) + } + } + }() + + addr := &netlink.Addr{IPNet: &net.IPNet{ + IP: net.ParseIP(allocRespNetworkInfo.IP), + Mask: masterMask, + }} + + err = p.nlink.AddrAdd(ipvlanInterfaceLink, addr) + if err != nil { + log.Errorf(ctx, "failed to add IP %v to device : %v", addr.String(), err) + return nil, err + } + + idx := ipvlanInterfaceLink.Attrs().Index + ruleSrc := &net.IPNet{ + IP: addr.IPNet.IP, + Mask: net.CIDRMask(32, 32), + } + + err = p.addFromRule(ruleSrc, 10000, rtStartIdx+idx) + if err != nil { + log.Errorf(ctx, "add from rule failed: %v", err) + return nil, err + } + log.Infof(ctx, "add rule table: %d,src ip: %s", rtStartIdx+idx, allocRespNetworkInfo.IP) + + _, cidr, err := net.ParseCIDR(addr.IPNet.String()) + if err != nil { + log.Errorf(ctx, "parse cidr:%s, failed: %s", addr.IPNet.String(), err.Error()) + return nil, err + } + + err = p.addRouteByCmd(ctx, cidr, ifName, ruleSrc.IP.String(), rtStartIdx+idx) + if err != nil { + log.Errorf(ctx, "add route failed: %s", err.Error()) + return nil, err + } + + return addr, nil 
+} + +func (p *eriPlugin) addFromRule(addr *net.IPNet, priority int, rtTable int) error { + rule := netlink.NewRule() + rule.Table = rtTable + rule.Priority = priority + rule.Src = addr // ip rule add from `addr` lookup `table` prio `xxx` + err := p.nlink.RuleDel(rule) + if err != nil && !netlinkwrapper.IsNotExistError(err) { + return err + } + + if err := p.nlink.RuleAdd(rule); err != nil { + return err + } + return nil +} + +func (p *eriPlugin) addRouteByCmd(ctx context.Context, dst *net.IPNet, ifName, srcIP string, rtable int) error { + strRoute := fmt.Sprintf("ip route add %s dev %s src %s table %d", dst.String(), ifName, srcIP, rtable) + log.Infof(ctx, "add route: %s", strRoute) + cmd := p.exec.Command("ip", "route", "add", dst.String(), "dev", ifName, + "src", srcIP, "table", strconv.Itoa(rtable)) + cmd.SetStdout(os.Stdout) + cmd.SetStderr(os.Stderr) + if err := cmd.Run(); err != nil { + return fmt.Errorf("add route failed: %v", err) + } + + return nil +} + +func (p *eriPlugin) disableRPFCheck(ctx context.Context, devNums int) error { + var errs []error + rpfDevs := []string{"all", "default"} + for idx := 0; idx < devNums; idx++ { + rpfDevs = append(rpfDevs, fmt.Sprintf("%s%d", roceDevicePrefix, idx+1)) + } + + for _, name := range rpfDevs { + if name != "" { + if _, err := p.sysctl.Sysctl(fmt.Sprintf(rpFilterSysctlTemplate, name), "0"); err != nil { + errs = append(errs, err) + log.Errorf(ctx, "failed to disable RP filter for interface %v: %v", name, err) + } + } + } + + for _, name := range []string{"all", "default"} { + if name != "" { + if _, err := p.sysctl.Sysctl(fmt.Sprintf(arpIgnoreSysctlTemplate, name), "0"); err != nil { + errs = append(errs, err) + log.Errorf(ctx, "failed to disable arp ignore for interface %v: %v", name, err) + } + } + } + + return utilerrors.NewAggregate(errs) +} + +func wantRoce(podNs, podName string, n *NetConf) (bool, error) { + client, err := newClient(n.KubeConfig) + if err != nil { + return false, fmt.Errorf("build k8s 
client error: %s", err.Error()) + } + + pod, err := client.CoreV1().Pods(podNs).Get(context.TODO(), podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + for _, container := range pod.Spec.Containers { + if hasRDMAResource(container.Resources.Limits) || hasRDMAResource(container.Resources.Requests) { + return true, nil + } + } + + return false, nil +} + +func hasRDMAResource(rl v1.ResourceList) bool { + for key, _ := range rl { + arr := strings.Split(string(key), "/") + if len(arr) != 2 { + continue + } + if arr[0] == resourceName { + return true + } + } + return false +} + +func newClient(kubeconfig string) (kubernetes.Interface, error) { + config, err := buildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, err + } + return k8sClientSet(config) +} + +func (p *eriPlugin) setUpHostVethRoute(ctx context.Context, master netlink.Link, eriAddr netlink.Addr, netns ns.NetNS) error { + addrs, err := p.nlink.AddrList(master, netlink.FAMILY_V4) + if err != nil { + return err + } + + if len(addrs) < 1 { + return fmt.Errorf("there is not ip for master,index: %d", master.Attrs().Index) + } + addrBits := 32 + vethHost, err := p.getVethHostInterface(netns) + if err != nil { + return err + } + + log.Infof(ctx, "add host veth route: src: %v,dst: %v", addrs[0].IP, eriAddr.IPNet.IP) + + err = p.nlink.RouteAdd(&netlink.Route{ + LinkIndex: vethHost.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: eriAddr.IPNet.IP, + Mask: net.CIDRMask(addrBits, addrBits), + }, + Src: addrs[0].IP, + }) + + if err != nil { + return fmt.Errorf("failed to add host route src: %v, dst: %v, error: %v", addrs[0].IP, eriAddr.IPNet.IP, err) + } + return err +} + +// get the veth peer of container interface in host namespace +func (p *eriPlugin) getVethHostInterface(netns ns.NetNS) (netlink.Link, error) { + var peerIndex int + var err error + _ = netns.Do(func(_ ns.NetNS) error { + linkList, err := p.nlink.LinkList() + if err != nil { + return err + } + 
for _, l := range linkList { + if l.Type() != "veth" { + continue + } + _, peerIndex, err = p.ip.GetVethPeerIfindex(l.Attrs().Name) + break + } + return nil + }) + if peerIndex <= 0 { + return nil, fmt.Errorf("has no veth peer: %v", err) + } + + // find host interface by index + link, err := p.nlink.LinkByIndex(peerIndex) + if err != nil { + return nil, fmt.Errorf("veth peer with index %d is not in host ns,error: %s", peerIndex, err.Error()) + } + + return link, nil +} + +func (p *eriPlugin) addRoute2IPVlanMaster(master netlink.Link, netns ns.NetNS) error { + addrs, err := p.nlink.AddrList(master, netlink.FAMILY_V4) + if err != nil { + return err + } + if len(addrs) < 1 { + return fmt.Errorf("there is not ip for master,index: %d", master.Attrs().Index) + } + err = netns.Do(func(_ ns.NetNS) error { + return p.addRoute2IPVlanMasterNetNS(addrs) + }) + return err +} + +func (p *eriPlugin) addRoute2IPVlanMasterNetNS(addrs []netlink.Addr) error { + routeToDstIP, err := p.nlink.RouteList(nil, netlink.FAMILY_ALL) + if err != nil { + return err + } + shouldAdd := true + for _, v := range routeToDstIP { + if v.Dst != nil && v.Dst.IP.String() == "169.254.1.1" { + //veth pair ptp mode + shouldAdd = false + break + } + } + + if shouldAdd { + linkList, err := p.nlink.LinkList() + if err != nil { + return err + } + for _, l := range linkList { + if l.Type() != "veth" { + continue + } + + for _, addr := range addrs { + err = p.nlink.RouteAdd(&netlink.Route{ + LinkIndex: l.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: addr.IP, + Mask: net.CIDRMask(32, 32), + }, + }) + if err != nil { + return err + } + } + break + } + + } + return nil +} diff --git a/cni/eri/eri_test.go b/cni/eri/eri_test.go new file mode 100644 index 0000000..b861321 --- /dev/null +++ b/cni/eri/eri_test.go @@ -0,0 +1,1443 @@ +/* + * Copyright (c) 2021 Baidu, Inc. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. + * + */ + +package main + +import ( + "context" + "errors" + "net" + "testing" + + rpcdef "github.com/baidubce/baiducloud-cce-cni-driver/pkg/rpc" + mockcbclient "github.com/baidubce/baiducloud-cce-cni-driver/pkg/rpc/testing" + log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" + networkutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/network" + mockutilnetwork "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/network/testing" + typeswrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/cnitypes" + mocktypes "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/cnitypes/testing" + grpcwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/grpc" + mockgrpc "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/grpc/testing" + ipwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ip" + mockip "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ip/testing" + ipamwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ipam" + mockipam "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ipam/testing" + netlinkwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/netlink" + mocknetlink "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/netlink/testing" + mocknetns "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/netns/testing" + nswrapper 
"github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ns" + mockns "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/ns/testing" + rpcwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/rpc" + mockrpc "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/rpc/testing" + sysctlwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/sysctl" + mocksysctl "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/sysctl/testing" + "github.com/containernetworking/cni/pkg/skel" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/golang/mock/gomock" + "github.com/vishvananda/netlink" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + "k8s.io/utils/exec" + utilexec "k8s.io/utils/exec" + fakeexec "k8s.io/utils/exec/testing" +) + +var ( + stdinData = ` +{ + "cniVersion":"0.3.1", + "name":"cce-cni", + "type":"eri", + "ipam":{ + "endpoint":"172.25.66.38:80" + } +}` + envArgs = `IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=busybox;K8S_POD_INFRA_CONTAINER_ID=xxxxx` +) + +func setupEnv(ctrl *gomock.Controller) ( + *mocknetlink.MockInterface, + *mockns.MockInterface, + *mockipam.MockInterface, + *mockip.MockInterface, + *mocktypes.MockInterface, + *mockutilnetwork.MockInterface, + *mockrpc.MockInterface, + *mockgrpc.MockInterface, + *mocksysctl.MockInterface, +) { + nlink := mocknetlink.NewMockInterface(ctrl) + ns := mockns.NewMockInterface(ctrl) + ipam := mockipam.NewMockInterface(ctrl) + ip := mockip.NewMockInterface(ctrl) + types := mocktypes.NewMockInterface(ctrl) + netutil := mockutilnetwork.NewMockInterface(ctrl) + rpc := mockrpc.NewMockInterface(ctrl) + grpc := mockgrpc.NewMockInterface(ctrl) + sysctl := mocksysctl.NewMockInterface(ctrl) + return nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl +} + +func Test_cmdDel(t *testing.T) 
{ + t.Log("test cmd del") + + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + } + type args struct { + args *skel.CmdArgs + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "正常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + allocReply := rpcdef.ReleaseIPReply{ + IsSuccess: true, + ErrMsg: "", + } + cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) + ns.EXPECT().WithNetNSPath(gomock.Any(), gomock.Any()).Return(nil) + grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) + rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient) + cniBackendClient.EXPECT().ReleaseIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, + { + name: "异常流程1", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + allocReply := rpcdef.ReleaseIPReply{ + IsSuccess: true, + ErrMsg: "", + } + cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) + ns.EXPECT().WithNetNSPath(gomock.Any(), gomock.Any()).Return(nil) + grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) + 
rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient) + cniBackendClient.EXPECT().ReleaseIP(gomock.Any(), gomock.Any()).Return(&allocReply, errors.New("release ip error")) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + { + name: "异常流程2", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + ns.EXPECT().WithNetNSPath(gomock.Any(), gomock.Any()).Return(errors.New("nspath error for cmd del unit testrelease")) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + { + name: "异常流程3", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + allocReply := rpcdef.ReleaseIPReply{ + IsSuccess: false, + ErrMsg: "", + } + cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) + ns.EXPECT().WithNetNSPath(gomock.Any(), gomock.Any()).Return(nil) + grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) + rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient) + cniBackendClient.EXPECT().ReleaseIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: 
grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + rpc: tt.fields.rpc, + grpc: tt.fields.grpc, + exec: tt.fields.exec, + sysctl: tt.fields.sysctl, + } + if err := p.cmdDel(tt.args.args); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.cmdDel() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_cmdAdd(t *testing.T) { + t.Log("test cmd add") + SetUPK8SClientEnv() + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + } + type args struct { + args *skel.CmdArgs + } + + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "正常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + fakeCmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return []byte("ens11"), nil, nil }, + }, + } + fakeExec := getFakeExecTemplate(&fakeCmd) + netns := mocknetns.NewMockNetNS(ctrl) + + nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() + nlink.EXPECT().LinkAdd(gomock.Any()).Return(nil) + nlink.EXPECT().AddrList(gomock.Any(), 
gomock.Any()).Return([]netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.IPv4(25, 0, 0, 45), + Mask: net.CIDRMask(24, 32), + }, + }, + }, nil).AnyTimes() + + ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil) + netns.EXPECT().Fd().Return(uintptr(10)) + netns.EXPECT().Do(gomock.Any()).Return(nil).AnyTimes() + netns.EXPECT().Close().Return(nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + exec: &fakeExec, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, + { + name: "异常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + fakeCmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { + return []byte("ens11"), nil, errors.New("get roce device error for unit test") + }, + }, + } + fakeExec := getFakeExecTemplate(&fakeCmd) + netns := mocknetns.NewMockNetNS(ctrl) + + nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() + nlink.EXPECT().AddrList(gomock.Any(), gomock.Any()).Return([]netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.IPv4(25, 0, 0, 45), + Mask: net.CIDRMask(24, 32), + }, + }, + }, nil).AnyTimes() + + //nlink.EXPECT().RuleDel(gomock.Any()).Return(nil) + ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil) + netns.EXPECT().Do(gomock.Any()).Return(nil).AnyTimes() + netns.EXPECT().Close().Return(nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + exec: &fakeExec, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: 
"/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + rpc: tt.fields.rpc, + grpc: tt.fields.grpc, + exec: tt.fields.exec, + sysctl: tt.fields.sysctl, + } + if err := p.cmdAdd(tt.args.args); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.cmdAdd() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func getFakeExecTemplate(fakeCmd *fakeexec.FakeCmd) fakeexec.FakeExec { + var fakeTemplate []fakeexec.FakeCommandAction + for i := 0; i < len(fakeCmd.CombinedOutputScript); i++ { + fakeTemplate = append(fakeTemplate, func(cmd string, args ...string) exec.Cmd { return fakeexec.InitFakeCmd(fakeCmd, cmd, args...) 
}) + } + return fakeexec.FakeExec{ + CommandScript: fakeTemplate, + } +} + +func Test_rocePlugin_setupIpvlanInterface(t *testing.T) { + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + netns *mocknetns.MockNetNS + } + + tests := []struct { + name string + fields fields + wantErr bool + }{ + { + name: "正常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, _, _, sysctl := setupEnv(ctrl) + + ip.EXPECT().RenameLink(gomock.Any(), gomock.Any()).Return(nil) + nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "roce0"}}, nil).AnyTimes() + netns := mocknetns.NewMockNetNS(ctrl) + netns.EXPECT().Path().Return("") + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + sysctl: sysctl, + netns: netns, + } + }(), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + sysctl: tt.fields.sysctl, + } + + ipvlan := ¤t.Interface{} + + iv := &netlink.IPVlan{ + Mode: netlink.IPVLAN_MODE_L3, + } + if err := p.setupIPvlanInterface(ipvlan, iv, "roce0", "roce0", tt.fields.netns); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.setupIPvlanInterface() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_rdmaPlugin_setupIPvlanNetworkInfo(t *testing.T) { + t.Log("test eriPlugin setupIPvlanNetworkInfo") + + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam 
ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + } + type args struct { + args *skel.CmdArgs + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "正常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) + + fakeCmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return []byte("ens11"), nil, nil }, + }, + RunScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return nil, nil, nil }, + }, + } + fakeExec := getFakeExecTemplate(&fakeCmd) + allocReply := rpcdef.AllocateIPReply{ + IsSuccess: true, + NetworkInfo: &rpcdef.AllocateIPReply_ENIMultiIP{ + ENIMultiIP: &rpcdef.ENIMultiIPReply{ + IP: "172.168.172.168", + Mac: "a2:37:b9:e8:ee:8f", + Gw: "172.168.172.1", + }, + }, + } + + grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) + rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient) + cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil) + nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() + nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil) + nlink.EXPECT().AddrAdd(gomock.Any(), gomock.Any()).Return(nil) + nlink.EXPECT().RuleDel(gomock.Any()).Return(nil) + nlink.EXPECT().RuleAdd(gomock.Any()).Return(nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + exec: &fakeExec, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: 
"/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, + { + name: "异常流程1", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) + + fakeCmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return []byte("ens11"), nil, nil }, + }, + } + fakeExec := getFakeExecTemplate(&fakeCmd) + allocReply := rpcdef.AllocateIPReply{ + IsSuccess: true, + NetworkInfo: &rpcdef.AllocateIPReply_ENIMultiIP{ + ENIMultiIP: &rpcdef.ENIMultiIPReply{ + IP: "172.168.172.168", + Mac: "a2:37:b9:e8:ee:8f", + Gw: "172.168.172.1", + }, + }, + } + + releaseReply := rpcdef.ReleaseIPReply{ + IsSuccess: true, + NetworkInfo: &rpcdef.ReleaseIPReply_ENIMultiIP{}, + } + + grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient).AnyTimes() + cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, nil) + cniBackendClient.EXPECT().ReleaseIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(&releaseReply, nil) + nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() + nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil) + nlink.EXPECT().AddrAdd(gomock.Any(), gomock.Any()).Return(errors.New("add addr failed")) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + exec: &fakeExec, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + { + name: 
"异常流程2", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + cniBackendClient := mockcbclient.NewMockCNIBackendClient(ctrl) + + fakeCmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { return []byte("ens11"), nil, nil }, + }, + } + fakeExec := getFakeExecTemplate(&fakeCmd) + allocReply := rpcdef.AllocateIPReply{ + IsSuccess: false, + NetworkInfo: &rpcdef.AllocateIPReply_ENIMultiIP{ + ENIMultiIP: &rpcdef.ENIMultiIPReply{ + IP: "172.168.172.168", + Mac: "a2:37:b9:e8:ee:8f", + Gw: "172.168.172.1", + }, + }, + } + + grpc.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).AnyTimes() + rpc.EXPECT().NewCNIBackendClient(gomock.Any()).Return(cniBackendClient).AnyTimes() + cniBackendClient.EXPECT().AllocateIP(gomock.Any(), gomock.Any()).Return(&allocReply, errors.New("allocate ip error")) + nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() + nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + exec: &fakeExec, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + rpc: tt.fields.rpc, + grpc: tt.fields.grpc, + exec: tt.fields.exec, + sysctl: tt.fields.sysctl, + } + ctx := log.NewContext() + n, _, err := 
loadConf(tt.args.args.StdinData) + if err != nil { + t.Errorf("loadConf error = %v", err) + } + macvlan := ¤t.Interface{} + k8sArgs, err := p.loadK8SArgs(tt.args.args.Args) + if err != nil { + t.Errorf("loadK8SArgs error = %v", err) + } + roceIpam := NewRoceIPAM(tt.fields.grpc, tt.fields.rpc) + masterMask := net.CIDRMask(24, 32) + if _, err := p.setupIPvlanNetworkInfo(ctx, n, "a2:37:b9:e8:ee:8f", masterMask, "roce0", macvlan, k8sArgs, roceIpam); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.cmdAdd() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_eriPlugin_delAllIPVlanDevices(t *testing.T) { + t.Log("test eriPlugin setupIPvlanNetworkInfo") + + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + } + type args struct { + args *skel.CmdArgs + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "正常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + ipVlanDev := &netlink.IPVlan{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: []byte{100, 100, 100, 100, 100, 100}, + }, + } + + ip.EXPECT().DelLinkByName(gomock.Any()).Return(nil) + nlink.EXPECT().LinkList().Return([]netlink.Link{ipVlanDev}, nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, + { + name: "异常流程", + fields: func() fields { + ctrl 
:= gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + ipVlanDev := &netlink.IPVlan{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: []byte{100, 100, 100, 100, 100, 100}, + }, + } + + ip.EXPECT().DelLinkByName(gomock.Any()).Return(errors.New("Delete Link By Name Error")) + nlink.EXPECT().LinkList().Return([]netlink.Link{ipVlanDev}, nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + rpc: tt.fields.rpc, + grpc: tt.fields.grpc, + exec: tt.fields.exec, + sysctl: tt.fields.sysctl, + } + + if err := p.delAllIPVlanDevices(); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.delAllIPVlanDevices() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_eriPlugin_addRoute2IPVlanMasterNetNS(t *testing.T) { + t.Log("test eriPlugin setupIPvlanNetworkInfo") + + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + } + type args struct { + args *skel.CmdArgs + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "正常流程", + fields: func() fields { + ctrl := 
gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + veth1 := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: []byte{100, 100, 100, 100, 100, 100}, + }, + } + nlink.EXPECT().LinkList().Return([]netlink.Link{veth1}, nil) + nlink.EXPECT().RouteAdd(gomock.Any()).Return(nil) + nlink.EXPECT().RouteList(gomock.Any(), gomock.Any()).Return([]netlink.Route{{ + LinkIndex: 2, + }, + }, nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, + { + name: "异常流程1", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + nlink.EXPECT().RouteList(gomock.Any(), gomock.Any()).Return([]netlink.Route{{ + LinkIndex: 2, + Dst: &net.IPNet{ + IP: net.IPv4(169, 254, 1, 1), + Mask: net.CIDRMask(24, 32), + }, + }, + }, nil) + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + rpc: tt.fields.rpc, + grpc: tt.fields.grpc, + exec: tt.fields.exec, + sysctl: tt.fields.sysctl, + } + addr, _ := 
netlink.ParseAddr("192.168.12.12/32") + if err := p.addRoute2IPVlanMasterNetNS([]netlink.Addr{*addr}); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.addRoute2IPVlanMasterNetNS() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_eriPlugin_disableRPFCheck(t *testing.T) { + t.Log("test eriPlugin disableRPFCheck") + + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + } + type args struct { + args *skel.CmdArgs + } + tests := []struct { + name string + fields fields + wantErr bool + }{ + { + name: "正常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + sysctl.EXPECT().Sysctl(gomock.Any(), gomock.Any()).Return("", nil).AnyTimes() + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + wantErr: false, + }, + { + name: "异常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + sysctl.EXPECT().Sysctl(gomock.Any(), gomock.Any()).Return("", errors.New("apply sysctl error")).AnyTimes() + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + 
rpc: tt.fields.rpc, + grpc: tt.fields.grpc, + exec: tt.fields.exec, + sysctl: tt.fields.sysctl, + } + ctx := log.NewContext() + if err := p.disableRPFCheck(ctx, 1); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.disableRPFCheck() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_modeFromString(t *testing.T) { + t.Log("test cmd modeFromString") + + _, err := modeFromString("l2") + if err != nil { + t.Error("modeFromString failed") + } + _, err = modeFromString("l3") + if err != nil { + t.Error("modeFromString failed") + } + _, err = modeFromString("l3s") + if err != nil { + t.Error("modeFromString failed") + } + _, err = modeFromString("error") + if err == nil { + t.Error("modeFromString failed") + } +} + +func Test_loadConf(t *testing.T) { + t.Log("test loadConf") + + type fields struct { + conf []byte + } + + tests := []struct { + name string + fields fields + wantErr bool + }{ + { + name: "流程流程1", + fields: func() fields { + stdinData = ` + { + "cniVersion":"0.3.1", + "name":"cce-cni", + "type":"eri" + }` + return fields{ + conf: []byte(stdinData), + } + }(), + wantErr: true, + }, + { + name: "流程流程2", + fields: func() fields { + stdinData = ` + { + "cniVersion":"0.3.1", + "name":"cce-cni", + "type":"eri", + "ipam":{ + } + }` + return fields{ + conf: []byte(stdinData), + } + }(), + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if _, _, err := loadConf(tt.fields.conf); (err != nil) != tt.wantErr { + t.Errorf("loadConf error = %v", err) + } + }) + } +} + +func Test_eriPlugin_setUpHostVethRoute(t *testing.T) { + t.Log("test eriPlugin setUpHostVethRoute") + + type fields struct { + ctrl *gomock.Controller + nlink netlinkwrapper.Interface + ns nswrapper.Interface + ipam ipamwrapper.Interface + ip ipwrapper.Interface + types typeswrapper.Interface + netutil networkutil.Interface + rpc rpcwrapper.Interface + grpc grpcwrapper.Interface + exec utilexec.Interface + sysctl sysctlwrapper.Interface + } + 
type args struct { + args *skel.CmdArgs + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + { + name: "异常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + nlink.EXPECT().AddrList(gomock.Any(), gomock.Any()).Return([]netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.IPv4(25, 0, 0, 45), + Mask: net.CIDRMask(24, 32), + }, + }, + }, nil).AnyTimes() + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + { + name: "异常流程1", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + nlink.EXPECT().AddrList(gomock.Any(), gomock.Any()).Return([]netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.IPv4(25, 0, 0, 45), + Mask: net.CIDRMask(24, 32), + }, + }, + }, nil).AnyTimes() + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + p := &eriPlugin{ + nlink: tt.fields.nlink, + ns: tt.fields.ns, + ipam: tt.fields.ipam, + ip: tt.fields.ip, + types: tt.fields.types, + netutil: tt.fields.netutil, + rpc: tt.fields.rpc, + grpc: tt.fields.grpc, + exec: tt.fields.exec, + sysctl: tt.fields.sysctl, 
+ } + + ctrl := gomock.NewController(t) + netns := mocknetns.NewMockNetNS(ctrl) + netns.EXPECT().Do(gomock.Any()).Return(nil).AnyTimes() + + master := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + HardwareAddr: []byte{100, 100, 100, 100, 100, 100}, + }, + } + + addr, _ := netlink.ParseAddr("192.168.12.12/32") + ctx := log.NewContext() + if err := p.setUpHostVethRoute(ctx, master, *addr, netns); (err != nil) != tt.wantErr { + t.Errorf("eriPlugin.setUpHostVethRoute() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_cmdCheck(t *testing.T) { + t.Log("test cmd check") + p := newERIPlugin() + if err := p.cmdCheck(&skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }); err != nil { + t.Error("cmdCheck failed") + } +} + +func TestNewERIPlugin(t *testing.T) { + t.Log("test cmd eri plugin") + initFlags() + p := newERIPlugin() + if p == nil { + t.Error("newERIPlugin returns nil") + } +} + +func SetUPK8SClientEnv() { + testConfig := &rest.Config{ + Host: "testHost", + APIPath: "api", + ContentConfig: rest.ContentConfig{}, + Impersonate: rest.ImpersonationConfig{}, + TLSClientConfig: rest.TLSClientConfig{}, + } + + //defer func() { buildConfigFromFlags = origNewKubernetesClientSet }() + buildConfigFromFlags = func(masterUrl, kubeconfigPath string) (config *rest.Config, err error) { + return testConfig, nil + } + + k8sClientSet = func(c *rest.Config) (kubernetes.Interface, error) { + kubeClient := k8sfake.NewSimpleClientset() + _, _ = kubeClient.CoreV1().Pods(v1.NamespaceDefault).Create(context.TODO(), &v1.Pod{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Name: "main", + Image: "python:3.8", + Command: []string{"python"}, + Args: []string{"-c", "print('hello world')"}, + Resources: getResourceRequirements(), + }, + }, + }, + }, 
metav1.CreateOptions{}) + return kubeClient, nil + } +} + +func getResourceRequirements() v1.ResourceRequirements { + res := v1.ResourceRequirements{} + requests := v1.ResourceList{} + limits := v1.ResourceList{} + requests["rdma/roce"] = resource.MustParse("1") + limits["rdma/roce"] = resource.MustParse("1") + res.Requests = requests + res.Limits = limits + return res +} diff --git a/cni/eri/ipam_client.go b/cni/eri/ipam_client.go new file mode 100644 index 0000000..3fad353 --- /dev/null +++ b/cni/eri/ipam_client.go @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2021 Baidu, Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * + */ + +package main + +import ( + "context" + "time" + + "google.golang.org/grpc" + + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/cni" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/rpc" + log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" + grpcwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/grpc" + rpcwrapper "github.com/baidubce/baiducloud-cce-cni-driver/pkg/wrapper/rpc" +) + +const ( + rpcTimeout = 90 * time.Second +) + +type roceIPAM struct { + gRPCClient grpcwrapper.Interface + rpc rpcwrapper.Interface +} + +func NewRoceIPAM(grpc grpcwrapper.Interface, rpc rpcwrapper.Interface) *roceIPAM { + return &roceIPAM{ + gRPCClient: grpc, + rpc: rpc, + } +} + +func (ipam *roceIPAM) AllocIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpoint string, masterMac string, instanceType string) (*rpc.AllocateIPReply, error) { + ctx, cancel := context.WithTimeout(ctx, rpcTimeout) + defer cancel() + + conn, err := ipam.gRPCClient.DialContext(ctx, endpoint, grpc.WithInsecure()) + if err != nil { + log.Errorf(ctx, "failed to connect to ipam server on %v: %v", endpoint, err) + return nil, err + } + defer func() { + if conn != nil { + conn.Close() + } + }() + + requestIPType := rpc.IPType_ERIENIMultiIPType + if instanceType == "bbc" { + requestIPType = rpc.IPType_RoceENIMultiIPType + } + + c := ipam.rpc.NewCNIBackendClient(conn) + + resp, err := c.AllocateIP(ctx, &rpc.AllocateIPRequest{ + K8SPodName: string(k8sArgs.K8S_POD_NAME), + K8SPodNamespace: string(k8sArgs.K8S_POD_NAMESPACE), + K8SPodInfraContainerID: string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), + IPType: requestIPType, + NetworkInfo: &rpc.AllocateIPRequest_ENIMultiIP{ + ENIMultiIP: &rpc.ENIMultiIPRequest{ + Mac: masterMac, + }, + }, + }) + if err != nil { + log.Errorf(ctx, "failed to allocate ip from cni backend: %v", err) + return nil, err + } + log.Infof(ctx, "allocate ip response body: %v", resp.String()) + return resp, nil +} + +func (ipam *roceIPAM) ReleaseIP(ctx 
context.Context, k8sArgs *cni.K8SArgs, endpoint string, instanceType string) (*rpc.ReleaseIPReply, error) { + ctx, cancel := context.WithTimeout(ctx, rpcTimeout) + defer cancel() + + conn, err := ipam.gRPCClient.DialContext(ctx, endpoint, grpc.WithInsecure()) + if err != nil { + log.Errorf(ctx, "failed to connect to ipam server on %v: %v", endpoint, err) + return nil, err + } + defer func() { + if conn != nil { + conn.Close() + } + }() + + requestIPType := rpc.IPType_ERIENIMultiIPType + if instanceType == "bbc" { + requestIPType = rpc.IPType_RoceENIMultiIPType + } + c := ipam.rpc.NewCNIBackendClient(conn) + + resp, err := c.ReleaseIP(ctx, &rpc.ReleaseIPRequest{ + K8SPodName: string(k8sArgs.K8S_POD_NAME), + K8SPodNamespace: string(k8sArgs.K8S_POD_NAMESPACE), + K8SPodInfraContainerID: string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), + IPType: requestIPType, + }) + if err != nil { + log.Errorf(ctx, "failed to release ip from cni backend: %v", err) + return nil, err + } + log.Infof(ctx, "release ip response body: %v", resp.String()) + + return resp, nil +} diff --git a/cni/macvlan/macvlan.go b/cni/macvlan/macvlan.go index bbb9e68..10b80c3 100644 --- a/cni/macvlan/macvlan.go +++ b/cni/macvlan/macvlan.go @@ -14,16 +14,10 @@ import ( "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/cni/pkg/version" - "github.com/containernetworking/plugins/pkg/ip" "github.com/containernetworking/plugins/pkg/ipam" "github.com/containernetworking/plugins/pkg/ns" bv "github.com/containernetworking/plugins/pkg/utils/buildversion" - "github.com/containernetworking/plugins/pkg/utils/sysctl" -) - -const ( - IPv4InterfaceArpProxySysctlTemplate = "net.ipv4.conf.%s.proxy_arp" ) type NetConf struct { @@ -158,14 +152,6 @@ func createMacvlan(conf *NetConf, ifName string, netns ns.NetNS) (*current.Inter } err = netns.Do(func(_ ns.NetNS) error { - // TODO: duplicate following lines for ipv6 support, when it will be added in 
other places - ipv4SysctlValueName := fmt.Sprintf(IPv4InterfaceArpProxySysctlTemplate, tmpName) - if _, err := sysctl.Sysctl(ipv4SysctlValueName, "1"); err != nil { - // remove the newly added link and ignore errors, because we already are in a failed state - _ = netlink.LinkDel(mv) - return fmt.Errorf("failed to set proxy_arp on newly added interface %q: %v", tmpName, err) - } - err := ip.RenameLink(tmpName, ifName) if err != nil { _ = netlink.LinkDel(mv) diff --git a/cni/rdma/ipam_client.go b/cni/rdma/ipam_client.go index 9fe6a12..3fad353 100644 --- a/cni/rdma/ipam_client.go +++ b/cni/rdma/ipam_client.go @@ -44,7 +44,7 @@ func NewRoceIPAM(grpc grpcwrapper.Interface, rpc rpcwrapper.Interface) *roceIPAM } } -func (ipam *roceIPAM) AllocIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpoint string, masterMac string) (*rpc.AllocateIPReply, error) { +func (ipam *roceIPAM) AllocIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpoint string, masterMac string, instanceType string) (*rpc.AllocateIPReply, error) { ctx, cancel := context.WithTimeout(ctx, rpcTimeout) defer cancel() @@ -59,13 +59,18 @@ func (ipam *roceIPAM) AllocIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpoin } }() + requestIPType := rpc.IPType_ERIENIMultiIPType + if instanceType == "bbc" { + requestIPType = rpc.IPType_RoceENIMultiIPType + } + c := ipam.rpc.NewCNIBackendClient(conn) resp, err := c.AllocateIP(ctx, &rpc.AllocateIPRequest{ K8SPodName: string(k8sArgs.K8S_POD_NAME), K8SPodNamespace: string(k8sArgs.K8S_POD_NAMESPACE), K8SPodInfraContainerID: string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), - IPType: rpc.IPType_RoceENIMultiIPType, + IPType: requestIPType, NetworkInfo: &rpc.AllocateIPRequest_ENIMultiIP{ ENIMultiIP: &rpc.ENIMultiIPRequest{ Mac: masterMac, @@ -80,7 +85,7 @@ func (ipam *roceIPAM) AllocIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpoin return resp, nil } -func (ipam *roceIPAM) ReleaseIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpoint string) (*rpc.ReleaseIPReply, error) { +func 
(ipam *roceIPAM) ReleaseIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpoint string, instanceType string) (*rpc.ReleaseIPReply, error) { ctx, cancel := context.WithTimeout(ctx, rpcTimeout) defer cancel() @@ -95,13 +100,17 @@ func (ipam *roceIPAM) ReleaseIP(ctx context.Context, k8sArgs *cni.K8SArgs, endpo } }() + requestIPType := rpc.IPType_ERIENIMultiIPType + if instanceType == "bbc" { + requestIPType = rpc.IPType_RoceENIMultiIPType + } c := ipam.rpc.NewCNIBackendClient(conn) resp, err := c.ReleaseIP(ctx, &rpc.ReleaseIPRequest{ K8SPodName: string(k8sArgs.K8S_POD_NAME), K8SPodNamespace: string(k8sArgs.K8S_POD_NAMESPACE), K8SPodInfraContainerID: string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), - IPType: rpc.IPType_RoceENIMultiIPType, + IPType: requestIPType, }) if err != nil { log.Errorf(ctx, "failed to release ip from cni backend: %v", err) diff --git a/cni/rdma/rdma.go b/cni/rdma/rdma.go index 3e8f4cb..4aef41a 100644 --- a/cni/rdma/rdma.go +++ b/cni/rdma/rdma.go @@ -76,10 +76,11 @@ var k8sClientSet = func(c *rest.Config) (kubernetes.Interface, error) { type NetConf struct { types.NetConf - Mode string `json:"mode"` - KubeConfig string `json:"kubeconfig"` - Mask int `json:"mask"` - IPAM *IPAMConf `json:"ipam,omitempty"` + Mode string `json:"mode"` + KubeConfig string `json:"kubeconfig"` + Mask int `json:"mask"` + InstanceType string `json:"instanceType"` + IPAM *IPAMConf `json:"ipam,omitempty"` } type IPAMConf struct { @@ -240,7 +241,7 @@ func (p *rdmaPlugin) cmdDel(args *skel.CmdArgs) error { } ipamClient := NewRoceIPAM(p.grpc, p.rpc) - resp, err := ipamClient.ReleaseIP(ctx, k8sArgs, n.IPAM.Endpoint) + resp, err := ipamClient.ReleaseIP(ctx, k8sArgs, n.IPAM.Endpoint, n.InstanceType) if err != nil { msg := fmt.Sprintf("failed to delete IP for pod (%v %v): %v", k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_NAME, err) log.Error(ctx, msg) @@ -450,7 +451,7 @@ func (p *rdmaPlugin) setupMacvlan(ctx context.Context, conf *NetConf, master, if if err != nil { return err } - 
log.Infof(ctx, "create macvlan dev: %s successfully,master:%s", ifName, master) + log.Infof(ctx, "create macvlan dev: %s ,mac: %s successfully,master:%s", ifName, macvlanInterface.Mac, master) defer func() { if err != nil { @@ -553,7 +554,7 @@ func (p *rdmaPlugin) setupMacvlanNetworkInfo(ctx context.Context, conf *NetConf, } name, namespace := string(k8sArgs.K8S_POD_NAME), string(k8sArgs.K8S_POD_NAMESPACE) - resp, err := ipamClient.AllocIP(ctx, k8sArgs, conf.IPAM.Endpoint, masterMac) + resp, err := ipamClient.AllocIP(ctx, k8sArgs, conf.IPAM.Endpoint, masterMac, conf.InstanceType) if err != nil { log.Errorf(ctx, "failed to allocate IP: %v", err) return err @@ -575,7 +576,7 @@ func (p *rdmaPlugin) setupMacvlanNetworkInfo(ctx context.Context, conf *NetConf, defer func() { if err != nil { - _, err := ipamClient.ReleaseIP(ctx, k8sArgs, conf.IPAM.Endpoint) + _, err := ipamClient.ReleaseIP(ctx, k8sArgs, conf.IPAM.Endpoint, conf.InstanceType) if err != nil { log.Errorf(ctx, "rollback: failed to delete IP for pod (%v %v): %v", namespace, name, err) } @@ -606,6 +607,13 @@ func (p *rdmaPlugin) setupMacvlanNetworkInfo(ctx context.Context, conf *NetConf, } log.Infof(ctx, "add rule table: %d,src ip: %s", rtStartIdx+idx, allocRespNetworkInfo.IP) + err = p.addOIFRule(ifName, 10000, rtStartIdx+idx) + if err != nil { + log.Errorf(ctx, "add from rule failed: %v", err) + return err + } + log.Infof(ctx, "add rule table: %d,oif: %s", rtStartIdx+idx, ifName) + _, cidr, err := net.ParseCIDR(addr.IPNet.String()) if err != nil { log.Errorf(ctx, "parse cidr:%s, failed: %s", addr.IPNet.String(), err.Error()) @@ -645,6 +653,22 @@ func (p *rdmaPlugin) addFromRule(addr *net.IPNet, priority int, rtTable int) err return nil } +func (p *rdmaPlugin) addOIFRule(oifName string, priority int, rtTable int) error { + rule := netlink.NewRule() + rule.Table = rtTable + rule.Priority = priority + rule.OifName = oifName // ip rule add oif `oifName` lookup `table` prio `xxx` + err := p.nlink.RuleDel(rule) + 
if err != nil && !netlinkwrapper.IsNotExistError(err) { + return err + } + + if err := p.nlink.RuleAdd(rule); err != nil { + return err + } + return nil +} + func (p *rdmaPlugin) addRoute(idx, rtTable int, gw, src net.IP, dst *net.IPNet) error { ro := &netlink.Route{ LinkIndex: idx, @@ -674,6 +698,16 @@ func (p *rdmaPlugin) addRouteByCmd(ctx context.Context, dst *net.IPNet, ifName, return fmt.Errorf("add route failed: %v", err) } + strDefaultRoute := fmt.Sprintf("ip route add default dev %s via %s src %s table %d onlink", ifName, gw, srcIP, rtable) + log.Infof(ctx, "add route: %s", strDefaultRoute) + defaultCmd := p.exec.Command("ip", "route", "add", "default", "dev", ifName, + "via", gw, "src", srcIP, "table", strconv.Itoa(rtable), "onlink") + defaultCmd.SetStdout(os.Stdout) + defaultCmd.SetStderr(os.Stderr) + if err := defaultCmd.Run(); err != nil { + return fmt.Errorf("add default route failed: %v", err) + } + return nil } diff --git a/cni/rdma/rdma_test.go b/cni/rdma/rdma_test.go index 84a7454..8420914 100644 --- a/cni/rdma/rdma_test.go +++ b/cni/rdma/rdma_test.go @@ -60,14 +60,14 @@ import ( var ( stdinData = ` -{ - "cniVersion":"0.3.1", - "name":"cce-cni", - "type":"rdma", - "ipam":{ - "endpoint":"172.25.66.38:80" - } -}` + { + "cniVersion":"0.3.1", + "name":"cce-cni", + "type":"rdma", + "ipam":{ + "endpoint":"172.25.66.38:80" + } + }` envArgs = `IgnoreUnknown=1;K8S_POD_NAMESPACE=default;K8S_POD_NAME=busybox;K8S_POD_INFRA_CONTAINER_ID=xxxxx` ) @@ -161,7 +161,7 @@ func Test_cmdDel(t *testing.T) { wantErr: false, }, { - name: "异常流程", + name: "异常流程1", fields: func() fields { ctrl := gomock.NewController(t) nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) @@ -201,6 +201,39 @@ func Test_cmdDel(t *testing.T) { }, wantErr: true, }, + { + name: "异常流程2", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + ns.EXPECT().WithNetNSPath(gomock.Any(), 
gomock.Any()).Return(errors.New("nspath error for cmd del unit testrelease")) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -323,6 +356,63 @@ func Test_cmdAdd(t *testing.T) { }, wantErr: false, }, + { + name: "异常流程", + fields: func() fields { + ctrl := gomock.NewController(t) + nlink, ns, ipam, ip, types, netutil, rpc, grpc, sysctl := setupEnv(ctrl) + + fakeCmd := fakeexec.FakeCmd{ + CombinedOutputScript: []fakeexec.FakeAction{ + func() ([]byte, []byte, error) { + return []byte("ens11"), nil, errors.New("get roce device error for unit test") + }, + }, + } + fakeExec := getFakeExecTemplate(&fakeCmd) + netns := mocknetns.NewMockNetNS(ctrl) + + nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() + nlink.EXPECT().AddrList(gomock.Any(), gomock.Any()).Return([]netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.IPv4(25, 0, 0, 45), + Mask: net.CIDRMask(24, 32), + }, + }, + }, nil).AnyTimes() + + //nlink.EXPECT().RuleDel(gomock.Any()).Return(nil) + ns.EXPECT().GetNS(gomock.Any()).Return(netns, nil) + netns.EXPECT().Do(gomock.Any()).Return(nil).AnyTimes() + netns.EXPECT().Close().Return(nil) + + return fields{ + ctrl: ctrl, + nlink: nlink, + ns: ns, + ipam: ipam, + ip: ip, + types: types, + netutil: netutil, + rpc: rpc, + grpc: grpc, + exec: &fakeExec, + sysctl: sysctl, + } + }(), + args: args{ + args: &skel.CmdArgs{ + ContainerID: "xxxx", + Netns: "/proc/100/ns/net", + IfName: "eth0", + Args: envArgs, + Path: "/opt/cin/bin", + StdinData: []byte(stdinData), + }, + }, + wantErr: false, + }, } for _, tt := 
range tests { t.Run(tt.name, func(t *testing.T) { @@ -465,9 +555,11 @@ func Test_rdmaPlugin_setupMacvlanNetworkInfo(t *testing.T) { fakeCmd := fakeexec.FakeCmd{ CombinedOutputScript: []fakeexec.FakeAction{ func() ([]byte, []byte, error) { return []byte("ens11"), nil, nil }, + func() ([]byte, []byte, error) { return []byte("ens12"), nil, nil }, }, RunScript: []fakeexec.FakeAction{ func() ([]byte, []byte, error) { return nil, nil, nil }, + func() ([]byte, []byte, error) { return nil, nil, nil }, }, } fakeExec := getFakeExecTemplate(&fakeCmd) @@ -488,8 +580,8 @@ func Test_rdmaPlugin_setupMacvlanNetworkInfo(t *testing.T) { nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil) nlink.EXPECT().AddrAdd(gomock.Any(), gomock.Any()).Return(nil) - nlink.EXPECT().RuleDel(gomock.Any()).Return(nil) - nlink.EXPECT().RuleAdd(gomock.Any()).Return(nil) + nlink.EXPECT().RuleDel(gomock.Any()).Return(nil).AnyTimes() + nlink.EXPECT().RuleAdd(gomock.Any()).Return(nil).AnyTimes() netutil.EXPECT().InterfaceByName(gomock.Any()).Return(&net.Interface{}, nil) netutil.EXPECT().GratuitousArpOverIface(gomock.Any(), gomock.Any()).Return(nil) @@ -651,9 +743,11 @@ func Test_rdmaPlugin_setupMacvlanNetworkInfo(t *testing.T) { fakeCmd := fakeexec.FakeCmd{ CombinedOutputScript: []fakeexec.FakeAction{ func() ([]byte, []byte, error) { return []byte("ens11"), nil, nil }, + func() ([]byte, []byte, error) { return []byte("ens12"), nil, nil }, }, RunScript: []fakeexec.FakeAction{ func() ([]byte, []byte, error) { return nil, nil, nil }, + func() ([]byte, []byte, error) { return nil, nil, nil }, }, } fakeExec := getFakeExecTemplate(&fakeCmd) @@ -679,8 +773,8 @@ func Test_rdmaPlugin_setupMacvlanNetworkInfo(t *testing.T) { nlink.EXPECT().LinkByName(gomock.Any()).Return(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "ens11"}}, nil).AnyTimes() 
nlink.EXPECT().LinkSetUp(gomock.Any()).Return(nil) nlink.EXPECT().AddrAdd(gomock.Any(), gomock.Any()).Return(nil) - nlink.EXPECT().RuleDel(gomock.Any()).Return(nil) - nlink.EXPECT().RuleAdd(gomock.Any()).Return(nil) + nlink.EXPECT().RuleDel(gomock.Any()).Return(nil).AnyTimes() + nlink.EXPECT().RuleAdd(gomock.Any()).Return(nil).AnyTimes() netutil.EXPECT().InterfaceByName(gomock.Any()).Return(&net.Interface{}, errors.New("get interface by name error")) //netutil.EXPECT().GratuitousArpOverIface(gomock.Any(), gomock.Any()).Return(nil) diff --git a/go.mod b/go.mod index a3e08a2..c0eaa97 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/baidubce/baiducloud-cce-cni-driver -go 1.16 +go 1.18 require ( github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae @@ -18,9 +18,8 @@ require ( github.com/j-keck/arping v1.0.1 github.com/juju/ratelimit v1.0.1 github.com/prometheus/client_golang v1.12.1 - github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/satori/go.uuid v1.2.0 - github.com/spf13/cobra v1.0.0 + github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.5 github.com/vishvananda/netlink v1.1.0 @@ -31,7 +30,7 @@ require ( k8s.io/client-go v11.0.0+incompatible k8s.io/component-base v0.24.2 k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.0.0 + k8s.io/klog/v2 v2.4.0 k8s.io/kubectl v0.0.0 k8s.io/kubernetes v0.0.0-00010101000000-000000000000 k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 @@ -39,32 +38,94 @@ require ( sigs.k8s.io/controller-runtime v0.6.5 ) +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver v3.5.1+incompatible // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/coreos/go-iptables v0.4.5 // indirect + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 // indirect + github.com/go-logr/logr v0.2.0 // indirect + github.com/gogo/protobuf v1.3.2 
// indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/google/go-cmp v0.5.5 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.2 // indirect + github.com/googleapis/gnostic v0.4.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/imdario/mergo v0.3.9 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/lithammer/dedent v1.1.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/selinux v1.6.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 // indirect + github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect + github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 // indirect + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.10.0 // indirect + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect + golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect + golang.org/x/text v0.3.6 // indirect + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect + gomodules.xyz/jsonpatch/v2 v2.0.1 // indirect + google.golang.org/appengine v1.6.6 // indirect + google.golang.org/genproto 
v0.0.0-20201110150050-8816d57aaa9a // indirect + google.golang.org/protobuf v1.26.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.20.9 // indirect + k8s.io/apiserver v0.20.15 // indirect + k8s.io/cloud-provider v0.20.15 // indirect + k8s.io/controller-manager v0.20.15 // indirect + k8s.io/cri-api v0.0.0 // indirect + k8s.io/kube-openapi v0.0.0-20211110013926-83f114cd0513 // indirect + k8s.io/mount-utils v0.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect +) + replace ( github.com/go-logr/logr => github.com/go-logr/logr v0.4.0 github.com/go-logr/zapr => github.com/go-logr/zapr v0.4.0 - github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.1.0 + // github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.3.1 github.com/im7mortal/kmutex => github.com/im7mortal/kmutex v1.0.2-0.20211009180904-795f0d162683 google.golang.org/grpc v1.30.0 => google.golang.org/grpc v1.29.1 - k8s.io/api => k8s.io/api v0.18.9 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.9 - k8s.io/apimachinery => k8s.io/apimachinery v0.18.9 - k8s.io/apiserver => k8s.io/apiserver v0.18.9 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.9 - k8s.io/client-go => k8s.io/client-go v0.18.9 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.9 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.9 - k8s.io/code-generator => k8s.io/code-generator v0.18.9 - k8s.io/component-base => k8s.io/component-base v0.18.9 - k8s.io/cri-api => k8s.io/cri-api v0.18.9 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.9 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.9 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.9 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.9 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.9 - k8s.io/kubectl => 
k8s.io/kubectl v0.18.9 - k8s.io/kubelet => k8s.io/kubelet v0.18.9 - k8s.io/kubernetes => k8s.io/kubernetes v1.18.9 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.9 - k8s.io/metrics => k8s.io/metrics v0.18.9 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.9 + k8s.io/api => k8s.io/api v0.20.15 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.15 + k8s.io/apimachinery => k8s.io/apimachinery v0.20.15 + k8s.io/apiserver => k8s.io/apiserver v0.20.15 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.15 + k8s.io/client-go => k8s.io/client-go v0.20.15 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.15 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.15 + k8s.io/code-generator => k8s.io/code-generator v0.20.15 + k8s.io/component-base => k8s.io/component-base v0.20.15 + k8s.io/component-helpers => k8s.io/component-helpers v0.20.15 + k8s.io/controller-manager => k8s.io/controller-manager v0.20.15 + k8s.io/cri-api => k8s.io/cri-api v0.20.15 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.15 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.15 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.15 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.15 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.15 + k8s.io/kubectl => k8s.io/kubectl v0.20.15 + k8s.io/kubelet => k8s.io/kubelet v0.20.15 + k8s.io/kubernetes => k8s.io/kubernetes v1.20.15 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.15 + k8s.io/metrics => k8s.io/metrics v0.20.15 + k8s.io/mount-utils => k8s.io/mount-utils v0.20.15 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.15 ) diff --git a/go.sum b/go.sum index 13413c1..5280968 100644 --- a/go.sum +++ b/go.sum @@ -7,6 +7,7 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go 
v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -22,6 +23,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -32,37 +34,34 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest/autorest 
v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -73,48 +72,44 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae h1:AMzIhMUqU3jMrZiTuW0zkYeKlKDAFD+DG20IoO421/Y= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= github.com/baidubce/bce-sdk-go v0.9.117 h1:0uV+5HRJ4fi4Cm0quVEq8TJ3ewC0aOCKnaL+cjp+xmE= github.com/baidubce/bce-sdk-go v0.9.117/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= -github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0= -github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= -github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= +github.com/bketelsen/crypt 
v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod 
h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -122,32 +117,38 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod 
h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.7 h1:bU7QieuAp+sACI2vCzESJ3FoT860urYP+lThyZkb/2M= github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0= -github.com/coredns/corefile-migration v1.0.6/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= +github.com/coredns/corefile-migration v1.0.10/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -162,20 +163,21 @@ github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go 
v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -189,28 +191,26 @@ github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCv github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt 
v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -218,7 +218,6 @@ github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -227,12 +226,10 @@ github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2 github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod 
h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= @@ -245,7 +242,6 @@ github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCs github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= @@ -254,7 +250,6 @@ github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pL github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -264,26 +259,13 @@ github.com/go-openapi/validate v0.19.2/go.mod 
h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod 
h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -292,7 +274,6 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -302,7 +283,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock 
v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -320,29 +300,12 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= -github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= -github.com/golangci/gosec 
v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= -github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= +github.com/google/cadvisor v0.38.8/go.mod h1:1OFB9sOOMkBdUBGCO/1SArawTnDscgMzTodacVDe8mA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -350,11 +313,10 @@ github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -374,33 +336,46 @@ github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.1.0/go.mod 
h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod 
h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= 
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -412,17 +387,17 @@ github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/j-keck/arping v1.0.1 h1:XrO9juQieAQHE7DlwT7zFLUK2u3Oi/4Uz2B3ZTxvhxg= github.com/j-keck/arping v1.0.1/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -435,22 +410,20 @@ github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSg github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -459,16 +432,13 @@ github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod 
h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -478,25 +448,29 @@ github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwm github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= 
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -506,8 +480,7 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= @@ -516,8 +489,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -531,21 +502,23 @@ github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 
h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.6.0 h1:+bIAS/Za3q5FTwWym4fTB0vObnfCf3G/NC7K6Jx62mY= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.1.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -554,8 +527,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -575,41 +548,38 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= +github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rubiojr/go-vhd 
v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= +github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= @@ -618,20 +588,15 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman 
v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -639,10 +604,9 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -652,47 +616,43 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q= github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod 
h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= -github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -708,27 +668,22 @@ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= -golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -765,16 +720,14 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -784,7 +737,6 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -810,18 +762,18 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net 
v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -830,17 +782,16 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -850,27 +801,31 @@ golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -881,7 +836,11 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201110211018-35f3e6cf4a65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -892,54 +851,45 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -959,12 +909,15 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -980,13 +933,13 @@ gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api 
v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= @@ -1016,6 +969,7 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1031,8 +985,9 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY 
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1074,10 +1029,10 @@ gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1094,65 +1049,67 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.18.9 h1:7VDtivqwbvLOf8hmXSd/PDSSbpCBq49MELg84EYBYiQ= -k8s.io/api v0.18.9/go.mod h1:9u/h6sUh6FxfErv7QqetX1EB3yBMIYOBXzdcf0Gf0rc= -k8s.io/apiextensions-apiserver v0.18.9 h1:tVEf8rVKh5BnXORnYYCztjbf6CSyGNMt/rAIEyfU00Q= 
-k8s.io/apiextensions-apiserver v0.18.9/go.mod h1:JagmAhU0TVENzgUZqHJsjCSDh7YuV5o6g01G1Fwh7zI= -k8s.io/apimachinery v0.18.9 h1:3ZABKQx3F3xPWlsGhCfUl8W+JXRRblV6Wo2A3zn0pvY= -k8s.io/apimachinery v0.18.9/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEXk= -k8s.io/apiserver v0.18.9 h1:ziivCosB28MltECzhDBLLVKW01zCghl/lXke3B62y8g= -k8s.io/apiserver v0.18.9/go.mod h1:vXQzMtUCLsGg1Bh+7Jo2mZKHpHZFCZn8eTNSepcIA1M= -k8s.io/cli-runtime v0.18.9/go.mod h1:Pw7UPmZd/wIlGd7DWGTUWA7qn92jCeybNeiS5WYJI6A= -k8s.io/client-go v0.18.9 h1:sPHX49yOtUqv1fl49TwV3f8cC0N3etSnwgFGsIsXnZc= -k8s.io/client-go v0.18.9/go.mod h1:UjkEetDmr40P9NX0Ok3Idt08FCf2I4mIHgjFsot77uY= -k8s.io/cloud-provider v0.18.9 h1:YDx3yECVceeDFEP0wOrPdDbNNjQZxcxnDsesqENduaM= -k8s.io/cloud-provider v0.18.9/go.mod h1:kwlWAqQK2AzQtq8zhXYRgsgia8zJVqdoonzNOUhDE4c= -k8s.io/cluster-bootstrap v0.18.9/go.mod h1:1mBaMJ6iQnJ82oGu2FIhMjZHTSxYXVk6b/e/TYBOP+0= -k8s.io/code-generator v0.18.9/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/component-base v0.18.9 h1:7G0D/PUKrVxyUxjT5HV4aTqYqhPj60erA1ab1JUw7m8= -k8s.io/component-base v0.18.9/go.mod h1:tUo4qZtV8m7t/U+0DgY+fcnn4BFZ480fZdzxOkWH4zk= -k8s.io/cri-api v0.18.9 h1:RTenaui5r0F315XppVTzwunficov0U/POPOjybhF40U= -k8s.io/cri-api v0.18.9/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s= -k8s.io/csi-translation-lib v0.18.9/go.mod h1:ELfWvcnpMYGXEGMtAzMmF4LahWWB0ZRzWbg6DynSWWE= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/api v0.20.15 h1:7PoPWNuE/pFFhMIQCuto88+63TIjSlCviXknxWCHLVs= +k8s.io/api v0.20.15/go.mod h1:X3JDf1BiTRQQ6xNAxTuhgi6yL2dHc6fSr9LGzE+Z3YU= +k8s.io/apiextensions-apiserver v0.20.15 h1:Vl+kj+BEJ2LhffIPCMZr4HXvK6L2G8iUren/hJ16z10= +k8s.io/apiextensions-apiserver v0.20.15/go.mod h1:8vnD+tSG+1Fv3RDrLyqobmHoFkp8xIfHYFxRAz59wfU= +k8s.io/apimachinery v0.20.15 
h1:tZW9jhDILQJq0fYXq7/t0xulj+73HzxLVBUGLCNg9uM= +k8s.io/apimachinery v0.20.15/go.mod h1:4KFiDSxCoGviCiRk9kTXIROsIf4VSGkVYjVJjJln3pg= +k8s.io/apiserver v0.20.15 h1:ZmR5v8J91+Ti7gHyoKy4KP/yL9v8M+o+FIZe7c0D10s= +k8s.io/apiserver v0.20.15/go.mod h1:B0kPHF2TLyZHZkhyKbOG9dxxd1DNfrymN0UENcDRHxQ= +k8s.io/cli-runtime v0.20.15/go.mod h1:VFx6VfsOSy0wJmtwssg9eslqALIN42CO2YHMrEbPhPs= +k8s.io/client-go v0.20.15 h1:B6Wvl5yFiHkDZaZ0i5Vju6mGHw4Zo2DzDE8XF378Asc= +k8s.io/client-go v0.20.15/go.mod h1:q/vywQFfGT3jw+lXQGA9sEJDH0QEX7XUT2PwrQ2qm/I= +k8s.io/cloud-provider v0.20.15 h1:sfsgMvF1exOzGIL8L/h5+NiZPpIBfAChSAsE08s+Oj8= +k8s.io/cloud-provider v0.20.15/go.mod h1:m8/0URsiTIcM1GIk/uiNA705T+Tl8RP47tl/yVPNmNw= +k8s.io/cluster-bootstrap v0.20.15/go.mod h1:8Y1NfnVkAvmuiEYjmZ7CA5eFbRX1U/2tks+yWGoNtBI= +k8s.io/code-generator v0.20.15/go.mod h1:MW85KuhTjX9nzhFYpRqUOYh4et0xeEBHTEjwBzFYGaM= +k8s.io/component-base v0.20.15 h1:iVhpuhLfOZfkTIG930C0VLSmwg1oAbfIQfezwyST3oY= +k8s.io/component-base v0.20.15/go.mod h1:Pf1ax04nhNWZMY1J+yO2UXSjOVVVksP8sO0SfbJMav8= +k8s.io/component-helpers v0.20.15 h1:oWyeIT/FjR7ZVggsQodTDM06gUR25Aa76GF/kapNMkY= +k8s.io/component-helpers v0.20.15/go.mod h1:tej5yEDfI8n1ekqVlobPWIoU0s0yOhLLbk6SRW2bnuk= +k8s.io/controller-manager v0.20.15 h1:zKHUyWOMWp9c5Hdq4rbcLCR2IOWXGNgxhHdYvk7X/lA= +k8s.io/controller-manager v0.20.15/go.mod h1:HWf8xdzmARRlIXDmqRk0VsoD8XozRlM7DCnTn93kafo= +k8s.io/cri-api v0.20.15 h1:TYCmHZ2iNN3zPP+0lV05qWZhf4nx+tiEmDPSpp0ExoY= +k8s.io/cri-api v0.20.15/go.mod h1:kORlNE4RAwVawmrX+BBiwzjqZdUl3EYzj7Cnb97UKNs= +k8s.io/csi-translation-lib v0.20.15/go.mod h1:fiY3okwrxhczlmQow48M5n+py4XVpoS3F+Lm1MuC4+Q= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod 
h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/kube-aggregator v0.18.9/go.mod h1:ik5Mf6JaP2M9XbWZR/AYgXx2Nj4EDBrHyakUx7C8cdw= -k8s.io/kube-controller-manager v0.18.9/go.mod h1:ZNrKY2V/vDYVLAqbSjcsgUVNC24JktaaQaeeGmC17Zc= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-proxy v0.18.9/go.mod h1:vWzFu3b1E55OQ2659d4ugfDlxJKIj69InFUkWGZTKS4= -k8s.io/kube-scheduler v0.18.9/go.mod h1:rQ0iAmJpEv+7CWPhkqY+Rz0Mhm23hsyQ5bh5EeRX+z8= -k8s.io/kubectl v0.18.9 h1:V6Jl++DSleKtVYWzN0i3XVd/YPvzwGgxwTGHrb253J8= -k8s.io/kubectl v0.18.9/go.mod h1:wJBoebwhAhI6LkvLefo8k2vEUCjJwcPz19seNepOLkU= -k8s.io/kubelet v0.18.9/go.mod h1:mqLCPAOmh1raK9Z+qapi6yZ0VSfPne/qRuEqqg3E9DE= -k8s.io/kubernetes v1.18.9 h1:Gv41iFq/NnHF4LbzheM9d9cl+0+ZLdQkkHZk229Fr3M= -k8s.io/kubernetes v1.18.9/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8= -k8s.io/legacy-cloud-providers v0.18.9/go.mod h1:8ubkki2oC6Kto8euDjXf8xWszW1ehH5Bpwt+QxBBI8Q= -k8s.io/metrics v0.18.9/go.mod h1:zFnHxkmOAc4EywyiWFCSwMPmtvvMStlvWFZTL9BqGWU= -k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= -k8s.io/sample-apiserver v0.18.9/go.mod h1:BqskggZvQv6thkecZe2iAoPeNTN7WRu6e5bfDzOjc0A= -k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 
v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-aggregator v0.20.15/go.mod h1:9vNzyg1fByAEohRirks7JPbXc7e1eXl6OQgapVHkrLU= +k8s.io/kube-controller-manager v0.20.15/go.mod h1:kIbLMR4MMdxacCcRHmUC0nKFsRrFlA2pvAfPbZ/NXYc= +k8s.io/kube-openapi v0.0.0-20211110013926-83f114cd0513 h1:pbudjNtv90nOgR0/DUhPwKHnQ55Khz8+sNhJBIK7A5M= +k8s.io/kube-openapi v0.0.0-20211110013926-83f114cd0513/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-proxy v0.20.15/go.mod h1:Cx+SDYzMftMCxTvYR176zPrV4p9hLCqVUtgeELTOvXk= +k8s.io/kube-scheduler v0.20.15/go.mod h1:f0FW6TSZEzD3tJc2FmW+WJxoKBj434gGDFAhkrE2x3w= +k8s.io/kubectl v0.20.15 h1:288hFvUaXDvKKu1jXR4htqq3LMlHHdzBdRi+lF5RT/g= +k8s.io/kubectl v0.20.15/go.mod h1:Y8IrYMldfB19CncLNVgfkpGSBhERZu5sqthpQ9nWvZ0= +k8s.io/kubelet v0.20.15/go.mod h1:hRsSNm3fNdxNuYyd9Zp3K1upCUdesPVx4LspNwKZzH0= +k8s.io/kubernetes v1.20.15 h1:eGwdo4cRbRbi3AuFxQxzKz/uObB8nFt7eNhc3/KN4r0= +k8s.io/kubernetes v1.20.15/go.mod h1:maNGHPCYK0JpF3C4gI2AOAX0OlPCYsseIEd+wewVkuA= +k8s.io/legacy-cloud-providers v0.20.15/go.mod h1:QuIs9GUVSJN0nLL+T8FF2f0pLxH/HSKeegoQtfaeIRA= +k8s.io/metrics v0.20.15/go.mod h1:CtfKqMCeoGzHSHJRle9eSDxhBogC7Jixbx2E0MtfgEg= +k8s.io/mount-utils v0.20.15 h1:APB5836usQAUhFco0eq6vNOF0oTOKAxSyHTe7tg6mPg= +k8s.io/mount-utils v0.20.15/go.mod h1:Jv9NRZ5L2LF87A17GaGlArD+r3JAJdZFvo4XD1cG4Kc= +k8s.io/sample-apiserver v0.20.15/go.mod h1:H9igCYEWjs+1YDBKghjIlqKo9mhmGXqnEUEnpeduEZk= +k8s.io/system-validators v1.2.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -1161,23 +1118,17 @@ modernc.org/mathutil v1.0.0 h1:93vKjrJopTPrtTNpZ8XIovER7iCIH1QU7wNbOQXC60I= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7 h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.6.5 h1:DSRu6E4FBeVwd/p8niskCVWnX5TSC6ZT9L/OIWOBK7s= sigs.k8s.io/controller-runtime v0.6.5/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 
v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/pkg/apimachinery/networking/subnet.go b/pkg/apimachinery/networking/subnet.go index c9640b7..2b02553 100644 --- a/pkg/apimachinery/networking/subnet.go +++ b/pkg/apimachinery/networking/subnet.go @@ -10,12 +10,10 @@ package networking import ( - "net" "strconv" "strings" corev1 "k8s.io/api/core/v1" - k8sutilnet "k8s.io/utils/net" networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" ) @@ -36,40 +34,53 @@ func GetPodSubnetTopologySpreadName(pod *corev1.Pod) string { } func IsFixedIPMode(psts *networkingv1alpha1.PodSubnetTopologySpread) bool { - if len(psts.Spec.Subnets) != 1 { - return false - } - for _, sub := range psts.Spec.Subnets { - if sub.Type == networkingv1alpha1.IPAllocTypeFixed { - return true - } - } - return false + var strategy = GetPSTSStrategy(psts) + return strategy.Type == networkingv1alpha1.IPAllocTypeFixed } func IsManualMode(psts *networkingv1alpha1.PodSubnetTopologySpread) bool { - for _, sub := range psts.Spec.Subnets { - if sub.Type == networkingv1alpha1.IPAllocTypeManual { - return true - } - } - return false + var strategy = GetPSTSStrategy(psts) + return strategy.Type == 
networkingv1alpha1.IPAllocTypeManual } func IsElasticMode(psts *networkingv1alpha1.PodSubnetTopologySpread) bool { - for _, sub := range psts.Spec.Subnets { - if sub.Type == networkingv1alpha1.IPAllocTypeElastic || sub.Type == "" { - return true - } - } - return false + var strategy = GetPSTSStrategy(psts) + return strategy.Type == networkingv1alpha1.IPAllocTypeElastic || strategy.Type == "" +} + +// IsReuseIPCustomPSTS whether to enable the reuse IP mode +func IsReuseIPCustomPSTS(psts *networkingv1alpha1.PodSubnetTopologySpread) bool { + return IsCustomMode(psts) && psts.Spec.Strategy.EnableReuseIPAddress } +// IsCustomMode psts type is custom +func IsCustomMode(psts *networkingv1alpha1.PodSubnetTopologySpread) bool { + return psts.Spec.Strategy != nil && psts.Spec.Strategy.Type == networkingv1alpha1.IPAllocTypeCustom +} + +// GetReleaseStrategy defaults to release strategy is TTL func GetReleaseStrategy(psts *networkingv1alpha1.PodSubnetTopologySpread) networkingv1alpha1.ReleaseStrategy { - for _, v := range psts.Spec.Subnets { - return v.ReleaseStrategy + return GetPSTSStrategy(psts).ReleaseStrategy +} + +// GetPSTSStrategy Recursively obtain the IP application policy +func GetPSTSStrategy(psts *networkingv1alpha1.PodSubnetTopologySpread) *networkingv1alpha1.IPAllocationStrategy { + var strategy *networkingv1alpha1.IPAllocationStrategy + if psts.Spec.Strategy != nil { + strategy = psts.Spec.Strategy + } else { + for _, sub := range psts.Spec.Subnets { + strategy = &sub.IPAllocationStrategy + } + } + + if strategy.Type == networkingv1alpha1.IPAllocTypeNil { + strategy.Type = networkingv1alpha1.IPAllocTypeElastic + } + if strategy.ReleaseStrategy == "" { + strategy.ReleaseStrategy = networkingv1alpha1.ReleaseStrategyTTL } - return networkingv1alpha1.ReleaseStrategyTTL + return strategy } func IsEndWithNum(name string) bool { @@ -85,42 +96,15 @@ func OwnerByPodSubnetTopologySpread(wep *networkingv1alpha1.WorkloadEndpoint, ps return wep.GetNamespace() == 
psts.GetNamespace() && wep.Spec.SubnetTopologyReference == psts.Name } +// PSTSContainsAvailableSubnet whatever length of the available subnet of psts greater than 0 func PSTSContainsAvailableSubnet(psts *networkingv1alpha1.PodSubnetTopologySpread) bool { return len(psts.Status.AvailableSubnets) > 0 } -func PSTSContainersIP(ip string, psts *networkingv1alpha1.PodSubnetTopologySpread) bool { - netIP := net.ParseIP(ip) - for _, sbn := range psts.Spec.Subnets { - for _, v := range sbn.IPv4 { - if v == ip { - return true - } - } - - cidrs, err := k8sutilnet.ParseCIDRs(sbn.IPv4Range) - if err == nil { - for _, cidr := range cidrs { - if cidr.Contains(netIP) { - return true - } - } - } - - for _, v := range sbn.IPv6 { - if v == ip { - return true - } - } - - cidrs, err = k8sutilnet.ParseCIDRs(sbn.IPv6Range) - if err == nil { - for _, cidr := range cidrs { - if cidr.Contains(netIP) { - return true - } - } - } +// PSTSMode slecte mod of psts and return true if a available subnet in psts +func PSTSMode(psts *networkingv1alpha1.PodSubnetTopologySpread) (*networkingv1alpha1.IPAllocationStrategy, bool) { + if len(psts.Status.AvailableSubnets) == 0 { + return nil, false } - return false + return GetPSTSStrategy(psts), true } diff --git a/pkg/apimachinery/networking/subnet_test.go b/pkg/apimachinery/networking/subnet_test.go new file mode 100644 index 0000000..4601e33 --- /dev/null +++ b/pkg/apimachinery/networking/subnet_test.go @@ -0,0 +1,79 @@ +package networking + +import ( + "testing" + + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/test/data" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +func TestIsSubnetTopologySpreadPod(t *testing.T) { + pod := &corev1.Pod{} + IsSubnetTopologySpreadPod(pod) + pod.Annotations = map[string]string{"": ""} + IsSubnetTopologySpreadPod(pod) + pod.Annotations[AnnotationPodSubnetTopologySpread] = "true" + 
IsSubnetTopologySpreadPod(pod) +} + +func TestIsFixedIPMode(t *testing.T) { + l := labels.Set{} + psts := data.MockPodSubnetTopologySpread("default", "psts-test", "sbn-test", l) + if !IsElasticMode(psts) { + t.Errorf("IsElasticMode") + } + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeManual, + } + if !IsManualMode(psts) { + t.Errorf("IsManualMode") + } + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeFixed, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyNever, + } + if !IsFixedIPMode(psts) { + t.Errorf("IsFixedIPMode") + } + + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + } + if !IsCustomMode(psts) { + t.Errorf("IsCustomMode") + } + + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + EnableReuseIPAddress: true, + } + if !IsReuseIPCustomPSTS(psts) { + t.Errorf("IsCustomMode") + } + + psts.Spec.Strategy.Type = networkingv1alpha1.IPAllocTypeNil + GetReleaseStrategy(psts) + psts.Spec.Strategy = nil + GetReleaseStrategy(psts) + + OwnerByPodSubnetTopologySpread(nil, psts) + wep := data.MockFixedWorkloadEndpoint() + wep.Spec.SubnetTopologyReference = "psts-test" + if !OwnerByPodSubnetTopologySpread(wep, psts) { + t.Errorf("OwnerByPodSubnetTopologySpread") + } + + if !IsEndWithNum("a-0") { + t.Errorf("IsEndWithNum") + } + PSTSMode(psts) + psts.Status.AvailableSubnets = nil + PSTSMode(psts) + if PSTSContainsAvailableSubnet(psts) { + t.Errorf("PSTSContainsAvailableSubnet") + } +} diff --git a/pkg/apimachinery/networking/wep.go b/pkg/apimachinery/networking/wep.go new file mode 100644 index 0000000..6dd9843 --- /dev/null +++ b/pkg/apimachinery/networking/wep.go @@ -0,0 +1,28 @@ +package networking + +import ( + "time" + + 
networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" +) + +// IsCustomReuseModeWEP WorkloadEndpoint type is custom +func ISCustomReuseModeWEP(wep *networkingv1alpha1.WorkloadEndpoint) bool { + return wep.Spec.Type == ipamgeneric.WepTypeReuseIPPod && wep.Spec.EnableFixIP == "True" +} + +func NeedReleaseReuseModeWEP(wep *networkingv1alpha1.WorkloadEndpoint) bool { + if wep.Spec.Phase == networkingv1alpha1.WorkloadEndpointPhasePodDeleted && + wep.Spec.Release != nil && + wep.Spec.Release.PodDeletedTime != nil { + deleteTime := wep.Spec.Release.PodDeletedTime + ttl := wep.Spec.Release.TTL + return deleteTime.Add(ttl.Duration).After(time.Now()) + } + return false +} + +func IsFixIPStatefulSetPodWep(wep *networkingv1alpha1.WorkloadEndpoint) bool { + return wep.Spec.Type == ipamgeneric.WepTypeSts && wep.Spec.EnableFixIP == "True" +} diff --git a/pkg/apimachinery/networking/wep_test.go b/pkg/apimachinery/networking/wep_test.go new file mode 100644 index 0000000..2f414c1 --- /dev/null +++ b/pkg/apimachinery/networking/wep_test.go @@ -0,0 +1,32 @@ +package networking + +import ( + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/test/data" +) + +func TestISCustomReuseModeWEP(t *testing.T) { + wep := data.MockFixedWorkloadEndpoint() + if !IsFixIPStatefulSetPodWep(wep) { + t.Fail() + } + wep.Spec.Type = ipamgeneric.WepTypeReuseIPPod + if !ISCustomReuseModeWEP(wep) { + t.Fail() + } + NeedReleaseReuseModeWEP(wep) + now := metav1.Now() + wep.Spec.Phase = networkingv1alpha1.WorkloadEndpointPhasePodDeleted + wep.Spec.Release = &networkingv1alpha1.EndpointRelease{ + TTL: metav1.Duration{Duration: time.Second}, + 
PodDeletedTime: &now, + } + + NeedReleaseReuseModeWEP(wep) +} diff --git a/pkg/apis/networking/v1alpha1/psts.go b/pkg/apis/networking/v1alpha1/psts.go new file mode 100644 index 0000000..5a1cc41 --- /dev/null +++ b/pkg/apis/networking/v1alpha1/psts.go @@ -0,0 +1,276 @@ +package v1alpha1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sutilnet "k8s.io/utils/net" +) + +var ( + // DefaultReuseIPTTL default timeout for reusing IP addresses. + // If this time is not set, the default value is 7 days + DefaultReuseIPTTL = &metav1.Duration{Duration: time.Hour * 24 * 7} +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=psts + +// PodSubnetTopologySpread describes how to distribute pods in the scenario of sub customized subnets +type PodSubnetTopologySpread struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PodSubnetTopologySpreadSpec `json:"spec,omitempty"` + Status PodSubnetTopologySpreadStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSubnetTopologySpreadList contains a list of PodSubnetTopologySpread +type PodSubnetTopologySpreadList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PodSubnetTopologySpread `json:"items"` +} + +type PodSubnetTopologySpreadSpec struct { + Name string `json:"name,omitempty"` + // +kubebuilder:validation:MinProperties:=1 + + // Subnets for the subnet used by the object, each subnet topology constraint + // object must specify at least one available subnet. + // The subnet must be the subnet ID of the same VPC as the current cluster. 
+ // The format is `sbn-*` for example, sbn-ccfud13pwcqf + // If a dedicated subnet is used, the user should confirm that the subnet + // is only used by the current CCE cluster + Subnets map[string]SubnetAllocation `json:"subnets"` + + // Strategy IP allocate strategy, which is a global ip application strategy. + // If the subnet also sets these fields, the subnet will override the global configuration + // If no global policy is defined, the policy of the first subnet is the global policy by default + Strategy *IPAllocationStrategy `json:"strategy,omitempty"` + + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector,omitempty"` + + // +kubebuilder:validation:Minimum:=0 + + // Priority describes which object the target pod should use when multiple + // objects affect a pod at the same time. The higher the priority value, + // the earlier the object is configured. When multiple objects have the same + // priority value, only the configuration of the first object is taken. + Priority int32 `json:"priority,omitempty"` + + // +kubebuilder:default:=1 + // +kubebuilder:validation:Minimum:=0 + + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. 
+ // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + MaxSkew int32 `json:"maxSkew,omitempty"` + + // +kubebuilder:default:=DoNotSchedule + + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` + + // +kubebuilder:default:=true + + // Whether the IP address allocation details under each subnet are displayed + // in the status. When this attribute is enabled, the pod to which each IP + // address under the current subnet is historically assigned will be recorded. 
+ // Note: recording IP address allocation status may produce large objects, + // which may affect etcd performance + EnableIPAllocationStatus bool `json:"nableIPAllocationStatus,omitempty"` + + EnablePodTopologySpread bool `json:"enablePodTopologySpread,omitempty"` +} + +type UnsatisfiableConstraintAction string + +const ( + // DoNotSchedule instructs the scheduler not to schedule the pod + // when constraints are not satisfied. + DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" + // ScheduleAnyway instructs the scheduler to schedule the pod + // even if constraints are not satisfied. + ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" +) + +// IPAllocationStrategy The policy determines whether to use fixed IP, Elastic IP or custom mode +type IPAllocationStrategy struct { + // If the type is empty, the subnet type is used + Type IPAllocType `json:"type,omitempty"` + + // +kubebuilder:default:=TTL + + // IP address recycling policy + // TTL: represents the default dynamic IP address recycling policy,default. 
+ // Never: this policy can only be used in fixed IP scenarios + ReleaseStrategy ReleaseStrategy `json:"releaseStrategy,omitempty"` + + // ReuseIPAddress Whether to enable address reuse with the same pod name + EnableReuseIPAddress bool `json:"enableReuseIPAddress,omitempty"` + + // TTL How long after the pod is deleted, the IP will be deleted, regardless of whether the IP reuse mode is enabled + TTL *metav1.Duration `json:"ttl,omitempty"` +} + +// SubnetAllocation describe how the IP address under the subnet should be allocated +type SubnetAllocation struct { + IPAllocationStrategy `json:",inline"` + + IPv4 []string `json:"ipv4,omitempty"` + IPv6 []string `json:"ipv6,omitempty"` + // IPv4Range CIDR list for IPv4 address to allocated to pod + IPv4Range []string `json:"ipv4Range,omitempty"` + // IPv6Range CIDR list for IPv6 address to allocated to pod + IPv6Range []string `json:"ipv6Range,omitempty"` + + // Custom When the Type field is `custom`, other sub attributes (such as `IPv4``, `IPv4Range``, etc.) + // of SubnetAllocation will be ignored. At this time, the management policy + // of IP address is taken over by Custom + // + // If the Type is Elastic, Fixed or Manual, the internal part of the Custom field will not be resolved + // The Custom type is not universal, because it may cause IP address disclosure + Custom []CustomAllocation `json:"custom,omitempty"` +} + +// CustomAllocation User defined IP address management policy +type CustomAllocation struct { + // +kubebuilder:default:=4 + // Family of IP Address. 4 or 6 + Family k8sutilnet.IPFamily `json:"family,omitempty"` + // CustomIPRange User defined IP address range. Note that this range must be smaller than the subnet range + // Note that the definitions of multiple ranges cannot be duplicate + CustomIPRange []CustomIPRange `json:"customIPRange,omitempty"` +} + +// CustomIPRange User defined IP address range. 
Note that this range must be smaller than the subnet range +type CustomIPRange struct { + Start string `json:"start"` + // End end address must be greater than or equal to the start address + End string `json:"end"` +} + +// +kubebuilder:validation:Enum=Elastic;Fixed;Manual;Custom;IPAllocTypeNil + +// IPAllocType is the type for ip alloc strategy +type IPAllocType string + +// IPAllocType +const ( + IPAllocTypeNil IPAllocType = "" + IPAllocTypeElastic IPAllocType = "Elastic" + IPAllocTypeFixed IPAllocType = "Fixed" + IPAllocTypeManual IPAllocType = "Manual" + IPAllocTypeCustom IPAllocType = "Custom" +) + +// +kubebuilder:validation:Enum=TTL;Never + +// ReleaseStrategy is the type for ip release strategy +type ReleaseStrategy string + +// ReleaseStrategy +const ( + ReleaseStrategyTTL ReleaseStrategy = "TTL" + ReleaseStrategyNever ReleaseStrategy = "Never" +) + +type PodSubnetTopologySpreadStatus struct { + Name string `json:"name,omitempty"` + SchedulableSubnetsNum int32 `json:"availableSubnetsNum,omitempty"` + UnSchedulableSubnetsNum int32 `json:"unavailableSubnetsNum,omitempty"` + AvailableSubnets map[string]SubnetPodStatus `json:"availableSubnets,omitempty"` + // total number of pods match label selector + PodMatchedCount int32 `json:"podMatchedCount,omitempty"` + // Total pod expected to be affected + PodAffectedCount int32 `json:"podAffectedCount,omitempty"` + UnavailableSubnets map[string]SubnetPodStatus `json:"unavailableSubnets,omitempty"` +} + +type SubnetPodStatus struct { + SubenetDetail `json:",inline"` + // total number of pods under this subnet + PodCount int32 `json:"podCount,omitempty"` + // error message for subnets + Message string `json:"message,omitempty"` + + // IP address allocation details under the subnet + // KEY: ip address + // VALUE: pod name + // Only when the `PodSubnetTopologySpread.spec.enableIPAllocationStatus` spec value is true, + // the IP address allocation information will be recorded + IPAllocations map[string]string 
`json:"ipAllocations,omitempty"` +} + +type SubenetDetail struct { + AvailableIPNum int `json:"availableIPNum,omitempty"` + Enable bool `json:"enable,omitempty"` + HasNoMoreIP bool `json:"hasNoMoreIP,omitempty"` + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + AvailabilityZone string `json:"availabilityZone,omitempty"` + CIDR string `json:"cidr,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:shortName=pstt +// +// PodSubnetTopologySpreadTable describes organizational relationships among multiple psts +type PodSubnetTopologySpreadTable struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec []PodSubnetTopologySpreadSpec `json:"spec,omitempty"` + Status []PodSubnetTopologySpreadStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSubnetTopologySpreadTableList contains a list of PodSubnetTopologySpreadTable +type PodSubnetTopologySpreadTableList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PodSubnetTopologySpreadTable `json:"items"` +} diff --git a/pkg/apis/networking/v1alpha1/types.go b/pkg/apis/networking/v1alpha1/types.go index 9eb1a2a..c60326c 100644 --- a/pkg/apis/networking/v1alpha1/types.go +++ b/pkg/apis/networking/v1alpha1/types.go @@ -30,25 +30,42 @@ type WorkloadEndpoint struct { Spec WorkloadEndpointSpec `json:"spec,omitempty"` } +const ( + WorkloadEndpointPhasePodRuning = "podRunning" + WorkloadEndpointPhasePodDeleted = "podDeleted" +) + type WorkloadEndpointSpec struct { - ContainerID string `json:"containerID"` - IP string `json:"ip"` - IPv6 string `json:"ipv6"` - Type string `json:"type"` - Mac string `json:"mac"` - Gw string `json:"gw"` - ENIID string `json:"eniID"` - Node string `json:"node"` - InstanceID string 
`json:"instanceID"` - SubnetID string `json:"subnetID"` - EnableFixIP string `json:"enableFixIP"` - FixIPDeletePolicy string `json:"fixIPDeletePolicy"` - UpdateAt metav1.Time `json:"updateAt"` + ContainerID string `json:"containerID,omitempty"` + IP string `json:"ip,omitempty"` + IPv6 string `json:"ipv6,omitempty"` + Type string `json:"type,omitempty"` + Mac string `json:"mac,omitempty"` + Gw string `json:"gw,omitempty"` + ENIID string `json:"eniID,omitempty"` + Node string `json:"node,omitempty"` + InstanceID string `json:"instanceID,omitempty"` + SubnetID string `json:"subnetID,omitempty"` + EnableFixIP string `json:"enableFixIP,omitempty"` + FixIPDeletePolicy string `json:"fixIPDeletePolicy,omitempty"` + UpdateAt metav1.Time `json:"updateAt,omitempty"` // subnet id of eni primary IP // This field is valid only when Eni applies for IP across subnets ENISubnetID string `json:"eniSubnetID,omitempty"` // PodSubnetTopologySpread object name referenced by pod SubnetTopologyReference string `json:"subnetTopologyReference,omitempty"` + + Phase string `json:"phase,omitempty"` + // Release + Release *EndpointRelease `json:"release,omitempty"` +} + +// EndpointRelease status of ip reuse mode +type EndpointRelease struct { + // delete wep after pod deleted for TTL + TTL metav1.Duration `json:"TTL,omitempty"` + + PodDeletedTime *metav1.Time `json:"podDeletedTime,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -69,7 +86,7 @@ type MultiIPWorkloadEndpoint struct { metav1.ObjectMeta `json:"metadata,omitempty"` NodeName string `json:"nodeName"` InstanceID string `json:"instanceID"` - Type string `json:"type"` //类型,目前只有一种:Roce + Type string `json:"type"` //类型,取值:roce, eri Spec []MultiIPWorkloadEndpointSpec `json:"spec,omitempty"` } @@ -318,220 +335,3 @@ type CrossVPCEniCondition struct { EniStatus EniStatus `json:"eniStatus,omitempty"` LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` } - -// +genclient -// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=psts - -// PodSubnetTopologySpread describes how to distribute pods in the scenario of sub customized subnets -type PodSubnetTopologySpread struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PodSubnetTopologySpreadSpec `json:"spec,omitempty"` - Status PodSubnetTopologySpreadStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodSubnetTopologySpreadList contains a list of PodSubnetTopologySpread -type PodSubnetTopologySpreadList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []PodSubnetTopologySpread `json:"items"` -} - -type PodSubnetTopologySpreadSpec struct { - Name string `json:"name,omitempty"` - // +kubebuilder:validation:MinProperties:=1 - - // Subnets for the subnet used by the object, each subnet topology constraint - // object must specify at least one available subnet. - // The subnet must be the subnet ID of the same VPC as the current cluster. - // The format is `sbn-*` for example, sbn-ccfud13pwcqf - // If a dedicated subnet is used, the user should confirm that the subnet - // is only used by the current CCE cluster - Subnets map[string]SubnetAllocation `json:"subnets"` - - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector *metav1.LabelSelector `json:"selector,omitempty"` - - // +kubebuilder:validation:Minimum:=0 - - // Priority describes which object the target pod should use when multiple - // objects affect a pod at the same time. 
The higher the priority value, - // the earlier the object is configured. When multiple objects have the same - // priority value, only the configuration of the first object is taken. - Priority int32 `json:"priority,omitempty"` - - // +kubebuilder:default:=1 - // +kubebuilder:validation:Minimum:=0 - - // MaxSkew describes the degree to which pods may be unevenly distributed. - // It's the maximum permitted difference between the number of matching pods in - // any two topology domains of a given topology type. - // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 1/1/0: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P | P | | - // +-------+-------+-------+ - // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; - // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) - // violate MaxSkew(1). - // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - // It's a required field. Default value is 1 and 0 is not allowed. - MaxSkew int32 `json:"maxSkew,omitempty"` - - // +kubebuilder:default:=DoNotSchedule - - // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - // the spread constraint. - // - DoNotSchedule (default) tells the scheduler not to schedule it - // - ScheduleAnyway tells the scheduler to still schedule it - // It's considered as "Unsatisfiable" if and only if placing incoming pod on any - // topology violates "MaxSkew". 
- // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 3/1/1: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P P P | P | P | - // +-------+-------+-------+ - // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - // won't make it *more* imbalanced. - // It's a required field. - WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"` - - // +kubebuilder:default:=true - - // Whether the IP address allocation details under each subnet are displayed - // in the status. When this attribute is enabled, the pod to which each IP - // address under the current subnet is historically assigned will be recorded. - // Note: recording IP address allocation status may produce large objects, - // which may affect etcd performance - EnableIPAllocationStatus bool `json:"nableIPAllocationStatus,omitempty"` - - EnablePodTopologySpread bool `json:"enablePodTopologySpread,omitempty"` -} - -type UnsatisfiableConstraintAction string - -const ( - // DoNotSchedule instructs the scheduler not to schedule the pod - // when constraints are not satisfied. - DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" - // ScheduleAnyway instructs the scheduler to schedule the pod - // even if constraints are not satisfied. - ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" -) - -// SubnetAllocation describe how the IP address under the subnet should be allocated -type SubnetAllocation struct { - // +kubebuilder:default:=Elastic - Type IPAllocType `json:"type,omitempty"` - - // +kubebuilder:default:=TTL - - // IP address recycling policy - // TTL: represents the default dynamic IP address recycling policy,default. 
- // Never: this policy can only be used in fixed IP scenarios - ReleaseStrategy ReleaseStrategy `json:"releaseStrategy,omitempty"` - - IPv4 []string `json:"ipv4,omitempty"` - IPv6 []string `json:"ipv6,omitempty"` - IPv4Range []string `json:"ipv4Range,omitempty"` - IPv6Range []string `json:"ipv6Range,omitempty"` -} - -// +kubebuilder:validation:Enum=Elastic;Fixed;Manual - -// IPAllocType is the type for ip alloc strategy -type IPAllocType string - -// IPAllocType -const ( - IPAllocTypeElastic IPAllocType = "Elastic" - IPAllocTypeFixed IPAllocType = "Fixed" - IPAllocTypeManual IPAllocType = "Manual" -) - -// +kubebuilder:validation:Enum=TTL;Never - -// ReleaseStrategy is the type for ip release strategy -type ReleaseStrategy string - -// ReleaseStrategy -const ( - ReleaseStrategyTTL ReleaseStrategy = "TTL" - ReleaseStrategyNever ReleaseStrategy = "Never" -) - -type PodSubnetTopologySpreadStatus struct { - Name string `json:"name,omitempty"` - SchedulableSubnetsNum int32 `json:"availableSubnetsNum,omitempty"` - UnSchedulableSubnetsNum int32 `json:"unavailableSubnetsNum,omitempty"` - AvailableSubnets map[string]SubnetPodStatus `json:"availableSubnets,omitempty"` - // total number of pods match label selector - PodMatchedCount int32 `json:"podMatchedCount,omitempty"` - // Total pod expected to be affected - PodAffectedCount int32 `json:"podAffectedCount,omitempty"` - UnavailableSubnets map[string]SubnetPodStatus `json:"unavailableSubnets,omitempty"` -} - -type SubnetPodStatus struct { - SubenetDetail `json:",inline"` - // total number of pods under this subnet - PodCount int32 `json:"podCount,omitempty"` - // error message for subnets - Message string `json:"message,omitempty"` - - // IP address allocation details under the subnet - // KEY: ip address - // VALUE: pod name - // Only when the `PodSubnetTopologySpread.spec.enableIPAllocationStatus` spec value is true, - // the IP address allocation information will be recorded - IPAllocations map[string]string 
`json:"ipAllocations,omitempty"` -} - -type SubenetDetail struct { - AvailableIPNum int `json:"availableIPNum,omitempty"` - Enable bool `json:"enable,omitempty"` - HasNoMoreIP bool `json:"hasNoMoreIP,omitempty"` - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - AvailabilityZone string `json:"availabilityZone,omitempty"` - CIDR string `json:"cidr,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=pstt -// -// PodSubnetTopologySpreadTable describes organizational relationships among multiple psts -type PodSubnetTopologySpreadTable struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec []PodSubnetTopologySpreadSpec `json:"spec,omitempty"` - Status []PodSubnetTopologySpreadStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodSubnetTopologySpreadTableList contains a list of PodSubnetTopologySpreadTable -type PodSubnetTopologySpreadTableList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []PodSubnetTopologySpreadTable `json:"items"` -} diff --git a/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go index 11886aa..8bb53a2 100644 --- a/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/networking/v1alpha1/zz_generated.deepcopy.go @@ -137,6 +137,43 @@ func (in *CrossVPCEniStatus) DeepCopy() *CrossVPCEniStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomAllocation) DeepCopyInto(out *CustomAllocation) { + *out = *in + if in.CustomIPRange != nil { + in, out := &in.CustomIPRange, &out.CustomIPRange + *out = make([]CustomIPRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomAllocation. +func (in *CustomAllocation) DeepCopy() *CustomAllocation { + if in == nil { + return nil + } + out := new(CustomAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomIPRange) DeepCopyInto(out *CustomIPRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomIPRange. +func (in *CustomIPRange) DeepCopy() *CustomIPRange { + if in == nil { + return nil + } + out := new(CustomIPRange) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ENI) DeepCopyInto(out *ENI) { *out = *in @@ -217,6 +254,48 @@ func (in *ENIStatus) DeepCopy() *ENIStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointRelease) DeepCopyInto(out *EndpointRelease) { + *out = *in + out.TTL = in.TTL + if in.PodDeletedTime != nil { + in, out := &in.PodDeletedTime, &out.PodDeletedTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointRelease. +func (in *EndpointRelease) DeepCopy() *EndpointRelease { + if in == nil { + return nil + } + out := new(EndpointRelease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAllocationStrategy) DeepCopyInto(out *IPAllocationStrategy) { + *out = *in + if in.TTL != nil { + in, out := &in.TTL, &out.TTL + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAllocationStrategy. +func (in *IPAllocationStrategy) DeepCopy() *IPAllocationStrategy { + if in == nil { + return nil + } + out := new(IPAllocationStrategy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPPool) DeepCopyInto(out *IPPool) { *out = *in @@ -485,6 +564,11 @@ func (in *PodSubnetTopologySpreadSpec) DeepCopyInto(out *PodSubnetTopologySpread (*out)[key] = *val.DeepCopy() } } + if in.Strategy != nil { + in, out := &in.Strategy, &out.Strategy + *out = new(IPAllocationStrategy) + (*in).DeepCopyInto(*out) + } if in.Selector != nil { in, out := &in.Selector, &out.Selector *out = new(v1.LabelSelector) @@ -711,6 +795,7 @@ func (in *Subnet) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *SubnetAllocation) DeepCopyInto(out *SubnetAllocation) { *out = *in + in.IPAllocationStrategy.DeepCopyInto(&out.IPAllocationStrategy) if in.IPv4 != nil { in, out := &in.IPv4, &out.IPv4 *out = make([]string, len(*in)) @@ -731,6 +816,13 @@ func (in *SubnetAllocation) DeepCopyInto(out *SubnetAllocation) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = make([]CustomAllocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -897,6 +989,11 @@ func (in *WorkloadEndpointList) DeepCopyObject() runtime.Object { func (in *WorkloadEndpointSpec) DeepCopyInto(out *WorkloadEndpointSpec) { *out = *in in.UpdateAt.DeepCopyInto(&out.UpdateAt) + if in.Release != nil { + in, out := &in.Release, &out.Release + *out = new(EndpointRelease) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/apis/networking/v1beta1/types.go b/pkg/apis/networking/v1beta1/types.go new file mode 100644 index 0000000..39a7f46 --- /dev/null +++ b/pkg/apis/networking/v1beta1/types.go @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2023 Baidu, Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file + * except in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the + * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific language governing permissions + * and limitations under the License. 
+ * + */ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type CceEniList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []CceEni `json:"items"` +} + +type CceEni struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec EniSpec `json:"spec,omitempty"` + Status EniStatus `json:"status,omitempty"` +} + +type EniSpec struct { + Eni Eni `json:"eni,omitempty"` + // Eni 要绑定的节点 id + InstanceID string `json:"instanceID,omitempty"` +} + +// 和 vpc Eni spec 定义保持一致 +// https://github.com/baidubce/bce-sdk-go/blob/master/services/eni/model.go#L61 +type Eni struct { + EniID string `json:"eniID,omitempty"` + Name string `json:"name,omitempty"` + ZoneName string `json:"zoneName,omitempty"` + Description string `json:"description,omitempty"` + InstanceID string `json:"instanceID,omitempty"` + MacAddress string `json:"macAddress,omitempty"` + VpcID string `json:"vpcID,omitempty"` + SubnetID string `json:"subnetID,omitempty"` + Status string `json:"status,omitempty"` + PrivateIPSet []PrivateIP `json:"privateIPSet,omitempty"` + SecurityGroupIds []string `json:"securityGroupIds,omitempty"` + EnterpriseSecurityGroupIds []string `json:"enterpriseSecurityGroupIds,omitempty"` + CreatedTime string `json:"createdTime,omitempty"` +} + +type PrivateIP struct { + PublicIPAddress string `json:"publicIPAddress,omitempty"` + Primary bool `json:"primary,omitempty"` + PrivateIPAddress string `json:"privateIPAddress,omitempty"` +} + +type EniStatus struct { + StatusInfo StatusInfo `json:"statusInfo,omitempty"` + Node string `json:"node,omitempty"` + PodInfo PodInfo `json:"podInfo,omitempty"` +} + +type PodInfo struct { + PodNs string `json:"podNs,omitempty"` + PodName string `json:"podName,omitempty"` + ContainerID string `json:"containerID,omitempty"` + NetNs string `json:"netNs,omitempty"` +} + +type StatusInfo struct { + CurrentStatus CceEniStatus 
`json:"currentStatus,omitempty"` + LastStatus CceEniStatus `json:"lastStatus,omitempty"` + VpcStatus VpcEniStatus `json:"vpcStatus,omitempty"` + UpdateTime metav1.Time `json:"updateTime,omitempty"` +} + +type CceEniStatus string +type VpcEniStatus string + +/* vpc 中 Eni 共 4 种状态(https://cloud.baidu.com/doc/VPC/s/6kknfn5m8): + * available:创建完成,未挂载 + * attaching:挂载中 + * inuse: 已挂载到单机,vpc 认为的可用状态 + * detaching:卸载中 + */ +const ( + vpcENIStatusAvailable VpcEniStatus = "available" + vpcENIStatusAttaching VpcEniStatus = "attaching" + vpcENIStatusInuse VpcEniStatus = "inuse" + vpcENIStatusDetaching VpcEniStatus = "detaching" +) + +/* cce 中 Eni 状态: + * Pending: 创建 crd 之后的初始状态 + * Created: 向 Vpc 发起创建 Eni 请求成功之后 + * ReadyInVpc: attach 成功,Vpc 中进入 inuse 状态 + * ReadyOnNode: ReadyInVpc 之后单机对 Eni check ok + * UsingInPod: Eni 被 pod 独占使用中 + * DeletedInVpc: Eni 被从 Vpc 中强删后的最终状态 + * + * 独占 Eni 状态机流转: + * + * 创建请求成功(ipam写) attach后进入inuse状态(ipam写) + * Pending ----------------------> Created ---------------------------> ReadyInVpc + * ^ | + * Vpc中强制detach后(ipam写) | |单机check ok(agent写) + * | | + * --------------------------------- | + * | | | + * Vpc中强删后(ipam写) | pod创建后(agent写) | v + * DeletedInVpc <--------------------- UsingInPod <-------------------- ReadyOnNode -- + * | | ^ | + * | | | | + * | | pod删除后(agent写) | | + * | ---------------------------------- | + * | | + * ------------------------------------------------------- + */ +const ( + cceEniStatusPending CceEniStatus = "Pending" + cceEniStatusCreated CceEniStatus = "Created" + cceEniStatusReadyInVpc CceEniStatus = "ReadyInVpc" + cceEniStatusReadyOnNode CceEniStatus = "ReadyOnNode" + cceEniStatusUsingInPod CceEniStatus = "UsingInPod" + cceENIStatusDeletedInVpc CceEniStatus = "DeletedInVpc" +) diff --git a/pkg/bce/cloud/cloud.go b/pkg/bce/cloud/cloud.go index 1236a27..28b262a 100644 --- a/pkg/bce/cloud/cloud.go +++ b/pkg/bce/cloud/cloud.go @@ -21,6 +21,7 @@ import ( "os" "time" + eniExt 
"github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/eni" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/hpc" "github.com/baidubce/bce-sdk-go/bce" "github.com/baidubce/bce-sdk-go/services/bbc" @@ -101,8 +102,9 @@ func New( } bccClient.Config.ConnectionTimeoutInMillis = connectionTimeoutSInSecond * 1000 - eniClient := &eni.Client{ - BceClient: bce.NewBceClient(bccClientConfig, auth.GetSigner(ctx)), + // todo iaas sdk 暂未支持过滤 eri 和 eni,暂时自行封装一层支持,待后续 sdk 支持过滤 eri 和 eni 后,去除这部分封装 + eniClient := &eniExt.Client{ + Client: &eni.Client{BceClient: bce.NewBceClient(bccClientConfig, auth.GetSigner(ctx))}, } eniClient.Config.ConnectionTimeoutInMillis = connectionTimeoutSInSecond * 1000 @@ -124,7 +126,7 @@ func New( return c, nil } -func (c *Client) ListENIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, error) { +func (c *Client) ListENIs(_ context.Context, args eni.ListEniArgs) ([]eni.Eni, error) { var enis []eni.Eni isTruncated := true @@ -140,7 +142,7 @@ func (c *Client) ListENIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, Marker: nextMarker, } - res, err := c.eniClient.ListEni(listArgs) + res, err := c.eniClient.ListEnis(listArgs) exportMetric("ListENI", t, err) if err != nil { return nil, err @@ -155,6 +157,37 @@ func (c *Client) ListENIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, return enis, nil } +func (c *Client) ListERIs(_ context.Context, args eni.ListEniArgs) ([]eni.Eni, error) { + var enis []eni.Eni + + isTruncated := true + nextMarker := "" + + for isTruncated { + t := time.Now() + + listArgs := &eni.ListEniArgs{ + VpcId: args.VpcId, + Name: args.Name, + InstanceId: args.InstanceId, + Marker: nextMarker, + } + + res, err := c.eniClient.ListEris(listArgs) + exportMetric("ListERI", t, err) + if err != nil { + return nil, err + } + + enis = append(enis, res.Eni...) 
+ + nextMarker = res.NextMarker + isTruncated = res.IsTruncated + } + + return enis, nil +} + func (c *Client) AddPrivateIP(ctx context.Context, privateIP string, eniID string) (string, error) { t := time.Now() resp, err := c.eniClient.AddPrivateIp(&eni.EniPrivateIpArgs{ diff --git a/pkg/bce/cloud/testing/fake_cloud.go b/pkg/bce/cloud/testing/fake_cloud.go index 3a5ee27..eed1692 100644 --- a/pkg/bce/cloud/testing/fake_cloud.go +++ b/pkg/bce/cloud/testing/fake_cloud.go @@ -206,6 +206,10 @@ func (fake *FakeBceCloud) ListENIs(ctx context.Context, args eni.ListEniArgs) ([ return nil, nil } +func (fake *FakeBceCloud) ListERIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, error) { + return nil, nil +} + func (fake *FakeBceCloud) ListRouteTable(ctx context.Context, vpcID string, routeTableID string) ([]vpc.RouteRule, error) { return nil, nil } diff --git a/pkg/bce/cloud/testing/mock_cloud.go b/pkg/bce/cloud/testing/mock_cloud.go index c30da9e..6976d83 100644 --- a/pkg/bce/cloud/testing/mock_cloud.go +++ b/pkg/bce/cloud/testing/mock_cloud.go @@ -1,422 +1,436 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud (interfaces: Interface) +// Source: types.go // Package testing is a generated GoMock package. package testing import ( context "context" - reflect "reflect" - hpc "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/hpc" bbc "github.com/baidubce/bce-sdk-go/services/bbc" api "github.com/baidubce/bce-sdk-go/services/bcc/api" eni "github.com/baidubce/bce-sdk-go/services/eni" vpc "github.com/baidubce/bce-sdk-go/services/vpc" gomock "github.com/golang/mock/gomock" + reflect "reflect" ) -// MockInterface is a mock of Interface interface. +// MockInterface is a mock of Interface interface type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface. 
+// MockInterfaceMockRecorder is the mock recorder for MockInterface type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance. +// NewMockInterface creates a new mock instance func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// AddPrivateIP mocks base method. -func (m *MockInterface) AddPrivateIP(arg0 context.Context, arg1, arg2 string) (string, error) { +// ListENIs mocks base method +func (m *MockInterface) ListENIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddPrivateIP", arg0, arg1, arg2) - ret0, _ := ret[0].(string) + ret := m.ctrl.Call(m, "ListENIs", ctx, args) + ret0, _ := ret[0].([]eni.Eni) ret1, _ := ret[1].(error) return ret0, ret1 } -// AddPrivateIP indicates an expected call of AddPrivateIP. -func (mr *MockInterfaceMockRecorder) AddPrivateIP(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPrivateIP", reflect.TypeOf((*MockInterface)(nil).AddPrivateIP), arg0, arg1, arg2) -} - -// AttachENI mocks base method. -func (m *MockInterface) AttachENI(arg0 context.Context, arg1 *eni.EniInstance) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AttachENI", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// AttachENI indicates an expected call of AttachENI. 
-func (mr *MockInterfaceMockRecorder) AttachENI(arg0, arg1 interface{}) *gomock.Call { +// ListENIs indicates an expected call of ListENIs +func (mr *MockInterfaceMockRecorder) ListENIs(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachENI", reflect.TypeOf((*MockInterface)(nil).AttachENI), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListENIs", reflect.TypeOf((*MockInterface)(nil).ListENIs), ctx, args) } -// BBCBatchAddIP mocks base method. -func (m *MockInterface) BBCBatchAddIP(arg0 context.Context, arg1 *bbc.BatchAddIpArgs) (*bbc.BatchAddIpResponse, error) { +// ListERIs mocks base method +func (m *MockInterface) ListERIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BBCBatchAddIP", arg0, arg1) - ret0, _ := ret[0].(*bbc.BatchAddIpResponse) + ret := m.ctrl.Call(m, "ListERIs", ctx, args) + ret0, _ := ret[0].([]eni.Eni) ret1, _ := ret[1].(error) return ret0, ret1 } -// BBCBatchAddIP indicates an expected call of BBCBatchAddIP. -func (mr *MockInterfaceMockRecorder) BBCBatchAddIP(arg0, arg1 interface{}) *gomock.Call { +// ListERIs indicates an expected call of ListERIs +func (mr *MockInterfaceMockRecorder) ListERIs(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BBCBatchAddIP", reflect.TypeOf((*MockInterface)(nil).BBCBatchAddIP), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListERIs", reflect.TypeOf((*MockInterface)(nil).ListERIs), ctx, args) } -// BBCBatchAddIPCrossSubnet mocks base method. 
-func (m *MockInterface) BBCBatchAddIPCrossSubnet(arg0 context.Context, arg1 *bbc.BatchAddIpCrossSubnetArgs) (*bbc.BatchAddIpResponse, error) { +// AddPrivateIP mocks base method +func (m *MockInterface) AddPrivateIP(ctx context.Context, privateIP, eniID string) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BBCBatchAddIPCrossSubnet", arg0, arg1) - ret0, _ := ret[0].(*bbc.BatchAddIpResponse) + ret := m.ctrl.Call(m, "AddPrivateIP", ctx, privateIP, eniID) + ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// BBCBatchAddIPCrossSubnet indicates an expected call of BBCBatchAddIPCrossSubnet. -func (mr *MockInterfaceMockRecorder) BBCBatchAddIPCrossSubnet(arg0, arg1 interface{}) *gomock.Call { +// AddPrivateIP indicates an expected call of AddPrivateIP +func (mr *MockInterfaceMockRecorder) AddPrivateIP(ctx, privateIP, eniID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BBCBatchAddIPCrossSubnet", reflect.TypeOf((*MockInterface)(nil).BBCBatchAddIPCrossSubnet), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPrivateIP", reflect.TypeOf((*MockInterface)(nil).AddPrivateIP), ctx, privateIP, eniID) } -// BBCBatchDelIP mocks base method. -func (m *MockInterface) BBCBatchDelIP(arg0 context.Context, arg1 *bbc.BatchDelIpArgs) error { +// DeletePrivateIP mocks base method +func (m *MockInterface) DeletePrivateIP(ctx context.Context, privateIP, eniID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BBCBatchDelIP", arg0, arg1) + ret := m.ctrl.Call(m, "DeletePrivateIP", ctx, privateIP, eniID) ret0, _ := ret[0].(error) return ret0 } -// BBCBatchDelIP indicates an expected call of BBCBatchDelIP. 
-func (mr *MockInterfaceMockRecorder) BBCBatchDelIP(arg0, arg1 interface{}) *gomock.Call { +// DeletePrivateIP indicates an expected call of DeletePrivateIP +func (mr *MockInterfaceMockRecorder) DeletePrivateIP(ctx, privateIP, eniID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BBCBatchDelIP", reflect.TypeOf((*MockInterface)(nil).BBCBatchDelIP), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePrivateIP", reflect.TypeOf((*MockInterface)(nil).DeletePrivateIP), ctx, privateIP, eniID) } -// BatchAddHpcEniPrivateIP mocks base method. -func (m *MockInterface) BatchAddHpcEniPrivateIP(arg0 context.Context, arg1 *hpc.EniBatchPrivateIPArgs) (*hpc.BatchAddPrivateIPResult, error) { +// BatchAddPrivateIpCrossSubnet mocks base method +func (m *MockInterface) BatchAddPrivateIpCrossSubnet(ctx context.Context, eniID, subnetID string, privateIPs []string, count int) ([]string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchAddHpcEniPrivateIP", arg0, arg1) - ret0, _ := ret[0].(*hpc.BatchAddPrivateIPResult) + ret := m.ctrl.Call(m, "BatchAddPrivateIpCrossSubnet", ctx, eniID, subnetID, privateIPs, count) + ret0, _ := ret[0].([]string) ret1, _ := ret[1].(error) return ret0, ret1 } -// BatchAddHpcEniPrivateIP indicates an expected call of BatchAddHpcEniPrivateIP. 
-func (mr *MockInterfaceMockRecorder) BatchAddHpcEniPrivateIP(arg0, arg1 interface{}) *gomock.Call { +// BatchAddPrivateIpCrossSubnet indicates an expected call of BatchAddPrivateIpCrossSubnet +func (mr *MockInterfaceMockRecorder) BatchAddPrivateIpCrossSubnet(ctx, eniID, subnetID, privateIPs, count interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchAddHpcEniPrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchAddHpcEniPrivateIP), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchAddPrivateIpCrossSubnet", reflect.TypeOf((*MockInterface)(nil).BatchAddPrivateIpCrossSubnet), ctx, eniID, subnetID, privateIPs, count) } -// BatchAddPrivateIP mocks base method. -func (m *MockInterface) BatchAddPrivateIP(arg0 context.Context, arg1 []string, arg2 int, arg3 string) ([]string, error) { +// BatchAddPrivateIP mocks base method +func (m *MockInterface) BatchAddPrivateIP(ctx context.Context, privateIPs []string, count int, eniID string) ([]string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchAddPrivateIP", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "BatchAddPrivateIP", ctx, privateIPs, count, eniID) ret0, _ := ret[0].([]string) ret1, _ := ret[1].(error) return ret0, ret1 } -// BatchAddPrivateIP indicates an expected call of BatchAddPrivateIP. 
-func (mr *MockInterfaceMockRecorder) BatchAddPrivateIP(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// BatchAddPrivateIP indicates an expected call of BatchAddPrivateIP +func (mr *MockInterfaceMockRecorder) BatchAddPrivateIP(ctx, privateIPs, count, eniID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchAddPrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchAddPrivateIP), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchAddPrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchAddPrivateIP), ctx, privateIPs, count, eniID) } -// BatchAddPrivateIpCrossSubnet mocks base method. -func (m *MockInterface) BatchAddPrivateIpCrossSubnet(arg0 context.Context, arg1, arg2 string, arg3 []string, arg4 int) ([]string, error) { +// BatchDeletePrivateIP mocks base method +func (m *MockInterface) BatchDeletePrivateIP(ctx context.Context, privateIPs []string, eniID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchAddPrivateIpCrossSubnet", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].([]string) + ret := m.ctrl.Call(m, "BatchDeletePrivateIP", ctx, privateIPs, eniID) + ret0, _ := ret[0].(error) + return ret0 +} + +// BatchDeletePrivateIP indicates an expected call of BatchDeletePrivateIP +func (mr *MockInterfaceMockRecorder) BatchDeletePrivateIP(ctx, privateIPs, eniID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchDeletePrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchDeletePrivateIP), ctx, privateIPs, eniID) +} + +// CreateENI mocks base method +func (m *MockInterface) CreateENI(ctx context.Context, args *eni.CreateEniArgs) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateENI", ctx, args) + ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// BatchAddPrivateIpCrossSubnet indicates an expected call of BatchAddPrivateIpCrossSubnet. 
-func (mr *MockInterfaceMockRecorder) BatchAddPrivateIpCrossSubnet(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +// CreateENI indicates an expected call of CreateENI +func (mr *MockInterfaceMockRecorder) CreateENI(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchAddPrivateIpCrossSubnet", reflect.TypeOf((*MockInterface)(nil).BatchAddPrivateIpCrossSubnet), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateENI", reflect.TypeOf((*MockInterface)(nil).CreateENI), ctx, args) } -// BatchDeleteHpcEniPrivateIP mocks base method. -func (m *MockInterface) BatchDeleteHpcEniPrivateIP(arg0 context.Context, arg1 *hpc.EniBatchDeleteIPArgs) error { +// DeleteENI mocks base method +func (m *MockInterface) DeleteENI(ctx context.Context, eniID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchDeleteHpcEniPrivateIP", arg0, arg1) + ret := m.ctrl.Call(m, "DeleteENI", ctx, eniID) ret0, _ := ret[0].(error) return ret0 } -// BatchDeleteHpcEniPrivateIP indicates an expected call of BatchDeleteHpcEniPrivateIP. -func (mr *MockInterfaceMockRecorder) BatchDeleteHpcEniPrivateIP(arg0, arg1 interface{}) *gomock.Call { +// DeleteENI indicates an expected call of DeleteENI +func (mr *MockInterfaceMockRecorder) DeleteENI(ctx, eniID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchDeleteHpcEniPrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchDeleteHpcEniPrivateIP), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteENI", reflect.TypeOf((*MockInterface)(nil).DeleteENI), ctx, eniID) } -// BatchDeletePrivateIP mocks base method. 
-func (m *MockInterface) BatchDeletePrivateIP(arg0 context.Context, arg1 []string, arg2 string) error { +// AttachENI mocks base method +func (m *MockInterface) AttachENI(ctx context.Context, args *eni.EniInstance) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchDeletePrivateIP", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "AttachENI", ctx, args) ret0, _ := ret[0].(error) return ret0 } -// BatchDeletePrivateIP indicates an expected call of BatchDeletePrivateIP. -func (mr *MockInterfaceMockRecorder) BatchDeletePrivateIP(arg0, arg1, arg2 interface{}) *gomock.Call { +// AttachENI indicates an expected call of AttachENI +func (mr *MockInterfaceMockRecorder) AttachENI(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchDeletePrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchDeletePrivateIP), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachENI", reflect.TypeOf((*MockInterface)(nil).AttachENI), ctx, args) } -// CreateENI mocks base method. -func (m *MockInterface) CreateENI(arg0 context.Context, arg1 *eni.CreateEniArgs) (string, error) { +// DetachENI mocks base method +func (m *MockInterface) DetachENI(ctx context.Context, args *eni.EniInstance) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateENI", arg0, arg1) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "DetachENI", ctx, args) + ret0, _ := ret[0].(error) + return ret0 } -// CreateENI indicates an expected call of CreateENI. 
-func (mr *MockInterfaceMockRecorder) CreateENI(arg0, arg1 interface{}) *gomock.Call { +// DetachENI indicates an expected call of DetachENI +func (mr *MockInterfaceMockRecorder) DetachENI(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateENI", reflect.TypeOf((*MockInterface)(nil).CreateENI), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachENI", reflect.TypeOf((*MockInterface)(nil).DetachENI), ctx, args) } -// CreateRouteRule mocks base method. -func (m *MockInterface) CreateRouteRule(arg0 context.Context, arg1 *vpc.CreateRouteRuleArgs) (string, error) { +// StatENI mocks base method +func (m *MockInterface) StatENI(ctx context.Context, eniID string) (*eni.Eni, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateRouteRule", arg0, arg1) - ret0, _ := ret[0].(string) + ret := m.ctrl.Call(m, "StatENI", ctx, eniID) + ret0, _ := ret[0].(*eni.Eni) ret1, _ := ret[1].(error) return ret0, ret1 } -// CreateRouteRule indicates an expected call of CreateRouteRule. -func (mr *MockInterfaceMockRecorder) CreateRouteRule(arg0, arg1 interface{}) *gomock.Call { +// StatENI indicates an expected call of StatENI +func (mr *MockInterfaceMockRecorder) StatENI(ctx, eniID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRouteRule", reflect.TypeOf((*MockInterface)(nil).CreateRouteRule), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatENI", reflect.TypeOf((*MockInterface)(nil).StatENI), ctx, eniID) } -// DeleteENI mocks base method. 
-func (m *MockInterface) DeleteENI(arg0 context.Context, arg1 string) error { +// ListRouteTable mocks base method +func (m *MockInterface) ListRouteTable(ctx context.Context, vpcID, routeTableID string) ([]vpc.RouteRule, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteENI", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "ListRouteTable", ctx, vpcID, routeTableID) + ret0, _ := ret[0].([]vpc.RouteRule) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// DeleteENI indicates an expected call of DeleteENI. -func (mr *MockInterfaceMockRecorder) DeleteENI(arg0, arg1 interface{}) *gomock.Call { +// ListRouteTable indicates an expected call of ListRouteTable +func (mr *MockInterfaceMockRecorder) ListRouteTable(ctx, vpcID, routeTableID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteENI", reflect.TypeOf((*MockInterface)(nil).DeleteENI), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRouteTable", reflect.TypeOf((*MockInterface)(nil).ListRouteTable), ctx, vpcID, routeTableID) } -// DeletePrivateIP mocks base method. -func (m *MockInterface) DeletePrivateIP(arg0 context.Context, arg1, arg2 string) error { +// CreateRouteRule mocks base method +func (m *MockInterface) CreateRouteRule(ctx context.Context, args *vpc.CreateRouteRuleArgs) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeletePrivateIP", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "CreateRouteRule", ctx, args) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// DeletePrivateIP indicates an expected call of DeletePrivateIP. 
-func (mr *MockInterfaceMockRecorder) DeletePrivateIP(arg0, arg1, arg2 interface{}) *gomock.Call { +// CreateRouteRule indicates an expected call of CreateRouteRule +func (mr *MockInterfaceMockRecorder) CreateRouteRule(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePrivateIP", reflect.TypeOf((*MockInterface)(nil).DeletePrivateIP), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRouteRule", reflect.TypeOf((*MockInterface)(nil).CreateRouteRule), ctx, args) } -// DeleteRouteRule mocks base method. -func (m *MockInterface) DeleteRouteRule(arg0 context.Context, arg1 string) error { +// DeleteRouteRule mocks base method +func (m *MockInterface) DeleteRouteRule(ctx context.Context, routeID string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteRouteRule", arg0, arg1) + ret := m.ctrl.Call(m, "DeleteRouteRule", ctx, routeID) ret0, _ := ret[0].(error) return ret0 } -// DeleteRouteRule indicates an expected call of DeleteRouteRule. -func (mr *MockInterfaceMockRecorder) DeleteRouteRule(arg0, arg1 interface{}) *gomock.Call { +// DeleteRouteRule indicates an expected call of DeleteRouteRule +func (mr *MockInterfaceMockRecorder) DeleteRouteRule(ctx, routeID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRouteRule", reflect.TypeOf((*MockInterface)(nil).DeleteRouteRule), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRouteRule", reflect.TypeOf((*MockInterface)(nil).DeleteRouteRule), ctx, routeID) } -// DescribeSubnet mocks base method. 
-func (m *MockInterface) DescribeSubnet(arg0 context.Context, arg1 string) (*vpc.Subnet, error) { +// DescribeSubnet mocks base method +func (m *MockInterface) DescribeSubnet(ctx context.Context, subnetID string) (*vpc.Subnet, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DescribeSubnet", arg0, arg1) + ret := m.ctrl.Call(m, "DescribeSubnet", ctx, subnetID) ret0, _ := ret[0].(*vpc.Subnet) ret1, _ := ret[1].(error) return ret0, ret1 } -// DescribeSubnet indicates an expected call of DescribeSubnet. -func (mr *MockInterfaceMockRecorder) DescribeSubnet(arg0, arg1 interface{}) *gomock.Call { +// DescribeSubnet indicates an expected call of DescribeSubnet +func (mr *MockInterfaceMockRecorder) DescribeSubnet(ctx, subnetID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeSubnet", reflect.TypeOf((*MockInterface)(nil).DescribeSubnet), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeSubnet", reflect.TypeOf((*MockInterface)(nil).DescribeSubnet), ctx, subnetID) } -// DetachENI mocks base method. -func (m *MockInterface) DetachENI(arg0 context.Context, arg1 *eni.EniInstance) error { +// ListSubnets mocks base method +func (m *MockInterface) ListSubnets(ctx context.Context, args *vpc.ListSubnetArgs) ([]vpc.Subnet, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DetachENI", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "ListSubnets", ctx, args) + ret0, _ := ret[0].([]vpc.Subnet) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// DetachENI indicates an expected call of DetachENI. 
-func (mr *MockInterfaceMockRecorder) DetachENI(arg0, arg1 interface{}) *gomock.Call { +// ListSubnets indicates an expected call of ListSubnets +func (mr *MockInterfaceMockRecorder) ListSubnets(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DetachENI", reflect.TypeOf((*MockInterface)(nil).DetachENI), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSubnets", reflect.TypeOf((*MockInterface)(nil).ListSubnets), ctx, args) } -// GetBBCInstanceDetail mocks base method. -func (m *MockInterface) GetBBCInstanceDetail(arg0 context.Context, arg1 string) (*bbc.InstanceModel, error) { +// ListSecurityGroup mocks base method +func (m *MockInterface) ListSecurityGroup(ctx context.Context, vpcID, instanceID string) ([]api.SecurityGroupModel, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBBCInstanceDetail", arg0, arg1) - ret0, _ := ret[0].(*bbc.InstanceModel) + ret := m.ctrl.Call(m, "ListSecurityGroup", ctx, vpcID, instanceID) + ret0, _ := ret[0].([]api.SecurityGroupModel) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetBBCInstanceDetail indicates an expected call of GetBBCInstanceDetail. -func (mr *MockInterfaceMockRecorder) GetBBCInstanceDetail(arg0, arg1 interface{}) *gomock.Call { +// ListSecurityGroup indicates an expected call of ListSecurityGroup +func (mr *MockInterfaceMockRecorder) ListSecurityGroup(ctx, vpcID, instanceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBBCInstanceDetail", reflect.TypeOf((*MockInterface)(nil).GetBBCInstanceDetail), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecurityGroup", reflect.TypeOf((*MockInterface)(nil).ListSecurityGroup), ctx, vpcID, instanceID) } -// GetBBCInstanceENI mocks base method. 
-func (m *MockInterface) GetBBCInstanceENI(arg0 context.Context, arg1 string) (*bbc.GetInstanceEniResult, error) { +// GetBCCInstanceDetail mocks base method +func (m *MockInterface) GetBCCInstanceDetail(ctx context.Context, instanceID string) (*api.InstanceModel, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBBCInstanceENI", arg0, arg1) - ret0, _ := ret[0].(*bbc.GetInstanceEniResult) + ret := m.ctrl.Call(m, "GetBCCInstanceDetail", ctx, instanceID) + ret0, _ := ret[0].(*api.InstanceModel) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetBBCInstanceENI indicates an expected call of GetBBCInstanceENI. -func (mr *MockInterfaceMockRecorder) GetBBCInstanceENI(arg0, arg1 interface{}) *gomock.Call { +// GetBCCInstanceDetail indicates an expected call of GetBCCInstanceDetail +func (mr *MockInterfaceMockRecorder) GetBCCInstanceDetail(ctx, instanceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBBCInstanceENI", reflect.TypeOf((*MockInterface)(nil).GetBBCInstanceENI), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBCCInstanceDetail", reflect.TypeOf((*MockInterface)(nil).GetBCCInstanceDetail), ctx, instanceID) } -// GetBCCInstanceDetail mocks base method. -func (m *MockInterface) GetBCCInstanceDetail(arg0 context.Context, arg1 string) (*api.InstanceModel, error) { +// GetBBCInstanceDetail mocks base method +func (m *MockInterface) GetBBCInstanceDetail(ctx context.Context, instanceID string) (*bbc.InstanceModel, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBCCInstanceDetail", arg0, arg1) - ret0, _ := ret[0].(*api.InstanceModel) + ret := m.ctrl.Call(m, "GetBBCInstanceDetail", ctx, instanceID) + ret0, _ := ret[0].(*bbc.InstanceModel) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetBCCInstanceDetail indicates an expected call of GetBCCInstanceDetail. 
-func (mr *MockInterfaceMockRecorder) GetBCCInstanceDetail(arg0, arg1 interface{}) *gomock.Call { +// GetBBCInstanceDetail indicates an expected call of GetBBCInstanceDetail +func (mr *MockInterfaceMockRecorder) GetBBCInstanceDetail(ctx, instanceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBCCInstanceDetail", reflect.TypeOf((*MockInterface)(nil).GetBCCInstanceDetail), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBBCInstanceDetail", reflect.TypeOf((*MockInterface)(nil).GetBBCInstanceDetail), ctx, instanceID) } -// GetHPCEniID mocks base method. -func (m *MockInterface) GetHPCEniID(arg0 context.Context, arg1 string) (*hpc.EniList, error) { +// GetBBCInstanceENI mocks base method +func (m *MockInterface) GetBBCInstanceENI(ctx context.Context, instanceID string) (*bbc.GetInstanceEniResult, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHPCEniID", arg0, arg1) - ret0, _ := ret[0].(*hpc.EniList) + ret := m.ctrl.Call(m, "GetBBCInstanceENI", ctx, instanceID) + ret0, _ := ret[0].(*bbc.GetInstanceEniResult) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetHPCEniID indicates an expected call of GetHPCEniID. -func (mr *MockInterfaceMockRecorder) GetHPCEniID(arg0, arg1 interface{}) *gomock.Call { +// GetBBCInstanceENI indicates an expected call of GetBBCInstanceENI +func (mr *MockInterfaceMockRecorder) GetBBCInstanceENI(ctx, instanceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHPCEniID", reflect.TypeOf((*MockInterface)(nil).GetHPCEniID), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBBCInstanceENI", reflect.TypeOf((*MockInterface)(nil).GetBBCInstanceENI), ctx, instanceID) } -// ListENIs mocks base method. 
-func (m *MockInterface) ListENIs(arg0 context.Context, arg1 eni.ListEniArgs) ([]eni.Eni, error) { +// BBCBatchAddIP mocks base method +func (m *MockInterface) BBCBatchAddIP(ctx context.Context, args *bbc.BatchAddIpArgs) (*bbc.BatchAddIpResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListENIs", arg0, arg1) - ret0, _ := ret[0].([]eni.Eni) + ret := m.ctrl.Call(m, "BBCBatchAddIP", ctx, args) + ret0, _ := ret[0].(*bbc.BatchAddIpResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// ListENIs indicates an expected call of ListENIs. -func (mr *MockInterfaceMockRecorder) ListENIs(arg0, arg1 interface{}) *gomock.Call { +// BBCBatchAddIP indicates an expected call of BBCBatchAddIP +func (mr *MockInterfaceMockRecorder) BBCBatchAddIP(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListENIs", reflect.TypeOf((*MockInterface)(nil).ListENIs), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BBCBatchAddIP", reflect.TypeOf((*MockInterface)(nil).BBCBatchAddIP), ctx, args) } -// ListRouteTable mocks base method. -func (m *MockInterface) ListRouteTable(arg0 context.Context, arg1, arg2 string) ([]vpc.RouteRule, error) { +// BBCBatchDelIP mocks base method +func (m *MockInterface) BBCBatchDelIP(ctx context.Context, args *bbc.BatchDelIpArgs) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListRouteTable", arg0, arg1, arg2) - ret0, _ := ret[0].([]vpc.RouteRule) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "BBCBatchDelIP", ctx, args) + ret0, _ := ret[0].(error) + return ret0 } -// ListRouteTable indicates an expected call of ListRouteTable. 
-func (mr *MockInterfaceMockRecorder) ListRouteTable(arg0, arg1, arg2 interface{}) *gomock.Call { +// BBCBatchDelIP indicates an expected call of BBCBatchDelIP +func (mr *MockInterfaceMockRecorder) BBCBatchDelIP(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListRouteTable", reflect.TypeOf((*MockInterface)(nil).ListRouteTable), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BBCBatchDelIP", reflect.TypeOf((*MockInterface)(nil).BBCBatchDelIP), ctx, args) } -// ListSecurityGroup mocks base method. -func (m *MockInterface) ListSecurityGroup(arg0 context.Context, arg1, arg2 string) ([]api.SecurityGroupModel, error) { +// BBCBatchAddIPCrossSubnet mocks base method +func (m *MockInterface) BBCBatchAddIPCrossSubnet(ctx context.Context, args *bbc.BatchAddIpCrossSubnetArgs) (*bbc.BatchAddIpResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListSecurityGroup", arg0, arg1, arg2) - ret0, _ := ret[0].([]api.SecurityGroupModel) + ret := m.ctrl.Call(m, "BBCBatchAddIPCrossSubnet", ctx, args) + ret0, _ := ret[0].(*bbc.BatchAddIpResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// ListSecurityGroup indicates an expected call of ListSecurityGroup. -func (mr *MockInterfaceMockRecorder) ListSecurityGroup(arg0, arg1, arg2 interface{}) *gomock.Call { +// BBCBatchAddIPCrossSubnet indicates an expected call of BBCBatchAddIPCrossSubnet +func (mr *MockInterfaceMockRecorder) BBCBatchAddIPCrossSubnet(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecurityGroup", reflect.TypeOf((*MockInterface)(nil).ListSecurityGroup), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BBCBatchAddIPCrossSubnet", reflect.TypeOf((*MockInterface)(nil).BBCBatchAddIPCrossSubnet), ctx, args) } -// ListSubnets mocks base method. 
-func (m *MockInterface) ListSubnets(arg0 context.Context, arg1 *vpc.ListSubnetArgs) ([]vpc.Subnet, error) { +// GetHPCEniID mocks base method +func (m *MockInterface) GetHPCEniID(ctx context.Context, instanceID string) (*hpc.EniList, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListSubnets", arg0, arg1) - ret0, _ := ret[0].([]vpc.Subnet) + ret := m.ctrl.Call(m, "GetHPCEniID", ctx, instanceID) + ret0, _ := ret[0].(*hpc.EniList) ret1, _ := ret[1].(error) return ret0, ret1 } -// ListSubnets indicates an expected call of ListSubnets. -func (mr *MockInterfaceMockRecorder) ListSubnets(arg0, arg1 interface{}) *gomock.Call { +// GetHPCEniID indicates an expected call of GetHPCEniID +func (mr *MockInterfaceMockRecorder) GetHPCEniID(ctx, instanceID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSubnets", reflect.TypeOf((*MockInterface)(nil).ListSubnets), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHPCEniID", reflect.TypeOf((*MockInterface)(nil).GetHPCEniID), ctx, instanceID) } -// StatENI mocks base method. 
-func (m *MockInterface) StatENI(arg0 context.Context, arg1 string) (*eni.Eni, error) { +// BatchDeleteHpcEniPrivateIP mocks base method +func (m *MockInterface) BatchDeleteHpcEniPrivateIP(ctx context.Context, args *hpc.EniBatchDeleteIPArgs) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StatENI", arg0, arg1) - ret0, _ := ret[0].(*eni.Eni) + ret := m.ctrl.Call(m, "BatchDeleteHpcEniPrivateIP", ctx, args) + ret0, _ := ret[0].(error) + return ret0 +} + +// BatchDeleteHpcEniPrivateIP indicates an expected call of BatchDeleteHpcEniPrivateIP +func (mr *MockInterfaceMockRecorder) BatchDeleteHpcEniPrivateIP(ctx, args interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchDeleteHpcEniPrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchDeleteHpcEniPrivateIP), ctx, args) +} + +// BatchAddHpcEniPrivateIP mocks base method +func (m *MockInterface) BatchAddHpcEniPrivateIP(ctx context.Context, args *hpc.EniBatchPrivateIPArgs) (*hpc.BatchAddPrivateIPResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchAddHpcEniPrivateIP", ctx, args) + ret0, _ := ret[0].(*hpc.BatchAddPrivateIPResult) ret1, _ := ret[1].(error) return ret0, ret1 } -// StatENI indicates an expected call of StatENI. 
-func (mr *MockInterfaceMockRecorder) StatENI(arg0, arg1 interface{}) *gomock.Call { +// BatchAddHpcEniPrivateIP indicates an expected call of BatchAddHpcEniPrivateIP +func (mr *MockInterfaceMockRecorder) BatchAddHpcEniPrivateIP(ctx, args interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatENI", reflect.TypeOf((*MockInterface)(nil).StatENI), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchAddHpcEniPrivateIP", reflect.TypeOf((*MockInterface)(nil).BatchAddHpcEniPrivateIP), ctx, args) } diff --git a/pkg/bce/cloud/types.go b/pkg/bce/cloud/types.go index 2c3817a..e1af4e7 100644 --- a/pkg/bce/cloud/types.go +++ b/pkg/bce/cloud/types.go @@ -17,8 +17,9 @@ package cloud import ( "context" - "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/hpc" + eniExt "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/eni" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/hpc" "github.com/baidubce/bce-sdk-go/services/bbc" "github.com/baidubce/bce-sdk-go/services/bcc" bccapi "github.com/baidubce/bce-sdk-go/services/bcc/api" @@ -28,6 +29,7 @@ import ( type Interface interface { ListENIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, error) + ListERIs(ctx context.Context, args eni.ListEniArgs) ([]eni.Eni, error) AddPrivateIP(ctx context.Context, privateIP string, eniID string) (string, error) DeletePrivateIP(ctx context.Context, privateIP string, eniID string) error @@ -68,7 +70,7 @@ type Interface interface { type Client struct { bccClient *bcc.Client - eniClient *eni.Client + eniClient *eniExt.Client vpcClient *vpc.Client hpcClient *hpc.Client bbcClient *bbc.Client diff --git a/pkg/bce/eni/client.go b/pkg/bce/eni/client.go new file mode 100644 index 0000000..c00e3f2 --- /dev/null +++ b/pkg/bce/eni/client.go @@ -0,0 +1,123 @@ +package eni + +import ( + "fmt" + "strconv" + "strings" + + "github.com/baidubce/bce-sdk-go/bce" + "github.com/baidubce/bce-sdk-go/http" + 
"github.com/baidubce/bce-sdk-go/services/eni" +) + +const ( + modeEri = "highPerformance" + modeEni = "standard" +) + +type Client struct { + *eni.Client +} + +func NewClient(ak, sk, endPoint string) (*Client, error) { + eniClient, err := eni.NewClient(ak, sk, endPoint) + if err != nil { + return nil, err + } + return &Client{ + Client: eniClient, + }, nil +} + +// ListEnis - list all eni without eri +// +// PARAMS: +// - args: the arguments to list all eni without eri +// RETURNS: +// - *ListEniResult: the result of list all eni +// - error: nil if success otherwise the specific error +func (c *Client) ListEnis(args *eni.ListEniArgs) (*eni.ListEniResult, error) { + return c.listEniOrEris(args, isEni) +} + +// ListEris - list all eri with the specific parameters +// +// PARAMS: +// - args: the arguments to list all eri +// RETURNS: +// - *ListEniResult: the result of list all eri +// - error: nil if success otherwise the specific error +func (c *Client) ListEris(args *eni.ListEniArgs) (*eni.ListEniResult, error) { + return c.listEniOrEris(args, isEri) +} + +func (c *Client) listEniOrEris(args *eni.ListEniArgs, filterFunc func(eriOrEni Eni) bool) (*eni.ListEniResult, error) { + eniAndEriList, err := c.listEniAndEris(args) + if err != nil { + return nil, err + } + + eniOrEriList := make([]eni.Eni, 0) + for i := range eniAndEriList.Eni { + eniOrEri := eniAndEriList.Eni[i] + if filterFunc(eniOrEri) { + eniOrEriList = append(eniOrEriList, eniOrEri.Eni) + } + } + + erisResult := &eni.ListEniResult{ + Eni: eniOrEriList, + Marker: eniAndEriList.Marker, + IsTruncated: eniAndEriList.IsTruncated, + NextMarker: eniAndEriList.NextMarker, + MaxKeys: eniAndEriList.MaxKeys, + } + + return erisResult, err +} + +func (c *Client) listEniAndEris(args *eni.ListEniArgs) (*ListEniResult, error) { + if args == nil { + return nil, fmt.Errorf("the ListEniArgs cannot be nil") + } + if args.MaxKeys == 0 { + args.MaxKeys = 1000 + } + + result := &ListEniResult{} + builder := 
bce.NewRequestBuilder(c). + WithURL(getURLForEni()). + WithMethod(http.GET). + WithQueryParam("vpcId", args.VpcId). + WithQueryParamFilter("instanceId", args.InstanceId). + WithQueryParamFilter("name", args.Name). + WithQueryParamFilter("marker", args.Marker). + WithQueryParamFilter("maxKeys", strconv.Itoa(args.MaxKeys)) + + if len(args.PrivateIpAddress) != 0 { + builder.WithQueryParam("privateIpAddress", + strings.Replace(strings.Trim(fmt.Sprint(args.PrivateIpAddress), "[]"), " ", ",", -1)) + } + + err := builder.WithResult(result).Do() + + return result, err +} + +func getURLForEni() string { + return eni.URI_PREFIX + eni.REQUEST_ENI_URL +} + +func isEri(eriOrEni Eni) bool { + if strings.EqualFold(eriOrEni.NetworkInterfaceTrafficMode, modeEri) { + return true + } + return false +} + +func isEni(eriOrEni Eni) bool { + if strings.EqualFold(eriOrEni.NetworkInterfaceTrafficMode, modeEni) { + return true + } + return false +} diff --git a/pkg/bce/eni/client_test.go b/pkg/bce/eni/client_test.go new file mode 100644 index 0000000..64272de --- /dev/null +++ b/pkg/bce/eni/client_test.go @@ -0,0 +1,213 @@ +package eni + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/baidubce/bce-sdk-go/services/eni" + "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/assert" +) + +var ( + eriClient *Client +) + +type TestServerConfig struct { + t *testing.T + ass *assert.Assertions + RequestMethod string + RequestURLPath string + RequestBody []byte + RequestHeaders map[string]string + RequestQueryParams map[string]string + ResponseHeaders map[string]string + ResponseBody []byte + ResponseBodyFunc func(t *testing.T, actualBody []byte) + ResponseStatusCode int + HookAfterResponse func() + Debug bool +} + +func NewMockClient(endpoint string) *Client { + eriClient, _ = NewClient("dfsdfsfs", "dfsfdsfs", endpoint) + return eriClient +} + +func NewTestServer(t *testing.T, config *TestServerConfig) *httptest.Server { 
+ config.ass = assert.New(t) + return httptest.NewServer(config) +} + +func (config *TestServerConfig) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ass := config.ass + defer r.Body.Close() + + if config.RequestMethod != "" { + if config.Debug { + fmt.Fprintf(os.Stderr, "Method: ") + spew.Fdump(os.Stderr, r.Method) + } + ass.Equal(config.RequestMethod, r.Method, "request method mismatch") + } + + if config.RequestURLPath != "" { + if config.Debug { + fmt.Fprintf(os.Stderr, "Path: ") + spew.Fdump(os.Stderr, r.URL.Path) + } + ass.Equal(config.RequestURLPath, r.URL.Path, "request path mismatch") + } + + if config.Debug && len(config.RequestHeaders) > 0 { + if config.Debug { + fmt.Fprintf(os.Stderr, "Header: ") + spew.Fdump(os.Stderr, r.Header) + } + } + reqHeaders := r.Header + for k, want := range config.RequestHeaders { + actual := reqHeaders.Get(k) + ass.Equal(want, actual, fmt.Sprintf("header '%s' mismatch", k)) + } + + if len(config.RequestQueryParams) > 0 { + if config.Debug { + fmt.Fprintf(os.Stderr, "QueryParams: ") + spew.Fdump(os.Stderr, r.URL.RawQuery) + } + } + reqQueryParams := r.URL.Query() + for k, want := range config.RequestQueryParams { + actual := reqQueryParams.Get(k) + ass.Equal(want, actual, fmt.Sprintf("query param '%s' mismatch", k)) + } + + if len(config.RequestBody) > 0 { + body, _ := ioutil.ReadAll(r.Body) + if config.Debug { + fmt.Fprintf(os.Stderr, "Body: ") + spew.Fdump(os.Stderr, body) + } + if config.ResponseBodyFunc != nil { + config.ResponseBodyFunc(config.t, body) + } else { + ass.Equal(config.RequestBody, body, "request body mismatch") + } + } + + // 返回顺序要遵守:header -> status code -> body + rspHeaders := w.Header() + for k, v := range config.ResponseHeaders { + rspHeaders.Set(k, v) + } + + if config.ResponseStatusCode > 0 { + w.WriteHeader(config.ResponseStatusCode) + } + + if len(config.ResponseBody) > 0 { + write, _ := w.Write(config.ResponseBody) + if config.Debug { + fmt.Fprintf(os.Stderr, "Body: ") + 
spew.Fdump(os.Stderr, write) + } + } + + if config.HookAfterResponse != nil { + config.HookAfterResponse() + } +} + +func Test_ListEris(t *testing.T) { + ass := assert.New(t) + tests := []struct { + name string + context context.Context + err error + config *TestServerConfig + args *eni.ListEniArgs + + expect int + }{ + { + name: "normal", + config: &TestServerConfig{ + RequestMethod: http.MethodGet, + RequestURLPath: "/v1/eni", + ResponseHeaders: map[string]string{"Content-Type": "application/json"}, + ResponseBody: []byte("{\"enis\":[{\"networkInterfaceTrafficMode\":\"highPerformance\"}," + + "{\"networkInterfaceTrafficMode\":\"standard\"}," + + "{\"networkInterfaceTrafficMode\":\"highPerformance\"}]}"), + }, + args: &eni.ListEniArgs{ + VpcId: "fsdf", + PrivateIpAddress: []string{"10.0.0.1"}, + }, + expect: 2, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + svr := NewTestServer(t, test.config) + client := NewMockClient(svr.URL) + result, actualErr := client.ListEris(test.args) + if test.err == nil { + ass.Nil(actualErr, actualErr) + ass.Equal(test.expect, len(result.Eni)) + } else { + ass.ErrorContains(actualErr, test.err.Error(), "err mismatch") + } + svr.Close() + }) + } +} + +func Test_ListEnis(t *testing.T) { + ass := assert.New(t) + tests := []struct { + name string + context context.Context + err error + config *TestServerConfig + args *eni.ListEniArgs + + expect int + }{ + { + name: "normal", + config: &TestServerConfig{ + RequestMethod: http.MethodGet, + RequestURLPath: "/v1/eni", + ResponseHeaders: map[string]string{"Content-Type": "application/json"}, + ResponseBody: []byte("{\"enis\":[{\"networkInterfaceTrafficMode\":\"highPerformance\"}," + + "{\"networkInterfaceTrafficMode\":\"standard\"}," + + "{\"networkInterfaceTrafficMode\":\"highPerformance\"}]}"), + }, + args: &eni.ListEniArgs{ + VpcId: "fsdf", + PrivateIpAddress: []string{"10.0.0.1"}, + }, + expect: 1, + }, + } + for _, test := range tests { + t.Run(test.name, 
func(t *testing.T) { + svr := NewTestServer(t, test.config) + client := NewMockClient(svr.URL) + result, actualErr := client.ListEnis(test.args) + if test.err == nil { + ass.Nil(actualErr, actualErr) + ass.Equal(test.expect, len(result.Eni)) + } else { + ass.ErrorContains(actualErr, test.err.Error(), "err mismatch") + } + svr.Close() + }) + } +} diff --git a/pkg/bce/eni/model.go b/pkg/bce/eni/model.go new file mode 100644 index 0000000..ac89120 --- /dev/null +++ b/pkg/bce/eni/model.go @@ -0,0 +1,16 @@ +package eni + +import "github.com/baidubce/bce-sdk-go/services/eni" + +type ListEniResult struct { + Eni []Eni `json:"enis"` + Marker string `json:"marker"` + IsTruncated bool `json:"isTruncated"` + NextMarker string `json:"nextMarker"` + MaxKeys int `json:"maxKeys"` +} + +type Eni struct { + eni.Eni + NetworkInterfaceTrafficMode string `json:"networkInterfaceTrafficMode"` +} diff --git a/pkg/bce/metadata/metadata.go b/pkg/bce/metadata/metadata.go index dabe7fa..a2e746c 100644 --- a/pkg/bce/metadata/metadata.go +++ b/pkg/bce/metadata/metadata.go @@ -58,6 +58,9 @@ type Interface interface { GetSubnetID() (string, error) GetLinkGateway(string, string) (string, error) GetLinkMask(string, string) (string, error) + GetVifFeatures(macAddress string) (string, error) + + ListMacs() ([]string, error) } var _ Interface = &Client{} @@ -85,12 +88,12 @@ func NewClient() *Client { func (c *Client) sendRequest(path string) ([]byte, error) { ctx := log.NewContext() - url := url.URL{ + reqURL := url.URL{ Scheme: c.scheme, Host: c.host, Path: path, } - resp, err := http.Get(url.String()) + resp, err := http.Get(reqURL.String()) if err != nil { return nil, err } @@ -290,3 +293,43 @@ func (c *Client) GetLinkSecondaryIPs(macAddress string) ([]string, error) { return secondaryIPs, nil } + +func (c *Client) GetVifFeatures(macAddress string) (string, error) { + // eg. 
/1.0/meta-data/network/interfaces/macs/fa:26:00:01:6f:37/vif_features + // response: + // elastic_rdma + path := fmt.Sprintf(metadataBasePath+"network/interfaces/macs/%s/vif_features", macAddress) + body, err := c.sendRequest(path) + if err != nil { + return "", err + } + vifFeatures := strings.TrimSpace(string(body)) + return vifFeatures, nil +} + +func (c *Client) ListMacs() ([]string, error) { + // eg. /1.0/meta-data/network/interfaces/macs + // response: + // fa:f6:00:01:7b:f4/ + // fa:f6:00:07:f8:f4/ + // fa:f6:00:08:13:e1/ + // fa:f6:00:14:4c:f0/ + path := fmt.Sprintf(metadataBasePath + "network/interfaces/macs") + body, err := c.sendRequest(path) + if err != nil { + return nil, err + } + + var ( + macAddrs []string + ) + + reader := strings.NewReader(strings.TrimSpace(string(body))) + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + macAddrs = append(macAddrs, strings.TrimSuffix(line, "/")) + } + + return macAddrs, nil +} diff --git a/pkg/bce/metadata/metadata_test.go b/pkg/bce/metadata/metadata_test.go index dea6381..f095b0a 100644 --- a/pkg/bce/metadata/metadata_test.go +++ b/pkg/bce/metadata/metadata_test.go @@ -46,6 +46,8 @@ func newHandler(handler func(w http.ResponseWriter, r *http.Request)) http.Handl r.HandleFunc(metadataBasePath+"region", handler).Methods("GET") r.HandleFunc(metadataBasePath+"vpc-id", handler).Methods("GET") r.HandleFunc(metadataBasePath+"subnet-id", handler).Methods("GET") + r.HandleFunc(metadataBasePath+"network/interfaces/macs/test-mac/vif_features", handler).Methods("GET") + r.HandleFunc(metadataBasePath+"network/interfaces/macs", handler).Methods("GET") return r } @@ -121,6 +123,22 @@ func TestMetaDataOK(t *testing.T) { if result != "xxx" { t.Errorf("TestMetaData want: %v , got : %v", "xxx", result) } + + result, err = c.GetVifFeatures("test-mac") + if err != nil { + t.Errorf("TestMetaData got error: %v", err) + } + if result != "xxx" { + t.Errorf("TestMetaData want: %v , got : %v", "xxx", 
result) + } + + results, listErr := c.ListMacs() + if listErr != nil { + t.Errorf("TestMetaData got error: %v", listErr) + } + if len(results) != 1 { + t.Errorf("TestMetaData len(results) wants: %d , got : %d", 1, len(results)) + } } func TestMetaDataNotImplemented(t *testing.T) { diff --git a/pkg/bce/metadata/testing/mock_metadata.go b/pkg/bce/metadata/testing/mock_metadata.go index 91423b5..6eeafbb 100644 --- a/pkg/bce/metadata/testing/mock_metadata.go +++ b/pkg/bce/metadata/testing/mock_metadata.go @@ -1,55 +1,39 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/metadata (interfaces: Interface) +// Source: metadata.go // Package testing is a generated GoMock package. package testing import ( - reflect "reflect" - metadata "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/metadata" gomock "github.com/golang/mock/gomock" + reflect "reflect" ) -// MockInterface is a mock of Interface interface. +// MockInterface is a mock of Interface interface type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface. +// MockInterfaceMockRecorder is the mock recorder for MockInterface type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance. +// NewMockInterface creates a new mock instance func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// GetAvailabilityZone mocks base method. 
-func (m *MockInterface) GetAvailabilityZone() (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAvailabilityZone") - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAvailabilityZone indicates an expected call of GetAvailabilityZone. -func (mr *MockInterfaceMockRecorder) GetAvailabilityZone() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAvailabilityZone", reflect.TypeOf((*MockInterface)(nil).GetAvailabilityZone)) -} - -// GetInstanceID mocks base method. +// GetInstanceID mocks base method func (m *MockInterface) GetInstanceID() (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetInstanceID") @@ -58,13 +42,13 @@ func (m *MockInterface) GetInstanceID() (string, error) { return ret0, ret1 } -// GetInstanceID indicates an expected call of GetInstanceID. +// GetInstanceID indicates an expected call of GetInstanceID func (mr *MockInterfaceMockRecorder) GetInstanceID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceID", reflect.TypeOf((*MockInterface)(nil).GetInstanceID)) } -// GetInstanceName mocks base method. +// GetInstanceName mocks base method func (m *MockInterface) GetInstanceName() (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetInstanceName") @@ -73,13 +57,13 @@ func (m *MockInterface) GetInstanceName() (string, error) { return ret0, ret1 } -// GetInstanceName indicates an expected call of GetInstanceName. +// GetInstanceName indicates an expected call of GetInstanceName func (mr *MockInterfaceMockRecorder) GetInstanceName() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceName", reflect.TypeOf((*MockInterface)(nil).GetInstanceName)) } -// GetInstanceTypeEx mocks base method. 
+// GetInstanceTypeEx mocks base method func (m *MockInterface) GetInstanceTypeEx() (metadata.InstanceTypeEx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetInstanceTypeEx") @@ -88,73 +72,73 @@ func (m *MockInterface) GetInstanceTypeEx() (metadata.InstanceTypeEx, error) { return ret0, ret1 } -// GetInstanceTypeEx indicates an expected call of GetInstanceTypeEx. +// GetInstanceTypeEx indicates an expected call of GetInstanceTypeEx func (mr *MockInterfaceMockRecorder) GetInstanceTypeEx() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstanceTypeEx", reflect.TypeOf((*MockInterface)(nil).GetInstanceTypeEx)) } -// GetLinkGateway mocks base method. -func (m *MockInterface) GetLinkGateway(arg0, arg1 string) (string, error) { +// GetLocalIPv4 mocks base method +func (m *MockInterface) GetLocalIPv4() (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLinkGateway", arg0, arg1) + ret := m.ctrl.Call(m, "GetLocalIPv4") ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetLinkGateway indicates an expected call of GetLinkGateway. -func (mr *MockInterfaceMockRecorder) GetLinkGateway(arg0, arg1 interface{}) *gomock.Call { +// GetLocalIPv4 indicates an expected call of GetLocalIPv4 +func (mr *MockInterfaceMockRecorder) GetLocalIPv4() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLinkGateway", reflect.TypeOf((*MockInterface)(nil).GetLinkGateway), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLocalIPv4", reflect.TypeOf((*MockInterface)(nil).GetLocalIPv4)) } -// GetLinkMask mocks base method. 
-func (m *MockInterface) GetLinkMask(arg0, arg1 string) (string, error) { +// GetAvailabilityZone mocks base method +func (m *MockInterface) GetAvailabilityZone() (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLinkMask", arg0, arg1) + ret := m.ctrl.Call(m, "GetAvailabilityZone") ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetLinkMask indicates an expected call of GetLinkMask. -func (mr *MockInterfaceMockRecorder) GetLinkMask(arg0, arg1 interface{}) *gomock.Call { +// GetAvailabilityZone indicates an expected call of GetAvailabilityZone +func (mr *MockInterfaceMockRecorder) GetAvailabilityZone() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLinkMask", reflect.TypeOf((*MockInterface)(nil).GetLinkMask), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAvailabilityZone", reflect.TypeOf((*MockInterface)(nil).GetAvailabilityZone)) } -// GetLocalIPv4 mocks base method. -func (m *MockInterface) GetLocalIPv4() (string, error) { +// GetRegion mocks base method +func (m *MockInterface) GetRegion() (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLocalIPv4") + ret := m.ctrl.Call(m, "GetRegion") ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetLocalIPv4 indicates an expected call of GetLocalIPv4. -func (mr *MockInterfaceMockRecorder) GetLocalIPv4() *gomock.Call { +// GetRegion indicates an expected call of GetRegion +func (mr *MockInterfaceMockRecorder) GetRegion() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLocalIPv4", reflect.TypeOf((*MockInterface)(nil).GetLocalIPv4)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRegion", reflect.TypeOf((*MockInterface)(nil).GetRegion)) } -// GetRegion mocks base method. 
-func (m *MockInterface) GetRegion() (string, error) { +// GetVPCID mocks base method +func (m *MockInterface) GetVPCID() (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRegion") + ret := m.ctrl.Call(m, "GetVPCID") ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetRegion indicates an expected call of GetRegion. -func (mr *MockInterfaceMockRecorder) GetRegion() *gomock.Call { +// GetVPCID indicates an expected call of GetVPCID +func (mr *MockInterfaceMockRecorder) GetVPCID() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRegion", reflect.TypeOf((*MockInterface)(nil).GetRegion)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVPCID", reflect.TypeOf((*MockInterface)(nil).GetVPCID)) } -// GetSubnetID mocks base method. +// GetSubnetID mocks base method func (m *MockInterface) GetSubnetID() (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSubnetID") @@ -163,23 +147,68 @@ func (m *MockInterface) GetSubnetID() (string, error) { return ret0, ret1 } -// GetSubnetID indicates an expected call of GetSubnetID. +// GetSubnetID indicates an expected call of GetSubnetID func (mr *MockInterfaceMockRecorder) GetSubnetID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetID", reflect.TypeOf((*MockInterface)(nil).GetSubnetID)) } -// GetVPCID mocks base method. -func (m *MockInterface) GetVPCID() (string, error) { +// GetLinkGateway mocks base method +func (m *MockInterface) GetLinkGateway(arg0, arg1 string) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetVPCID") + ret := m.ctrl.Call(m, "GetLinkGateway", arg0, arg1) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// GetVPCID indicates an expected call of GetVPCID. 
-func (mr *MockInterfaceMockRecorder) GetVPCID() *gomock.Call { +// GetLinkGateway indicates an expected call of GetLinkGateway +func (mr *MockInterfaceMockRecorder) GetLinkGateway(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVPCID", reflect.TypeOf((*MockInterface)(nil).GetVPCID)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLinkGateway", reflect.TypeOf((*MockInterface)(nil).GetLinkGateway), arg0, arg1) +} + +// GetLinkMask mocks base method +func (m *MockInterface) GetLinkMask(arg0, arg1 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLinkMask", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLinkMask indicates an expected call of GetLinkMask +func (mr *MockInterfaceMockRecorder) GetLinkMask(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLinkMask", reflect.TypeOf((*MockInterface)(nil).GetLinkMask), arg0, arg1) +} + +// GetVifFeatures mocks base method +func (m *MockInterface) GetVifFeatures(macAddress string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVifFeatures", macAddress) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVifFeatures indicates an expected call of GetVifFeatures +func (mr *MockInterfaceMockRecorder) GetVifFeatures(macAddress interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVifFeatures", reflect.TypeOf((*MockInterface)(nil).GetVifFeatures), macAddress) +} + +// ListMacs mocks base method +func (m *MockInterface) ListMacs() ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListMacs") + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListMacs indicates an expected call of ListMacs +func (mr *MockInterfaceMockRecorder) 
ListMacs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMacs", reflect.TypeOf((*MockInterface)(nil).ListMacs)) } diff --git a/pkg/controller/subnet/mock/mock_subnet.go b/pkg/controller/subnet/mock/mock_subnet.go new file mode 100644 index 0000000..1dc2bc5 --- /dev/null +++ b/pkg/controller/subnet/mock/mock_subnet.go @@ -0,0 +1,116 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: types.go + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + v1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + subnet "github.com/baidubce/baiducloud-cce-cni-driver/pkg/controller/subnet" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockSubnetControl is a mock of SubnetControl interface +type MockSubnetControl struct { + ctrl *gomock.Controller + recorder *MockSubnetControlMockRecorder +} + +// MockSubnetControlMockRecorder is the mock recorder for MockSubnetControl +type MockSubnetControlMockRecorder struct { + mock *MockSubnetControl +} + +// NewMockSubnetControl creates a new mock instance +func NewMockSubnetControl(ctrl *gomock.Controller) *MockSubnetControl { + mock := &MockSubnetControl{ctrl: ctrl} + mock.recorder = &MockSubnetControlMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockSubnetControl) EXPECT() *MockSubnetControlMockRecorder { + return m.recorder +} + +// Get mocks base method +func (m *MockSubnetControl) Get(name string) (*v1alpha1.Subnet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", name) + ret0, _ := ret[0].(*v1alpha1.Subnet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get +func (mr *MockSubnetControlMockRecorder) Get(name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", 
reflect.TypeOf((*MockSubnetControl)(nil).Get), name) +} + +// Create mocks base method +func (m *MockSubnetControl) Create(ctx context.Context, name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Create", ctx, name) + ret0, _ := ret[0].(error) + return ret0 +} + +// Create indicates an expected call of Create +func (mr *MockSubnetControlMockRecorder) Create(ctx, name interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockSubnetControl)(nil).Create), ctx, name) +} + +// DeclareSubnetHasNoMoreIP mocks base method +func (m *MockSubnetControl) DeclareSubnetHasNoMoreIP(ctx context.Context, subnetID string, hasNoMoreIP bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeclareSubnetHasNoMoreIP", ctx, subnetID, hasNoMoreIP) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeclareSubnetHasNoMoreIP indicates an expected call of DeclareSubnetHasNoMoreIP +func (mr *MockSubnetControlMockRecorder) DeclareSubnetHasNoMoreIP(ctx, subnetID, hasNoMoreIP interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeclareSubnetHasNoMoreIP", reflect.TypeOf((*MockSubnetControl)(nil).DeclareSubnetHasNoMoreIP), ctx, subnetID, hasNoMoreIP) +} + +// MockSubnetClientInject is a mock of SubnetClientInject interface +type MockSubnetClientInject struct { + ctrl *gomock.Controller + recorder *MockSubnetClientInjectMockRecorder +} + +// MockSubnetClientInjectMockRecorder is the mock recorder for MockSubnetClientInject +type MockSubnetClientInjectMockRecorder struct { + mock *MockSubnetClientInject +} + +// NewMockSubnetClientInject creates a new mock instance +func NewMockSubnetClientInject(ctrl *gomock.Controller) *MockSubnetClientInject { + mock := &MockSubnetClientInject{ctrl: ctrl} + mock.recorder = &MockSubnetClientInjectMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use 
+func (m *MockSubnetClientInject) EXPECT() *MockSubnetClientInjectMockRecorder { + return m.recorder +} + +// InjectSubnetClient mocks base method +func (m *MockSubnetClientInject) InjectSubnetClient(sbnClient subnet.SubnetControl) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InjectSubnetClient", sbnClient) + ret0, _ := ret[0].(error) + return ret0 +} + +// InjectSubnetClient indicates an expected call of InjectSubnetClient +func (mr *MockSubnetClientInjectMockRecorder) InjectSubnetClient(sbnClient interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InjectSubnetClient", reflect.TypeOf((*MockSubnetClientInject)(nil).InjectSubnetClient), sbnClient) +} diff --git a/pkg/controller/topology_spread/controller.go b/pkg/controller/topology_spread/controller.go index 587f6b3..1b6ea1f 100644 --- a/pkg/controller/topology_spread/controller.go +++ b/pkg/controller/topology_spread/controller.go @@ -289,15 +289,31 @@ func (tsc *TopologySpreadController) syncPSTSStatus(psts *networkv1alpha1.PodSub return nil } +// useDefault If there is a psts level policy, the psts level policy is directly used as the default policy. +// If there is no PSTS level policy, the first subnet level policy is used as the default policy. 
+// If there is no subnet level policy, a temporary Elastic policy will be built by default func useDefault(spec *networkv1alpha1.PodSubnetTopologySpreadSpec) { - for sbnID := range spec.Subnets { - sbn := spec.Subnets[sbnID] - if sbn.Type == "" { - sbn.Type = networkv1alpha1.IPAllocTypeElastic + if spec.Strategy == nil { + for sbnID := range spec.Subnets { + sbn := spec.Subnets[sbnID] + if sbn.Type == "" { + sbn.Type = networkv1alpha1.IPAllocTypeElastic + } + if sbn.Type == networkv1alpha1.IPAllocTypeElastic || sbn.Type == networkv1alpha1.IPAllocTypeManual { + sbn.ReleaseStrategy = networkv1alpha1.ReleaseStrategyTTL + } + + tmp := spec.Subnets[sbnID].IPAllocationStrategy + spec.Strategy = &tmp + + spec.Subnets[sbnID] = sbn + break } - if sbn.Type == networkv1alpha1.IPAllocTypeElastic || sbn.Type == networkv1alpha1.IPAllocTypeManual { - sbn.ReleaseStrategy = networkv1alpha1.ReleaseStrategyTTL + } else { + if spec.Strategy.Type == networkv1alpha1.IPAllocTypeCustom { + if spec.Strategy.TTL == nil { + spec.Strategy.TTL = networkv1alpha1.DefaultReuseIPTTL + } } - spec.Subnets[sbnID] = sbn } } diff --git a/pkg/controller/topology_spread/controller_test.go b/pkg/controller/topology_spread/controller_test.go index 5cc1e29..7decfce 100644 --- a/pkg/controller/topology_spread/controller_test.go +++ b/pkg/controller/topology_spread/controller_test.go @@ -17,7 +17,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" listercorev1 "k8s.io/client-go/listers/core/v1" @@ -39,8 +38,6 @@ func MockTopologySpreadController(t *testing.T) (*TopologySpreadController, kube kubeClient, kubeInformer, crdClient, crdInformer, cloudClient, ebc, _ := data.NewMockEnv(gomock.NewController(t)) sbnc := subnet.NewSubnetController(crdInformer, crdClient, cloudClient, ebc) tsc := NewTopologySpreadController(kubeInformer, crdInformer, 
crdClient, ebc, sbnc) - kubeInformer.Start(make(<-chan struct{})) - crdInformer.Start(make(<-chan struct{})) return tsc, kubeClient } @@ -59,7 +56,12 @@ func (suite *TopologySpreadControllerTester) SetupTest() { } func (suite *TopologySpreadControllerTester) assert() { - suite.tsc.waitCache(wait.NeverStop) + stopchan := make(chan struct{}) + defer close(stopchan) + suite.tsc.crdInformer.Start(stopchan) + suite.tsc.kubeInformer.Start(stopchan) + suite.tsc.waitCache(stopchan) + err := suite.tsc.sync(suite.key) if suite.wantErr { suite.Error(err, "sync error is not match") @@ -107,7 +109,12 @@ func (suite *TopologySpreadControllerTester) TestTrySyncSubnetNotFound() { _, err := tsc.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(context.TODO(), psts, metav1.CreateOptions{}) suite.NoError(err, "create psts error") - suite.tsc.waitCache(wait.NeverStop) + stopchan := make(chan struct{}) + defer close(stopchan) + suite.tsc.crdInformer.Start(stopchan) + suite.tsc.kubeInformer.Start(stopchan) + suite.tsc.waitCache(stopchan) + psts = nil for psts == nil { psts, err = suite.tsc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Lister().PodSubnetTopologySpreads(corev1.NamespaceDefault).Get(pstsName) @@ -144,7 +151,11 @@ func (suite *TopologySpreadControllerTester) TestTrySyncSubnetEnable() { psts, err := tsc.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(context.TODO(), psts, metav1.CreateOptions{}) suite.NoError(err, "create psts error") - suite.tsc.waitCache(wait.NeverStop) + stopchan := make(chan struct{}) + defer close(stopchan) + suite.tsc.crdInformer.Start(stopchan) + suite.tsc.kubeInformer.Start(stopchan) + suite.tsc.waitCache(stopchan) psts = nil for psts == nil { psts, err = suite.tsc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Lister().PodSubnetTopologySpreads(corev1.NamespaceDefault).Get(pstsName) @@ -184,7 +195,12 @@ func (suite *TopologySpreadControllerTester) TestTrySyncWithWEP() 
{ psts, err := tsc.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(context.TODO(), psts, metav1.CreateOptions{}) suite.NoError(err, "create psts error") - suite.tsc.waitCache(wait.NeverStop) + stopchan := make(chan struct{}) + defer close(stopchan) + suite.tsc.crdInformer.Start(stopchan) + suite.tsc.kubeInformer.Start(stopchan) + suite.tsc.waitCache(stopchan) + psts = nil for psts == nil { psts, err = suite.tsc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Lister().PodSubnetTopologySpreads(corev1.NamespaceDefault).Get(pstsName) @@ -239,7 +255,12 @@ func (suite *TopologySpreadControllerTester) TestTrySyncWithUpdatePod() { psts, err := tsc.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(context.TODO(), psts, metav1.CreateOptions{}) suite.NoError(err, "create psts error") - suite.tsc.waitCache(wait.NeverStop) + stopchan := make(chan struct{}) + defer close(stopchan) + suite.tsc.crdInformer.Start(stopchan) + suite.tsc.kubeInformer.Start(stopchan) + suite.tsc.waitCache(stopchan) + psts = nil for psts == nil { psts, err = suite.tsc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Lister().PodSubnetTopologySpreads(corev1.NamespaceDefault).Get(pstsName) diff --git a/pkg/controller/topology_spread/pstt_controller_test.go b/pkg/controller/topology_spread/pstt_controller_test.go index 123d6b0..309192e 100644 --- a/pkg/controller/topology_spread/pstt_controller_test.go +++ b/pkg/controller/topology_spread/pstt_controller_test.go @@ -9,8 +9,6 @@ import ( "github.com/stretchr/testify/suite" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - cache "k8s.io/client-go/tools/cache" networkv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" ) @@ -38,7 +36,12 @@ func (suite *topologySpreadTableControllerTester) SetupTest() { } func (suite *topologySpreadTableControllerTester) assert() { - suite.waitForCache() 
+ stopchan := make(chan struct{}) + defer close(stopchan) + suite.psttc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreadTables() + suite.psttc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads() + suite.psttc.crdInformer.Start(stopchan) + suite.psttc.crdInformer.WaitForCacheSync(stopchan) err := suite.psttc.sync(suite.key) if suite.wantErr { suite.Error(err, "sync error is not match") @@ -47,13 +50,6 @@ func (suite *topologySpreadTableControllerTester) assert() { } } -func (suite *topologySpreadTableControllerTester) waitForCache() { - tsc := suite.psttc - pstsInfomer := tsc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Informer() - psttInfomer := tsc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreadTables().Informer() - cache.WaitForNamedCacheSync("topology-spread-controller", wait.NeverStop, pstsInfomer.HasSynced, psttInfomer.HasSynced) -} - func (suite *topologySpreadTableControllerTester) TestTSCRun() { stopCh := make(chan struct{}) close(stopCh) @@ -73,8 +69,12 @@ func (suite *topologySpreadTableControllerTester) TestCreatePSTS() { suite.psttc.crdClient.CceV1alpha1().PodSubnetTopologySpreadTables(corev1.NamespaceDefault).Create(context.TODO(), pstt, metav1.CreateOptions{}) - suite.assert() - suite.waitForCache() + stopchan := make(chan struct{}) + defer close(stopchan) + suite.psttc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreadTables() + suite.psttc.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads() + suite.psttc.crdInformer.Start(stopchan) + suite.psttc.crdInformer.WaitForCacheSync(stopchan) suite.psttc.queue.Add(corev1.NamespaceDefault) suite.psttc.processNextWorkItem() @@ -101,7 +101,6 @@ func (suite *topologySpreadTableControllerTester) TestSyncPSTSStatus() { suite.assert() - suite.waitForCache() newpstt, _ := suite.psttc.crdClient.CceV1alpha1().PodSubnetTopologySpreadTables(corev1.NamespaceDefault).Get(context.Background(), "pstt-test", metav1.GetOptions{}) suite.Assert().Len(newpstt.Status, 1, "len of pstt statuss") } @@ 
-137,7 +136,6 @@ func (suite *topologySpreadTableControllerTester) TestCleanOldPSTS() { suite.assert() - suite.waitForCache() newpstt, _ := suite.psttc.crdClient.CceV1alpha1().PodSubnetTopologySpreadTables(corev1.NamespaceDefault).Get(context.Background(), "pstt-test", metav1.GetOptions{}) suite.Assert().Len(newpstt.Status, 1, "len of pstt statuss") } diff --git a/pkg/eniipam/cmd/root/root.go b/pkg/eniipam/cmd/root/root.go index 418151a..2430956 100644 --- a/pkg/eniipam/cmd/root/root.go +++ b/pkg/eniipam/cmd/root/root.go @@ -32,7 +32,6 @@ import ( "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" componentbaseconfig "k8s.io/component-base/config" - "k8s.io/kubernetes/pkg/client/leaderelectionconfig" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/config/types" @@ -41,10 +40,12 @@ import ( bbcipam "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/bbc" bccipam "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/bcc" eniipam "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/crossvpceni" + eriipam "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/eri" roceipam "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/roce" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/metric" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/cidr" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/k8s" log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/version" @@ -93,6 +94,7 @@ func NewRootCommand() *cobra.Command { options.addFlags(cmd.Flags()) webhook.RegisterWebhookFlags(cmd.Flags()) + cidr.RegisterCIDRFlags(cmd.Flags()) 
cmd.AddCommand(version.NewVersionCommand()) @@ -199,6 +201,18 @@ func runCommand(ctx context.Context, cmd *cobra.Command, args []string, opts *Op log.Fatalf(ctx, "failed to create roce ipamd: %v", err) } + eriipamd, err := eriipam.NewIPAM( + opts.VPCID, + kubeClient, + crdClient, + bceClient, + opts.ResyncPeriod, + opts.GCPeriod, + ) + if err != nil { + log.Fatalf(ctx, "failed to create eri ipamd: %v", err) + } + log.Infof(ctx, "cni mode is: %v", opts.CNIMode) switch { @@ -237,12 +251,21 @@ func runCommand(ctx context.Context, cmd *cobra.Command, args []string, opts *Op } }(roceipamd) } + if eriipamd != nil { + go func(eriipamd ipam.RoceInterface) { + ctx := log.NewContext() + if err := eriipamd.Run(ctx, opts.stopCh); err != nil { + log.Fatalf(ctx, "eri ipamd failed to run: %v", err) + } + }(eriipamd) + } ipamGrpcBackend := grpc.New( ipamds[0], ipamds[1], eniipamd, roceipamd, + eriipamd, opts.Port, opts.AllocateIPConcurrencyLimit, opts.ReleaseIPConcurrencyLimit, @@ -347,7 +370,7 @@ func (o *Options) addFlags(fs *pflag.FlagSet) { fs.IntVar(&o.IdleIPPoolMaxSize, "idle-ip-pool-max-size", o.IdleIPPoolMaxSize, "Idle IP Pool Max Size") fs.IntVar(&o.IdleIPPoolMinSize, "idle-ip-pool-min-size", o.IdleIPPoolMinSize, "Idle IP Pool Min Size") fs.BoolVar(&o.Debug, "debug", o.Debug, "Debug mode") - leaderelectionconfig.BindFlags(&o.LeaderElection, fs) + bindLeaderFlags(&o.LeaderElection, fs) } func (o *Options) validate() error { @@ -384,3 +407,33 @@ func printFlags(flags *pflag.FlagSet) { log.Infof(context.TODO(), "FLAG: --%s=%q", flag.Name, flag.Value) }) } + +// BindFlags binds the LeaderElectionConfiguration struct fields to a flagset +func bindLeaderFlags(l *componentbaseconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) { + fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+ + "Start a leader election client and gain leadership before "+ + "executing the main loop. 
Enable this when running replicated "+ + "components for high availability.") + fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration, ""+ + "The duration that non-leader candidates will wait after observing a leadership "+ + "renewal until attempting to acquire leadership of a led but unrenewed leader "+ + "slot. This is effectively the maximum duration that a leader can be stopped "+ + "before it is replaced by another candidate. This is only applicable if leader "+ + "election is enabled.") + fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration, ""+ + "The interval between attempts by the acting master to renew a leadership slot "+ + "before it stops leading. This must be less than or equal to the lease duration. "+ + "This is only applicable if leader election is enabled.") + fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, ""+ + "The duration the clients should wait between attempting acquisition and renewal "+ + "of a leadership. This is only applicable if leader election is enabled.") + fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock, ""+ + "The type of resource object that is used for locking during "+ + "leader election. 
Supported options are `endpoints` (default) and `configmaps`.") + fs.StringVar(&l.ResourceName, "leader-elect-resource-name", l.ResourceName, ""+ + "The name of resource object that is used for locking during "+ + "leader election.") + fs.StringVar(&l.ResourceNamespace, "leader-elect-resource-namespace", l.ResourceNamespace, ""+ + "The namespace of resource object that is used for locking during "+ + "leader election.") +} diff --git a/pkg/eniipam/datastore/v1/datastore.go b/pkg/eniipam/datastore/v1/datastore.go index 0a56874..6182e35 100644 --- a/pkg/eniipam/datastore/v1/datastore.go +++ b/pkg/eniipam/datastore/v1/datastore.go @@ -38,21 +38,27 @@ var ( ) var ( - UnknownNodeError = errors.New("datastore: unknown Node") + ErrUnknownNode = errors.New("datastore: unknown Node") - UnknownENIError = errors.New("datastore: unknown ENI") + ErrUnknownENI = errors.New("datastore: unknown ENI") - UnknownIPError = errors.New("datastore: unknown IP") + ErrUnknownIP = errors.New("datastore: unknown IP") - EmptyNodeError = errors.New("datastore: empty Node") + ErrEmptyNode = errors.New("datastore: empty Node") - EmptyENIError = errors.New("datastore: empty ENI") + ErrEmptyENI = errors.New("datastore: empty ENI") - NoAvailableIPAddressInDataStoreError = errors.New("no available ip address in datastore") + ErrNoAvailableIPAddressInDataStore = errors.New("no available ip address in datastore") - NoAvailableIPAddressInENIError = errors.New("no available ip address in eni") + ErrNoAvailableIPAddressInENI = errors.New("no available ip address in eni") ) +func ErrNoAvailableIPAddressWithInCoolingPeriodInENI(addressInfo *AddressInfo) error { + errStr := fmt.Sprintf("no available ip address in eni, the ip %s is in cooling period, time left %s, unassigned time is %s", + addressInfo.Address, addressInfo.coolingPeriodTimeLeft().String(), addressInfo.UnassignedTime.Format("2006-01-02T15:04:05Z")) + return errors.New(errStr) +} + type DataStore struct { store map[string]*Instance // key is 
node name @@ -120,6 +126,15 @@ func (addr AddressInfo) inCoolingPeriod() bool { return time.Since(addr.UnassignedTime) < addressCoolingPeriod } +// coolingPeriodTimeLeft return a time.Duration for cooling period time left +func (addr AddressInfo) coolingPeriodTimeLeft() time.Duration { + timeSince := time.Since(addr.UnassignedTime) + if timeSince < addressCoolingPeriod { + return addressCoolingPeriod - timeSince + } + return 0 +} + // Synchronized Executing transactions in locks func (ds *DataStore) Synchronized(task func() error) error { ds.lock.Lock() @@ -133,7 +148,7 @@ func (ds *DataStore) AllocatePodPrivateIP(node string) (string, error) { instance, ok := ds.store[node] if !ok { - return "", UnknownNodeError + return "", ErrUnknownNode } for _, eni := range instance.eniPool { @@ -153,7 +168,7 @@ func (ds *DataStore) AllocatePodPrivateIP(node string) (string, error) { return addr.Address, nil } - return "", NoAvailableIPAddressInDataStoreError + return "", ErrNoAvailableIPAddressInDataStore } func (ds *DataStore) AllocatePodPrivateIPByENI(node, eniID string) (string, error) { @@ -162,17 +177,20 @@ func (ds *DataStore) AllocatePodPrivateIPByENI(node, eniID string) (string, erro instance, ok := ds.store[node] if !ok { - return "", UnknownNodeError + return "", ErrUnknownNode } eni, ok := instance.eniPool[eniID] if !ok { - return "", UnknownENIError + return "", ErrUnknownENI } addr, err := eni.idle.Top() - if err != nil || addr.Assigned || addr.inCoolingPeriod() { - return "", NoAvailableIPAddressInENIError + if err != nil || addr.Assigned { + return "", ErrNoAvailableIPAddressInENI + } + if addr.inCoolingPeriod() { + return "", ErrNoAvailableIPAddressWithInCoolingPeriodInENI(addr) } // update status @@ -197,12 +215,12 @@ func (ds *DataStore) ReleasePodPrivateIPUnsafe(node, eniID, ip string) { func (ds *DataStore) __ReleasePodPrivateIPUnsafe(node, eniID, ip string, crossSubnet bool) error { instance, ok := ds.getNodeInstance(node, crossSubnet) if !ok { - return 
UnknownNodeError + return ErrUnknownNode } eni, ok := instance.eniPool[eniID] if !ok { - return UnknownENIError + return ErrUnknownENI } addr, ok := eni.IPv4Addresses[ip] @@ -219,22 +237,22 @@ func (ds *DataStore) __ReleasePodPrivateIPUnsafe(node, eniID, ip string, crossSu return nil } - return UnknownIPError + return ErrUnknownIP } // Add the IP address to the Eni cache, and mark whether the IP address is an IP address across the subnet func (ds *DataStore) AddPrivateIPToStoreUnsafe(node, eniID, ipAddress string, assigned, crossSubnet bool) error { if node == "" { - return EmptyNodeError + return ErrEmptyNode } if eniID == "" { - return EmptyENIError + return ErrEmptyENI } instance, ok := ds.getNodeInstance(node, crossSubnet) if !ok { - return UnknownNodeError + return ErrUnknownNode } _, ok = instance.eniPool[eniID] @@ -297,12 +315,12 @@ func (ds *DataStore) DeletePrivateIPFromStoreUnsafe(node, eniID, ipAddress strin func (ds *DataStore) __DeletePrivateIPFromStoreUnsafe(node, eniID, ipAddress string, crossSubnet bool) error { instance, ok := ds.getNodeInstance(node, crossSubnet) if !ok { - return UnknownNodeError + return ErrUnknownNode } eni, ok := instance.eniPool[eniID] if !ok { - return UnknownENIError + return ErrUnknownENI } // decrease total @@ -325,7 +343,7 @@ func (ds *DataStore) __DeletePrivateIPFromStoreUnsafe(node, eniID, ipAddress str func (ds *DataStore) AddNodeToStore(node, instanceID string) error { if node == "" || instanceID == "" { - return UnknownNodeError + return ErrUnknownNode } ds.lock.Lock() @@ -369,7 +387,7 @@ func (ds *DataStore) DeleteNodeFromStore(node string) error { func (ds *DataStore) AddENIToStore(node, eniID string) error { if eniID == "" { - return EmptyENIError + return ErrEmptyENI } ds.lock.Lock() @@ -377,7 +395,7 @@ func (ds *DataStore) AddENIToStore(node, eniID string) error { instance, ok := ds.store[node] if !ok { - return UnknownNodeError + return ErrUnknownNode } _, ok = instance.eniPool[eniID] @@ -426,12 +444,12 @@ 
func (ds *DataStore) DeleteENIFromStore(node, eniID string) error { instance, ok := ds.store[node] if !ok { - return UnknownNodeError + return ErrUnknownNode } eni, ok := instance.eniPool[eniID] if !ok { - return UnknownENIError + return ErrUnknownENI } total := eni.TotalIPv4Addresses() @@ -464,7 +482,7 @@ func (ds *DataStore) GetNodeStats(node string) (int, int, error) { instance, ok := ds.store[node] if !ok { - return 0, 0, UnknownNodeError + return 0, 0, ErrUnknownNode } total := instance.total @@ -484,12 +502,12 @@ func (ds *DataStore) GetENIStats(node, eniID string) (int, int, error) { instance, ok := ds.store[node] if !ok { - return 0, 0, UnknownNodeError + return 0, 0, ErrUnknownNode } eni, ok := instance.eniPool[eniID] if !ok { - return 0, 0, UnknownENIError + return 0, 0, ErrUnknownENI } total := eni.TotalIPv4Addresses() @@ -513,7 +531,7 @@ func (ds *DataStore) GetUnassignedPrivateIPByNode(node string) ([]string, error) instance, ok := ds.store[node] if !ok { - return nil, UnknownNodeError + return nil, ErrUnknownNode } var result []string diff --git a/pkg/eniipam/datastore/v1/datastore_test.go b/pkg/eniipam/datastore/v1/datastore_test.go index 6a27f2b..6967181 100644 --- a/pkg/eniipam/datastore/v1/datastore_test.go +++ b/pkg/eniipam/datastore/v1/datastore_test.go @@ -96,7 +96,7 @@ func TestExhaustPodPrivateIP(t *testing.T) { assert.Equal(t, addr, "1.1.1.1") _, err = store.AllocatePodPrivateIP(node) - assert.Equal(t, err, NoAvailableIPAddressInDataStoreError) + assert.Equal(t, err, ErrNoAvailableIPAddressInDataStore) err = store.ReleasePodPrivateIP(node, eniID, "1.1.1.1") assert.NoError(t, err) diff --git a/pkg/eniipam/datastore/v2/datastore.go b/pkg/eniipam/datastore/v2/datastore.go index c491292..9918d3c 100644 --- a/pkg/eniipam/datastore/v2/datastore.go +++ b/pkg/eniipam/datastore/v2/datastore.go @@ -37,17 +37,17 @@ var ( ) var ( - UnknownNodeError = errors.New("datastore: unknown Node") + ErrUnknownNode = errors.New("datastore: unknown Node") 
UnknownSubnetError = errors.New("datastore: unknown subnet") - UnknownIPError = errors.New("datastore: unknown IP") + ErrUnknownIP = errors.New("datastore: unknown IP") - EmptyNodeError = errors.New("datastore: empty Node") + ErrEmptyNode = errors.New("datastore: empty Node") EmptySubnetError = errors.New("datastore: empty subnet") - NoAvailableIPAddressInDataStoreError = errors.New("no available ip address in datastore") + ErrNoAvailableIPAddressInDataStore = errors.New("no available ip address in datastore") NoAvailableIPAddressInSubnetBucketError = errors.New("no available ip address in subnet bucket") ) @@ -119,7 +119,7 @@ func (ds *DataStore) AllocatePodPrivateIP(node string) (string, string, error) { instance, ok := ds.store[node] if !ok { - return "", "", UnknownNodeError + return "", "", ErrUnknownNode } for _, sbucket := range instance.pool { @@ -142,7 +142,7 @@ func (ds *DataStore) AllocatePodPrivateIP(node string) (string, string, error) { return addr.Address, addr.SubnetID, nil } - return "", "", NoAvailableIPAddressInDataStoreError + return "", "", ErrNoAvailableIPAddressInDataStore } func (ds *DataStore) AllocatePodPrivateIPBySubnet(node, subnetID string) (string, string, error) { @@ -151,7 +151,7 @@ func (ds *DataStore) AllocatePodPrivateIPBySubnet(node, subnetID string) (string instance, ok := ds.store[node] if !ok { - return "", "", UnknownNodeError + return "", "", ErrUnknownNode } sbucket, ok := instance.pool[subnetID] @@ -180,7 +180,7 @@ func (ds *DataStore) ReleasePodPrivateIP(node, subnetID, ip string) error { instance, ok := ds.store[node] if !ok { - return UnknownNodeError + return ErrUnknownNode } sbucket, ok := instance.pool[subnetID] @@ -205,12 +205,12 @@ func (ds *DataStore) ReleasePodPrivateIP(node, subnetID, ip string) error { return nil } - return UnknownIPError + return ErrUnknownIP } func (ds *DataStore) AddPrivateIPToStore(node, subnetID, ipAddress string, assigned bool) error { if node == "" { - return EmptyNodeError + return 
ErrEmptyNode } if subnetID == "" { @@ -222,7 +222,7 @@ func (ds *DataStore) AddPrivateIPToStore(node, subnetID, ipAddress string, assig instance, ok := ds.store[node] if !ok { - return UnknownNodeError + return ErrUnknownNode } _, ok = instance.pool[subnetID] @@ -272,7 +272,7 @@ func (ds *DataStore) DeletePrivateIPFromStore(node, subnetID, ipAddress string) instance, ok := ds.store[node] if !ok { - return UnknownNodeError + return ErrUnknownNode } sbucket, ok := instance.pool[subnetID] @@ -305,7 +305,7 @@ func (ds *DataStore) DeletePrivateIPFromStore(node, subnetID, ipAddress string) func (ds *DataStore) AddNodeToStore(node, instanceID string) error { if node == "" { - return EmptyNodeError + return ErrEmptyNode } ds.lock.Lock() @@ -355,7 +355,7 @@ func (ds *DataStore) GetNodeStats(node string) (int, int, error) { instance, ok := ds.store[node] if !ok { - return 0, 0, UnknownNodeError + return 0, 0, ErrUnknownNode } return instance.total, instance.assigned, nil @@ -367,7 +367,7 @@ func (ds *DataStore) GetSubnetBucketStats(node, subnetID string) (int, int, erro instance, ok := ds.store[node] if !ok { - return 0, 0, UnknownNodeError + return 0, 0, ErrUnknownNode } sbucket, ok := instance.pool[subnetID] @@ -384,7 +384,7 @@ func (ds *DataStore) GetUnassignedPrivateIPByNode(node string) ([]string, error) instance, ok := ds.store[node] if !ok { - return nil, UnknownNodeError + return nil, ErrUnknownNode } var result []string diff --git a/pkg/eniipam/grpc/handler.go b/pkg/eniipam/grpc/handler.go index 46d5aea..afc55bf 100644 --- a/pkg/eniipam/grpc/handler.go +++ b/pkg/eniipam/grpc/handler.go @@ -47,6 +47,7 @@ type ENIIPAMGrpcServer struct { bbcipamd ipam.Interface eniipamd ipam.ExclusiveEniInterface roceipamd ipam.RoceInterface + eriipamd ipam.RoceInterface port int allocWorkers int releaseWorkers int @@ -59,6 +60,7 @@ func New( bbcipam ipam.Interface, eniipam ipam.ExclusiveEniInterface, roceipamd ipam.RoceInterface, + eriipamd ipam.RoceInterface, port int, allocLimit 
int, releaseLimit int, @@ -77,6 +79,7 @@ func New( bbcipamd: bbcipam, eniipamd: eniipam, roceipamd: roceipamd, + eriipamd: eriipamd, port: port, debug: debug, } @@ -162,19 +165,21 @@ func (cb *ENIIPAMGrpcServer) AllocateIP(ctx context.Context, req *rpc.AllocateIP var ( ipamd ipam.Interface crossVpcEniIpamd ipam.ExclusiveEniInterface - roceIpamd ipam.RoceInterface + multiIPIpamd ipam.RoceInterface ) if req.IPType == rpc.IPType_CrossVPCENIIPType { crossVpcEniIpamd = cb.eniipamd } else if req.IPType == rpc.IPType_RoceENIMultiIPType { - roceIpamd = cb.roceipamd + multiIPIpamd = cb.roceipamd + } else if req.IPType == rpc.IPType_ERIENIMultiIPType { + multiIPIpamd = cb.eriipamd } else { ipamd = cb.getIpamByIPType(ctx, req.IPType) } // we are unlikely to hit this - if ipamd == nil && roceIpamd == nil && crossVpcEniIpamd == nil { + if ipamd == nil && multiIPIpamd == nil && crossVpcEniIpamd == nil { rpcReply.IsSuccess = false rpcReply.ErrMsg = fmt.Sprintf("unsupported ipType %v from cni request", req.IPType) return rpcReply, nil @@ -182,9 +187,9 @@ func (cb *ENIIPAMGrpcServer) AllocateIP(ctx context.Context, req *rpc.AllocateIP // request comes in cb.incWorker(true) + defer cb.decWorker(true) if cb.getWorker(true) > allocateIPConcurrencyLimit { // request rejected - cb.decWorker(true) metric.RPCRejectedCounter.WithLabelValues(metric.MetaInfo.ClusterID, req.IPType.String(), rpcAPI).Inc() rpcReply.IsSuccess = false @@ -198,27 +203,21 @@ func (cb *ENIIPAMGrpcServer) AllocateIP(ctx context.Context, req *rpc.AllocateIP // allocate IP var ( crossVpcEni *v1alpha1.CrossVPCEni + wep *v1alpha1.WorkloadEndpoint + err error ) if crossVpcEniIpamd != nil { - crossVpcEni, err := crossVpcEniIpamd.Allocate(ctx, name, namespace, containerID) - // request completes - cb.decWorker(true) - rpcReply = makeAllocateRPCReply(nil, crossVpcEni, rpcReply, err) + crossVpcEni, err = crossVpcEniIpamd.Allocate(ctx, name, namespace, containerID) } else { - if req.IPType == rpc.IPType_RoceENIMultiIPType { + 
if req.IPType == rpc.IPType_RoceENIMultiIPType || req.IPType == rpc.IPType_ERIENIMultiIPType { mac := req.GetENIMultiIP().Mac - wep, err := roceIpamd.Allocate(ctx, name, namespace, containerID, mac) - // request completes - cb.decWorker(true) - rpcReply = makeAllocateRPCReply(wep, crossVpcEni, rpcReply, err) + wep, err = multiIPIpamd.Allocate(ctx, name, namespace, containerID, mac) } else { - wep, err := ipamd.Allocate(ctx, name, namespace, containerID) - // request completes - cb.decWorker(true) - rpcReply = makeAllocateRPCReply(wep, crossVpcEni, rpcReply, err) + wep, err = ipamd.Allocate(ctx, name, namespace, containerID) } } + rpcReply = makeAllocateRPCReply(wep, crossVpcEni, rpcReply, err) return rpcReply, nil } @@ -305,18 +304,20 @@ func (cb *ENIIPAMGrpcServer) ReleaseIP(ctx context.Context, req *rpc.ReleaseIPRe var ( ipamd ipam.Interface crossVpcEniIpamd ipam.ExclusiveEniInterface - roceIpamd ipam.RoceInterface + multiIPIpamd ipam.RoceInterface ) if req.IPType == rpc.IPType_CrossVPCENIIPType { crossVpcEniIpamd = cb.eniipamd } else if req.IPType == rpc.IPType_RoceENIMultiIPType { - roceIpamd = cb.roceipamd + multiIPIpamd = cb.roceipamd + } else if req.IPType == rpc.IPType_ERIENIMultiIPType { + multiIPIpamd = cb.eriipamd } else { ipamd = cb.getIpamByIPType(ctx, req.IPType) } // we are unlikely to hit this - if ipamd == nil && roceIpamd == nil && crossVpcEniIpamd == nil { + if ipamd == nil && multiIPIpamd == nil && crossVpcEniIpamd == nil { rpcReply.IsSuccess = false rpcReply.ErrMsg = fmt.Sprintf("unsupported ipType %v from cni request", req.IPType) return rpcReply, nil @@ -345,8 +346,8 @@ func (cb *ENIIPAMGrpcServer) ReleaseIP(ctx context.Context, req *rpc.ReleaseIPRe if crossVpcEniIpamd != nil { crossVpcEni, err = crossVpcEniIpamd.Release(ctx, name, namespace, containerID) } else { - if req.IPType == rpc.IPType_RoceENIMultiIPType { - wep, err = roceIpamd.Release(ctx, name, namespace, containerID) + if req.IPType == rpc.IPType_RoceENIMultiIPType || req.IPType 
== rpc.IPType_ERIENIMultiIPType { + wep, err = multiIPIpamd.Release(ctx, name, namespace, containerID) } else { wep, err = ipamd.Release(ctx, name, namespace, containerID) } diff --git a/pkg/eniipam/grpc/handler_test.go b/pkg/eniipam/grpc/handler_test.go index 4607f5b..beae36c 100644 --- a/pkg/eniipam/grpc/handler_test.go +++ b/pkg/eniipam/grpc/handler_test.go @@ -29,11 +29,13 @@ import ( func TestENIIPAMGrpcServer_AllocateIP(t *testing.T) { type fields struct { - ctrl *gomock.Controller - ipamd ipam.Interface - eniipamd ipam.ExclusiveEniInterface - port int - ipType rpc.IPType + ctrl *gomock.Controller + ipamd ipam.Interface + eniipamd ipam.ExclusiveEniInterface + roceipamd ipam.RoceInterface + eriipamd ipam.RoceInterface + port int + ipType rpc.IPType } type args struct { ctx context.Context @@ -176,6 +178,96 @@ func TestENIIPAMGrpcServer_AllocateIP(t *testing.T) { }, wantErr: false, }, + { + name: "RoCE", + fields: func() fields { + ctrl := gomock.NewController(t) + + roceipamd := mockipam.NewMockRoceInterface(ctrl) + gomock.InOrder( + roceipamd.EXPECT().Allocate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(&v1alpha1.WorkloadEndpoint{ + Spec: v1alpha1.WorkloadEndpointSpec{ + IP: "192.168.100.100", + }, + }, nil), + ) + + return fields{ + ctrl: ctrl, + roceipamd: roceipamd, + } + }(), + args: args{ + ctx: context.TODO(), + req: &rpc.AllocateIPRequest{ + K8SPodName: "busybox", + K8SPodNamespace: "default", + K8SPodInfraContainerID: "xxxxx", + IPType: rpc.IPType_RoceENIMultiIPType, + NetworkInfo: &rpc.AllocateIPRequest_ENIMultiIP{ + ENIMultiIP: &rpc.ENIMultiIPRequest{ + Mac: "fsfsgsg", + }, + }, + }, + }, + want: &rpc.AllocateIPReply{ + IsSuccess: true, + IPType: rpc.IPType_RoceENIMultiIPType, + NetworkInfo: &rpc.AllocateIPReply_ENIMultiIP{ + ENIMultiIP: &rpc.ENIMultiIPReply{ + IP: "192.168.100.100", + }, + }, + }, + wantErr: false, + }, + { + name: "ERI", + fields: func() fields { + ctrl := gomock.NewController(t) + + eriipamd := mockipam.NewMockRoceInterface(ctrl) + gomock.InOrder( + eriipamd.EXPECT().Allocate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + Return(&v1alpha1.WorkloadEndpoint{ + Spec: v1alpha1.WorkloadEndpointSpec{ + IP: "192.168.100.100", + }, + }, nil), + ) + + return fields{ + ctrl: ctrl, + eriipamd: eriipamd, + } + }(), + args: args{ + ctx: context.TODO(), + req: &rpc.AllocateIPRequest{ + K8SPodName: "busybox", + K8SPodNamespace: "default", + K8SPodInfraContainerID: "xxxxx", + IPType: rpc.IPType_ERIENIMultiIPType, + NetworkInfo: &rpc.AllocateIPRequest_ENIMultiIP{ + ENIMultiIP: &rpc.ENIMultiIPRequest{ + Mac: "fsfsgsg", + }, + }, + }, + }, + want: &rpc.AllocateIPReply{ + IsSuccess: true, + IPType: rpc.IPType_ERIENIMultiIPType, + NetworkInfo: &rpc.AllocateIPReply_ENIMultiIP{ + ENIMultiIP: &rpc.ENIMultiIPReply{ + IP: "192.168.100.100", + }, + }, + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -183,9 +275,11 @@ func TestENIIPAMGrpcServer_AllocateIP(t *testing.T) { defer tt.fields.ctrl.Finish() } cb := &ENIIPAMGrpcServer{ - bccipamd: tt.fields.ipamd, - eniipamd: 
tt.fields.eniipamd, - port: tt.fields.port, + bccipamd: tt.fields.ipamd, + eniipamd: tt.fields.eniipamd, + roceipamd: tt.fields.roceipamd, + eriipamd: tt.fields.eriipamd, + port: tt.fields.port, } got, err := cb.AllocateIP(tt.args.ctx, tt.args.req) if (err != nil) != tt.wantErr { @@ -201,11 +295,13 @@ func TestENIIPAMGrpcServer_AllocateIP(t *testing.T) { func TestENIIPAMGrpcServer_ReleaseIP(t *testing.T) { type fields struct { - ctrl *gomock.Controller - ipamd ipam.Interface - eniipamd ipam.ExclusiveEniInterface - port int - ipType rpc.IPType + ctrl *gomock.Controller + ipamd ipam.Interface + eniipamd ipam.ExclusiveEniInterface + roceipamd ipam.RoceInterface + eriipamd ipam.RoceInterface + port int + ipType rpc.IPType } type args struct { ctx context.Context @@ -339,6 +435,84 @@ func TestENIIPAMGrpcServer_ReleaseIP(t *testing.T) { }, wantErr: false, }, + { + name: "RoCE", + fields: func() fields { + ctrl := gomock.NewController(t) + + ipamd := mockipam.NewMockRoceInterface(ctrl) + gomock.InOrder( + ipamd.EXPECT().Release(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&v1alpha1.WorkloadEndpoint{ + Spec: v1alpha1.WorkloadEndpointSpec{ + IP: "192.168.100.100", + }, + }, nil), + ) + + return fields{ + ctrl: ctrl, + roceipamd: ipamd, + } + }(), + args: args{ + ctx: context.TODO(), + req: &rpc.ReleaseIPRequest{ + K8SPodName: "busybox", + K8SPodNamespace: "default", + K8SPodInfraContainerID: "xxxxx", + IPType: rpc.IPType_RoceENIMultiIPType, + }, + }, + want: &rpc.ReleaseIPReply{ + IsSuccess: true, + IPType: rpc.IPType_RoceENIMultiIPType, + NetworkInfo: &rpc.ReleaseIPReply_ENIMultiIP{ + ENIMultiIP: &rpc.ENIMultiIPReply{ + IP: "192.168.100.100", + }, + }, + }, + wantErr: false, + }, + { + name: "ERI", + fields: func() fields { + ctrl := gomock.NewController(t) + + ipamd := mockipam.NewMockRoceInterface(ctrl) + gomock.InOrder( + ipamd.EXPECT().Release(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&v1alpha1.WorkloadEndpoint{ + Spec: 
v1alpha1.WorkloadEndpointSpec{ + IP: "192.168.100.100", + }, + }, nil), + ) + + return fields{ + ctrl: ctrl, + eriipamd: ipamd, + } + }(), + args: args{ + ctx: context.TODO(), + req: &rpc.ReleaseIPRequest{ + K8SPodName: "busybox", + K8SPodNamespace: "default", + K8SPodInfraContainerID: "xxxxx", + IPType: rpc.IPType_ERIENIMultiIPType, + }, + }, + want: &rpc.ReleaseIPReply{ + IsSuccess: true, + IPType: rpc.IPType_ERIENIMultiIPType, + NetworkInfo: &rpc.ReleaseIPReply_ENIMultiIP{ + ENIMultiIP: &rpc.ENIMultiIPReply{ + IP: "192.168.100.100", + }, + }, + }, + wantErr: false, + }, } for _, tt := range tests { if tt.fields.ctrl != nil { @@ -346,9 +520,11 @@ func TestENIIPAMGrpcServer_ReleaseIP(t *testing.T) { } t.Run(tt.name, func(t *testing.T) { cb := &ENIIPAMGrpcServer{ - bccipamd: tt.fields.ipamd, - eniipamd: tt.fields.eniipamd, - port: tt.fields.port, + bccipamd: tt.fields.ipamd, + eniipamd: tt.fields.eniipamd, + roceipamd: tt.fields.roceipamd, + eriipamd: tt.fields.eriipamd, + port: tt.fields.port, } got, err := cb.ReleaseIP(tt.args.ctx, tt.args.req) if (err != nil) != tt.wantErr { @@ -408,6 +584,7 @@ func TestRunRPCServer(t *testing.T) { nil, nil, nil, + nil, 0, 0, 0, diff --git a/pkg/eniipam/ipam/bcc/cni_request.go b/pkg/eniipam/ipam/bcc/cni_request.go index 9009ab6..645c1bd 100644 --- a/pkg/eniipam/ipam/bcc/cni_request.go +++ b/pkg/eniipam/ipam/bcc/cni_request.go @@ -2,27 +2,25 @@ package bcc import ( "context" - goerrors "errors" "fmt" "time" - enisdk "github.com/baidubce/bce-sdk-go/services/eni" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apimachinery/networking" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" - "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/metadata" - ipamgeneric 
"github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" - "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/util" - k8sutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/k8s" log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" ) +const ( + allocateIPTimeout = 30 * time.Second + errorMsgAllocateTimeout = "allocate ip timeout" +) + func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID string) (*v1alpha1.WorkloadEndpoint, error) { log.Infof(ctx, "[Allocate] allocating IP for pod (%v %v) starts", namespace, name) defer log.Infof(ctx, "[Allocate] allocating IP for pod (%v %v) ends", namespace, name) @@ -32,9 +30,9 @@ func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID str return nil, fmt.Errorf("ipam has not synced cache yet") } - var ipResult = "" - var addIPErrors []error - var ipAddedENI *enisdk.Eni + // The allocateIPTimeout is not set in the context because the context will be automatically disconnected when timeout. 
+ deadline := time.Now().Add(allocateIPTimeout) + pod, err := ipam.kubeInformer.Core().V1().Pods().Lister().Pods(namespace).Get(name) if err != nil { return nil, err @@ -48,53 +46,25 @@ func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID str node = node.DeepCopy() - // check datastore node status - err = wait.ExponentialBackoff(retry.DefaultRetry, func() (done bool, err error) { - if !ipam.datastore.NodeExistsInStore(node.Name) { - var ( - evt = &event{ - node: node, - ctx: ctx, - } - ch = make(chan *event) - ) - - ipam.lock.Lock() - _, ok := ipam.buildDataStoreEventChan[evt.node.Name] - if !ok { - ipam.buildDataStoreEventChan[node.Name] = ch - go ipam.handleRebuildNodeDatastoreEvent(ctx, evt.node, ch) - } - ch = ipam.buildDataStoreEventChan[evt.node.Name] - ipam.lock.Unlock() - - ch <- evt - return false, nil - } else { - return true, nil - } - }) - if err == wait.ErrWaitTimeout { - return nil, fmt.Errorf("init node %v datastore error", node.Name) + // ensure node in datastore + nodeStoreErr := ipam.ensureNodeInStore(ctx, node) + if nodeStoreErr != nil { + return nil, err } // get node stats from store, to further check if pool is corrupted - total, used, err := ipam.datastore.GetNodeStats(node.Name) - if err != nil { - msg := fmt.Sprintf("get node %v stats in datastore failed: %v", node, err) - log.Error(ctx, msg) - return nil, goerrors.New(msg) + statsErr := ipam.statsNodeIP(ctx, "before allocated", node.Name) + if statsErr != nil { + log.Errorf(ctx, "stat node %s ip failed: %s", node.Name, statsErr) + return nil, statsErr } - log.Infof(ctx, "total, used before allocate for node %v: %v %v", node.Name, total, used) - // find out which enis are suitable to bind enis, err := ipam.findSuitableENIs(ctx, pod) if err != nil { log.Errorf(ctx, "failed to find a suitable eni for pod (%v %v): %v", namespace, name, err) return nil, err } - suitableENINum := len(enis) wep, err := 
ipam.crdInformer.Cce().V1alpha1().WorkloadEndpoints().Lister().WorkloadEndpoints(namespace).Get(name) if err != nil && !errors.IsNotFound(err) { @@ -108,142 +78,82 @@ func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID str return ipam.subnetTopologyAllocates(ctx, pod, enis, wep, node.Name, containerID) } - // not a subnet topology spread pod, use node ippool to allocate ip address - // use old wep - if err == nil { - ipToAllocate := wep.Spec.IP - if !isFixIPStatefulSetPod(pod) { - log.Warningf(ctx, "pod (%v %v) still has wep, but is not a fix-ip sts pod", namespace, name) - ipToAllocate = "" - } - if ipToAllocate != "" { - log.Infof(ctx, "try to reuse fix IP %v for pod (%v %v)", ipToAllocate, namespace, name) - } - for _, eni := range enis { - if ipToAllocate == "" { - ipResult, err = ipam.datastore.AllocatePodPrivateIPByENI(node.Name, eni.EniId) - if err == nil { - ipAddedENI = eni - break - } - } - - ipResult, err = ipam.tryAllocateIPForFixIPPod(ctx, eni, wep, ipToAllocate, node, ipamgeneric.CCECniTimeout/time.Duration(suitableENINum)) - if err == nil { - ipAddedENI = eni - break - } else { - addErr := fmt.Errorf("error eni: %v, %v", eni.EniId, err.Error()) - addIPErrors = append(addIPErrors, addErr) - } - } + var allocatedErr error - if ipAddedENI == nil { - return nil, fmt.Errorf("all %d enis binded cannot add IP %v: %v", len(enis), wep.Spec.IP, utilerrors.NewAggregate(addIPErrors)) - } + if isStsPodReuseIP(wep, pod) { + // sts pod reuse ip + wep, allocatedErr = ipam.allocateIPForFixedIPPod(ctx, node, pod, containerID, enis, wep, deadline) + } else { + // other pod allocate ip + wep, allocatedErr = ipam.allocateIPForOrdinaryPod(ctx, node, pod, containerID, enis, wep, deadline) + } + if allocatedErr != nil { + return nil, allocatedErr + } - if wep.Labels == nil { - wep.Labels = make(map[string]string) - } - wep.Spec.ContainerID = containerID - wep.Spec.IP = ipResult - wep.Spec.ENIID = ipAddedENI.EniId - wep.Spec.Mac = 
ipAddedENI.MacAddress - wep.Spec.Node = pod.Spec.NodeName - wep.Spec.SubnetID = ipAddedENI.SubnetId - wep.Spec.UpdateAt = metav1.Time{Time: time.Unix(0, 0)} - wep.Labels[ipamgeneric.WepLabelSubnetIDKey] = ipAddedENI.SubnetId - wep.Labels[ipamgeneric.WepLabelInstanceTypeKey] = string(metadata.InstanceTypeExBCC) - if k8sutil.IsStatefulSetPod(pod) { - wep.Labels[ipamgeneric.WepLabelStsOwnerKey] = util.GetStsName(wep) - } - if pod.Annotations != nil { - wep.Spec.EnableFixIP = pod.Annotations[StsPodAnnotationEnableFixIP] - wep.Spec.FixIPDeletePolicy = pod.Annotations[StsPodAnnotationFixIPDeletePolicy] - } - _, err = ipam.crdClient.CceV1alpha1().WorkloadEndpoints(namespace).Update(ctx, wep, metav1.UpdateOptions{}) - if err != nil { - log.Errorf(ctx, "failed to update wep for pod (%v %v): %v", namespace, name, err) - time.Sleep(minPrivateIPLifeTime) - ipam.tryDeleteSubnetIPRetainAllocateCache(ctx, wep) - return nil, err - } - ipam.lock.Lock() - ipam.allocated[ipResult] = wep - ipam.lock.Unlock() - return wep, nil + statsErr = ipam.statsNodeIP(ctx, "after allocated", node.Name) + if statsErr != nil { + log.Errorf(ctx, "stat node %s ip failed: %s", node.Name, statsErr) } - // create a new wep - log.Infof(ctx, "try to allocate IP and create wep for pod (%v %v)", pod.Namespace, pod.Name) + return wep, nil +} - idleIPs, _ := ipam.datastore.GetUnassignedPrivateIPByNode(node.Name) - log.Infof(ctx, "idle ip in datastore before allocate for node %v: %v", node.Name, idleIPs) +func (ipam *IPAM) ensureNodeInStore(ctx context.Context, node *corev1.Node) error { + err := wait.ExponentialBackoff(retry.DefaultRetry, func() (done bool, err error) { + if ipam.datastore.NodeExistsInStore(node.Name) { + return true, nil + } - ipResult, ipAddedENI, err = ipam.allocateIPFromLocalPool(ctx, node, enis) - if err != nil { - msg := fmt.Sprintf("error allocate private IP for pod(%s/%s): %v", namespace, name, err) - log.Error(ctx, msg) - return nil, goerrors.New(msg) - } - wep = 
&v1alpha1.WorkloadEndpoint{ - ObjectMeta: metav1.ObjectMeta{ - Name: pod.Name, - Namespace: pod.Namespace, - Labels: map[string]string{ - ipamgeneric.WepLabelSubnetIDKey: ipAddedENI.SubnetId, - ipamgeneric.WepLabelInstanceTypeKey: string(metadata.InstanceTypeExBCC), - }, - Finalizers: []string{ipamgeneric.WepFinalizer}, - }, - Spec: v1alpha1.WorkloadEndpointSpec{ - ContainerID: containerID, - IP: ipResult, - Type: ipamgeneric.WepTypePod, - Mac: ipAddedENI.MacAddress, - ENIID: ipAddedENI.EniId, - Node: pod.Spec.NodeName, - SubnetID: ipAddedENI.SubnetId, - UpdateAt: metav1.Time{Time: time.Unix(0, 0)}, - }, - } + evt := &event{ + node: node, + ctx: ctx, + } - if k8sutil.IsStatefulSetPod(pod) { - wep.Spec.Type = ipamgeneric.WepTypeSts - wep.Labels[ipamgeneric.WepLabelStsOwnerKey] = util.GetStsName(wep) - } + ipam.lock.Lock() + ch, ok := ipam.buildDataStoreEventChan[node.Name] + if !ok { + ch = make(chan *event) + ipam.buildDataStoreEventChan[node.Name] = ch + go func() { + err := ipam.handleRebuildNodeDatastoreEvent(ctx, node, ch) + if err != nil { + log.Warningf(ctx, "rebuild node %s failed, wait to retry, err: %s", node.Name, err) + } + }() + } + ipam.lock.Unlock() - if pod.Annotations != nil { - wep.Spec.EnableFixIP = pod.Annotations[StsPodAnnotationEnableFixIP] - wep.Spec.FixIPDeletePolicy = pod.Annotations[StsPodAnnotationFixIPDeletePolicy] + ch <- evt + return false, nil + }) + if err != nil { + return fmt.Errorf("init node %v datastore error: %s", node.Name, err) } + return nil +} - _, err = ipam.crdClient.CceV1alpha1().WorkloadEndpoints(namespace).Create(ctx, wep, metav1.CreateOptions{}) +func (ipam *IPAM) statsNodeIP(ctx context.Context, stage, nodeName string) error { + total, used, err := ipam.datastore.GetNodeStats(nodeName) if err != nil { - log.Errorf(ctx, "failed to create wep for pod (%v %v): %v", namespace, name, err) - ipam.tryDeleteIPByWep(ctx, wep) - return nil, err + return err } - log.Infof(ctx, "create wep with spec %+v for pod (%v %v) 
successfully", wep.Spec, namespace, name) + log.Infof(ctx, "%s: total, used ip for node %s: %d %d", stage, nodeName, total, used) - // update allocated pod cache - ipam.lock.Lock() - if ipam.removeAddIPBackoffCache(wep.Spec.ENIID, true) { - log.Infof(ctx, "remove backoff for eni %v when handling pod (%v %v) due to successful ip allocate", wep.Spec.ENIID, namespace, name) + idleIPs, err := ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + if err != nil { + return err } - ipam.allocated[ipResult] = wep - ipam.lock.Unlock() + log.Infof(ctx, "%s: idle ip in datastore before allocate for node %s: %v", stage, nodeName, idleIPs) + return nil +} - total, used, err = ipam.datastore.GetNodeStats(node.Name) - if err == nil { - log.Infof(ctx, "total, used after allocate for node %v: %v %v", node.Name, total, used) - } - idleIPs, err = ipam.datastore.GetUnassignedPrivateIPByNode(node.Name) - if err == nil { - log.Infof(ctx, "idle ip in datastore after allocate for node %v: %v", node.Name, idleIPs) +func isStsPodReuseIP(wep *v1alpha1.WorkloadEndpoint, pod *corev1.Pod) bool { + if wep == nil { + return false } - return wep, nil + return wep.Spec.IP != "" && IsFixIPStatefulSetPod(pod) } func (ipam *IPAM) Release(ctx context.Context, name, namespace, containerID string) (*v1alpha1.WorkloadEndpoint, error) { @@ -274,7 +184,7 @@ func (ipam *IPAM) Release(ctx context.Context, name, namespace, containerID stri return nil, nil } - if isFixIPStatefulSetPodWep(wep) { + if networking.IsFixIPStatefulSetPodWep(wep) { log.Infof(ctx, "release: sts pod (%v %v) will update wep but private IP won't release", namespace, name) wep.Spec.UpdateAt = metav1.Time{Time: time.Now()} _, err = ipam.crdClient.CceV1alpha1().WorkloadEndpoints(namespace).Update(ctx, wep, metav1.UpdateOptions{}) @@ -296,21 +206,3 @@ func (ipam *IPAM) Release(ctx context.Context, name, namespace, containerID stri return wep, nil } - -// allocateIPFromLocalPool get ip from local cache -// increase the poll if datastore has 
no available ip -func (ipam *IPAM) allocateIPFromLocalPool(ctx context.Context, node *corev1.Node, enis []*enisdk.Eni) (ipresult string, eni *enisdk.Eni, err error) { - err = wait.ExponentialBackoff(retry.DefaultRetry, func() (done bool, err error) { - if ipam.canAllocateIP(ctx, node.Name, enis) { - return true, nil - } - - ipam.sendIncreasePoolEvent(ctx, node, enis, true) - return false, nil - }) - if err == wait.ErrWaitTimeout { - return "", nil, fmt.Errorf("allocate ip for node %v error", node.Name) - } - - return ipam.tryAllocateIPByENIs(ctx, node.Name, enis) -} diff --git a/pkg/eniipam/ipam/bcc/cni_request_test.go b/pkg/eniipam/ipam/bcc/cni_request_test.go new file mode 100644 index 0000000..ffad50f --- /dev/null +++ b/pkg/eniipam/ipam/bcc/cni_request_test.go @@ -0,0 +1,35 @@ +package bcc + +import ( + "context" + mockcloud "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud/testing" + datastorev1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/datastore/v1" + enisdk "github.com/baidubce/bce-sdk-go/services/eni" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "testing" +) + +func TestIPAM_ensureNodeInStore(t *testing.T) { + ctrl := gomock.NewController(t) + mockInterface := mockcloud.NewMockInterface(ctrl) + mockInterface.EXPECT().ListENIs(gomock.Any(), gomock.Any()).Return([]enisdk.Eni{}, nil).AnyTimes() + + ipam := &IPAM{ + datastore: datastorev1.NewDataStore(), + buildDataStoreEventChan: make(map[string]chan *event), + cloud: mockInterface, + } + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "i-xxxx0", + }, + Spec: v1.NodeSpec{ + ProviderID: "cce://i-xxxx0", + }, + } + err := ipam.ensureNodeInStore(context.Background(), node) + assert.Nil(t, err) +} diff --git a/pkg/eniipam/ipam/bcc/eni.go b/pkg/eniipam/ipam/bcc/eni.go index c990e6b..e78aa32 100644 --- a/pkg/eniipam/ipam/bcc/eni.go +++ b/pkg/eniipam/ipam/bcc/eni.go @@ -32,6 
+32,7 @@ import ( "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/metadata" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/ipcache" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/util" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/metric" utileni "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/eni" @@ -47,6 +48,15 @@ const ( CreateENIMaxConcurrency = 15 ) +type eniManager struct { + *ipcache.CacheMapArray[*enisdk.Eni] + ipam *IPAM +} + +func (m *eniManager) start(stopCh <-chan struct{}) { + +} + func (ipam *IPAM) syncENI(stopCh <-chan struct{}) error { eniSyncInterval := wait.Jitter(ipam.eniSyncPeriod, 0.2) @@ -126,27 +136,26 @@ func (ipam *IPAM) buildInuseENICache(ctx context.Context, nodes []*v1.Node, enis defer ipam.lock.Unlock() // build eni cache - ipam.eniCache = make(map[string][]*enisdk.Eni) + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() ipam.privateIPNumCache = make(map[string]int) instanceIdToNodeNameMap := buildInstanceIdToNodeNameMap(ctx, nodes) // init eni cache - for _, n := range nodes { - ipam.eniCache[n.Name] = make([]*enisdk.Eni, 0) - } for idx, eni := range enis { + log.Infof(ctx, "try to add eni %s to cache", eni.EniId) if eni.Status != utileni.ENIStatusInuse { continue } if nodeName, ok := instanceIdToNodeNameMap[eni.InstanceId]; ok { - ipam.eniCache[nodeName] = append(ipam.eniCache[nodeName], &enis[idx]) + eniCache.Append(nodeName, &enis[idx]) } // update private ip num of enis ipam.privateIPNumCache[eni.EniId] = len(eni.PrivateIpSet) } + ipam.eniCache = eniCache return nil } @@ -160,8 +169,8 @@ func (ipam *IPAM) findSuitableENIs(ctx context.Context, pod *v1.Pod) ([]*enisdk. 
nodeName := pod.Spec.NodeName // get eni list from eniCache - enis, ok := ipam.eniCache[nodeName] - if !ok || len(enis) == 0 { + enis, ok := ipam.eniCache.Get(nodeName) + if !ok { return nil, fmt.Errorf("no eni binded to node %s", nodeName) } @@ -171,7 +180,7 @@ func (ipam *IPAM) findSuitableENIs(ctx context.Context, pod *v1.Pod) ([]*enisdk. } // for fix IP pod, candidate subnets should be in the same subnet - if isFixIPStatefulSetPod(pod) { + if IsFixIPStatefulSetPod(pod) { wep, err := ipam.crdInformer.Cce().V1alpha1().WorkloadEndpoints().Lister().WorkloadEndpoints(pod.Namespace).Get(pod.Name) if err == nil { // get old subnet of pod diff --git a/pkg/eniipam/ipam/bcc/eni_test.go b/pkg/eniipam/ipam/bcc/eni_test.go index 7cf9733..01de87d 100644 --- a/pkg/eniipam/ipam/bcc/eni_test.go +++ b/pkg/eniipam/ipam/bcc/eni_test.go @@ -27,11 +27,12 @@ import ( mockcloud "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud/testing" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/config/types" datastorev1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/datastore/v1" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/ipcache" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" eniutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/eni" - utileni "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/eni" log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" + "github.com/baidubce/baiducloud-cce-cni-driver/test/data" enisdk "github.com/baidubce/bce-sdk-go/services/eni" "github.com/golang/mock/gomock" "github.com/juju/ratelimit" @@ -87,9 +88,9 @@ func Test_buildENICache(t *testing.T) { func Test_listAttachedENIs(t *testing.T) { var ( enis = []enisdk.Eni{ - enisdk.Eni{Name: "cce-xxx/i-aaa/nodeA/1234", Status: utileni.ENIStatusInuse, InstanceId: "i-aaa"}, - enisdk.Eni{Name: 
"cce-xxx/i-bbb/nodeB/1234", Status: utileni.ENIStatusAttaching}, - enisdk.Eni{Name: "cce-xxx/i-aaa/nodeA/1234", Status: utileni.ENIStatusAttaching}, + enisdk.Eni{Name: "cce-xxx/i-aaa/nodeA/1234", Status: eniutil.ENIStatusInuse, InstanceId: "i-aaa"}, + enisdk.Eni{Name: "cce-xxx/i-bbb/nodeB/1234", Status: eniutil.ENIStatusAttaching}, + enisdk.Eni{Name: "cce-xxx/i-aaa/nodeA/1234", Status: eniutil.ENIStatusAttaching}, } ) type args struct { @@ -148,7 +149,6 @@ func TestIPAM_getSecurityGroupsFromDefaultIPPool(t *testing.T) { possibleLeakedIPCache map[eniAndIPAddrKey]time.Time addIPBackoffCache map[string]*wait.Backoff cacheHasSynced bool - allocated map[string]*v1alpha1.WorkloadEndpoint datastore *datastorev1.DataStore idleIPPoolMinSize int idleIPPoolMaxSize int @@ -330,14 +330,17 @@ func TestIPAM_getSecurityGroupsFromDefaultIPPool(t *testing.T) { if tt.fields.ctrl != nil { defer tt.fields.ctrl.Finish() } + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) 
+ } t.Run(tt.name, func(t *testing.T) { ipam := &IPAM{ - eniCache: tt.fields.eniCache, + eniCache: eniCache, privateIPNumCache: tt.fields.privateIPNumCache, possibleLeakedIPCache: tt.fields.possibleLeakedIPCache, - addIPBackoffCache: tt.fields.addIPBackoffCache, cacheHasSynced: tt.fields.cacheHasSynced, - allocated: tt.fields.allocated, datastore: tt.fields.datastore, idleIPPoolMinSize: tt.fields.idleIPPoolMinSize, idleIPPoolMaxSize: tt.fields.idleIPPoolMaxSize, @@ -376,7 +379,7 @@ func TestIPAM_getSecurityGroupsFromDefaultIPPool(t *testing.T) { } } -type IPAMENI struct { +type IPAMENITester struct { suite.Suite ipam *IPAM wantErr bool @@ -387,30 +390,41 @@ type IPAMENI struct { } // 每次测试前设置上下文 -func (suite *IPAMENI) SetupTest() { +func (suite *IPAMENITester) SetupTest() { suite.stopChan = make(chan struct{}) suite.ipam = mockIPAM(suite.T(), suite.stopChan) suite.ctx = context.TODO() + + suite.ipam.kubeInformer.Core().V1().Nodes().Informer() + suite.ipam.kubeInformer.Core().V1().Pods().Informer() + suite.ipam.kubeInformer.Apps().V1().StatefulSets().Informer() + suite.ipam.crdInformer.Cce().V1alpha1().WorkloadEndpoints().Informer() + suite.ipam.crdInformer.Cce().V1alpha1().IPPools().Informer() + suite.ipam.crdInformer.Cce().V1alpha1().Subnets().Informer() + suite.ipam.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Informer() + + suite.ipam.kubeInformer.Start(suite.stopChan) + suite.ipam.crdInformer.Start(suite.stopChan) } // 每次测试后执行清理 -func (suite *IPAMENI) TearDownTest() { +func (suite *IPAMENITester) TearDownTest() { suite.ipam = nil suite.ctx = nil suite.wantErr = false close(suite.stopChan) } -func (suite *IPAMENI) TestSyncEni() { +func (suite *IPAMENITester) TestSyncEni() { mockInterface := suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT() mockInterface.ListENIs(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error")).AnyTimes() go suite.ipam.syncENI(suite.stopChan) time.Sleep(time.Second) } -func (suite *IPAMENI) TestResyncEni() { +func (suite 
*IPAMENITester) TestResyncEni() { eni4 := mockEni("eni-4", "i-3", "10.0.0.3") - eni4.Status = utileni.ENIStatusAttaching + eni4.Status = eniutil.ENIStatusAttaching enis := []enisdk.Eni{ mockEni("eni-1", "i-1", "10.0.0.1"), mockEni("eni-2", "i-1", "10.0.0.1"), @@ -461,12 +475,40 @@ func (suite *IPAMENI) TestResyncEni() { suite.ipam.resyncENI() } +type increaseEniSuite struct { + IPAMENITester +} + +func (suite *increaseEniSuite) TestIncreaseEni() { + eni4 := mockEni("eni-4", "i-3", "10.0.0.3") + eni4.Status = eniutil.ENIStatusAttaching + enis := []enisdk.Eni{ + mockEni("eni-1", "i-1", "10.0.0.1"), + mockEni("eni-2", "i-1", "10.0.0.1"), + mockEni("eni-3", "i-1", "10.0.0.1"), + eni4, + } + mockInterface := suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT() + mockInterface.ListENIs(gomock.Any(), gomock.Any()).Return(enis, nil).AnyTimes() + + node := data.MockNode("10.0.0.1", "BCC", "cce://i-1") + suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + node2 := data.MockNode("10.0.0.2", "BCC", "cce://i-2") + suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), node2, metav1.CreateOptions{}) + node3 := data.MockNode("10.0.0.3", "BCC", "cce://i-3") + node3.Status.Conditions = make([]corev1.NodeCondition, 0) + suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), node3, metav1.CreateOptions{}) + + __waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer, suite.stopChan) + suite.ipam.resyncENI() +} + func mockEni(id, instanceId, node string) enisdk.Eni { return enisdk.Eni{ EniId: id, Name: fmt.Sprintf("clusterID/%s/%s/%s", instanceId, node, id), - Status: utileni.ENIStatusInuse, + Status: eniutil.ENIStatusInuse, ZoneName: "zoneF", SubnetId: "sbn-test", VpcId: "vpcID", @@ -475,5 +517,11 @@ func mockEni(id, instanceId, node string) enisdk.Eni { } } func TestIPAMENI(t *testing.T) { - suite.Run(t, new(IPAMENI)) + t.Parallel() + suite.Run(t, new(IPAMENITester)) +} + +func TestIPAMENI2(t *testing.T) { + 
t.Parallel() + suite.Run(t, new(increaseEniSuite)) } diff --git a/pkg/eniipam/ipam/bcc/gc.go b/pkg/eniipam/ipam/bcc/gc.go index 37c6d48..a462fa2 100644 --- a/pkg/eniipam/ipam/bcc/gc.go +++ b/pkg/eniipam/ipam/bcc/gc.go @@ -14,14 +14,18 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apimachinery/networking" networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/util" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/metric" utilippool "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/ippool" k8sutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/k8s" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" + enisdk "github.com/baidubce/bce-sdk-go/services/eni" ) func (ipam *IPAM) gc(stopCh <-chan struct{}) error { @@ -78,18 +82,16 @@ func (ipam *IPAM) gcLeakedIP(ctx context.Context) error { return err } - // list all weps whose owner is sts - requirement, _ := labels.NewRequirement(ipamgeneric.WepLabelStsOwnerKey, selection.Exists, nil) - selector := labels.NewSelector().Add(*requirement) - stsWeps, err := ipam.crdInformer.Cce().V1alpha1().WorkloadEndpoints().Lister().WorkloadEndpoints(v1.NamespaceAll).List(selector) + // list all weps + weps, err := ipam.crdInformer.Cce().V1alpha1().WorkloadEndpoints().Lister().WorkloadEndpoints(v1.NamespaceAll).List(labels.Everything()) if err != nil { - log.Errorf(ctx, "gc: failed to list wep with selector: %v: %v", selector.String(), err) + log.Errorf(ctx, "gc: failed to list wep: %v", err) return err } var ( - podIPSet = 
sets.NewString() - stsPodIPSet = sets.NewString() + podIPSet = sets.NewString() + reservedIPSet = sets.NewString() ) // store pod ip temporarily @@ -115,13 +117,15 @@ func (ipam *IPAM) gcLeakedIP(ctx context.Context) error { } } - // store sts pod ip temporarily - for _, wep := range stsWeps { - stsPodIPSet.Insert(wep.Spec.IP) + // store ip which needs to be reused temporarily + for _, wep := range weps { + if networking.IsFixIPStatefulSetPodWep(wep) || networking.ISCustomReuseModeWEP(wep) { + reservedIPSet.Insert(wep.Spec.IP) + } } // build leaked ip cache - ipam.buildPossibleLeakedIPCache(ctx, podIPSet, stsPodIPSet) + ipam.buildPossibleLeakedIPCache(ctx, podIPSet, reservedIPSet) // prune leaked ip ipam.pruneExpiredLeakedIP(ctx) @@ -129,49 +133,55 @@ func (ipam *IPAM) gcLeakedIP(ctx context.Context) error { return nil } -func (ipam *IPAM) buildPossibleLeakedIPCache(ctx context.Context, podIPSet, stsPodIPSet sets.String) { +func (ipam *IPAM) buildPossibleLeakedIPCache(ctx context.Context, podIPSet, reservedIPSet sets.String) { ipam.lock.Lock() defer ipam.lock.Unlock() - for nodeName, enis := range ipam.eniCache { - idleIPs, err := ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) - if err != nil { - log.Errorf(ctx, "gc: failed to get idle ips in datastore of node %v: %v", nodeName, err) + var ( + nodeName string + idleIPSet sets.String + ) + ipam.eniCache.ForEachSubItem(func(key string, index int, eni *enisdk.Eni) bool { + if key != nodeName { + nodeName = key + idleIPs, err := ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + if err != nil { + log.Errorf(ctx, "gc: failed to get idle ips in datastore of node %v: %v", nodeName, err) + } + idleIPSet = sets.NewString(idleIPs...) } - idleIPSet := sets.NewString(idleIPs...) 
- - for _, eni := range enis { - for _, ip := range eni.PrivateIpSet { - if !ip.Primary { - key := eniAndIPAddrKey{nodeName, eni.EniId, ip.PrivateIpAddress} - // ip not in pod and neither in sts wep, nor in idle pool - if !podIPSet.Has(ip.PrivateIpAddress) && !stsPodIPSet.Has(ip.PrivateIpAddress) && !idleIPSet.Has(ip.PrivateIpAddress) { - if _, ok := ipam.possibleLeakedIPCache[key]; !ok { - ipam.possibleLeakedIPCache[key] = ipam.clock.Now() - log.Warningf(ctx, "gc: eni %v on node %v may has IP %v leaked", eni.EniId, nodeName, ip.PrivateIpAddress) - } + + for _, ip := range eni.PrivateIpSet { + if !ip.Primary { + key := eniAndIPAddrKey{nodeName, eni.EniId, ip.PrivateIpAddress} + // ip not in pod and neither in sts wep, nor in idle pool + if !podIPSet.Has(ip.PrivateIpAddress) && !reservedIPSet.Has(ip.PrivateIpAddress) && !idleIPSet.Has(ip.PrivateIpAddress) { + if _, ok := ipam.possibleLeakedIPCache[key]; !ok { + ipam.possibleLeakedIPCache[key] = ipam.clock.Now() + log.Warningf(ctx, "gc: eni %v on node %v may has IP %v leaked", eni.EniId, nodeName, ip.PrivateIpAddress) + } + } else { + // if ip in pod, maybe a false positive + if _, ok := ipam.possibleLeakedIPCache[key]; ok { + delete(ipam.possibleLeakedIPCache, key) + log.Warningf(ctx, "gc: remove IP %v on eni %v from possibleLeakedIPCache", ip.PrivateIpAddress, eni.EniId) } else { - // if ip in pod, maybe a false positive - if _, ok := ipam.possibleLeakedIPCache[key]; ok { - delete(ipam.possibleLeakedIPCache, key) - log.Warningf(ctx, "gc: remove IP %v on eni %v from possibleLeakedIPCache", ip.PrivateIpAddress, eni.EniId) - } else { - // If the eni network card bound to the IP address changes, - // but the private IP of eni has not been synchronized yet, - // and the IP has been allocated to the new Pod, the garbage - // collection process for the IP should be cancelled - for tmpKey := range ipam.possibleLeakedIPCache { - if tmpKey.ipAddr == ip.PrivateIpAddress { - log.Warningf(ctx, "gc: cancel garbage collection of 
inuse IP %s which bound to eni %s ", ip.PrivateIpAddress, eni.EniId) - delete(ipam.possibleLeakedIPCache, tmpKey) - } + // If the eni network card bound to the IP address changes, + // but the private IP of eni has not been synchronized yet, + // and the IP has been allocated to the new Pod, the garbage + // collection process for the IP should be cancelled + for tmpKey := range ipam.possibleLeakedIPCache { + if tmpKey.ipAddr == ip.PrivateIpAddress { + log.Warningf(ctx, "gc: cancel garbage collection of inuse IP %s which bound to eni %s ", ip.PrivateIpAddress, eni.EniId) + delete(ipam.possibleLeakedIPCache, tmpKey) } } } } } } - } + return true + }) } func (ipam *IPAM) pruneExpiredLeakedIP(ctx context.Context) { @@ -271,7 +281,7 @@ func (ipam *IPAM) __deleteIPFromCache(ctx context.Context, nodeName, ip, eniID s ipam.lock.Lock() defer ipam.lock.Unlock() if deleteAllocateCache { - ipam.removeIPFromCache(ip, true) + ipam.allocated.Delete(ip) } ipam.decPrivateIPNumCache(eniID, true) } @@ -334,14 +344,6 @@ func (ipam *IPAM) tryReleaseIdleIP(ctx context.Context, nodeName, eniID string) return ipam.__tryDeleteIPByIPAndENI(ctx, nodeName, tempIP, eniID, true) } -func (ipam *IPAM) removeIPFromCache(ipAddr string, lockless bool) { - if !lockless { - ipam.lock.Lock() - defer ipam.lock.Unlock() - } - delete(ipam.allocated, ipAddr) -} - func (ipam *IPAM) removeIPFromLeakedCache(node, eniID, ipAddr string) { ipam.lock.Lock() defer ipam.lock.Unlock() @@ -352,7 +354,7 @@ func (ipam *IPAM) removeIPFromLeakedCache(node, eniID, ipAddr string) { func (ipam *IPAM) gcDeletedSts(ctx context.Context, wepList []*networkingv1alpha1.WorkloadEndpoint) error { for _, wep := range wepList { // only delete ip if sts requires fix ip - if !isFixIPStatefulSetPodWep(wep) { + if !networking.IsFixIPStatefulSetPodWep(wep) { continue } // don't delete ip if policy is Never @@ -400,7 +402,7 @@ func (ipam *IPAM) gcScaledDownSts(ctx context.Context, stsList []*appv1.Stateful } for _, wep := range weps { // 
only delete ip if sts requires fix ip - if !isFixIPStatefulSetPodWep(wep) { + if !networking.IsFixIPStatefulSetPodWep(wep) { continue } // don't delete ip if policy is Never @@ -427,7 +429,7 @@ func (ipam *IPAM) gcScaledDownSts(ctx context.Context, stsList []*appv1.Stateful func (ipam *IPAM) gcLeakedPod(ctx context.Context, wepList []*networkingv1alpha1.WorkloadEndpoint) error { for _, wep := range wepList { // only gc non-fix ip pod - if isFixIPStatefulSetPodWep(wep) { + if networking.IsFixIPStatefulSetPodWep(wep) { continue } _, err := ipam.kubeInformer.Core().V1().Pods().Lister().Pods(wep.Namespace).Get(wep.Name) @@ -439,6 +441,11 @@ func (ipam *IPAM) gcLeakedPod(ctx context.Context, wepList []*networkingv1alpha1 Kind: "wep", Name: fmt.Sprintf("%v %v", wep.Namespace, wep.Name), }, v1.EventTypeWarning, "PodLeaked", msg) + + // mark wep phase to deleted + if markWEPPodDeletedPhase(ctx, ipam.crdClient, wep) { + continue + } ipam.gcIPAndDeleteWep(ctx, wep) } else { log.Errorf(ctx, "gc: failed to get pod (%v %v): %v", wep.Namespace, wep.Name, err) @@ -449,6 +456,31 @@ func (ipam *IPAM) gcLeakedPod(ctx context.Context, wepList []*networkingv1alpha1 return nil } +// markWEPPodDeletedPhase in custom IP reuse mode, if the pod is deleted, +// the IP will be deleted only when the maximum TTL period is exceeded. 
+func markWEPPodDeletedPhase(ctx context.Context, crdClient versioned.Interface, wep *networkingv1alpha1.WorkloadEndpoint) bool { + if networking.ISCustomReuseModeWEP(wep) { + // marking wep phase to pod deleted + if wep.Spec.Phase != networkingv1alpha1.WorkloadEndpointPhasePodDeleted { + newWep := wep.DeepCopy() + newWep.Spec.Phase = networkingv1alpha1.WorkloadEndpointPhasePodDeleted + newWep.Spec.UpdateAt = metav1.Now() + + if newWep.Spec.Release == nil { + newWep.Spec.Release = &networkingv1alpha1.EndpointRelease{} + } + newWep.Spec.Release.PodDeletedTime = &newWep.Spec.UpdateAt + + _, err := crdClient.CceV1alpha1().WorkloadEndpoints(newWep.Namespace).Update(ctx, newWep, metav1.UpdateOptions{}) + if err != nil { + logger.Errorf(ctx, "mark WEP to PodDeleted phase failed %v", err) + } + logger.Infof(ctx, "mark WEP to PodDeleted phasesuccess") + } + } + return false +} + // gcWepAndIP and delete wep // If the IP pool mode is used, when the idle IP is less than // the maximum IP size, the IP address will be recycled for reuse. @@ -459,26 +491,37 @@ func (ipam *IPAM) gcLeakedPod(ctx context.Context, wepList []*networkingv1alpha1 // mark ip as unassigned in datastore, then delete wep // case 3. 
delete ip from cloud func (ipam *IPAM) gcIPAndDeleteWep(ctx context.Context, wep *networkingv1alpha1.WorkloadEndpoint) (err error) { - idle := ipam.idleIPNum(wep.Spec.Node) - + // delete ip of cross subnet if wep.Spec.SubnetTopologyReference != "" { + _, err = ipam.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Lister().PodSubnetTopologySpreads(wep.Namespace).Get(wep.Spec.SubnetTopologyReference) + if err != nil && !errors.IsNotFound(err) { + return err + } else if err == nil && networking.ISCustomReuseModeWEP(wep) { + // should not delete wep with custom reuse mode + if !networking.NeedReleaseReuseModeWEP(wep) { + return nil + } + } + err = ipam.tryDeleteCrossSubnetIPByWep(ctx, wep) if err != nil { - err = fmt.Errorf("gc: error delete private IP %v cross subnet for pod (%v %v): %v", wep.Spec.IP, wep.Namespace, wep.Name, err) - log.Errorf(ctx, "%v", err) + err = fmt.Errorf("delete private IP %v cross subnet for pod (%v %v) failed: %v", wep.Spec.IP, wep.Namespace, wep.Name, err) + logger.Errorf(ctx, "%v", err) return } else { - goto delWep + return ipam.deleteWepAndRemoveCahce(ctx, wep) } } + // release ip to pool + idle := ipam.idleIPNum(wep.Spec.Node) if idle < ipam.idleIPPoolMaxSize { log.Infof(ctx, "gc: try to only release wep for pod (%v %v) due to idle ip (%v) less than max idle pool", wep.Namespace, wep.Name, idle) err = ipam.datastore.ReleasePodPrivateIP(wep.Spec.Node, wep.Spec.ENIID, wep.Spec.IP) if err != nil { log.Errorf(ctx, "gc: error releasing private IP %v from datastore for pod (%v %v): %v", wep.Spec.IP, wep.Namespace, wep.Name, err) } else { - goto delWep + return ipam.deleteWepAndRemoveCahce(ctx, wep) } } @@ -488,12 +531,15 @@ func (ipam *IPAM) gcIPAndDeleteWep(ctx context.Context, wep *networkingv1alpha1. 
log.Errorf(ctx, err.Error()) return } -delWep: + return ipam.deleteWepAndRemoveCahce(ctx, wep) +} + +func (ipam *IPAM) deleteWepAndRemoveCahce(ctx context.Context, wep *networkingv1alpha1.WorkloadEndpoint) error { metric.MultiEniMultiIPEniIPCount.WithLabelValues(metric.MetaInfo.ClusterID, metric.MetaInfo.VPCID, wep.Spec.SubnetID).Dec() log.Infof(ctx, "release private IP %v for pod (%v %v) successfully", wep.Spec.IP, wep.Name, wep.Name) - ipam.removeIPFromCache(wep.Spec.IP, false) + ipam.allocated.Delete(wep.Spec.IP) ipam.removeIPFromLeakedCache(wep.Spec.Node, wep.Spec.ENIID, wep.Spec.IP) - err = ipam.tryDeleteWep(ctx, wep) + err := ipam.tryDeleteWep(ctx, wep) if err != nil { log.Errorf(ctx, "gc: try delete wep (%v %v) error: %v", wep.Namespace, wep.Name, err) } diff --git a/pkg/eniipam/ipam/bcc/gc_test.go b/pkg/eniipam/ipam/bcc/gc_test.go index 0a830a6..8f69389 100644 --- a/pkg/eniipam/ipam/bcc/gc_test.go +++ b/pkg/eniipam/ipam/bcc/gc_test.go @@ -132,7 +132,7 @@ func (suite *IPAMGC) Test__gcScaledDownSts() { mockInterface.DeletePrivateIP(gomock.Any(), "192.168.1.109", gomock.Any()).Return(nil).AnyTimes() mockInterface.DeletePrivateIP(gomock.Any(), "192.168.1.110", gomock.Any()).Return(fmt.Errorf("cannot delete ip")).AnyTimes() - __waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer, suite.stopChan) + waitCacheSync(suite.ipam, suite.stopChan) suite.ipam.gcScaledDownSts(suite.ctx, stsList) // should not be deleted @@ -190,7 +190,7 @@ func (suite *IPAMGC) Test__gcLeakedIPPool() { ippool.Name = "ippool-10-0-0-9" suite.ipam.crdClient.CceV1alpha1().IPPools(corev1.NamespaceDefault).Create(suite.ctx, ippool, metav1.CreateOptions{}) - __waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer, suite.stopChan) + waitCacheSync(suite.ipam, suite.stopChan) suite.ipam.gcLeakedIPPool(suite.ctx) } @@ -207,5 +207,6 @@ func (suite *IPAMGC) Test__gcDeletedNode() { } func TestIPAMGC(t *testing.T) { + t.Parallel() suite.Run(t, new(IPAMGC)) } diff --git 
a/pkg/eniipam/ipam/bcc/ipam.go b/pkg/eniipam/ipam/bcc/ipam.go index 3290234..3f80ddb 100644 --- a/pkg/eniipam/ipam/bcc/ipam.go +++ b/pkg/eniipam/ipam/bcc/ipam.go @@ -17,7 +17,6 @@ package bcc import ( "context" - goerrors "errors" "fmt" "net" "sort" @@ -43,12 +42,15 @@ import ( "k8s.io/client-go/util/retry" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/metadata" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/config/types" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/controller/subnet" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/controller/topology_spread" datastorev1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/datastore/v1" ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/ipcache" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/util" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" @@ -60,14 +62,18 @@ import ( ) const ( - // minPrivateIPLifeTime is the life time of a private ip (from allocation to release), aim to trade off db slave delay + // minPrivateIPLifeTime is the lifetime of a private ip (from allocation to release), aim to trade off db slave delay minPrivateIPLifeTime = 5 * time.Second + minIdleIPPoolSize = 1 + rateLimitErrorSleepPeriod = time.Millisecond * 200 rateLimitErrorJitterFactor = 5 + + increasePoolSizePerNode = 10 ) -func (ipam *IPAM) Ready(ctx context.Context) bool { +func (ipam *IPAM) Ready(_ context.Context) bool { return ipam.cacheHasSynced } @@ -88,7 +94,7 @@ func NewIPAM( batchAddIPNum int, eniSyncPeriod time.Duration, gcPeriod 
time.Duration, - debug bool, + _ bool, ) (ipamgeneric.Interface, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ @@ -100,6 +106,10 @@ func NewIPAM( sbnController := subnet.NewSubnetController(crdInformer, crdClient, bceClient, eventBroadcaster) pstsController := topology_spread.NewTopologySpreadController(kubeInformer, crdInformer, crdClient, eventBroadcaster, sbnController) + // min size of idle ip pool must >= 1 + if idleIPPoolMinSize < minIdleIPPoolSize { + idleIPPoolMinSize = minIdleIPPoolSize + } ipam := &IPAM{ eventBroadcaster: eventBroadcaster, eventRecorder: recorder, @@ -115,11 +125,12 @@ func NewIPAM( subnetSelectionPolicy: subnetSelectionPolicy, eniSyncPeriod: eniSyncPeriod, gcPeriod: gcPeriod, - eniCache: make(map[string][]*enisdk.Eni), + eniCache: ipcache.NewCacheMapArray[*enisdk.Eni](), privateIPNumCache: make(map[string]int), possibleLeakedIPCache: make(map[eniAndIPAddrKey]time.Time), - addIPBackoffCache: make(map[string]*wait.Backoff), - allocated: make(map[string]*v1alpha1.WorkloadEndpoint), + addIPBackoffCache: ipcache.NewCacheMap[*wait.Backoff](), + allocated: ipcache.NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), + reusedIPs: ipcache.NewReuseIPAndWepPool(), datastore: datastorev1.NewDataStore(), bucket: ratelimit.NewBucketWithRate(ipMutatingRate, ipMutatingBurst), idleIPPoolMinSize: idleIPPoolMinSize, @@ -156,6 +167,8 @@ func (ipam *IPAM) Run(ctx context.Context, stopCh <-chan struct{}) error { subnetInformer := ipam.crdInformer.Cce().V1alpha1().Subnets().Informer() pstsInformer := ipam.crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Informer() + wepInformer.AddEventHandler(ipam.reusedIPs) + ipam.kubeInformer.Start(stopCh) ipam.crdInformer.Start(stopCh) if !cache.WaitForNamedCacheSync( @@ -180,9 +193,11 @@ func (ipam *IPAM) Run(ctx context.Context, stopCh <-chan struct{}) error { if err != nil { return err } - // rebuild datastore cache - 
ipam.buildAllocatedNodeCache(ctx) + err = ipam.buildAllocatedNodeCache(ctx) + if err != nil { + return err + } group := &wait.Group{} @@ -198,7 +213,11 @@ func (ipam *IPAM) Run(ctx context.Context, stopCh <-chan struct{}) error { } }) - group.Start(func() { ipam.checkIdleIPPoolPeriodically() }) + group.Start(func() { + if err := ipam.checkIdleIPPoolPeriodically(); err != nil { + log.Errorf(ctx, "failed to start checkIdleIPPoolPeriodically: %v", err) + } + }) subsystemRun := func(sc subsystemController) { sc.Run(stopCh) @@ -211,80 +230,266 @@ func (ipam *IPAM) Run(ctx context.Context, stopCh <-chan struct{}) error { subsystemRun(ipam.tsCtl.(subsystemController)) }) group.Start(func() { - ipam.reconcileRelationOfWepEni(stopCh) + if err := ipam.reconcileRelationOfWepEni(stopCh); err != nil { + log.Errorf(ctx, "failed to start reconcileRelationOfWepEni: %v", err) + } }) - // k8sr resource and ip cache are synced + // k8s resource and ip cache are synced ipam.cacheHasSynced = true group.Wait() <-stopCh return nil } -func (ipam *IPAM) tryAllocateIPForFixIPPod(ctx context.Context, eni *enisdk.Eni, wep *v1alpha1.WorkloadEndpoint, ipToAllocate string, node *v1.Node, backoffCap time.Duration) (string, error) { - var namespace, name string = wep.Namespace, wep.Name - var ipResult []string - var err error - // Note: here DeletePrivateIP and AddPrivateIP should be atomic. 
we leverage a lock to do this - // ensure private ip not attached to other eni +func (ipam *IPAM) buildAllocatedCache(ctx context.Context) error { + selector, err := wepListerSelector() + if err != nil { + log.Errorf(ctx, "error parsing requirement: %v", err) + return err + } + + wepList, err := ipam.crdInformer.Cce().V1alpha1().WorkloadEndpoints().Lister().List(selector) + if err != nil { + return err + } + for _, wep := range wepList { + nwep := wep.DeepCopy() + ipam.allocated.Add(wep.Spec.IP, nwep) + log.Infof(ctx, "build allocated pod cache: found IP %v assigned to pod (%v %v)", wep.Spec.IP, wep.Namespace, wep.Name) + } + return nil +} - err = ipam.__tryDeleteIPByIPAndENI(ctx, wep.Spec.Node, wep.Spec.IP, wep.Spec.ENIID, true) - if err != nil && !cloud.IsErrorENIPrivateIPNotFound(err) { - log.Errorf(ctx, "error delete private IP %v for pod (%v %v): %v", wep.Spec.IP, namespace, name, err) - if cloud.IsErrorRateLimit(err) { +func (ipam *IPAM) allocateIPForFixedIPPod(ctx context.Context, node *v1.Node, pod *v1.Pod, containerID string, + enis []*enisdk.Eni, wep *v1alpha1.WorkloadEndpoint, deadline time.Time) (*v1alpha1.WorkloadEndpoint, error) { + var allocatedIP string + var allocatedEni *enisdk.Eni + var allocatedErr error + + // 1.1 try to allocate fixed ip + allocatedIP, allocatedEni, allocatedErr = ipam.allocateFixedIPFromCloud(ctx, node, enis, wep, deadline) + if allocatedErr != nil { + log.Warningf(ctx, "allocate fixed ip %s for pod (%s %s) failed: %s", + wep.Spec.IP, wep.Namespace, wep.Name, allocatedErr) + + // 1.2. 
if 1.1 failed, try to allocate new ip for sts pod + if timeoutErr := assertDeadline(deadline); timeoutErr != nil { + return nil, timeoutErr + } + log.Infof(ctx, "try to allocate new ip for pod (%s %s)", wep.Namespace, wep.Name) + + allocatedIP, allocatedEni, allocatedErr = ipam.tryToAllocateIPFromCache(ctx, node, enis, deadline) + if allocatedErr != nil { + log.Errorf(ctx, "allocate ip for pod (%s %s) failed: %s", wep.Namespace, wep.Name, allocatedErr) + return nil, allocatedErr + } + } + + // 2. update wep + newWep := ipam.fillFieldsToWep(wep, pod, containerID, allocatedIP, allocatedEni) + + _, updateErr := ipam.crdClient.CceV1alpha1().WorkloadEndpoints(newWep.Namespace).Update(ctx, newWep, metav1.UpdateOptions{}) + if updateErr != nil { + log.Errorf(ctx, "failed to update wep for pod (%s %s): %s", pod.Namespace, pod.Name, updateErr) + time.Sleep(minPrivateIPLifeTime) + + if rollbackErr := ipam.tryDeleteSubnetIPRetainAllocateCache(ctx, newWep); rollbackErr != nil { + log.Errorf(ctx, "rollback wep for pod (%s %s) failed: %s", pod.Namespace, pod.Name, rollbackErr) + } + return nil, updateErr + } + log.Infof(ctx, "update wep with spec %+v for pod (%v %v) successfully", newWep.Spec, newWep.Namespace, newWep.Name) + + // 3. update cache ip and wep + ipam.allocated.Add(allocatedIP, newWep) + return newWep, nil +} + +func (ipam *IPAM) allocateFixedIPFromCloud(ctx context.Context, node *v1.Node, enis []*enisdk.Eni, + wep *v1alpha1.WorkloadEndpoint, deadline time.Time) (string, *enisdk.Eni, error) { + var namespace, name = wep.Namespace, wep.Name + + // Note: here DeletePrivateIP and AddPrivateIP should be atomic. 
we leverage a lock to do this + // Ensures private ip not attached to other eni + deleteErr := ipam.__tryDeleteIPByIPAndENI(ctx, wep.Spec.Node, wep.Spec.IP, wep.Spec.ENIID, true) + if deleteErr != nil && !cloud.IsErrorENIPrivateIPNotFound(deleteErr) { + log.Errorf(ctx, "error delete private IP %v for pod (%v %v): %v", wep.Spec.IP, namespace, name, deleteErr) + if cloud.IsErrorRateLimit(deleteErr) { time.Sleep(wait.Jitter(rateLimitErrorSleepPeriod, rateLimitErrorJitterFactor)) } } - allocIPMaxTry := 3 - for i := 0; i < allocIPMaxTry; i++ { - log.Infof(ctx, "try to add IP %v to %v", ipToAllocate, eni.EniId) - ipResult, err = ipam.batchAddPrivateIP(ctx, []string{ipToAllocate}, 0, eni.EniId) - if err != nil { - log.Errorf(ctx, "error add private IP %v for pod (%v %v): %v", ipToAllocate, namespace, name, err) - if cloud.IsErrorSubnetHasNoMoreIP(err) { - if e := ipam.sbnCtl.DeclareSubnetHasNoMoreIP(ctx, eni.SubnetId, true); e != nil { - log.Errorf(ctx, "failed to patch subnet %v that has no more ip: %v", eni.SubnetId, e) - } + // try to allocate fixed ip + var ipToAllocate = wep.Spec.IP + var backoff = ipamgeneric.CCECniTimeout / time.Duration(len(enis)) + var addIPErrors []error + var successEni *enisdk.Eni + var successIP string + for _, oneEni := range enis { + if timeoutErr := assertDeadline(deadline); timeoutErr != nil { + return "", nil, timeoutErr + } + log.Infof(ctx, "try to add IP %v to %v", ipToAllocate, oneEni.EniId) + ipResult, addErr := ipam.batchAddPrivateIP(ctx, []string{ipToAllocate}, 0, oneEni.EniId) + if addErr != nil { + addIPErrors = append(addIPErrors, addErr) + ipam.handleAllocateError(ctx, addErr, oneEni, backoff) + if cloud.IsErrorPrivateIPInUse(addErr) { + break } - if cloud.IsErrorRateLimit(err) { + + // failed: continue to try other eni + if cloud.IsErrorRateLimit(addErr) { time.Sleep(wait.Jitter(rateLimitErrorSleepPeriod, rateLimitErrorJitterFactor)) } - if cloud.IsErrorPrivateIPInUse(err) { - log.Warningf(ctx, "fix ip %v has been mistakenly 
allocated to somewhere else for pod (%v %v)", ipToAllocate, namespace, name) - ipToAllocate = "" - continue + } else { + // success + successEni = oneEni + successIP = ipResult[0] + break + } + } + + if successEni == nil { + return "", nil, fmt.Errorf("all %d enis binded cannot add IP %s: %v", + len(enis), wep.Spec.IP, utilerrors.NewAggregate(addIPErrors)) + } + + log.Infof(ctx, "add private IP %s for pod (%s %s) successfully", successIP, namespace, name) + + ipam.incPrivateIPNumCache(successEni.EniId, false) + storeErr := ipam.datastore.AddPrivateIPToStore(node.Name, successEni.EniId, successIP, true) + if storeErr != nil { + log.Warningf(ctx, "add private IP %s to store failed: %s", successIP, storeErr) + } + metric.MultiEniMultiIPEniIPCount.WithLabelValues(metric.MetaInfo.ClusterID, metric.MetaInfo.VPCID, + successEni.SubnetId).Inc() + + return successIP, successEni, nil +} + +func (ipam *IPAM) allocateIPForOrdinaryPod(ctx context.Context, node *v1.Node, pod *v1.Pod, containerID string, + enis []*enisdk.Eni, wep *v1alpha1.WorkloadEndpoint, deadline time.Time) (*v1alpha1.WorkloadEndpoint, error) { + // 1. allocate ip from cache + allocatedIP, allocatedEni, allocatedErr := ipam.tryToAllocateIPFromCache(ctx, node, enis, deadline) + if allocatedErr != nil { + log.Errorf(ctx, "allocate ip for pod (%s %s) failed: %s", pod.Namespace, pod.Name, allocatedErr) + return nil, allocatedErr + } + + // 2. update or create wep + newWep := ipam.fillFieldsToWep(wep, pod, containerID, allocatedIP, allocatedEni) + if wep == nil { + // create wep + _, createErr := ipam.crdClient.CceV1alpha1().WorkloadEndpoints(newWep.Namespace). 
+ Create(ctx, newWep, metav1.CreateOptions{}) + if createErr != nil { + if rollbackErr := ipam.tryDeleteIPByWep(ctx, newWep); rollbackErr != nil { + log.Errorf(ctx, "rollback wep for pod (%s %s) failed: %s", pod.Namespace, pod.Name, rollbackErr) } - if isErrorNeedExponentialBackoff(err) { - if _, ok := ipam.addIPBackoffCache[eni.EniId]; !ok { - ipam.addIPBackoffCache[eni.EniId] = util.NewBackoffWithCap(backoffCap) - log.Infof(ctx, "add backoff with cap %v for eni %v when handling pod (%v %v) due to error: %v", backoffCap, eni.EniId, namespace, name, err) - } + return nil, createErr + } + } else { + // update wep + _, updateErr := ipam.crdClient.CceV1alpha1().WorkloadEndpoints(newWep.Namespace). + Update(ctx, newWep, metav1.UpdateOptions{}) + if updateErr != nil { + time.Sleep(minPrivateIPLifeTime) + if rollbackErr := ipam.tryDeleteSubnetIPRetainAllocateCache(ctx, newWep); rollbackErr != nil { + log.Errorf(ctx, "rollback wep for pod (%s %s) failed: %s", pod.Namespace, pod.Name, rollbackErr) } - return "", err - } else if err == nil { - break + return nil, updateErr } } + log.Infof(ctx, "update or create wep with spec %+v for pod (%v %v) successfully", + newWep.Spec, newWep.Namespace, newWep.Name) - if len(ipResult) < 1 { - msg := "unexpected result from eni openapi: at least one ip should be added" - log.Error(ctx, msg) - return "", goerrors.New(msg) + // 3. 
update cache ip and wep + ipam.allocated.Add(allocatedIP, newWep) + return newWep, nil +} + +// tryToAllocateIPFromCache get ip from local cache +// increasing the pool if datastore has no available ip +func (ipam *IPAM) tryToAllocateIPFromCache(ctx context.Context, node *v1.Node, enis []*enisdk.Eni, deadline time.Time) ( + ipResult string, eni *enisdk.Eni, err error) { + firstEvent := true + + // it takes 1s to allocate ip from cloud, retry 5 and wait 200ms+ per times + backoff := wait.Backoff{ + Steps: 5, + Duration: 200 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, } - log.Infof(ctx, "add private IP %v for pod (%v %v) successfully", ipResult, namespace, name) + err = wait.ExponentialBackoff(backoff, func() (done bool, err error) { + if timeoutErr := assertDeadline(deadline); timeoutErr != nil { + return false, timeoutErr + } + if ipam.canAllocateIP(ctx, node.Name, enis) { + return true, nil + } - for _, ip := range ipResult { - ipam.incPrivateIPNumCache(eni.EniId, false) - ipam.datastore.AddPrivateIPToStore(node.Name, eni.EniId, ip, true) + if err := ipam.assertNodeCanIncreasePool(ctx, node, enis); err != nil { + return true, err + } + // only send one increase pool event for one ctx + if firstEvent { + ipam.sendIncreasePoolEvent(ctx, node, enis, true) + firstEvent = false + } + return false, nil + }) + if err == wait.ErrWaitTimeout { + return "", nil, fmt.Errorf("allocate ip for node %v error", node.Name) + } + if err != nil { + return "", nil, err + } + + return ipam.allocateIPFromCache(ctx, node.Name, enis) +} + +func (ipam *IPAM) canAllocateIP(_ context.Context, nodeName string, enis []*enisdk.Eni) bool { + var ( + total = 0 + used = 0 + ) + + for _, eni := range enis { + t, u, err := ipam.datastore.GetENIStats(nodeName, eni.EniId) + if err == nil { + total += t + used += u + } } - metric.MultiEniMultiIPEniIPCount.WithLabelValues(metric.MetaInfo.ClusterID, metric.MetaInfo.VPCID, eni.SubnetId).Inc() + return total > used +} + +func (ipam *IPAM) 
allocateIPFromCache(ctx context.Context, node string, enis []*enisdk.Eni) (string, *enisdk.Eni, error) { + var ( + failedEniList []string + ) + + ipam.sortENIByDatastoreStats(node, enis, false) + + // iterate each subnet, try to allocate + for _, eni := range enis { + ipResult, err := ipam.datastore.AllocatePodPrivateIPByENI(node, eni.EniId) + if err == nil { + // allocate one successfully + return ipResult, eni, nil + } + log.Warningf(ctx, "datastore try allocate ip for node %v in eni %v failed: %v", node, eni.EniId, err) + failedEniList = append(failedEniList, eni.EniId) + } - return ipResult[0], nil + return "", nil, fmt.Errorf("no available ip address in datastore for node %v in enis: %v", node, failedEniList) } -func (ipam *IPAM) tryAllocateIP( +func (ipam *IPAM) batchAllocateIPFromCloud( ctx context.Context, eni *enisdk.Eni, node *v1.Node, @@ -295,174 +500,116 @@ func (ipam *IPAM) tryAllocateIP( var err error for batchAddNum > 0 { - ipResult, err = ipam.batchAddPrivateIPWithExponentialBackoff(ctx, ipResult, batchAddNum, eni, node) + if err := ipam.assertEniCanIncreasePool(ctx, node, eni); err != nil { + break + } + + ipResult, err = ipam.batchAllocateIPWithBackoff(ctx, batchAddNum, eni) if err == nil { - log.Infof(ctx, "batch add %v private ip(s) for %v on eni %s successfully, %v", batchAddNum, node.Name, eni.EniId, ipResult) - ipam.removeAddIPBackoffCache(eni.EniId, false) + log.Infof(ctx, "batch add %d private ip(s) for %s on eni %s successfully, %v", + batchAddNum, node.Name, eni.EniId, ipResult) break } if err != nil { - log.Warningf(ctx, "warn: batch add %v private ip(s) for node %v failed: %v", batchAddNum, node.Name, err) - - if cloud.IsErrorSubnetHasNoMoreIP(err) { - if e := ipam.sbnCtl.DeclareSubnetHasNoMoreIP(ctx, eni.SubnetId, true); e != nil { - log.Errorf(ctx, "failed to patch subnet %v that has no more ip: %v", eni.SubnetId, e) - } - } - if cloud.IsErrorRateLimit(err) { - time.Sleep(wait.Jitter(rateLimitErrorSleepPeriod, 
rateLimitErrorJitterFactor)) - } + // retry condition: batchAddNum > 1 && isErrorNeedExponentialBackoff + log.Warningf(ctx, "batch add %s private ip(s) for node %s failed: %s", batchAddNum, node.Name, err) - if batchAddNum == 1 && cloud.IsErrorSubnetHasNoMoreIP(err) { - ipam.sbnCtl.DeclareSubnetHasNoMoreIP(ctx, eni.SubnetId, true) + if batchAddNum == 1 { + ipam.handleAllocateError(ctx, err, eni, backoffCap) } if isErrorNeedExponentialBackoff(err) { - if batchAddNum == 1 { - ipam.lock.Lock() - if _, ok := ipam.addIPBackoffCache[eni.EniId]; !ok { - ipam.addIPBackoffCache[eni.EniId] = util.NewBackoffWithCap(backoffCap) - log.Infof(ctx, "add backoff with cap %v for eni %v due to error: %v", backoffCap, eni.EniId, err) - } - ipam.lock.Unlock() - } - // decrease batchAddNum then retry batchAddNum = batchAddNum >> 1 continue } - msg := fmt.Sprintf("error batch add %v private ip(s): %v", batchAddNum, err) - log.Error(ctx, msg) - return nil, goerrors.New(msg) + // don't retry for other errors + break } } - if len(ipResult) == 0 { - msg := fmt.Sprintf("cannot batch add more ip to eni on node %s instance %s", node.Name, eni.EniId) - log.Error(ctx, msg) - return nil, goerrors.New(msg) + if err != nil { + return nil, err } - for _ = range ipResult { + for _, allocatedIP := range ipResult { ipam.incPrivateIPNumCache(eni.EniId, false) + err := ipam.datastore.AddPrivateIPToStore(node.Name, eni.EniId, allocatedIP, false) + if err != nil { + log.Errorf(ctx, "add private ip %s to datastore failed: %v", allocatedIP, err) + } metric.MultiEniMultiIPEniIPCount.WithLabelValues(metric.MetaInfo.ClusterID, metric.MetaInfo.VPCID, eni.SubnetId).Inc() } return ipResult, nil } -func (ipam *IPAM) tryAllocateIPByENIs(ctx context.Context, node string, enis []*enisdk.Eni) (string, *enisdk.Eni, error) { - var ( - eniList []string - ) - - ipam.sortENIByDatastoreStats(node, enis, false) - - // iterate each subnet, try to allocate - for _, eni := range enis { - ipResult, err := 
ipam.datastore.AllocatePodPrivateIPByENI(node, eni.EniId) - if err == nil { - // allocate one successfully - return ipResult, eni, nil - } else { - log.Warningf(ctx, "datastore try allocate ip for node %v in eni %v failed: %v", node, eni.EniId, err) +// handleAllocateError +// +// isErrorSubnetHasNoMoreIP: declare subnet = no more ip +// isErrorNeedExponentialBackoff: add backoff to addIPBackoffCache +func (ipam *IPAM) handleAllocateError(ctx context.Context, allocateErr error, eni *enisdk.Eni, backoffCap time.Duration) { + if cloud.IsErrorSubnetHasNoMoreIP(allocateErr) { + if e := ipam.sbnCtl.DeclareSubnetHasNoMoreIP(ctx, eni.SubnetId, true); e != nil { + log.Errorf(ctx, "failed to patch subnet %v that has no more ip: %v", eni.SubnetId, e) } - - eniList = append(eniList, eni.EniId) } - - return "", nil, fmt.Errorf("no available ip address in datastore for node %v in enis: %v", node, eniList) -} - -func (ipam *IPAM) canAllocateIP(ctx context.Context, nodeName string, enis []*enisdk.Eni) bool { - var ( - total int = 0 - used int = 0 - ) - - for _, eni := range enis { - t, u, err := ipam.datastore.GetENIStats(nodeName, eni.EniId) - if err == nil { - total += t - used += u + if isErrorNeedExponentialBackoff(allocateErr) { + if _, ok := ipam.addIPBackoffCache.Get(eni.EniId); !ok { + ipam.addIPBackoffCache.Add(eni.EniId, util.NewBackoffWithCap(backoffCap)) + log.Infof(ctx, "add backoff with cap %s for eni %s due to error: %v", + backoffCap, eni.EniId, allocateErr) } } - - return total > used } -func (ipam *IPAM) handleIncreasePoolEvent(ctx context.Context, node *v1.Node, ch chan *event) { - log.Infof(ctx, "start increase pool goroutine for node %v", node.Name) +func (ipam *IPAM) handleIncreasePoolEvent(ctx context.Context, nodeName string, ch chan *event) { + log.Infof(ctx, "start increase pool goroutine for node %v", nodeName) for e := range ch { var ( - enis = e.enis - node = e.node - ctx = e.ctx - ipAddedENI *enisdk.Eni - ipResult []string - suitableENINum = 
len(enis) - err error - addIPErrors []error + enis = e.enis + node = e.node + ctx = e.ctx + err error ) - if e.passive && ipam.canAllocateIP(ctx, e.node.Name, e.enis) { + + if len(enis) == 0 { + log.Warningf(ctx, "no eni in node %s, skip increase pool", nodeName) continue } - - if !e.passive && ipam.idleIPNum(e.node.Name) >= ipam.idleIPPoolMinSize { + if ipam.canIgnoreIncreasingEvent(ctx, e) { continue } ipam.sortENIByDatastoreStats(node.Name, enis, true) + backoff := ipamgeneric.CCECniTimeout / time.Duration(len(enis)) for _, eni := range enis { - ipResult, err = ipam.tryAllocateIP(ctx, eni, node, ipam.batchAddIPNum, ipamgeneric.CCECniTimeout/time.Duration(suitableENINum)) + _, err = ipam.batchAllocateIPFromCloud(ctx, eni, node, ipam.batchAddIPNum, backoff) if err == nil { - ipAddedENI = eni break - } else { - addErr := fmt.Errorf("error ENI: %v, %v", eni.EniId, err.Error()) - addIPErrors = append(addIPErrors, addErr) } - } - - if ipAddedENI == nil { - msg := fmt.Sprintf("all %d enis bound to node %v cannot add IP: %v", len(enis), node.Name, utilerrors.NewAggregate(addIPErrors)) - log.Error(ctx, msg) - } - - for _, ip := range ipResult { - err := ipam.datastore.AddPrivateIPToStore(e.node.Name, ipAddedENI.EniId, ip, false) - if err != nil { - msg := fmt.Sprintf("add private ip %v to datastore failed: %v", ip, err) - log.Error(ctx, msg) + if cloud.IsErrorRateLimit(err) { + // wait to try other eni + time.Sleep(wait.Jitter(rateLimitErrorSleepPeriod, rateLimitErrorJitterFactor)) } + log.Warningf(ctx, "failed to batch add ip on eni %s: %s", eni.EniId, err) } } - log.Infof(ctx, "closed channel for node %v, exit...", node.Name) + log.Infof(ctx, "closed channel for node %s, exit...", nodeName) } -func (ipam *IPAM) buildAllocatedCache(ctx context.Context) error { - ipam.lock.Lock() - defer ipam.lock.Unlock() - - selector, err := wepListerSelector() - if err != nil { - log.Errorf(ctx, "error parsing requirement: %v", err) - return err +func (ipam *IPAM) 
canIgnoreIncreasingEvent(ctx context.Context, evt *event) bool { + if evt.passive && ipam.canAllocateIP(ctx, evt.node.Name, evt.enis) { + return true } - wepList, err := ipam.crdInformer.Cce().V1alpha1().WorkloadEndpoints().Lister().List(selector) - if err != nil { - return err - } - for _, wep := range wepList { - nwep := wep.DeepCopy() - ipam.allocated[wep.Spec.IP] = nwep - log.Infof(ctx, "build allocated pod cache: found IP %v assigned to pod (%v %v)", wep.Spec.IP, wep.Namespace, wep.Name) + if !evt.passive && ipam.idleIPNum(evt.node.Name) >= ipam.idleIPPoolMinSize { + return true } - return nil + return false } func (ipam *IPAM) buildAllocatedNodeCache(ctx context.Context) error { @@ -559,10 +706,7 @@ func (ipam *IPAM) rebuildNodeDataStoreCache(ctx context.Context, node *v1.Node, for _, ip := range eni.PrivateIpSet { if !ip.Primary { // ipam will sync all weps to build allocated cache when it starts - assigned := false - if _, ok := ipam.allocated[ip.PrivateIpAddress]; ok { - assigned = true - } + assigned := ipam.allocated.Exists(ip.PrivateIpAddress) // If the private IP is not in the CIDR of Eni, it means that it is a cross subnet IP crossSubnet := !ipNet.Contains(net.ParseIP(ip.PrivateIpAddress)) @@ -570,7 +714,7 @@ func (ipam *IPAM) rebuildNodeDataStoreCache(ctx context.Context, node *v1.Node, assigned = true } - ipam.datastore.Synchronized(func() error { + syncErr := ipam.datastore.Synchronized(func() error { err := ipam.datastore.AddPrivateIPToStoreUnsafe(node.Name, eni.EniId, ip.PrivateIpAddress, assigned, crossSubnet) if err != nil { msg := fmt.Sprintf("add private ip %v to datastore failed: %v", ip.PrivateIpAddress, err) @@ -580,7 +724,9 @@ func (ipam *IPAM) rebuildNodeDataStoreCache(ctx context.Context, node *v1.Node, } return err }) - + if syncErr != nil { + log.Warningf(ctx, "datastore Synchronized failed: %s", syncErr) + } } } } @@ -634,23 +780,11 @@ func (ipam *IPAM) handleRebuildNodeDatastoreEvent(ctx context.Context, node *v1. 
} func (ipam *IPAM) addIPBackoff(eniID string, cap time.Duration) { - ipam.lock.Lock() - if _, ok := ipam.addIPBackoffCache[eniID]; !ok { - ipam.addIPBackoffCache[eniID] = util.NewBackoffWithCap(cap) - } - ipam.lock.Unlock() + ipam.addIPBackoffCache.AddIfNotExists(eniID, util.NewBackoffWithCap(cap)) } -func (ipam *IPAM) removeAddIPBackoffCache(eniID string, lockless bool) bool { - if !lockless { - ipam.lock.Lock() - defer ipam.lock.Unlock() - } - _, ok := ipam.addIPBackoffCache[eniID] - if ok { - delete(ipam.addIPBackoffCache, eniID) - } - return ok +func (ipam *IPAM) removeAddIPBackoffCache(eniID string, _ bool) bool { + return ipam.addIPBackoffCache.Delete(eniID) } func (ipam *IPAM) updateIPPoolStatus(ctx context.Context, node *v1.Node, instanceID string, enis []enisdk.Eni) error { @@ -750,10 +884,11 @@ func (ipam *IPAM) checkIdleIPPool() (bool, error) { if idle < ipam.idleIPPoolMinSize { log.Infof(ctx, "ipam will increase pool due to idle ip num %v less than --idle-ip-pool-min-size %v", idle, ipam.idleIPPoolMinSize) - ipam.lock.Lock() - enis := ipam.eniCache[node.Name] - ipam.lock.Unlock() - + enis, ok := ipam.eniCache.Get(node.Name) + if !ok { + log.Warningf(ctx, "no eni in node %s", node.Name) + return + } ipam.sendIncreasePoolEvent(ctx, node, enis, false) } @@ -766,7 +901,7 @@ func (ipam *IPAM) checkIdleIPPool() (bool, error) { } // sendIncreasePoolEvent send increase poll event to cache chan -// create chan if not exsit +// create chan if not exists func (ipam *IPAM) sendIncreasePoolEvent(ctx context.Context, node *v1.Node, enis []*enisdk.Eni, passive bool) { var ( evt = &event{ @@ -782,13 +917,19 @@ func (ipam *IPAM) sendIncreasePoolEvent(ctx context.Context, node *v1.Node, enis ipam.lock.Lock() ch, ok = ipam.increasePoolEventChan[evt.node.Name] if !ok { - ch = make(chan *event) + ch = make(chan *event, increasePoolSizePerNode) ipam.increasePoolEventChan[node.Name] = ch - go ipam.handleIncreasePoolEvent(ctx, evt.node, ch) + go 
ipam.handleIncreasePoolEvent(ctx, node.Name, ch) } ipam.lock.Unlock() - ch <- evt + select { + case ch <- evt: + return + default: + log.Warningf(ctx, "node %s increase pool is full", node.Name) + return + } } func (ipam *IPAM) checkIdleIPPoolPeriodically() error { @@ -800,25 +941,21 @@ func (ipam *IPAM) batchAddPrivateIP(ctx context.Context, privateIPs []string, ba return ipam.cloud.BatchAddPrivateIP(ctx, privateIPs, batchAddNum, eniID) } -func (ipam *IPAM) batchAddPrivateIPWithExponentialBackoff( - ctx context.Context, - privateIPs []string, - batchAddNum int, - eni *enisdk.Eni, - node *v1.Node, -) ([]string, error) { +// batchAllocateIPWithBackoff allocate ip by eni from cloud with backoff if exists +func (ipam *IPAM) batchAllocateIPWithBackoff(ctx context.Context, batchAddNum int, eni *enisdk.Eni) ([]string, error) { var backoffWaitPeriod time.Duration var backoff *wait.Backoff var ok bool const waitPeriodNum = 10 ipam.lock.Lock() - backoff = ipam.addIPBackoffCache[eni.EniId] + backoff, ok = ipam.addIPBackoffCache.Get(eni.EniId) if backoff != nil && backoff.Steps >= 0 { backoffWaitPeriod = backoff.Step() } ipam.lock.Unlock() + // backoff wait if backoffWaitPeriod != 0 { log.Infof(ctx, "backoff: wait %v to allocate private ip on %v", backoffWaitPeriod, eni.EniId) @@ -826,7 +963,7 @@ func (ipam *IPAM) batchAddPrivateIPWithExponentialBackoff( for i := 0; i < waitPeriodNum; i++ { time.Sleep(backoffWaitPeriod / waitPeriodNum) ipam.lock.RLock() - backoff, ok = ipam.addIPBackoffCache[eni.EniId] + backoff, ok = ipam.addIPBackoffCache.Get(eni.EniId) ipam.lock.RUnlock() if !ok { log.Warningf(ctx, "found backoff on eni %v removed", eni.EniId) @@ -835,40 +972,11 @@ func (ipam *IPAM) batchAddPrivateIPWithExponentialBackoff( } } - // if have reached backoff cap, first check then add ip - if backoff != nil && backoffWaitPeriod >= backoff.Cap { - // 1. 
check if subnet still has available ip - subnetID := eni.SubnetId - subnet, err := ipam.cloud.DescribeSubnet(ctx, subnetID) - if err == nil && subnet.AvailableIp <= 0 { - msg := fmt.Sprintf("backoff short-circuit: subnet %v has no available ip", subnetID) - log.Warning(ctx, msg) - return nil, goerrors.New(msg) - } - if err != nil { - log.Errorf(ctx, "failed to describe subnet %v: %v", subnetID, err) - } - - // 2. check if node cannot attach more ip due to memory - node, err := ipam.kubeInformer.Core().V1().Nodes().Lister().Get(node.Name) - if err == nil { - maxIPPerENI, err := utileni.GetMaxIPPerENIFromNodeAnnotations(node) - if err == nil { - resp, err := ipam.cloud.StatENI(ctx, eni.EniId) - if err == nil && len(resp.PrivateIpSet) >= maxIPPerENI { - msg := fmt.Sprintf("backoff short-circuit: eni %v cannot add more ip due to memory", eni.EniId) - log.Warning(ctx, msg) - return nil, goerrors.New(msg) - } - - if err != nil { - log.Errorf(ctx, "failed to get stat eni %v: %v", eni.EniId, err) - } - } - } + ipResults, batchErr := ipam.batchAddPrivateIP(ctx, []string{}, batchAddNum, eni.EniId) + if batchErr == nil { + ipam.removeAddIPBackoffCache(eni.EniId, false) } - - return ipam.batchAddPrivateIP(ctx, privateIPs, batchAddNum, eni.EniId) + return ipResults, batchErr } func (ipam *IPAM) sortENIByDatastoreStats(node string, enis []*enisdk.Eni, byTotal bool) { @@ -914,17 +1022,6 @@ func isErrorNeedExponentialBackoff(err error) bool { return cloud.IsErrorVmMemoryCanNotAttachMoreIpException(err) || cloud.IsErrorSubnetHasNoMoreIP(err) } -func isFixIPStatefulSetPodWep(wep *v1alpha1.WorkloadEndpoint) bool { - return wep.Spec.Type == ipamgeneric.WepTypeSts && wep.Spec.EnableFixIP == EnableFixIPTrue -} - -func isFixIPStatefulSetPod(pod *v1.Pod) bool { - if pod.Annotations == nil || !k8sutil.IsStatefulSetPod(pod) { - return false - } - return pod.Annotations[StsPodAnnotationEnableFixIP] == EnableFixIPTrue -} - func buildInstanceIdToNodeNameMap(ctx context.Context, nodes 
[]*v1.Node) map[string]string { instanceIdToNodeNameMap := make(map[string]string, len(nodes)) for _, n := range nodes { @@ -954,3 +1051,106 @@ func bccNodeListerSelector() (labels.Selector, error) { } return labels.NewSelector().Add(*requirement), nil } + +func (ipam *IPAM) fillFieldsToWep(wep *v1alpha1.WorkloadEndpoint, pod *v1.Pod, containerID, + allocatedIP string, allocatedEni *enisdk.Eni) *v1alpha1.WorkloadEndpoint { + if wep == nil { + wep = &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: pod.Namespace, + Finalizers: []string{ipamgeneric.WepFinalizer}, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + Type: ipamgeneric.WepTypePod, + }, + } + } + if wep.Labels == nil { + wep.Labels = make(map[string]string) + } + wep.Spec.ContainerID = containerID + wep.Spec.IP = allocatedIP + wep.Spec.ENIID = allocatedEni.EniId + wep.Spec.Mac = allocatedEni.MacAddress + wep.Spec.Node = pod.Spec.NodeName + wep.Spec.SubnetID = allocatedEni.SubnetId + wep.Spec.UpdateAt = metav1.Time{Time: time.Unix(0, 0)} + wep.Labels[ipamgeneric.WepLabelSubnetIDKey] = allocatedEni.SubnetId + wep.Labels[ipamgeneric.WepLabelInstanceTypeKey] = string(metadata.InstanceTypeExBCC) + if k8sutil.IsStatefulSetPod(pod) { + wep.Spec.Type = ipamgeneric.WepTypeSts + wep.Labels[ipamgeneric.WepLabelStsOwnerKey] = util.GetStsName(wep) + } + if pod.Annotations != nil { + wep.Spec.EnableFixIP = pod.Annotations[StsPodAnnotationEnableFixIP] + wep.Spec.FixIPDeletePolicy = pod.Annotations[StsPodAnnotationFixIPDeletePolicy] + } + return wep +} + +func (ipam *IPAM) assertNodeCanIncreasePool(ctx context.Context, node *v1.Node, enis []*enisdk.Eni) error { + var lastErr error + canIncrease := false + + for _, eni := range enis { + oneErr := ipam.assertEniCanIncreasePool(ctx, node, eni) + if oneErr == nil { + canIncrease = true + break + } + lastErr = oneErr + } + if canIncrease { + return nil + } + if lastErr != nil { + return lastErr + } + return fmt.Errorf("node doesn't have 
eni") +} + +func (ipam *IPAM) assertEniCanIncreasePool(ctx context.Context, node *v1.Node, eni *enisdk.Eni) error { + // 1. check if subnet still has available ip + subnetID := eni.SubnetId + subnetInfo, err := ipam.cloud.DescribeSubnet(ctx, subnetID) + if err == nil && subnetInfo.AvailableIp <= 0 { + log.Warningf(ctx, "assertEniCanIncreasePool: subnet %s has no available ip", subnetID) + return fmt.Errorf("subnet %s has no available ip", subnetID) + } + if err != nil { + log.Warningf(ctx, "assertEniCanIncreasePool: failed to describe subnet %s: %s", subnetID, err) + } + + // 2. check if node cannot attach more ip due to memory + nodeFromKube, nodeErr := ipam.kubeInformer.Core().V1().Nodes().Lister().Get(node.Name) + if nodeErr != nil { + log.Warningf(ctx, "assertEniCanIncreasePool: failed to get node %s: %s", node.Name, nodeErr) + return nil + } + + maxIPPerENI, annoErr := utileni.GetMaxIPPerENIFromNodeAnnotations(nodeFromKube) + if annoErr != nil { + log.Warningf(ctx, "assertEniCanIncreasePool: failed to get MaxIPPerENI of node %s: %s", node.Name, annoErr) + return nil + } + + resp, eniErr := ipam.cloud.StatENI(ctx, eni.EniId) + if eniErr != nil { + log.Errorf(ctx, "assertEniCanIncreasePool: failed to get stat eni %v: %v", eni.EniId, eniErr) + return nil + } + + if len(resp.PrivateIpSet) >= maxIPPerENI { + log.Warningf(ctx, "assertEniCanIncreasePool: eni %s cannot add more ip due to memory", eni.EniId) + return fmt.Errorf("eni %s cannot add more ip due to memory", eni.EniId) + } + return nil +} + +func assertDeadline(deadline time.Time) error { + if time.Now().After(deadline) { + return fmt.Errorf(errorMsgAllocateTimeout) + } + return nil +} diff --git a/pkg/eniipam/ipam/bcc/ipam_test.go b/pkg/eniipam/ipam/bcc/ipam_test.go index fd276e8..06a413d 100644 --- a/pkg/eniipam/ipam/bcc/ipam_test.go +++ b/pkg/eniipam/ipam/bcc/ipam_test.go @@ -18,6 +18,7 @@ package bcc import ( "context" "fmt" + "github.com/baidubce/bce-sdk-go/services/vpc" "reflect" "testing" "time" @@ 
-27,8 +28,10 @@ import ( "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" mockcloud "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud/testing" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/config/types" + mocksubnet "github.com/baidubce/baiducloud-cce-cni-driver/pkg/controller/subnet/mock" datastorev1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/datastore/v1" ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/ipcache" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" crdfake "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned/fake" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" @@ -81,36 +84,29 @@ func setupEnv(ctrl *gomock.Controller) ( } func waitForCacheSync(kubeInformer informers.SharedInformerFactory, crdInformer crdinformers.SharedInformerFactory) { - __waitForCacheSync(kubeInformer, crdInformer, wait.NeverStop) -} - -func __waitForCacheSync(kubeInformer informers.SharedInformerFactory, crdInformer crdinformers.SharedInformerFactory, stopChan <-chan struct{}) { - nodeInformer := kubeInformer.Core().V1().Nodes().Informer() - podInformer := kubeInformer.Core().V1().Pods().Informer() - stsInformer := kubeInformer.Apps().V1().StatefulSets().Informer() - wepInformer := crdInformer.Cce().V1alpha1().WorkloadEndpoints().Informer() - ippoolInformer := crdInformer.Cce().V1alpha1().IPPools().Informer() - subnetInformer := crdInformer.Cce().V1alpha1().Subnets().Informer() - pstsInfomer := crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Informer() + stopChan := wait.NeverStop + kubeInformer.Core().V1().Nodes().Informer() + kubeInformer.Core().V1().Pods().Informer() + kubeInformer.Apps().V1().StatefulSets().Informer() + crdInformer.Cce().V1alpha1().WorkloadEndpoints().Informer() + crdInformer.Cce().V1alpha1().IPPools().Informer() + 
crdInformer.Cce().V1alpha1().Subnets().Informer() + crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Informer() kubeInformer.Start(stopChan) crdInformer.Start(stopChan) - time.Sleep(time.Microsecond) + __waitForCacheSync(kubeInformer, crdInformer, stopChan) +} - cache.WaitForNamedCacheSync( - "cce-ipam", - stopChan, - nodeInformer.HasSynced, - podInformer.HasSynced, - stsInformer.HasSynced, - wepInformer.HasSynced, - ippoolInformer.HasSynced, - subnetInformer.HasSynced, - pstsInfomer.HasSynced, - ) +func __waitForCacheSync(kubeInformer informers.SharedInformerFactory, crdInformer crdinformers.SharedInformerFactory, stopChan <-chan struct{}) { + time.Sleep(time.Microsecond) + kubeInformer.WaitForCacheSync(stopChan) + crdInformer.WaitForCacheSync(stopChan) + time.Sleep(time.Microsecond) } func Test_buildInstanceIdToNodeNameMap(t *testing.T) { + t.Parallel() type args struct { ctx context.Context nodes []*v1.Node @@ -164,12 +160,12 @@ func Test_buildInstanceIdToNodeNameMap(t *testing.T) { } func TestIPAM_Allocate(t *testing.T) { + t.Parallel() type fields struct { ctrl *gomock.Controller eniCache map[string][]*enisdk.Eni privateIPNumCache map[string]int cacheHasSynced bool - allocated map[string]*v1alpha1.WorkloadEndpoint eventBroadcaster record.EventBroadcaster eventRecorder record.EventRecorder kubeInformer informers.SharedInformerFactory @@ -296,7 +292,6 @@ func TestIPAM_Allocate(t *testing.T) { }}, }, cacheHasSynced: true, - allocated: map[string]*v1alpha1.WorkloadEndpoint{}, privateIPNumCache: map[string]int{}, datastore: ds, eventBroadcaster: brdcaster, @@ -369,17 +364,18 @@ func TestIPAM_Allocate(t *testing.T) { }, }, metav1.CreateOptions{}) - crdClient.CceV1alpha1().WorkloadEndpoints(v1.NamespaceDefault).Create(context.TODO(), &v1alpha1.WorkloadEndpoint{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-0", - }, - Spec: v1alpha1.WorkloadEndpointSpec{ - SubnetID: "sbn-test", - IP: "10.1.1.1", - Type: ipamgeneric.WepTypeSts, - 
}, - }, metav1.CreateOptions{}) + crdClient.CceV1alpha1().WorkloadEndpoints(v1.NamespaceDefault).Create(context.TODO(), + &v1alpha1.WorkloadEndpoint{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-0", + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + SubnetID: "sbn-test", + IP: "10.1.1.1", + Type: ipamgeneric.WepTypeSts, + }, + }, metav1.CreateOptions{}) waitForCacheSync(kubeInformer, crdInformer) @@ -409,7 +405,6 @@ func TestIPAM_Allocate(t *testing.T) { }, }, cacheHasSynced: true, - allocated: map[string]*v1alpha1.WorkloadEndpoint{}, privateIPNumCache: map[string]int{}, datastore: ds, eventBroadcaster: brdcaster, @@ -456,11 +451,18 @@ func TestIPAM_Allocate(t *testing.T) { if tt.fields.ctrl != nil { defer tt.fields.ctrl.Finish() } + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) + } + ipam := &IPAM{ - eniCache: tt.fields.eniCache, + eniCache: eniCache, privateIPNumCache: tt.fields.privateIPNumCache, cacheHasSynced: tt.fields.cacheHasSynced, - allocated: tt.fields.allocated, + allocated: ipcache.NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), + addIPBackoffCache: ipcache.NewCacheMap[*wait.Backoff](), eventBroadcaster: tt.fields.eventBroadcaster, eventRecorder: tt.fields.eventRecorder, kubeInformer: tt.fields.kubeInformer, @@ -492,6 +494,7 @@ func TestIPAM_Allocate(t *testing.T) { } func TestIPAM_Release(t *testing.T) { + t.Parallel() type fields struct { ctrl *gomock.Controller eniCache map[string][]*enisdk.Eni @@ -839,7 +842,12 @@ func TestIPAM_Release(t *testing.T) { ) ipamServer := ipam.(*IPAM) ipamServer.cacheHasSynced = true - ipamServer.eniCache = tt.fields.eniCache + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) 
+ } + ipamServer.eniCache = eniCache ipamServer.clock = clock.NewFakeClock(time.Unix(0, 0)) if tt.fields.idleIPPoolMaxSize > 0 { ipamServer.idleIPPoolMaxSize = tt.fields.idleIPPoolMaxSize @@ -887,6 +895,7 @@ func startInformers(kubeInformer informers.SharedInformerFactory, crdInformer cr } func TestIPAM_gcLeakedIP(t *testing.T) { + t.Parallel() type fields struct { ctrl *gomock.Controller datastore *datastorev1.DataStore @@ -1043,6 +1052,11 @@ func TestIPAM_gcLeakedIP(t *testing.T) { if tt.fields.ctrl != nil { defer tt.fields.ctrl.Finish() } + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) + } ipam := &IPAM{ eventBroadcaster: tt.fields.eventBroadcaster, eventRecorder: tt.fields.eventRecorder, @@ -1055,9 +1069,9 @@ func TestIPAM_gcLeakedIP(t *testing.T) { vpcID: tt.fields.vpcID, clusterID: tt.fields.clusterID, datastore: tt.fields.datastore, - eniCache: tt.fields.eniCache, + eniCache: eniCache, possibleLeakedIPCache: tt.fields.possibleLeakedIPCache, - allocated: tt.fields.allocated, + allocated: ipcache.NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), bucket: tt.fields.bucket, batchAddIPNum: tt.fields.batchAddIPNum, cacheHasSynced: tt.fields.cacheHasSynced, @@ -1085,12 +1099,12 @@ func TestIPAM_gcLeakedIP(t *testing.T) { } func TestIPAM_buildAllocatedCache(t *testing.T) { + t.Parallel() type fields struct { ctrl *gomock.Controller eniCache map[string][]*enisdk.Eni privateIPNumCache map[string]int cacheHasSynced bool - allocated map[string]*v1alpha1.WorkloadEndpoint eventBroadcaster record.EventBroadcaster eventRecorder record.EventRecorder kubeInformer informers.SharedInformerFactory @@ -1136,7 +1150,6 @@ func TestIPAM_buildAllocatedCache(t *testing.T) { }}, }, cacheHasSynced: true, - allocated: map[string]*v1alpha1.WorkloadEndpoint{}, privateIPNumCache: map[string]int{}, eventBroadcaster: brdcaster, eventRecorder: recorder, @@ -1157,11 +1170,16 @@ func TestIPAM_buildAllocatedCache(t 
*testing.T) { if tt.fields.ctrl != nil { defer tt.fields.ctrl.Finish() } + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) + } ipam := &IPAM{ - eniCache: tt.fields.eniCache, + eniCache: eniCache, privateIPNumCache: tt.fields.privateIPNumCache, cacheHasSynced: tt.fields.cacheHasSynced, - allocated: tt.fields.allocated, + allocated: ipcache.NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), eventBroadcaster: tt.fields.eventBroadcaster, eventRecorder: tt.fields.eventRecorder, kubeInformer: tt.fields.kubeInformer, @@ -1187,11 +1205,11 @@ func TestIPAM_buildAllocatedCache(t *testing.T) { } func TestIPAM_buildInuseENICache(t *testing.T) { + t.Parallel() type fields struct { eniCache map[string][]*enisdk.Eni privateIPNumCache map[string]int cacheHasSynced bool - allocated map[string]*v1alpha1.WorkloadEndpoint eventBroadcaster record.EventBroadcaster eventRecorder record.EventRecorder kubeInformer informers.SharedInformerFactory @@ -1254,11 +1272,16 @@ func TestIPAM_buildInuseENICache(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) 
+ } ipam := &IPAM{ - eniCache: tt.fields.eniCache, + eniCache: eniCache, privateIPNumCache: tt.fields.privateIPNumCache, cacheHasSynced: tt.fields.cacheHasSynced, - allocated: tt.fields.allocated, + allocated: ipcache.NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), eventBroadcaster: tt.fields.eventBroadcaster, eventRecorder: tt.fields.eventRecorder, kubeInformer: tt.fields.kubeInformer, @@ -1284,6 +1307,7 @@ func TestIPAM_buildInuseENICache(t *testing.T) { } func TestIPAM_updateIPPoolStatus(t *testing.T) { + t.Parallel() type fields struct { ctrl *gomock.Controller eniCache map[string][]*enisdk.Eni @@ -1385,11 +1409,16 @@ func TestIPAM_updateIPPoolStatus(t *testing.T) { if tt.fields.ctrl != nil { defer tt.fields.ctrl.Finish() } + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) + } ipam := &IPAM{ - eniCache: tt.fields.eniCache, + eniCache: eniCache, privateIPNumCache: tt.fields.privateIPNumCache, cacheHasSynced: tt.fields.cacheHasSynced, - allocated: tt.fields.allocated, + allocated: ipcache.NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), eventBroadcaster: tt.fields.eventBroadcaster, eventRecorder: tt.fields.eventRecorder, kubeInformer: tt.fields.kubeInformer, @@ -1415,6 +1444,7 @@ func TestIPAM_updateIPPoolStatus(t *testing.T) { } func TestIPAM_canAllocateIP(t *testing.T) { + t.Parallel() type fields struct { eniCache map[string][]*enisdk.Eni privateIPNumCache map[string]int @@ -1490,13 +1520,18 @@ func TestIPAM_canAllocateIP(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + for key, v := range tt.fields.eniCache { + + eniCache.Append(key, v...) 
+ } ipam := &IPAM{ - eniCache: tt.fields.eniCache, + eniCache: eniCache, privateIPNumCache: tt.fields.privateIPNumCache, possibleLeakedIPCache: tt.fields.possibleLeakedIPCache, - addIPBackoffCache: tt.fields.addIPBackoffCache, + addIPBackoffCache: ipcache.NewCacheMap[*wait.Backoff](), cacheHasSynced: tt.fields.cacheHasSynced, - allocated: tt.fields.allocated, + allocated: ipcache.NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), datastore: tt.fields.datastore, idleIPPoolMinSize: tt.fields.idleIPPoolMinSize, idleIPPoolMaxSize: tt.fields.idleIPPoolMaxSize, @@ -1551,14 +1586,12 @@ func mockIPAM(t *testing.T, stopChan chan struct{}) *IPAM { ) ipamServer := ipam.(*IPAM) ipamServer.cacheHasSynced = true - ipamServer.eniCache = map[string][]*enisdk.Eni{ - "test-node": {{ - EniId: "eni-test", - }}, - } + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + eniCache.Append("test-node", &enisdk.Eni{ + EniId: "eni-test", + }) + ipamServer.eniCache = eniCache ipamServer.clock = clock.NewFakeClock(time.Unix(0, 0)) - ipamServer.kubeInformer.Start(stopChan) - ipamServer.crdInformer.Start(stopChan) return ipam.(*IPAM) } @@ -1601,28 +1634,114 @@ func (suite *IPAMTest) TearDownTest() { func (suite *IPAMTest) TestIPAMRun() { mockInterface := suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT() mockInterface.ListENIs(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("list error")).AnyTimes() - go suite.ipam.Run(suite.ctx, suite.stopChan) + go func() { + err := suite.ipam.Run(suite.ctx, suite.stopChan) + suite.Assert().Nil(err) + }() time.Sleep(3 * time.Second) } -func (suite *IPAMTest) TestGetIPFromLocalPool() { - node := &corev1.Node{ +func (suite *IPAMTest) Test_tryToGetIPFromCache() { + var ( + nodeName = "test-node" + instanceID = "i-xxx" + eniID = "test-eni" + subnetID = "test-subnet" + ip = "192.168.1.109" + subnetInfo = &vpc.Subnet{ + AvailableIp: 10, + } + eniInfo = &enisdk.Eni{ + PrivateIpSet: []enisdk.PrivateIp{ + { + Primary: true, + PrivateIpAddress: "192.168.1.100", 
+ }, + { + Primary: false, + PrivateIpAddress: "192.168.1.107", + }, + }, + } + node = &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + } + enis = []*enisdk.Eni{ + { + EniId: eniID, + SubnetId: subnetID, + ZoneName: "zoneF", + }, + } + ) + + storeErr := suite.ipam.datastore.AddNodeToStore(nodeName, instanceID) + suite.Assert().Nil(storeErr) + storeErr = suite.ipam.datastore.AddENIToStore(node.Name, eniID) + suite.Assert().Nil(storeErr) + + // assertEniCanIncreasePool + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DescribeSubnet(suite.ctx, subnetID).Return(subnetInfo, nil).Times(2) + + _, nodeErr := suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", - }, - } - suite.ipam.datastore.AddENIToStore(node.Name, "eni-test-1") - enis := []*enisdk.Eni{ - { - EniId: "eni-test-1", - ZoneName: "zoneF", + Name: nodeName, + Annotations: map[string]string{ + "cce.io/max-ip-per-eni": "8", + }, }, - } - // mock cloud api - suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return([]string{"192.168.1.109"}, nil).AnyTimes() - _, _, err := suite.ipam.allocateIPFromLocalPool(suite.ctx, node, enis) - suite.Assert().Error(err, "time out to allocate from pool") + }, metav1.CreateOptions{}) + suite.Assert().Nil(nodeErr) + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().StatENI(suite.ctx, eniID).Return(eniInfo, nil).Times(2) + // end of assertEniCanIncreasePool + + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any()).Return([]string{"192.168.1.109"}, nil) + + allocatedIP, allocatedENI, err := suite.ipam.tryToAllocateIPFromCache(suite.ctx, node, enis, getDeadline()) + suite.Assert().Equal(ip, allocatedIP) + suite.Assert().Equal(eniID, allocatedENI.EniId) + 
suite.Assert().Nil(err) +} + +func (suite *IPAMTest) Test_tryToGetIPFromCache_error() { + var ( + nodeName = "test-node" + instanceID = "i-xxx" + eniID = "test-eni" + subnetID = "test-subnet" + subnetInfo = &vpc.Subnet{ + AvailableIp: 0, + } + node = &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + } + enis = []*enisdk.Eni{ + { + EniId: eniID, + SubnetId: subnetID, + ZoneName: "zoneF", + }, + } + ) + + storeErr := suite.ipam.datastore.AddNodeToStore(nodeName, instanceID) + suite.Assert().Nil(storeErr) + storeErr = suite.ipam.datastore.AddENIToStore(node.Name, eniID) + suite.Assert().Nil(storeErr) + + // assertEniCanIncreasePool + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DescribeSubnet(suite.ctx, subnetID).Return(subnetInfo, nil) + // end of assertEniCanIncreasePool + + _, _, err := suite.ipam.tryToAllocateIPFromCache(suite.ctx, node, enis, getDeadline()) + suite.Assert().ErrorContains(err, "has no available ip") } func (suite *IPAMTest) Test_tryAllocateIPForFixIPPodFailRateLimit() { @@ -1630,43 +1749,51 @@ func (suite *IPAMTest) Test_tryAllocateIPForFixIPPodFailRateLimit() { eni = &enisdk.Eni{ EniId: "eni-test", } - wep = data.MockFixedWorkloadEndpoint() - ipToAllocate = wep.Spec.IP - node = &corev1.Node{ + wep = data.MockFixedWorkloadEndpoint() + node = &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node", }, } - backoffCap = time.Second ) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("RateLimit")) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Len(1), 0, "eni-test").Return([]string{}, fmt.Errorf("RateLimit")) - _, err := suite.ipam.tryAllocateIPForFixIPPod(suite.ctx, eni, wep, ipToAllocate, node, backoffCap) + _, _, err := suite.ipam.allocateFixedIPFromCloud(suite.ctx, node, []*enisdk.Eni{eni}, wep, getDeadline()) suite.Error(err, "allocation ip error") } func (suite *IPAMTest) 
Test_tryAllocateIPForFixIPPodFailSubnetHasNoMoreIpException() { var ( - eni = &enisdk.Eni{ - EniId: "eni-test", + nodeName = "test-node" + eniID = "test-eni" + subnetID = "test-subnet" + eni = &enisdk.Eni{ + EniId: eniID, + SubnetId: subnetID, } - wep = data.MockFixedWorkloadEndpoint() - ipToAllocate = wep.Spec.IP - node = &corev1.Node{ + wep = data.MockFixedWorkloadEndpoint() + node = &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", + Name: nodeName, }, } - backoffCap = time.Second ) - suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("SubnetHasNoMoreIpException")) - suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Len(1), 0, "eni-test").Return([]string{}, fmt.Errorf("SubnetHasNoMoreIpException")) - - _, err := suite.ipam.tryAllocateIPForFixIPPod(suite.ctx, eni, wep, ipToAllocate, node, backoffCap) + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(fmt.Errorf("SubnetHasNoMoreIpException")) + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Len(1), 0, + eniID).Return([]string{}, fmt.Errorf("SubnetHasNoMoreIpException")) + // I don't know how to mock sbnCtl, because it implements multi interface + oldSbnCtl := suite.ipam.sbnCtl + suite.ipam.sbnCtl = mocksubnet.NewMockSubnetControl(gomock.NewController(suite.T())) + defer func() { + suite.ipam.sbnCtl = oldSbnCtl + }() + suite.ipam.sbnCtl.(*mocksubnet.MockSubnetControl).EXPECT().DeclareSubnetHasNoMoreIP(suite.ctx, subnetID, + true).Return(nil) + _, _, err := suite.ipam.allocateFixedIPFromCloud(suite.ctx, node, []*enisdk.Eni{eni}, wep, getDeadline()) suite.Error(err, "allocation ip error") - } func (suite *IPAMTest) Test_tryAllocateIPForFixIPPodFailPrivateIpInUseException() { @@ -1674,126 +1801,423 @@ func (suite *IPAMTest) Test_tryAllocateIPForFixIPPodFailPrivateIpInUseException( eni = &enisdk.Eni{ EniId: "eni-test", } - wep = data.MockFixedWorkloadEndpoint() - ipToAllocate = wep.Spec.IP - node = &corev1.Node{ + wep = data.MockFixedWorkloadEndpoint() + node = &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node", }, } - backoffCap = time.Second ) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("RateLimit")) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Len(1), 0, "eni-test").Return([]string{}, fmt.Errorf("PrivateIpInUseException")).AnyTimes() - _, err := suite.ipam.tryAllocateIPForFixIPPod(suite.ctx, eni, wep, ipToAllocate, node, backoffCap) + _, _, err := suite.ipam.allocateFixedIPFromCloud(suite.ctx, node, []*enisdk.Eni{eni}, wep, getDeadline()) suite.Error(err, "allocation ip error") } +// normal case func (suite *IPAMTest) Test_handleIncreasePoolEvent() { var ( - e = &event{ + nodeName = "test-node" + instanceID = "i-xxx" + eniID = "test-eni" + subnetID = "test-subnet" + e = 
&event{ node: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", + Name: nodeName, }, }, - enis: []*enisdk.Eni{&enisdk.Eni{ - EniId: "eni-test", + enis: []*enisdk.Eni{{ + EniId: eniID, + SubnetId: subnetID, }}, passive: false, ctx: suite.ctx, } + subnetInfo = &vpc.Subnet{ + AvailableIp: 10, + } + eniInfo = &enisdk.Eni{ + PrivateIpSet: []enisdk.PrivateIp{ + { + Primary: true, + PrivateIpAddress: "192.168.1.100", + }, + { + Primary: false, + PrivateIpAddress: "192.168.1.107", + }, + }, + } ipam = suite.ipam ) ipam.batchAddIPNum = 1 ipam.idleIPPoolMinSize = 2 - suite.ipam.datastore.AddNodeToStore("test-node", "i-xxx") - suite.ipam.datastore.AddENIToStore("test-node", "eni-test") - suite.ipam.datastore.AddPrivateIPToStore("test-node", "eni-test", "192.168.1.107", false) + err := suite.ipam.datastore.AddNodeToStore(nodeName, instanceID) + suite.Assert().Nil(err) + err = suite.ipam.datastore.AddENIToStore(nodeName, eniID) + suite.Assert().Nil(err) + err = suite.ipam.datastore.AddPrivateIPToStore(nodeName, eniID, "192.168.1.107", false) + suite.Assert().Nil(err) + + // assertEniCanIncreasePool + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DescribeSubnet(suite.ctx, subnetID).Return(subnetInfo, nil) + + _, nodeErr := suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{ + "cce.io/max-ip-per-eni": "8", + }, + }, + }, metav1.CreateOptions{}) + suite.Assert().Nil(nodeErr) + + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().StatENI(suite.ctx, eniID).Return(eniInfo, nil) + // end of assertEniCanIncreasePool - suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("RateLimit")).AnyTimes() - suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Len(0), 1, 
"eni-test").Return([]string{"192.168.1.108"}, nil).AnyTimes() + // batchAllocateIPWithBackoff + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT(). + BatchAddPrivateIP(suite.ctx, gomock.Len(0), ipam.batchAddIPNum, eniID). + Return([]string{"192.168.1.108"}, nil).AnyTimes() ch := make(chan *event, 1) ch <- e close(ch) - ipam.handleIncreasePoolEvent(suite.ctx, e.node, ch) + + ips, err := suite.ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + suite.Assert().Nil(err) + suite.Assert().Equal(1, len(ips)) + + ipam.handleIncreasePoolEvent(suite.ctx, nodeName, ch) + + ips, err = suite.ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + suite.Assert().Nil(err) + suite.Assert().Equal(2, len(ips)) } -func (suite *IPAMTest) Test_handleIncreasePoolEventError() { +// exception case +func (suite *IPAMTest) Test_handleIncreasePoolEvent_canIgnoreEvent() { var ( - e = &event{ + nodeName = "test-node" + instanceID = "i-xxx" + eniID = "test-eni" + subnetID = "test-subnet" + activeEvt = &event{ node: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", + Name: nodeName, }, }, - enis: []*enisdk.Eni{&enisdk.Eni{ - EniId: "eni-test", + enis: []*enisdk.Eni{{ + EniId: eniID, + SubnetId: subnetID, }}, passive: false, ctx: suite.ctx, } + passiveEvt = &event{ + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + }, + enis: []*enisdk.Eni{{ + EniId: eniID, + SubnetId: subnetID, + }}, + passive: true, + ctx: suite.ctx, + } ipam = suite.ipam ) ipam.batchAddIPNum = 1 - ipam.idleIPPoolMinSize = 2 + ipam.idleIPPoolMinSize = 1 + + err := suite.ipam.datastore.AddNodeToStore(nodeName, instanceID) + suite.Assert().Nil(err) + err = suite.ipam.datastore.AddENIToStore(nodeName, eniID) + suite.Assert().Nil(err) + err = suite.ipam.datastore.AddPrivateIPToStore(nodeName, eniID, "192.168.1.107", false) + suite.Assert().Nil(err) + + ch := make(chan *event, 10) + ch <- passiveEvt + ch <- activeEvt + close(ch) - suite.ipam.datastore.AddNodeToStore("test-node", 
"i-xxx") - suite.ipam.datastore.AddENIToStore("test-node", "eni-test") - suite.ipam.datastore.AddPrivateIPToStore("test-node", "eni-test", "192.168.1.107", false) + ips, err := suite.ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + suite.Assert().Nil(err) + suite.Assert().Equal(1, len(ips)) - suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("RateLimit")).AnyTimes() - suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(gomock.Any(), gomock.Len(0), 1, "eni-test").Return([]string{}, fmt.Errorf("SubnetHasNoMoreIpException")).AnyTimes() + ipam.handleIncreasePoolEvent(suite.ctx, nodeName, ch) - ch := make(chan *event, 1) - ch <- e - close(ch) - ipam.handleIncreasePoolEvent(suite.ctx, e.node, ch) + ips, err = suite.ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + suite.Assert().Nil(err) + suite.Assert().Equal(1, len(ips)) } -func (suite *IPAMTest) Test_handleIncreasePoolEventContinue() { +func (suite *IPAMTest) Test_handleIncreasePoolEvent_eniCannotIncreasePool() { var ( - e = &event{ + nodeName = "test-node" + instanceID = "i-xxx" + eniID = "test-eni" + subnetID = "test-subnet" + e = &event{ node: &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-node", + Name: nodeName, }, }, - enis: []*enisdk.Eni{&enisdk.Eni{ - EniId: "eni-test", + enis: []*enisdk.Eni{{ + EniId: eniID, + SubnetId: subnetID, }}, - passive: true, + passive: false, ctx: suite.ctx, } + subnetInfo = &vpc.Subnet{ + AvailableIp: 0, + } + eniInfo = &enisdk.Eni{ + PrivateIpSet: []enisdk.PrivateIp{ + { + Primary: true, + PrivateIpAddress: "192.168.1.100", + }, + { + Primary: false, + PrivateIpAddress: "192.168.1.107", + }, + }, + } ipam = suite.ipam ) ipam.batchAddIPNum = 1 ipam.idleIPPoolMinSize = 2 - suite.ipam.datastore.AddNodeToStore("test-node", "i-xxx") - suite.ipam.datastore.AddENIToStore("test-node", "eni-test") - suite.ipam.datastore.AddPrivateIPToStore("test-node", "eni-test", 
"192.168.1.107", false) + err := suite.ipam.datastore.AddNodeToStore(nodeName, instanceID) + suite.Assert().Nil(err) + err = suite.ipam.datastore.AddENIToStore(nodeName, eniID) + suite.Assert().Nil(err) + err = suite.ipam.datastore.AddPrivateIPToStore(nodeName, eniID, "192.168.1.107", false) + suite.Assert().Nil(err) - ch := make(chan *event, 1) + ch := make(chan *event) + go ipam.handleIncreasePoolEvent(suite.ctx, nodeName, ch) + + // 1. subnet has no available ip + // 1.1 prepare + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DescribeSubnet(suite.ctx, subnetID).Return(subnetInfo, nil) + + // 1.2 start ch <- e - close(ch) - ipam.handleIncreasePoolEvent(suite.ctx, e.node, ch) - e.passive = false - ipam.idleIPPoolMinSize = 0 - ch = make(chan *event, 1) + // 1.3 wait then check + time.Sleep(10 * time.Millisecond) + ips, err := suite.ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + suite.Assert().Nil(err) + suite.Assert().Equal(1, len(ips)) + + // 2. node cannot attach more ip due to memory + // 2.1 prepare + subnetInfo.AvailableIp = 2 + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DescribeSubnet(suite.ctx, subnetID).Return(subnetInfo, nil) + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().StatENI(suite.ctx, eniID).Return(eniInfo, nil) + + suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{ + "cce.io/max-ip-per-eni": "1", + }, + }, + }, metav1.CreateOptions{}) + + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + + // 2.2 start ch <- e + + // 2.3 wait then check + time.Sleep(10 * time.Millisecond) + ips, err = suite.ipam.datastore.GetUnassignedPrivateIPByNode(nodeName) + suite.Assert().Nil(err) + suite.Assert().Equal(1, len(ips)) + close(ch) - ipam.handleIncreasePoolEvent(suite.ctx, e.node, ch) } func TestIPAM(t *testing.T) { + t.Parallel() suite.Run(t, new(IPAMTest)) } + +func getDeadline() time.Time { + return 
time.Now().Add(allocateIPTimeout) +} + +func (suite *IPAMTest) Test_allocateIPForFixedIPPod() { + var ( + nodeName = "test-node" + instanceID = "i-xxxx" + containerID = "test-con" + oldEniID = "test-eni-1" + newEniID = "test-eni-2" + subnetID = "test-subnet" + ip = "111.111.222.222" + wepName = "test-pod" + node = &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + }, + } + pod = &corev1.Pod{} + enis = []*enisdk.Eni{ + { + EniId: newEniID, + SubnetId: subnetID, + ZoneName: "zoneF", + }, + } + wep = &v1alpha1.WorkloadEndpoint{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: wepName, + Namespace: v1.NamespaceDefault, + }, + Spec: networkingv1alpha1.WorkloadEndpointSpec{ + Node: nodeName, + ENIID: oldEniID, + }, + } + subnetInfo = &vpc.Subnet{ + AvailableIp: 10, + } + ) + // init datastore + storeErr := suite.ipam.datastore.AddNodeToStore(nodeName, instanceID) + suite.Assert().Nil(storeErr) + + // allocateFixedIPFromCloud + gomock.InOrder( + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT(). + DeletePrivateIP(suite.ctx, gomock.Any(), oldEniID).Return(fmt.Errorf("RateLimit")), + + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT(). + BatchAddPrivateIP(gomock.Any(), gomock.Len(1), 0, newEniID). 
+ Return([]string{}, fmt.Errorf("RateLimit")), + ) + + // assertEniCanIncreasePool + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DescribeSubnet(suite.ctx, subnetID).Return(subnetInfo, nil).Times(2) + + _, nodeErr := suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{ + "cce.io/max-ip-per-eni": "8", + }, + }, + }, metav1.CreateOptions{}) + suite.Assert().Nil(nodeErr) + + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().StatENI(suite.ctx, newEniID).Return(enis[0], nil).Times(2) + // end of assertEniCanIncreasePool + + // handleIncreasePoolEvent + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIP(suite.ctx, gomock.Len(0), 1, newEniID). + Return([]string{ip}, nil) + + _, wepErr := suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(v1.NamespaceDefault).Create(context.TODO(), + &v1alpha1.WorkloadEndpoint{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: wepName, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + SubnetID: subnetID, + IP: "10.1.1.1", + Type: ipamgeneric.WepTypeSts, + }, + }, metav1.CreateOptions{}) + suite.Assert().Nil(wepErr) + + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + newWep, err := suite.ipam.allocateIPForFixedIPPod(suite.ctx, node, pod, containerID, enis, wep, getDeadline()) + suite.Assert().Nil(err) + suite.Assert().Equal(ip, newWep.Spec.IP) + suite.Assert().Equal(newEniID, newWep.Spec.ENIID) +} + +func (suite *IPAMTest) Test_checkIdleIPPool() { + var ( + nodeWithEni = "test-node-1" + instanceID1 = "test-inst-1" + nodeWithoutEni = "test-node-2" + instanceID2 = "test-inst-2" + subnetID = "test-subnet" + subnetInfo = &vpc.Subnet{ + AvailableIp: 0, + } + ) + // init node + _, nodeErr1 := suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.LabelInstanceType: "BCC", + }, + Name: 
nodeWithEni, + Annotations: map[string]string{ + "cce.io/max-ip-per-eni": "8", + }, + }, + }, metav1.CreateOptions{}) + suite.Assert().Nil(nodeErr1) + + _, nodeErr2 := suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + v1.LabelInstanceType: "BCC", + }, + Name: nodeWithoutEni, + Annotations: map[string]string{ + "cce.io/max-ip-per-eni": "8", + }, + }, + }, metav1.CreateOptions{}) + suite.Assert().Nil(nodeErr2) + + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + // init datastore + storeErr := suite.ipam.datastore.AddNodeToStore(nodeWithEni, instanceID1) + suite.Assert().Nil(storeErr) + storeErr2 := suite.ipam.datastore.AddNodeToStore(nodeWithoutEni, instanceID2) + suite.Assert().Nil(storeErr2) + // init eni cache + suite.ipam.eniCache.Append(nodeWithEni, &enisdk.Eni{ + EniId: "eni-test", + SubnetId: subnetID, + }) + // mock cloud + // assertEniCanIncreasePool + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DescribeSubnet(gomock.Any(), subnetID). 
+ Return(subnetInfo, nil).AnyTimes() + + _, err := suite.ipam.checkIdleIPPool() + suite.Assert().Nil(err) + // wait handleIncreasePoolEvent finish + time.Sleep(10 * time.Millisecond) + _, ok1 := suite.ipam.increasePoolEventChan[nodeWithEni] + suite.Assert().True(ok1) + _, ok2 := suite.ipam.increasePoolEventChan[nodeWithoutEni] + suite.Assert().False(ok2) +} diff --git a/pkg/eniipam/ipam/bcc/psts_ipam.go b/pkg/eniipam/ipam/bcc/psts_ipam.go index ca6096b..850e321 100644 --- a/pkg/eniipam/ipam/bcc/psts_ipam.go +++ b/pkg/eniipam/ipam/bcc/psts_ipam.go @@ -20,7 +20,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apimachinery/networking" @@ -29,8 +28,8 @@ import ( "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/metadata" ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/util" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/iprange" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/metric" - "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/cidr" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" enisdk "github.com/baidubce/bce-sdk-go/services/eni" @@ -45,7 +44,9 @@ func (ipam *IPAM) subnetTopologyAllocates(ctx context.Context, pod *corev1.Pod, log.Errorf(ctx, "failed to get PodSubnetTopologySpread of pod (%v/%v): %v", pod.Namespace, pod.Name, err) return nil, err } - if !networking.PSTSContainsAvailableSubnet(psts) { + + strategy, available := networking.PSTSMode(psts) + if !available { return nil, fmt.Errorf("PodSubnetTopologySpread (%s/%s) have none available subnet", psts.GetNamespace(), psts.GetName()) } log.Infof(ctx, "try to use PodSubnetTopologySpread (%s/%s) for pod (%v/%v)", psts.GetNamespace(), psts.GetName(), 
pod.GetNamespace(), pod.GetName()) @@ -62,27 +63,36 @@ func (ipam *IPAM) subnetTopologyAllocates(ctx context.Context, pod *corev1.Pod, return nil, fmt.Errorf("PodSubnetTopologySpread (%s/%s) no available subnet", psts.GetNamespace(), psts.GetName()) } - needSubnetLock := false + needSubnetLock := true defer func() { if needSubnetLock { ipam.unlockExclusiveSubnet(toAllocate.candidateSubnets) } }() - // fixed ip mode and suffix of pod name is number - if networking.IsFixedIPMode(psts) && networking.IsEndWithNum(pod.GetName()) { - needSubnetLock = true + switch strategy.Type { + case networkingv1alpha1.IPAllocTypeNil: + fallthrough + case networkingv1alpha1.IPAllocTypeElastic: + // allocate ip with auto mode + needSubnetLock = false + err = ipam.elasticAllocateIPCrossSubnet(ctx, toAllocate, enis) + case networkingv1alpha1.IPAllocTypeFixed: + // fixed ip mode and suffix of pod name is number + if networking.IsEndWithNum(pod.GetName()) { + ipam.lockExclusiveSubnet(toAllocate.candidateSubnets) + err = ipam.fixedAllocateIPCrossSubnet(ctx, toAllocate, oldWep) + } + case networkingv1alpha1.IPAllocTypeManual: ipam.lockExclusiveSubnet(toAllocate.candidateSubnets) - err = ipam.fixedAllocateIPCrossSubnet(ctx, toAllocate, oldWep) - } - if networking.IsManualMode(psts) { - needSubnetLock = true + toAllocate.iprange, err = iprange.NewCIDRRangePool(toAllocate.psts, ipam) + if err == nil { + err = ipam.manualAllocateIPCrossSubnet(ctx, toAllocate) + } + case networkingv1alpha1.IPAllocTypeCustom: ipam.lockExclusiveSubnet(toAllocate.candidateSubnets) - err = ipam.manualAllocateIPCrossSubnet(ctx, toAllocate) - } - // allocate ip with auto mode - if networking.IsElasticMode(psts) { - err = ipam.elasticAllocateIPCrossSubnet(ctx, toAllocate, enis) + err = ipam.customAllocateIPCrossSubnet(ctx, toAllocate, oldWep) } + if err != nil { log.Errorf(ctx, "%s error : %v", toAllocate.info(), err) return nil, err @@ -125,6 +135,7 @@ func (ipam *IPAM) __saveIPAllocationToWEP(ctx context.Context, 
toAllocate *ipToA FixIPDeletePolicy: string(networking.GetReleaseStrategy(psts)), ENISubnetID: toAllocate.eni.SubnetId, SubnetTopologyReference: psts.GetName(), + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } @@ -133,6 +144,18 @@ func (ipam *IPAM) __saveIPAllocationToWEP(ctx context.Context, toAllocate *ipToA newWep.Spec.Type = ipamgeneric.WepTypeSts newWep.Spec.EnableFixIP = EnableFixIPTrue newWep.Labels[ipamgeneric.WepLabelStsOwnerKey] = util.GetStsName(newWep) + } else if networking.IsReuseIPCustomPSTS(psts) { + newWep.Spec.Type = ipamgeneric.WepTypeReuseIPPod + newWep.Spec.EnableFixIP = EnableFixIPTrue + if psts.Spec.Strategy.TTL != nil { + newWep.Spec.Release = &networkingv1alpha1.EndpointRelease{ + TTL: *psts.Spec.Strategy.TTL, + } + } else { + newWep.Spec.Release = &networkingv1alpha1.EndpointRelease{ + TTL: *networkingv1alpha1.DefaultReuseIPTTL, + } + } } // to rollback if update wep error @@ -154,12 +177,8 @@ func (ipam *IPAM) __saveIPAllocationToWEP(ctx context.Context, toAllocate *ipToA if ipam.removeAddIPBackoffCache(newWep.Spec.ENIID, true) { log.Infof(ctx, "remove backoff for eni %v when handling pod (%v %v) due to successful ip allocate", newWep.Spec.ENIID, pod.GetNamespace(), pod.GetName()) } - if toAllocate.ipv4Result != "" { - ipam.allocated[toAllocate.ipv4Result] = newWep - } - if toAllocate.ipv6Result != "" { - ipam.allocated[toAllocate.ipv6Result] = newWep - } + ipam.allocated.Add(toAllocate.ipv4Result, newWep) + ipam.allocated.Add(toAllocate.ipv6Result, newWep) return newWep, nil // update wep error @@ -185,14 +204,18 @@ func (ipam *IPAM) rollbackIPAllocated(ctx context.Context, toAllocate *ipToAlloc // The previous fixed IP is no longer in the latest list. 
// At this time, try to allocate IP again func (ipam *IPAM) fixedAllocateIPCrossSubnet(ctx context.Context, toAllocate *ipToAllocate, oldWep *networkingv1alpha1.WorkloadEndpoint) error { + var err error + toAllocate.iprange, err = iprange.NewCIDRRangePool(toAllocate.psts, ipam) + if err != nil { + return err + } if networking.OwnerByPodSubnetTopologySpread(oldWep, toAllocate.psts) { ipam.tryDeleteSubnetIPRetainAllocateCache(ctx, oldWep) - - toAllocate.ipv4 = oldWep.Spec.IP - toAllocate.sbnID = oldWep.Spec.SubnetID - - if !networking.PSTSContainersIP(toAllocate.ipv4, toAllocate.psts) { - toAllocate.ipv4 = "" + if toAllocate.iprange.IPInRange(oldWep.Spec.IP) { + toAllocate.ipv4 = oldWep.Spec.IP + toAllocate.sbnID = oldWep.Spec.SubnetID + } else { + log.Warningf(ctx, "old ip (%s) not in psts range %s", oldWep.Spec.IP, toAllocate.iprange.String()) } } return ipam.manualAllocateIPCrossSubnet(ctx, toAllocate) @@ -289,34 +312,14 @@ func (ipam *IPAM) tryAllocateIPsCrossSubnet(ctx context.Context, toAllocate *ipT // multiple subnets according to the sorting rules. 
The rule // of preferred IP is to select the first unused IP under the subnet func (ipam *IPAM) priorityIPAndSubnet(ctx context.Context, toAllocate *ipToAllocate) { - ipam.lock.RLock() - defer ipam.lock.RUnlock() - - var ( - ips []string - ) - - ipv4Map := ipam.filterCandidateIPs(ctx, toAllocate.psts, toAllocate.candidateSubnets, 4) - for id, arr := range ipv4Map { - if len(arr) > len(ips) { - toAllocate.sbnID = id - ips = arr - } - } - if len(ips) > 0 { - toAllocate.ipv4 = ips[0] - } - - ips = make([]string, 0) - ipv6Map := ipam.filterCandidateIPs(ctx, toAllocate.psts, toAllocate.candidateSubnets, 6) - for _, arr := range ipv6Map { - if len(arr) > len(ips) { - ips = arr + for _, sbnID := range toAllocate.candidateSubnets { + ip := toAllocate.iprange.FirstAvailableIP(sbnID) + if ip != nil { + toAllocate.sbnID = sbnID + toAllocate.ipv4 = ip.String() + return } } - if len(ips) > 0 { - toAllocate.ipv6 = ips[0] - } } // lockExclusiveSubnet lock the subnet to prevent concurrent preemption of fixed IP @@ -378,60 +381,12 @@ func filterAvailableSubnet(ctx context.Context, eni *enisdk.Eni, availableSubnet return candidateSubnets } -// filtercindidateIPs -// origin: all of subnet and ip -// candidateSubnets: subnet to filter -// return: ip can be allocated -func (ipam *IPAM) filterCandidateIPs(ctx context.Context, psts *networkingv1alpha1.PodSubnetTopologySpread, candidateSubnets []string, version int) map[string][]string { - var condidateIPs map[string][]string = make(map[string][]string) - - for _, sbnID := range candidateSubnets { - set := sets.NewString() - if sa, ok := psts.Spec.Subnets[sbnID]; ok { - var ( - ipArray []string = sa.IPv4 - ipRange []string = sa.IPv4Range - sbnCIDR string = "" - ) - sbn, _ := ipam.sbnCtl.Get(sbnID) - if sbn != nil { - sbnCIDR = sbn.Spec.CIDR - } else { - continue - } - - if version == 6 { - ipArray = sa.IPv6 - ipRange = sa.IPv6Range - } - for _, ranges := range ipRange { - ips := cidr.ListIPsFromCIDRString(ranges) - for _, ip := range ips 
{ - if cidr.IsUnicastIP(ip, sbnCIDR) { - ipArray = append(ipArray, ip.String()) - } - } - } - - for _, ip := range ipArray { - if _, ok := ipam.allocated[ip]; !ok { - set.Insert(ip) - } - } - - } - condidateIPs[sbnID] = set.List() - } - return condidateIPs -} - func (ipam *IPAM) __allocateIPCrossSubnet(ctx context.Context, eni *enisdk.Eni, ipToAllocate, subnetID string, backoffCap time.Duration, nodeName string) (string, error) { var ( ipResult []string err error ) - allocIPMaxTry := 3 - for i := 0; i < allocIPMaxTry; i++ { + for i := 0; i < 3; i++ { ipam.tryBackoff(eni.EniId) log.Infof(ctx, "try to add IP %v to %v cross subnet", ipToAllocate, eni.EniId) if ipToAllocate != "" { @@ -476,6 +431,10 @@ func (ipam *IPAM) __allocateIPCrossSubnet(ctx context.Context, eni *enisdk.Eni, return "", errors.New(msg) } + if err != nil { + return "", err + } + log.Infof(ctx, "add private IP cross subnet %v successfully", ipResult) ipam.datastore.Synchronized(func() error { @@ -495,8 +454,8 @@ func (ipam *IPAM) __allocateIPCrossSubnet(ctx context.Context, eni *enisdk.Eni, func (ipam *IPAM) tryBackoff(eniid string) { var toSleep time.Duration ipam.lock.RLock() - if v, ok := ipam.addIPBackoffCache[eniid]; ok { - toSleep = v.Step() + if v, ok := ipam.addIPBackoffCache.Get(eniid); ok { + toSleep = (*wait.Backoff)(v).Step() } ipam.lock.RUnlock() if toSleep != 0 { @@ -524,30 +483,29 @@ func (ipam *IPAM) syncRelationOfWepEni() { var toUpdateWeps []*networkingv1alpha1.WorkloadEndpoint - ipam.lock.RLock() - for _, enis := range ipam.eniCache { - for _, eni := range enis { - for _, addr := range eni.PrivateIpSet { - if wep, ok := ipam.allocated[addr.PrivateIpAddress]; ok { - wep, err := wepLister.WorkloadEndpoints(wep.Namespace).Get(wep.Name) - if err != nil { - continue - } - if wep.Spec.ENIID != eni.EniId && - wep.Spec.UpdateAt.Add(2*ipam.eniSyncPeriod).Before(time.Now()) { - wep = wep.DeepCopy() - wep.Spec.ENIID = eni.EniId - wep.Spec.UpdateAt = metav1.Now() - toUpdateWeps = 
append(toUpdateWeps, wep) - } + ipam.eniCache.ForEachSubItem(func(key string, index int, eni *enisdk.Eni) bool { + for _, addr := range eni.PrivateIpSet { + if wep, ok := ipam.allocated.Get(addr.PrivateIpAddress); ok { + wep, err := wepLister.WorkloadEndpoints(wep.Namespace).Get(wep.Name) + if err != nil { + continue + } + if wep.Spec.ENIID != eni.EniId && + wep.Spec.UpdateAt.Add(2*ipam.eniSyncPeriod).Before(time.Now()) { + wep = wep.DeepCopy() + wep.Spec.ENIID = eni.EniId + wep.Spec.UpdateAt = metav1.Now() + toUpdateWeps = append(toUpdateWeps, wep) } } } - } - ipam.lock.RUnlock() + + // continue + return true + }) for _, wep := range toUpdateWeps { - if !isFixIPStatefulSetPodWep(wep) { + if !(networking.IsFixIPStatefulSetPodWep(wep) || networking.ISCustomReuseModeWEP(wep)) { continue } ipam.crdClient.CceV1alpha1().WorkloadEndpoints(wep.Namespace).Update(ctx, wep, metav1.UpdateOptions{}) @@ -576,6 +534,9 @@ type ipToAllocate struct { // ip allocate result ipv4Result string ipv6Result string + + // iprange manager available IP of range + iprange *iprange.RangePool } func (toAllocate *ipToAllocate) info() string { diff --git a/pkg/eniipam/ipam/bcc/psts_ipam_custom.go b/pkg/eniipam/ipam/bcc/psts_ipam_custom.go new file mode 100644 index 0000000..9bf8884 --- /dev/null +++ b/pkg/eniipam/ipam/bcc/psts_ipam_custom.go @@ -0,0 +1,40 @@ +package bcc + +import ( + "context" + "net" + + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apimachinery/networking" + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/iprange" + log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" +) + +// customAllocateIPCrossSubnet +func (ipam *IPAM) customAllocateIPCrossSubnet(ctx context.Context, toAllocate *ipToAllocate, oldWep *networkingv1alpha1.WorkloadEndpoint) error { + var err error + toAllocate.iprange, err = iprange.NewCustomRangePool(toAllocate.psts, ipam) + if err != nil { 
+ return err + } + + if networking.OwnerByPodSubnetTopologySpread(oldWep, toAllocate.psts) && networking.IsReuseIPCustomPSTS(toAllocate.psts) { + ipam.tryDeleteSubnetIPRetainAllocateCache(ctx, oldWep) + + if toAllocate.iprange.IPInRange(oldWep.Spec.IP) { + toAllocate.ipv4 = oldWep.Spec.IP + toAllocate.sbnID = oldWep.Spec.SubnetID + } else { + log.Warningf(ctx, "old ip %s not in psts range", oldWep.Spec.IP) + } + } + return ipam.manualAllocateIPCrossSubnet(ctx, toAllocate) +} + +// FilterIP return true if ip was not used +func (ipam *IPAM) FilterIP(ip net.IP) bool { + if ipam.allocated.Exists(ip.String()) || ipam.reusedIPs.Exists(ip.String()) { + return false + } + return true +} diff --git a/pkg/eniipam/ipam/bcc/psts_ipam_test.go b/pkg/eniipam/ipam/bcc/psts_ipam_test.go index 1b1dba4..f10ad07 100644 --- a/pkg/eniipam/ipam/bcc/psts_ipam_test.go +++ b/pkg/eniipam/ipam/bcc/psts_ipam_test.go @@ -3,6 +3,7 @@ package bcc import ( "context" "fmt" + "strings" "testing" "time" @@ -10,6 +11,8 @@ import ( networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" mockcloud "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud/testing" ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/ipcache" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" "github.com/baidubce/baiducloud-cce-cni-driver/test/data" enisdk "github.com/baidubce/bce-sdk-go/services/eni" "github.com/golang/mock/gomock" @@ -18,9 +21,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/informers" + k8sutilnet "k8s.io/utils/net" ) -type IPAMSubnetTopologyAllocates struct { +type ipamSubnetTopologySuperTester struct { suite.Suite ipam *IPAM wantErr bool @@ -34,7 +39,7 @@ type IPAMSubnetTopologyAllocates struct { } // 每次测试前设置上下文 -func (suite 
*IPAMSubnetTopologyAllocates) SetupTest() { +func (suite *ipamSubnetTopologySuperTester) SetupTest() { suite.stopChan = make(chan struct{}) suite.ipam = mockIPAM(suite.T(), suite.stopChan) suite.ctx = context.TODO() @@ -43,72 +48,112 @@ func (suite *IPAMSubnetTopologyAllocates) SetupTest() { suite.podLabel = labels.Set{ "k8s.io/app": "busybox", } + runtime.ReallyCrash = false +} +func (suite *ipamSubnetTopologySuperTester) setupTest() { + suite.stopChan = make(chan struct{}) + suite.ipam = mockIPAM(suite.T(), suite.stopChan) + suite.ctx = context.TODO() + suite.name = "busybox" + suite.namespace = corev1.NamespaceDefault + suite.podLabel = labels.Set{ + "k8s.io/app": "busybox", + } runtime.ReallyCrash = false - suite.waitCacheSync() } -func (suite *IPAMSubnetTopologyAllocates) waitCacheSync() { - __waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer, suite.stopChan) +// 每次测试后执行清理 +func (suite *ipamSubnetTopologySuperTester) TearDownTest() { + close(suite.stopChan) + suite.ipam = nil + suite.ctx = nil + suite.name = "busybox" + suite.namespace = corev1.NamespaceDefault + suite.want = nil + suite.wantErr = false +} + +func (suite *ipamSubnetTopologySuperTester) tearDownTest() { + close(suite.stopChan) + suite.ipam = nil + suite.ctx = nil + suite.name = "busybox" + suite.namespace = corev1.NamespaceDefault + suite.want = nil + suite.wantErr = false +} + +func startInformer(kubeInformer informers.SharedInformerFactory, crdInformer externalversions.SharedInformerFactory, stopChan chan struct{}) { + kubeInformer.Core().V1().Nodes().Informer() + kubeInformer.Core().V1().Pods().Informer() + kubeInformer.Apps().V1().StatefulSets().Informer() + crdInformer.Cce().V1alpha1().WorkloadEndpoints().Informer() + crdInformer.Cce().V1alpha1().IPPools().Informer() + crdInformer.Cce().V1alpha1().Subnets().Informer() + crdInformer.Cce().V1alpha1().PodSubnetTopologySpreads().Informer() + + kubeInformer.Start(stopChan) + crdInformer.Start(stopChan) +} + +func 
waitCacheSync(ipam *IPAM, stopChan chan struct{}) { + startInformer(ipam.kubeInformer, ipam.crdInformer, stopChan) + __waitForCacheSync(ipam.kubeInformer, ipam.crdInformer, stopChan) } // assert for test case -func assertSuite(suite *IPAMSubnetTopologyAllocates) { - suite.waitCacheSync() +func assertSuite(suite *ipamSubnetTopologySuperTester) { suite.ipam.datastore.AddNodeToStore("test-node", "i-xxx") suite.ipam.datastore.AddENIToStore("test-node", "eni-test") suite.ipam.datastore.AddPrivateIPToStore("test-node", "eni-test", "192.168.1.107", false) var ( - pod *corev1.Pod err error + got *networkingv1alpha1.WorkloadEndpoint ) - i := 0 - for pod == nil || err != nil { - pod, err = suite.ipam.kubeInformer.Core().V1().Pods().Lister().Pods(corev1.NamespaceDefault).Get(suite.name) - suite.waitCacheSync() - i++ - if i > 100 { + for i := 0; i < 3; i++ { + select { + case <-suite.stopChan: + suite.T().Error("unexcept chan closed") return + default: + } + waitCacheSync(suite.ipam, suite.stopChan) + got, err = suite.ipam.Allocate(suite.ctx, suite.name, suite.namespace, suite.containerID) + if !suite.wantErr && err != nil && strings.Contains(err.Error(), "not found") { + suite.T().Logf("Warning: kube informer not found error: %v", err) + } else { + break } } - time.Sleep(time.Second) - - got, err := suite.ipam.Allocate(suite.ctx, suite.name, suite.namespace, suite.containerID) if !suite.wantErr { - suite.Assert().NoError(err, "Allocate() ip error") - // 更新修改时间,避免测试用例不通过 - if suite.want != nil { - suite.Assert().NotNil(got, "allocate ip return nil") - got.Spec.UpdateAt = metav1.Time{Time: time.Unix(0, 0)} - suite.Assert().EqualValues(suite.want, got, "Allocate() want not euqal ") + if suite.Assert().NoError(err, "Allocate() ip error") { + // 更新修改时间,避免测试用例不通过 + if suite.want != nil { + suite.Assert().NotNil(got, "allocate ip return nil") + got.Spec.UpdateAt = metav1.Time{Time: time.Unix(0, 0)} + suite.Assert().EqualValues(suite.want, got, "Allocate() want not euqal ") + } } } 
else { suite.Assert().Error(err, "Allocate() ip error") } - } -// 每次测试后执行清理 -func (suite *IPAMSubnetTopologyAllocates) TearDownTest() { - suite.ipam = nil - suite.ctx = nil - suite.name = "busybox" - suite.namespace = corev1.NamespaceDefault - suite.want = nil - suite.wantErr = false - close(suite.stopChan) -} +func (suite *ipamSubnetTopologySuperTester) BeforeTest(suiteName, testName string) {} +func (suite *ipamSubnetTopologySuperTester) AfterTest(suiteName, testName string) {} -func (suite *IPAMSubnetTopologyAllocates) BeforeTest(suiteName, testName string) {} -func (suite *IPAMSubnetTopologyAllocates) AfterTest(suiteName, testName string) {} +type dynamicIPCrossSubnetTester struct { + ipamSubnetTopologySuperTester +} // create a pod use dymamic IP cross subnet -func (suite *IPAMSubnetTopologyAllocates) TestAllocateDynamicIPCrossSubnet() { - suite.createSubnet() - - suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(suite.ctx, data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel), metav1.CreateOptions{}) +func (suite *dynamicIPCrossSubnetTester) TestAllocateDynamicIPCrossSubnet() { + subnet := createSubnet(suite.ipam) + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) - suite.createPodAndNode() + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(suite.ctx, psts, metav1.CreateOptions{}) + suite.ipam.createPodAndNode(suite.podLabel) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(0), 1).Return([]string{"192.168.1.109"}, nil).AnyTimes() @@ -132,103 +177,108 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocateDynamicIPCrossSubnet() { EnableFixIP: "false", SubnetTopologyReference: "psts-test", FixIPDeletePolicy: "TTL", + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } - 
suite.waitCacheSync() - pod, err := suite.ipam.kubeInformer.Core().V1().Pods().Lister().Pods(corev1.NamespaceDefault).Get("busybox") - if pod == nil || err != nil { - time.Sleep(time.Second) - } - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) +} + +type pstsOtherTest struct { + ipamSubnetTopologySuperTester } // not found the psts object -func (suite *IPAMSubnetTopologyAllocates) TestPSTSNotFound() { - suite.createPodAndNode() +func (suite *pstsOtherTest) TestPSTSNotFound() { + suite.ipam.createPodAndNode(suite.podLabel) suite.wantErr = true - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } // allocation dynamic ip cross subnet failed -func (suite *IPAMSubnetTopologyAllocates) TestFailedLimit() { +func (suite *pstsOtherTest) TestFailedLimit() { ctx := suite.ctx - - suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel), metav1.CreateOptions{}) - suite.createPodAndNode() + subnet := createSubnet(suite.ipam) + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel), metav1.CreateOptions{}) + suite.ipam.createPodAndNode(suite.podLabel) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(0), 1).Return([]string{"192.168.1.109"}, fmt.Errorf("RateLimit")).AnyTimes() suite.wantErr = true - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) TestFailedSubnetHasNoMoreIpException() { +func (suite *pstsOtherTest) TestFailedSubnetHasNoMoreIpException() { ctx := suite.ctx + subnet := createSubnet(suite.ipam) - suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, 
data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel), metav1.CreateOptions{}) - suite.createPodAndNode() + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel), metav1.CreateOptions{}) + suite.ipam.createPodAndNode(suite.podLabel) suite.wantErr = true suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(0), 1).Return([]string{"192.168.1.109"}, fmt.Errorf("SubnetHasNoMoreIpException")).AnyTimes() - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) TestFailedPrivateIpInUseException() { +func (suite *pstsOtherTest) TestFailedPrivateIpInUseException() { ctx := suite.ctx - - suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel), metav1.CreateOptions{}) - suite.createPodAndNode() + subnet := createSubnet(suite.ipam) + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel), metav1.CreateOptions{}) + suite.ipam.createPodAndNode(suite.podLabel) suite.wantErr = true suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(0), 1).Return([]string{"192.168.1.109"}, fmt.Errorf("PrivateIpInUseException")).AnyTimes() - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) TestFailedResultLenError() { +func (suite *pstsOtherTest) TestFailedResultLenError() { ctx := suite.ctx - - 
suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel), metav1.CreateOptions{}) - suite.createPodAndNode() + subnet := createSubnet(suite.ipamSubnetTopologySuperTester.ipam) + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel), metav1.CreateOptions{}) + suite.ipam.createPodAndNode(suite.podLabel) suite.wantErr = true suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(0), 1).Return([]string{}, nil).AnyTimes() - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) createSubnet() { +func createSubnet(ipam *IPAM) *networkingv1alpha1.Subnet { subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") - suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(suite.ctx, subnet, metav1.CreateOptions{}) + ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(context.Background(), subnet, metav1.CreateOptions{}) + return subnet } -func (suite *IPAMSubnetTopologyAllocates) createPodAndNode() { - ctx := suite.ctx - suite.ipam.kubeClient.CoreV1().Pods(corev1.NamespaceDefault).Create(ctx, &corev1.Pod{ - TypeMeta: metav1.TypeMeta{}, +func (ipam *IPAM) createPodAndNode(l map[string]string) { + ctx := context.Background() + ipam.kubeClient.CoreV1().Pods(corev1.NamespaceDefault).Create(ctx, &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "busybox", Annotations: map[string]string{ networking.AnnotationPodSubnetTopologySpread: "psts-test", }, - Labels: suite.podLabel, + Labels: l, }, Spec: corev1.PodSpec{ NodeName: "test-node", }, }, metav1.CreateOptions{}) - 
suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ + ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node", }, }, metav1.CreateOptions{}) } +type fixedIPCrossSubnetTester struct { + ipamSubnetTopologySuperTester +} + // allocation fixed ip cross subnet first -func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetFirst() { +func (suite *fixedIPCrossSubnetTester) TestAllocationFixedIPCrossSubnetFirst() { ctx := suite.ctx suite.name = "busybox-0" @@ -236,7 +286,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetFirst( subnet.Spec.Exclusive = true suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) - psts := data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel) + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) allocation := psts.Spec.Subnets["sbn-test"] allocation.IPv4 = append(allocation.IPv4, "192.168.1.109") allocation.Type = networkingv1alpha1.IPAllocTypeFixed @@ -244,7 +294,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetFirst( psts.Spec.Subnets["sbn-test"] = allocation suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) - suite.createFixedIPPodAndNode() + suite.ipam.createFixedIPPodAndNode(suite.podLabel) // mock cloud api suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(1), 1).Return([]string{"192.168.1.109"}, nil).AnyTimes() @@ -270,16 +320,23 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetFirst( EnableFixIP: "True", SubnetTopologyReference: "psts-test", FixIPDeletePolicy: string(networkingv1alpha1.ReleaseStrategyNever), + Phase: 
networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetAgain() { +type fixedIPCrossSubnetTester2 struct { + ipamSubnetTopologySuperTester +} + +func (suite *fixedIPCrossSubnetTester2) TestAllocationFixedIPCrossSubnetAgain() { ctx := suite.ctx suite.name = "busybox-0" - suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, data.MockFixedWorkloadEndpoint(), metav1.CreateOptions{}) + + endpoint := data.MockFixedWorkloadEndpoint() + suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, endpoint, metav1.CreateOptions{}) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() @@ -287,7 +344,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetAgain( subnet.Spec.Exclusive = true suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) - psts := data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel) + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) allocation := psts.Spec.Subnets["sbn-test"] allocation.IPv4 = append(allocation.IPv4, "192.168.1.109") allocation.Type = networkingv1alpha1.IPAllocTypeFixed @@ -295,7 +352,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetAgain( psts.Spec.Subnets["sbn-test"] = allocation suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) - suite.createFixedIPPodAndNode() + suite.ipam.createFixedIPPodAndNode(suite.podLabel) // mock cloud api suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", 
gomock.Len(1), 1).Return([]string{"192.168.1.109"}, nil).AnyTimes() @@ -321,13 +378,18 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPCrossSubnetAgain( EnableFixIP: "True", SubnetTopologyReference: "psts-test", FixIPDeletePolicy: string(networkingv1alpha1.ReleaseStrategyNever), + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) +} + +type munualIPCrossSubnetTester struct { + ipamSubnetTopologySuperTester } -func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPCrossSubnetFirst() { +func (suite *munualIPCrossSubnetTester) TestAllocationMunualIPCrossSubnetFirst() { ctx := suite.ctx suite.name = "busybox-0" @@ -335,7 +397,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPCrossSubnetFirst subnet.Spec.Exclusive = true suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) - psts := data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel) + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) allocation := psts.Spec.Subnets["sbn-test"] allocation.IPv4 = append(allocation.IPv4, "192.168.1.109") allocation.Type = networkingv1alpha1.IPAllocTypeManual @@ -343,7 +405,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPCrossSubnetFirst psts.Spec.Subnets["sbn-test"] = allocation suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) - suite.createFixedIPPodAndNode() + suite.ipam.createFixedIPPodAndNode(suite.podLabel) // mock cloud api suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(1), 1).Return([]string{"192.168.1.109"}, nil).AnyTimes() @@ -368,13 +430,18 @@ func (suite *IPAMSubnetTopologyAllocates) 
TestAllocationMunualIPCrossSubnetFirst EnableFixIP: "false", SubnetTopologyReference: "psts-test", FixIPDeletePolicy: string(networkingv1alpha1.ReleaseStrategyTTL), + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) Test__rollbackIPAllocated() { +type munualIPCrossSubnetTester2 struct { + ipamSubnetTopologySuperTester +} + +func (suite *munualIPCrossSubnetTester2) Test__rollbackIPAllocated() { wep := &networkingv1alpha1.WorkloadEndpoint{ ObjectMeta: metav1.ObjectMeta{ Name: "busybox-0", @@ -417,7 +484,11 @@ func (suite *IPAMSubnetTopologyAllocates) Test__rollbackIPAllocated() { suite.ipam.rollbackIPAllocated(suite.ctx, &ipToAllocate{pod: pod, ipv4Result: "192.168.1.109"}, fmt.Errorf("test error"), wep) } -func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPRangeCrossSubnet() { +type munualIPCrossSubnetTester3 struct { + ipamSubnetTopologySuperTester +} + +func (suite *munualIPCrossSubnetTester3) TestAllocationMunualIPRangeCrossSubnet() { ctx := suite.ctx suite.name = "busybox-0" @@ -425,7 +496,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPRangeCrossSubnet subnet.Spec.Exclusive = true suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) - psts := data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel) + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) allocation := psts.Spec.Subnets["sbn-test"] allocation.IPv4Range = append(allocation.IPv4, "192.168.1.109/32") allocation.Type = networkingv1alpha1.IPAllocTypeManual @@ -434,7 +505,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPRangeCrossSubnet suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) - 
suite.createFixedIPPodAndNode() + suite.ipam.createFixedIPPodAndNode(suite.podLabel) // mock cloud api suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(1), 1).Return([]string{"192.168.1.109"}, nil).AnyTimes() @@ -459,20 +530,25 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPRangeCrossSubnet EnableFixIP: "false", SubnetTopologyReference: "psts-test", FixIPDeletePolicy: string(networkingv1alpha1.ReleaseStrategyTTL), + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPRangeCrossSubnetWithEmptySubnet() { +type munualIPCrossSubnetTester4 struct { + ipamSubnetTopologySuperTester +} + +func (suite *munualIPCrossSubnetTester4) TestAllocationMunualIPRangeCrossSubnetWithEmptySubnet() { ctx := suite.ctx suite.name = "busybox-0" subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") subnet.Spec.Exclusive = true subnet.Name = "" - psts := data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel) + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) allocation := psts.Spec.Subnets["sbn-test"] allocation.IPv4Range = append(allocation.IPv4, "192.168.1.109/32") allocation.Type = networkingv1alpha1.IPAllocTypeManual @@ -481,19 +557,78 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationMunualIPRangeCrossSubnet suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) - suite.createFixedIPPodAndNode() + suite.ipam.createFixedIPPodAndNode(suite.podLabel) // mock cloud api suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(1), 
1).Return([]string{"192.168.1.109"}, nil).AnyTimes() suite.wantErr = true - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) +} + +type AllocationFixedIPWithDeleteIPFailed struct { + ipamSubnetTopologySuperTester +} + +func (suite *AllocationFixedIPWithDeleteIPFailed) TestAllocationFixedIPWithDeleteIPFailed() { + ctx := suite.ctx + suite.name = "busybox-0" + wep := data.MockFixedWorkloadEndpoint() + suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, wep, metav1.CreateOptions{}) + + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("RateLimit")).AnyTimes() + + subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") + subnet.Spec.Exclusive = true + suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) + + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) + allocation := psts.Spec.Subnets["sbn-test"] + allocation.IPv4 = append(allocation.IPv4, "192.168.1.109") + allocation.Type = networkingv1alpha1.IPAllocTypeFixed + allocation.ReleaseStrategy = networkingv1alpha1.ReleaseStrategyNever + psts.Spec.Subnets["sbn-test"] = allocation + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) + + suite.ipam.createFixedIPPodAndNode(suite.podLabel) + + // mock cloud api + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(1), 1).Return([]string{"192.168.1.109"}, nil).AnyTimes() + + suite.want = &networkingv1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox-0", + Namespace: "default", + Labels: map[string]string{ + "cce.io/subnet-id": "sbn-test", + "cce.io/instance-type": "bcc", + ipamgeneric.WepLabelStsOwnerKey: "busybox", 
+ }, + Finalizers: []string{"cce-cni.cce.io"}, + }, + Spec: networkingv1alpha1.WorkloadEndpointSpec{ + IP: "192.168.1.109", + SubnetID: "sbn-test", + Type: ipamgeneric.WepTypeSts, + ENIID: "eni-test", + Node: "test-node", + UpdateAt: metav1.Time{Time: time.Unix(0, 0)}, + EnableFixIP: "True", + SubnetTopologyReference: "psts-test", + FixIPDeletePolicy: string(networkingv1alpha1.ReleaseStrategyNever), + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, + }, + } + + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPWithDeleteIPFailed() { +func (suite *AllocationFixedIPWithDeleteIPFailed) TestAllocationFixedIPWithOldNotInSubnet() { ctx := suite.ctx suite.name = "busybox-0" - suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, data.MockFixedWorkloadEndpoint(), metav1.CreateOptions{}) + wep := data.MockFixedWorkloadEndpoint() + wep.Spec.IP = "127.0.0.1" + suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, wep, metav1.CreateOptions{}) suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("RateLimit")).AnyTimes() @@ -501,7 +636,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPWithDeleteIPFaile subnet.Spec.Exclusive = true suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) - psts := data.MockPodSubnetTopologySpread(corev1.NamespaceDefault, "psts-test", "sbn-test", suite.podLabel) + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) allocation := psts.Spec.Subnets["sbn-test"] allocation.IPv4 = append(allocation.IPv4, "192.168.1.109") allocation.Type = networkingv1alpha1.IPAllocTypeFixed @@ -509,7 +644,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPWithDeleteIPFaile 
psts.Spec.Subnets["sbn-test"] = allocation suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) - suite.createFixedIPPodAndNode() + suite.ipam.createFixedIPPodAndNode(suite.podLabel) // mock cloud api suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", gomock.Len(1), 1).Return([]string{"192.168.1.109"}, nil).AnyTimes() @@ -535,34 +670,35 @@ func (suite *IPAMSubnetTopologyAllocates) TestAllocationFixedIPWithDeleteIPFaile EnableFixIP: "True", SubnetTopologyReference: "psts-test", FixIPDeletePolicy: string(networkingv1alpha1.ReleaseStrategyNever), + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } - assertSuite(suite) + assertSuite(&suite.ipamSubnetTopologySuperTester) } -func (suite *IPAMSubnetTopologyAllocates) createFixedIPPodAndNode() { - suite.ipam.kubeClient.CoreV1().Pods(corev1.NamespaceDefault).Create(context.TODO(), &corev1.Pod{ +func (ipam *IPAM) createFixedIPPodAndNode(l map[string]string) { + ipam.kubeClient.CoreV1().Pods(corev1.NamespaceDefault).Create(context.TODO(), &corev1.Pod{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "busybox-0", Annotations: map[string]string{ networking.AnnotationPodSubnetTopologySpread: "psts-test", }, - Labels: suite.podLabel, + Labels: l, }, Spec: corev1.PodSpec{ NodeName: "test-node", }, }, metav1.CreateOptions{}) - suite.ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ + ipam.kubeClient.CoreV1().Nodes().Create(context.TODO(), &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test-node", }, }, metav1.CreateOptions{}) } -func (suite *IPAMSubnetTopologyAllocates) Test__filterAvailableSubnet() { +func (suite *pstsOtherTest) Test__filterAvailableSubnet() { eni := &enisdk.Eni{ ZoneName: "zoneF", } @@ -582,7 +718,7 @@ func (suite *IPAMSubnetTopologyAllocates) Test__filterAvailableSubnet() { suite.Assert().EqualValues([]string{"sbn-1", 
"sbn-4"}, result, "filter available subnet") } -func (suite *IPAMSubnetTopologyAllocates) Test__allocateIPCrossSubnet() { +func (suite *pstsOtherTest) Test__allocateIPCrossSubnet() { eni := &enisdk.Eni{ ZoneName: "zoneF", EniId: "eni-test", @@ -608,7 +744,11 @@ func (suite *IPAMSubnetTopologyAllocates) Test__allocateIPCrossSubnet() { } -func (suite *IPAMSubnetTopologyAllocates) TestSyncRelationOfWepEni() { +type SyncRelationOfWepEniTest struct { + ipamSubnetTopologySuperTester +} + +func (suite *SyncRelationOfWepEniTest) TestSyncRelationOfWepEni() { eni := &enisdk.Eni{ ZoneName: "zoneF", EniId: "eni-test", @@ -627,13 +767,16 @@ func (suite *IPAMSubnetTopologyAllocates) TestSyncRelationOfWepEni() { }, }, } - suite.ipam.eniCache = make(map[string][]*enisdk.Eni) - suite.ipam.eniCache["node-test"] = append(suite.ipam.eniCache["node-test"], eni) + + eniCache := ipcache.NewCacheMapArray[*enisdk.Eni]() + eniCache.Append("node-test", eni) + suite.ipam.eniCache = eniCache wep := data.MockFixedWorkloadEndpoint() wep.Name = "w1" wep.Spec.IP = "10.0.0.1" - suite.ipam.allocated[wep.Spec.IP] = wep + + suite.ipam.allocated.Add(wep.Spec.IP, wep) suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(wep.Namespace).Create(suite.ctx, wep, metav1.CreateOptions{}) wep = data.MockFixedWorkloadEndpoint() @@ -641,7 +784,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestSyncRelationOfWepEni() { wep.Spec.IP = "10.0.0.2" wep.Spec.ENIID = "eni-change" wep.Spec.UpdateAt = metav1.Time{Time: time.Unix(0, 0)} - suite.ipam.allocated[wep.Spec.IP] = wep + suite.ipam.allocated.Add(wep.Spec.IP, wep) suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(wep.Namespace).Create(suite.ctx, wep, metav1.CreateOptions{}) wep = data.MockFixedWorkloadEndpoint() @@ -649,7 +792,7 @@ func (suite *IPAMSubnetTopologyAllocates) TestSyncRelationOfWepEni() { wep.Spec.IP = "10.0.0.3" wep.Spec.ENIID = "eni-change" wep.Spec.UpdateAt = metav1.Time{Time: time.Now()} - suite.ipam.allocated[wep.Spec.IP] = wep + 
suite.ipam.allocated.Add(wep.Spec.IP, wep) suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(wep.Namespace).Create(suite.ctx, wep, metav1.CreateOptions{}) wep = data.MockFixedWorkloadEndpoint() @@ -658,10 +801,10 @@ func (suite *IPAMSubnetTopologyAllocates) TestSyncRelationOfWepEni() { wep.Spec.IP = "10.0.0.4" wep.Spec.ENIID = "eni-change" wep.Spec.UpdateAt = metav1.Time{Time: time.Now()} - suite.ipam.allocated[wep.Spec.IP] = wep + suite.ipam.allocated.Add(wep.Spec.IP, wep) suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(wep.Namespace).Create(suite.ctx, wep, metav1.CreateOptions{}) - __waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer, suite.stopChan) + waitCacheSync(suite.ipam, suite.stopChan) // resync to update wep suite.ipam.syncRelationOfWepEni() @@ -679,6 +822,337 @@ func (suite *IPAMSubnetTopologyAllocates) TestSyncRelationOfWepEni() { } } +type AllocationCustomModeTester struct { + ipamSubnetTopologySuperTester +} + +// allocate IP from custom mode +func (suite *AllocationCustomModeTester) TestAllocationCustomMode() { + ctx := suite.ctx + suite.name = "busybox-0" + + subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") + subnet.Spec.Exclusive = true + suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) + + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + TTL: networkingv1alpha1.DefaultReuseIPTTL, + } + + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) + + suite.ipam.createFixedIPPodAndNode(suite.podLabel) + + // mock cloud api, and check the first available ip 192.168.1.2 + 
suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", []string{"192.168.1.2"}, 1).Return([]string{"192.168.1.2"}, nil).AnyTimes() + + assertSuite(&suite.ipamSubnetTopologySuperTester) +} + +type AllocationCustomModeFromIPRangeTester struct { + ipamSubnetTopologySuperTester +} + +func (suite *AllocationCustomModeFromIPRangeTester) TestAllocationCustomModeFromIPRange() { + ctx := suite.ctx + suite.name = "busybox-0" + + subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") + subnet.Spec.Exclusive = true + suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) + + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + TTL: networkingv1alpha1.DefaultReuseIPTTL, + } + allocation := psts.Spec.Subnets["sbn-test"] + custom := networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + Start: "192.168.1.56", + End: "192.168.1.56", + }, + }, + } + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) + + suite.ipam.createFixedIPPodAndNode(suite.podLabel) + + // mock cloud api, and check the first available ip 192.168.1.2 + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", []string{"192.168.1.56"}, 1).Return([]string{"192.168.1.56"}, nil).AnyTimes() + + assertSuite(&suite.ipamSubnetTopologySuperTester) +} + +type AllocationCustomModeReuseOldIPNotInIPRangeTester struct { + ipamSubnetTopologySuperTester 
+} + +// The previously allocated IP address is not in the scope of the new psts +func (suite *AllocationCustomModeReuseOldIPNotInIPRangeTester) TestAllocationCustomModeReuseOldIPNotInIPRange() { + ctx := suite.ctx + suite.name = "busybox-0" + wep := data.MockFixedWorkloadEndpoint() + suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, wep, metav1.CreateOptions{}) + + subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") + subnet.Spec.Exclusive = true + suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) + + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + TTL: networkingv1alpha1.DefaultReuseIPTTL, + EnableReuseIPAddress: true, + } + allocation := psts.Spec.Subnets["sbn-test"] + custom := networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + Start: "192.168.1.56", + End: "192.168.1.57", + }, + }, + } + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) + + suite.ipam.createFixedIPPodAndNode(suite.podLabel) + + // mock cloud api, and check the first available ip 192.168.1.2 + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", []string{"192.168.1.56"}, 1).Return([]string{"192.168.1.56"}, nil).AnyTimes() + + assertSuite(&suite.ipamSubnetTopologySuperTester) +} + +type 
AllocationCustomModeReuseInIPRangeTester struct { + ipamSubnetTopologySuperTester +} + +func (suite *AllocationCustomModeReuseInIPRangeTester) TestAllocationCustomModeReuseInIPRange() { + ip := "192.168.1.57" + ctx := suite.ctx + suite.name = "busybox-0" + wep := data.MockFixedWorkloadEndpoint() + wep.Spec.IP = "192.168.10.57" + suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, wep, metav1.CreateOptions{}) + + subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") + subnet.Spec.Exclusive = true + suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) + + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + TTL: networkingv1alpha1.DefaultReuseIPTTL, + EnableReuseIPAddress: true, + } + allocation := psts.Spec.Subnets["sbn-test"] + custom := networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + Start: ip, + End: "192.168.1.57", + }, + }, + } + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) + + suite.ipam.createFixedIPPodAndNode(suite.podLabel) + + // mock cloud api, and check the first available ip 192.168.1.2 + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", []string{ip}, 1).Return([]string{ip}, nil).AnyTimes() + suite.wantErr = false + 
assertSuite(&suite.ipamSubnetTopologySuperTester) +} + +type AllocationCustomModeReuseInIPRangeNoTTLTester struct { + ipamSubnetTopologySuperTester +} + +func (suite *AllocationCustomModeReuseInIPRangeNoTTLTester) TestAllocationCustomModeReuseInIPRange() { + ip := "192.168.1.57" + ctx := suite.ctx + suite.name = "busybox-0" + wep := data.MockFixedWorkloadEndpoint() + wep.Spec.IP = ip + suite.ipam.crdClient.CceV1alpha1().WorkloadEndpoints(corev1.NamespaceDefault).Create(ctx, wep, metav1.CreateOptions{}) + + subnet := data.MockSubnet(corev1.NamespaceDefault, "sbn-test", "192.168.1.0/24") + subnet.Spec.Exclusive = true + suite.ipam.crdClient.CceV1alpha1().Subnets(corev1.NamespaceDefault).Create(ctx, subnet, metav1.CreateOptions{}) + + psts := data.MockPodSubnetTopologySpreadWithSubnet(corev1.NamespaceDefault, "psts-test", subnet, suite.podLabel) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + EnableReuseIPAddress: true, + } + allocation := psts.Spec.Subnets["sbn-test"] + custom := networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + Start: ip, + End: "192.168.1.57", + }, + }, + } + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + + suite.ipam.crdClient.CceV1alpha1().PodSubnetTopologySpreads(corev1.NamespaceDefault).Create(ctx, psts, metav1.CreateOptions{}) + + suite.ipam.createFixedIPPodAndNode(suite.podLabel) + + // mock cloud api, and check the first available ip 192.168.1.2 + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().DeletePrivateIP(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT().BatchAddPrivateIpCrossSubnet(gomock.Any(), "eni-test", "sbn-test", []string{ip}, 1).Return([]string{ip}, nil).AnyTimes() + + 
assertSuite(&suite.ipamSubnetTopologySuperTester) +} + func TestIPAM_subnetTopologyAllocates(t *testing.T) { - suite.Run(t, new(IPAMSubnetTopologyAllocates)) + t.Parallel() + test := new(ipamSubnetTopologySuperTester) + + suite.Run(t, test) + +} + +func TestIPAM2(t *testing.T) { + t.Parallel() + test := new(dynamicIPCrossSubnetTester) + + suite.Run(t, test) + +} + +func TestIPAM3(t *testing.T) { + t.Parallel() + test := new(fixedIPCrossSubnetTester) + + suite.Run(t, test) + +} + +func TestIPAM4(t *testing.T) { + t.Parallel() + test := new(fixedIPCrossSubnetTester2) + + suite.Run(t, test) + +} + +func TestIPAM5(t *testing.T) { + t.Parallel() + test := new(AllocationFixedIPWithDeleteIPFailed) + + suite.Run(t, test) + +} + +func TestIPAM6(t *testing.T) { + t.Parallel() + test := new(munualIPCrossSubnetTester) + + suite.Run(t, test) + +} + +func TestIPAM7(t *testing.T) { + t.Parallel() + test := new(munualIPCrossSubnetTester2) + + suite.Run(t, test) + +} + +func TestIPAM8(t *testing.T) { + t.Parallel() + test := new(munualIPCrossSubnetTester3) + + suite.Run(t, test) + +} + +func TestIPAM9(t *testing.T) { + t.Parallel() + test := new(munualIPCrossSubnetTester4) + + suite.Run(t, test) + +} + +func TestIPAM10(t *testing.T) { + t.Parallel() + test := new(SyncRelationOfWepEniTest) + + suite.Run(t, test) + +} + +func TestIPAM11(t *testing.T) { + t.Parallel() + test := new(AllocationCustomModeTester) + + suite.Run(t, test) + +} + +func TestIPAM12(t *testing.T) { + t.Parallel() + test := new(AllocationCustomModeFromIPRangeTester) + + suite.Run(t, test) + +} + +func TestIPAM13(t *testing.T) { + t.Parallel() + test := new(AllocationCustomModeReuseOldIPNotInIPRangeTester) + + suite.Run(t, test) + +} + +func TestIPAM14(t *testing.T) { + t.Parallel() + test := new(AllocationCustomModeReuseInIPRangeTester) + + suite.Run(t, test) + +} +func TestIPAM16(t *testing.T) { + t.Parallel() + test := new(AllocationCustomModeReuseInIPRangeNoTTLTester) + + suite.Run(t, test) + +} + +func 
TestIPAM15(t *testing.T) { + t.Parallel() + test := new(pstsOtherTest) + suite.Run(t, test) } diff --git a/pkg/eniipam/ipam/bcc/types.go b/pkg/eniipam/ipam/bcc/types.go index 5f479cb..cc1f2a8 100644 --- a/pkg/eniipam/ipam/bcc/types.go +++ b/pkg/eniipam/ipam/bcc/types.go @@ -29,13 +29,14 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/record" - "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/config/types" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/controller/subnet" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/controller/topology_spread" datastorev1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/datastore/v1" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/ipcache" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" ) @@ -70,17 +71,17 @@ type event struct { type IPAM struct { lock sync.RWMutex // key is node name, value is list of enis attached - eniCache map[string][]*enisdk.Eni + eniCache *ipcache.CacheMapArray[*enisdk.Eni] // privateIPNumCache stores allocated IP num of each eni. key is eni id. privateIPNumCache map[string]int // possibleLeakedIPCache stores possible leaked ip cache. 
possibleLeakedIPCache map[eniAndIPAddrKey]time.Time // addIPBackoffCache to slow down add ip API call if subnet or vm cannot allocate more ip - addIPBackoffCache map[string]*wait.Backoff + addIPBackoffCache *ipcache.CacheMap[*wait.Backoff] // ipam will rebuild cache if restarts, should not handle request from cni if cacheHasSynced is false cacheHasSynced bool // key is ip, value is wep - allocated map[string]*v1alpha1.WorkloadEndpoint + allocated *ipcache.CacheMap[*networkingv1alpha1.WorkloadEndpoint] datastore *datastorev1.DataStore idleIPPoolMinSize int idleIPPoolMaxSize int @@ -119,6 +120,8 @@ type IPAM struct { // lock for allocation IP from exclusisve subnet exclusiveSubnetCond *sync.Cond exclusiveSubnetFlag map[string]bool + + reusedIPs *ipcache.ReuseIPAndWepPool } var _ ipam.Interface = &IPAM{} diff --git a/pkg/eniipam/ipam/bcc/utils.go b/pkg/eniipam/ipam/bcc/utils.go new file mode 100644 index 0000000..7b9be2b --- /dev/null +++ b/pkg/eniipam/ipam/bcc/utils.go @@ -0,0 +1,13 @@ +package bcc + +import ( + k8sutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/k8s" + corev1 "k8s.io/api/core/v1" +) + +func IsFixIPStatefulSetPod(pod *corev1.Pod) bool { + if pod.Annotations == nil || !k8sutil.IsStatefulSetPod(pod) { + return false + } + return pod.Annotations[StsPodAnnotationEnableFixIP] == EnableFixIPTrue +} diff --git a/pkg/eniipam/ipam/crossvpceni/ipam.go b/pkg/eniipam/ipam/crossvpceni/ipam.go index 67c75b6..8fe319b 100644 --- a/pkg/eniipam/ipam/crossvpceni/ipam.go +++ b/pkg/eniipam/ipam/crossvpceni/ipam.go @@ -73,6 +73,9 @@ var ( PodAnnotationCrossVPCEniDefaultRouteInterfaceDelegation = "cross-vpc-eni.cce.io/defaultRouteInterfaceDelegation" PodAnnotationCrossVPCEniDefaultRouteExcludedCidrs = "cross-vpc-eni.cce.io/defaultRouteExcludedCidrs" + NodeAnnotationMaxCrossVPCEni = "cross-vpc-eni.cce.io/maxEniNumber" + NodeLabelMaxCrossVPCEni = "cross-vpc-eni.cce.io/max-eni-number" + necessaryAnnoKeyList = []string{ PodAnnotationCrossVPCEniUserID, 
PodAnnotationCrossVPCEniSubnetID, diff --git a/pkg/eniipam/ipam/crossvpceni/ipam_test.go b/pkg/eniipam/ipam/crossvpceni/ipam_test.go index 0abe7fe..2cd76c9 100644 --- a/pkg/eniipam/ipam/crossvpceni/ipam_test.go +++ b/pkg/eniipam/ipam/crossvpceni/ipam_test.go @@ -17,6 +17,7 @@ package crossvpceni import ( "context" + "github.com/stretchr/testify/assert" "reflect" "sync" "testing" @@ -869,7 +870,7 @@ func Test_eventsToErrorMsg(t *testing.T) { tests := []struct { name string args args - want string + want []string }{ { name: "empty list", @@ -878,7 +879,7 @@ func Test_eventsToErrorMsg(t *testing.T) { Items: []v1.Event{}, }, }, - want: "", + want: []string{}, }, { name: "normal list", @@ -903,13 +904,18 @@ func Test_eventsToErrorMsg(t *testing.T) { }, }, }, - want: `[CreateEni: aaa, AttachEni: ccc]`, + want: []string{"CreateEni: aaa", "AttachEni: ccc"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := eventsToErrorMsg(tt.args.events); got != tt.want { - t.Errorf("eventsToErrorMsg() = %v, want %v", got, tt.want) + got := eventsToErrorMsg(tt.args.events) + if len(tt.want) == 0 { + assert.Equal(t, "", got) + } else { + for _, subWant := range tt.want { + assert.Contains(t, got, subWant) + } } }) } diff --git a/pkg/eniipam/ipam/eri/gc.go b/pkg/eniipam/ipam/eri/gc.go new file mode 100644 index 0000000..64a2715 --- /dev/null +++ b/pkg/eniipam/ipam/eri/gc.go @@ -0,0 +1,215 @@ +package eri + +import ( + "context" + "fmt" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/util" + log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" + enisdk "github.com/baidubce/bce-sdk-go/services/eni" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + "time" +) + +func (ipam *IPAM) gc(stopCh <-chan struct{}) error { + log.Infof(context.TODO(), 
"start gc by eri ipam, gcPeriod is %v", ipam.gcPeriod) + err := wait.PollImmediateUntil(wait.Jitter(ipam.gcPeriod, 0.5), func() (bool, error) { + ctx := log.NewContext() + log.Infof(ctx, "gc by eri ipam start") + + // release mwep if pod not found + podErr := ipam.gcLeakedPod(ctx) + if podErr != nil { + return false, nil + } + + nodeErr := ipam.gcDeletedNode(ctx) + if nodeErr != nil { + return false, nil + } + + ipErr := ipam.gcLeakedIP(ctx) + if ipErr != nil { + return false, nil + } + + log.Infof(ctx, "gc by eri ipam end") + return false, nil + }, stopCh) + + if err != nil { + return err + } + return nil +} + +// release ip and mwep when pod not found +func (ipam *IPAM) gcLeakedPod(ctx context.Context) error { + mwepSelector, selectorErr := mwepListerSelector() + if selectorErr != nil { + log.Errorf(ctx, "make mwep lister selector has error: %v", selectorErr) + return selectorErr + } + + mwepList, mwepErr := ipam.crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Lister().List(mwepSelector) + if mwepErr != nil { + log.Errorf(ctx, "gc: error list mwep in cluster: %v", mwepErr) + return mwepErr + } + log.Infof(ctx, "list mwepList count is %d ", len(mwepList)) + + for _, mwep := range mwepList { + _, podErr := ipam.kubeInformer.Core().V1().Pods().Lister().Pods(mwep.Namespace).Get(mwep.Name) + if podErr == nil { + // pod exist + continue + } + if !errors.IsNotFound(podErr) { + // get pod failed + log.Errorf(ctx, "gc: get pod (%s/%s) failed: %v", mwep.Namespace, mwep.Name, podErr) + continue + } + // pod not found. 
delete mwep + msg := fmt.Sprintf("gc: pod not found, try to release leaked mwep (%s/%s)", mwep.Namespace, mwep.Name) + log.Info(ctx, msg) + + // add event + ipam.eventRecorder.Event(&v1.ObjectReference{ + Kind: "mwep", + Name: fmt.Sprintf("%v %v", mwep.Namespace, mwep.Name), + }, v1.EventTypeWarning, "PodLeaked", msg) + + // delete ip, delete mwep crd + releaseErr := ipam.releaseIPByMwep(ctx, mwep) + if releaseErr != nil { + log.Errorf(ctx, "gc: release mwep (%s/%s) and ip failed: %s", mwep.Namespace, mwep.Name, releaseErr) + } + } + return nil +} + +// delete node from cache when node not found +func (ipam *IPAM) gcDeletedNode(ctx context.Context) error { + for _, node := range ipam.nodeCache { + _, nodeErr := ipam.kubeInformer.Core().V1().Nodes().Lister().Get(node.Name) + if nodeErr == nil { + // node exist + continue + } + if !errors.IsNotFound(nodeErr) { + // get node failed + log.Errorf(ctx, "gc: get node (%s) failed: %v", node.Name, nodeErr) + continue + } + + log.Infof(ctx, "detect node %v has been deleted, clean up datastore", node.Name) + + // clean up cache + delErr := ipam.deleteNodeFromCache(node) + if delErr != nil { + log.Errorf(ctx, "gc: delete node %s from datastore failed: %v", node.Name, delErr) + } + } + return nil +} + +func (ipam *IPAM) deleteNodeFromCache(node *v1.Node) error { + instanceID, insIDErr := util.GetInstanceIDFromNode(node) + if insIDErr != nil { + return fmt.Errorf("get instanceID for node (%s) error: %v", node.Name, insIDErr) + } + + if _, exist := ipam.nodeCache[instanceID]; exist { + delete(ipam.nodeCache, instanceID) + } + return nil +} + +// release ip when ip not in mwep +func (ipam *IPAM) gcLeakedIP(ctx context.Context) error { + for instanceID := range ipam.nodeCache { + listArgs := enisdk.ListEniArgs{ + InstanceId: instanceID, + VpcId: ipam.vpcID, + } + eris, listErr := ipam.cloud.ListERIs(ctx, listArgs) + if listErr != nil { + log.Infof(ctx, "list eri for %s failed: %s", instanceID, listErr) + if 
cloud.IsErrorRateLimit(listErr) { + // wait for rate limit + time.Sleep(wait.Jitter(rateLimitErrorSleepPeriod, rateLimitErrorJitterFactor)) + } + continue + } + // get mwep for instanceID + ipSet, ipErr := ipam.getIPSetForNode(ctx, instanceID) + if ipErr != nil { + log.Infof(ctx, "get ip set for %s failed: %s", instanceID, ipErr) + continue + } + // delete eri ip when ip not in mwep + ipam.gcOneNodeLeakedIP(ctx, instanceID, eris, ipSet) + } + + return nil +} + +func (ipam *IPAM) getIPSetForNode(ctx context.Context, instanceID string) (map[string]struct{}, error) { + // list mwep + mwepSelector, selectorErr := mwepListerSelector() + if selectorErr != nil { + log.Errorf(ctx, "make mwep lister selector has error: %v", selectorErr) + return nil, selectorErr + } + mwepList, mwepErr := ipam.crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Lister().List(mwepSelector) + if mwepErr != nil { + log.Errorf(ctx, "gc: error list mwep in cluster: %v", mwepErr) + return nil, mwepErr + } + log.Infof(ctx, "list mwepList count is %d ", len(mwepList)) + + // collect ip for instanceID + ipSet := make(map[string]struct{}) + for _, mwep := range mwepList { + if mwep.Type != ipamgeneric.MwepTypeERI { + continue + } + if mwep.InstanceID != instanceID { + continue + } + for _, spec := range mwep.Spec { + if _, exist := ipSet[spec.IP]; exist { + continue + } + ipSet[spec.IP] = struct{}{} + } + } + return ipSet, nil +} + +func (ipam *IPAM) gcOneNodeLeakedIP(ctx context.Context, instanceID string, eriList []enisdk.Eni, + ipSet map[string]struct{}) { + for _, eriInfo := range eriList { + log.Infof(ctx, "gc: try to gc leaked ip for eri %s of node %s", eriInfo.EniId, instanceID) + for _, privateIP := range eriInfo.PrivateIpSet { + if privateIP.Primary { + continue + } + if _, exist := ipSet[privateIP.PrivateIpAddress]; exist { + continue + } + log.Infof(ctx, "gc: privateIP %s not found in mwep, try to delete privateIP", + privateIP.PrivateIpAddress) + + // delete ip + deleteErr := 
ipam.cloud.DeletePrivateIP(ctx, privateIP.PrivateIpAddress, eriInfo.EniId) + if deleteErr != nil { + log.Errorf(ctx, "delete ip %s for eni %s failed: %s", + privateIP.PrivateIpAddress, eriInfo.EniId, deleteErr) + } + } + } +} diff --git a/pkg/eniipam/ipam/eri/gc_test.go b/pkg/eniipam/ipam/eri/gc_test.go new file mode 100644 index 0000000..419e887 --- /dev/null +++ b/pkg/eniipam/ipam/eri/gc_test.go @@ -0,0 +1,200 @@ +package eri + +import ( + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + mockcloud "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud/testing" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + enisdk "github.com/baidubce/bce-sdk-go/services/eni" + "github.com/golang/mock/gomock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (suite *IPAMTest) Test__gcLeakedPod() { + // pod-0 exist + // pod-1 not found + mwep0 := &networkingv1alpha1.MultiIPWorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: corev1.NamespaceDefault, + Finalizers: []string{"cce-cni.cce.io"}, + Labels: map[string]string{ + corev1.LabelInstanceType: "BCC", + ipamgeneric.MwepLabelInstanceTypeKey: ipamgeneric.MwepTypeERI, + }, + }, + Spec: []networkingv1alpha1.MultiIPWorkloadEndpointSpec{{ + IP: "10.1.1.0", + EniID: "eni-0", + }}, + } + _, err0 := suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault). 
+ Create(suite.ctx, mwep0, metav1.CreateOptions{}) + suite.Assert().Nil(err0) + + mwep1 := &networkingv1alpha1.MultiIPWorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1", + Namespace: corev1.NamespaceDefault, + Finalizers: []string{"cce-cni.cce.io"}, + Labels: map[string]string{ + corev1.LabelInstanceType: "BCC", + ipamgeneric.MwepLabelInstanceTypeKey: ipamgeneric.MwepTypeERI, + }, + }, + Spec: []networkingv1alpha1.MultiIPWorkloadEndpointSpec{{ + IP: "10.1.1.1", + EniID: "eni-0", + }}, + } + _, err1 := suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault). + Create(suite.ctx, mwep1, metav1.CreateOptions{}) + suite.Assert().Nil(err1) + + _, podErr := suite.ipam.kubeClient.CoreV1().Pods(corev1.NamespaceDefault). + Create(suite.ctx, &corev1.Pod{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + }, + Spec: corev1.PodSpec{ + NodeName: "test-node", + }, + }, metav1.CreateOptions{}) + suite.Assert().Nil(podErr) + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + + mockInterface := suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT() + mockInterface.DeletePrivateIP(gomock.Any(), gomock.Eq("10.1.1.1"), gomock.Eq("eni-0")).Return(nil) + + gcErr := suite.ipam.gcLeakedPod(suite.ctx) + suite.Assert().Nil(gcErr) + + // should not be deleted + _, getErr0 := suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault). + Get(suite.ctx, "pod-0", metav1.GetOptions{}) + suite.Assert().Nil(getErr0) + // should be deleted + _, getErr1 := suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault). 
+ Get(suite.ctx, "pod-1", metav1.GetOptions{}) + suite.Assert().True(errors.IsNotFound(getErr1)) +} + +func (suite *IPAMTest) Test__gcDeletedNode() { + // node-0 exist + // node-1 not found + suite.ipam.nodeCache = map[string]*corev1.Node{ + "i-xxxx0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-0", + }, + Spec: corev1.NodeSpec{ + ProviderID: "cce://i-xxxx0", + }, + }, + "i-xxxx1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + ProviderID: "cce://i-xxxx1", + }, + }, + } + + _, nodeErr := suite.ipam.kubeClient.CoreV1().Nodes(). + Create(suite.ctx, &corev1.Node{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "node-0", + }, + Spec: corev1.NodeSpec{ + ProviderID: "cce://i-xxxx0", + }, + }, metav1.CreateOptions{}) + suite.Assert().Nil(nodeErr) + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + + gcErr := suite.ipam.gcDeletedNode(suite.ctx) + suite.Assert().Nil(gcErr) + + suite.Assert().Equal(1, len(suite.ipam.nodeCache)) + + _, node1Exist := suite.ipam.nodeCache["i-xxxx1"] + suite.Assert().True(!node1Exist, "expect node 1 not found") +} + +func (suite *IPAMTest) Test__gcLeakedIP() { + // 1. 
empty wep + suite.ipam.nodeCache = map[string]*corev1.Node{ + "node-0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-0", + }, + }, + } + + cloudClient := suite.ipam.cloud.(*mockcloud.MockInterface) + eriList := []enisdk.Eni{ + { + EniId: "eni-0", + PrivateIpSet: []enisdk.PrivateIp{ + { + Primary: true, + PrivateIpAddress: "10.1.1.0", + }, + { + Primary: false, + PrivateIpAddress: "10.1.1.1", + }, + { + Primary: false, + PrivateIpAddress: "10.1.1.2", + }, + }, + }, + } + gomock.InOrder( + cloudClient.EXPECT().ListERIs(gomock.Any(), gomock.Any()).Return(eriList, nil), + cloudClient.EXPECT().DeletePrivateIP(gomock.Any(), gomock.Eq("10.1.1.1"), gomock.Eq("eni-0")).Return(nil), + cloudClient.EXPECT().DeletePrivateIP(gomock.Any(), gomock.Eq("10.1.1.2"), gomock.Eq("eni-0")).Return(nil), + ) + + gcErr := suite.ipam.gcLeakedIP(suite.ctx) + suite.Assert().Nil(gcErr) + + // 2. delete leaked ip 10.1.1.2 + gomock.InOrder( + cloudClient.EXPECT().ListERIs(gomock.Any(), gomock.Any()).Return(eriList, nil), + cloudClient.EXPECT().DeletePrivateIP(gomock.Any(), gomock.Eq("10.1.1.2"), gomock.Eq("eni-0")).Return(nil), + ) + + mwep0 := &networkingv1alpha1.MultiIPWorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: corev1.NamespaceDefault, + Finalizers: []string{"cce-cni.cce.io"}, + Labels: map[string]string{ + corev1.LabelInstanceType: "BCC", + ipamgeneric.MwepLabelInstanceTypeKey: ipamgeneric.MwepTypeERI, + }, + }, + NodeName: "", + InstanceID: "node-0", + Type: ipamgeneric.MwepTypeERI, + Spec: []networkingv1alpha1.MultiIPWorkloadEndpointSpec{ + { + EniID: "eni-0", + IP: "10.1.1.1", + }, + }, + } + _, err0 := suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault). 
+ Create(suite.ctx, mwep0, metav1.CreateOptions{}) + suite.Assert().Nil(err0) + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + + gcErr2 := suite.ipam.gcLeakedIP(suite.ctx) + suite.Assert().Nil(gcErr2) +} diff --git a/pkg/eniipam/ipam/eri/ipam.go b/pkg/eniipam/ipam/eri/ipam.go new file mode 100644 index 0000000..84263e9 --- /dev/null +++ b/pkg/eniipam/ipam/eri/ipam.go @@ -0,0 +1,583 @@ +package eri + +import ( + "context" + goerrors "errors" + "fmt" + "sync" + "time" + + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/util" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned/scheme" + crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/metric" + log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" + enisdk "github.com/baidubce/bce-sdk-go/services/eni" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" +) + +const ( + syncNodePeriod = 2 * time.Hour + cloudMaxTry = 3 + // minPrivateIPLifeTime is the life time of a private ip (from allocation to release), aim to trade off db slave delay + minPrivateIPLifeTime = 5 * time.Second + rateLimitErrorSleepPeriod = time.Millisecond 
* 200 + rateLimitErrorJitterFactor = 5 +) + +func NewIPAM( + vpcID string, + kubeClient kubernetes.Interface, + crdClient versioned.Interface, + bceClient cloud.Interface, + informerResyncPeriod time.Duration, + gcPeriod time.Duration, +) (ipamgeneric.RoceInterface, error) { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ + Interface: kubeClient.CoreV1().Events(""), + }) + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cce-eri-ipam"}) + + kubeInformer := informers.NewSharedInformerFactory(kubeClient, informerResyncPeriod) + crdInformer := crdinformers.NewSharedInformerFactory(crdClient, informerResyncPeriod) + + ipam := &IPAM{ + vpcID: vpcID, + eventRecorder: recorder, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + gcPeriod: gcPeriod, + cloud: bceClient, + cacheHasSynced: false, + nodeCache: make(map[string]*corev1.Node), + } + return ipam, nil +} + +func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID string, + mac string) (*v1alpha1.WorkloadEndpoint, error) { + log.Infof(ctx, "[Allocate] allocating IP for eri pod (%v %v) starts", namespace, name) + defer log.Infof(ctx, "[Allocate] allocating IP for eri pod (%v %v) ends", namespace, name) + + if !ipam.Ready(ctx) { + log.Warningf(ctx, "eri ipam has not synced cache yet") + return nil, fmt.Errorf("eri ipam has not synced cache yet") + } + + node, nodeErr := ipam.getNodeByPodName(ctx, namespace, name) + if nodeErr != nil { + log.Errorf(ctx, "get node for pod (%s/%s) error: %v", namespace, name, nodeErr) + return nil, nodeErr + } + nodeName := node.Name + + instanceID, insIDErr := util.GetInstanceIDFromNode(node) + if insIDErr != nil { + log.Errorf(ctx, "get instanceID for pod (%s/%s) error: %v", namespace, name, insIDErr) + return nil, insIDErr + } + log.Infof(ctx, "instanceID for pod (%s/%s) is %s ", namespace, name, instanceID) + + // 
add node to cache if not in the cache + if _, exist := ipam.nodeCache[instanceID]; !exist { + ipam.nodeCache[instanceID] = node + } + + eriInfo, err := ipam.findMatchedEriByMac(ctx, instanceID, mac) + if err != nil { + log.Errorf(ctx, "failed to find a suitable eri by mac for pod (%v %v) in eri ipam: %v", namespace, name, err) + return nil, err + } + log.Infof(ctx, "find eniID is %s ", eriInfo.EniId) + + ipam.lock.Lock() + defer ipam.lock.Unlock() + mwep, err := ipam.crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Lister().MultiIPWorkloadEndpoints(namespace).Get(name) + if err != nil { + if errors.IsNotFound(err) { + return ipam.allocateFirstIP(ctx, namespace, name, eriInfo.EniId, nodeName, instanceID, containerID, mac) + } + log.Errorf(ctx, "get mwep for pod (%v/%v) error: %v", namespace, name, err) + return nil, err + } + + // not first allocate ip for pod + if mwep.NodeName != nodeName { + msg := fmt.Sprintf("mwep node name: %s, not match current node in eri ipam: %s", + mwep.NodeName, nodeName) + log.Warningf(ctx, msg) + return nil, fmt.Errorf(msg) + } + + return ipam.allocateOtherIP(ctx, mwep, eriInfo.EniId, containerID, mac) +} + +// find eri by mac address. 
Return matched eri and eri list of the instanceID +func (ipam *IPAM) findMatchedEriByMac(ctx context.Context, instanceID string, macAddress string) (*enisdk.Eni, error) { + log.Infof(ctx, "start to find suitable eri by mac for instanceID %v/%v", instanceID, macAddress) + listArgs := enisdk.ListEniArgs{ + InstanceId: instanceID, + VpcId: ipam.vpcID, + } + eriList, listErr := ipam.cloud.ListERIs(ctx, listArgs) + if listErr != nil { + log.Errorf(ctx, "failed to get eri: %v", listErr) + return nil, listErr + } + + for index := range eriList { + eriInfo := eriList[index] + if eriInfo.MacAddress == macAddress { + return &eriInfo, nil + } + } + + log.Errorf(ctx, "macAddress %s mismatch, eriList: %v", macAddress, eriList) + return nil, fmt.Errorf("macAddress %s mismatch, eriList: %v", macAddress, eriList) +} + +// allocate first ip for pod, and create mwep +func (ipam *IPAM) allocateFirstIP(ctx context.Context, namespace, name, eniID, nodeName, + instanceID, containerID, mac string) (*v1alpha1.WorkloadEndpoint, error) { + // 1. allocate ip + ipResult, ipErr := ipam.tryAllocateIP(ctx, namespace, name, eniID) + if ipErr != nil { + msg := fmt.Sprintf("error allocate private IP for pod (%s/%s): %s", namespace, name, ipErr) + log.Error(ctx, msg) + return nil, goerrors.New(msg) + } + + log.Infof(ctx, "eri ipam allocate ip for pod (%s/%s) success, allocate ip result %s ", namespace, name, ipResult) + + // 2. 
create mwep + mwep := &v1alpha1.MultiIPWorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Finalizers: []string{ipamgeneric.MwepFinalizer}, + Labels: map[string]string{ + ipamgeneric.MwepLabelInstanceTypeKey: ipamgeneric.MwepTypeERI, + v1.LabelInstanceType: "BCC", + }, + }, + NodeName: nodeName, + Type: ipamgeneric.MwepTypeERI, + InstanceID: instanceID, + Spec: []v1alpha1.MultiIPWorkloadEndpointSpec{ + { + EniID: eniID, + ContainerID: containerID, + IP: ipResult, + Mac: mac, + UpdateAt: metav1.Time{Time: time.Now()}, + }, + }, + } + + _, createErr := ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(namespace).Create(ctx, mwep, metav1.CreateOptions{}) + if createErr != nil { + // rollback + log.Errorf(ctx, "create mwep for pod (%s/%s) error: %v", namespace, name, createErr) + time.Sleep(minPrivateIPLifeTime) + + if delErr := ipam.tryDeleteIP(ctx, mwep.Namespace, mwep.Name, eniID, ipResult); delErr != nil { + log.Errorf(ctx, "deleted private ip %s for pod (%s/%s) error: %v", + ipResult, namespace, name, delErr) + return nil, delErr + } + return nil, createErr + } + log.Infof(ctx, "create mwep %v for pod (%s/%s) success", mwep.Spec, namespace, name) + + return &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + ENIID: eniID, + ContainerID: containerID, + IP: ipResult, + Mac: mac, + }, + }, nil +} + +// allocate ip then update mwep +func (ipam *IPAM) allocateOtherIP(ctx context.Context, mwep *v1alpha1.MultiIPWorkloadEndpoint, + eniID, containerID, mac string) (*v1alpha1.WorkloadEndpoint, error) { + if mwep == nil { + return nil, fmt.Errorf("mwep required") + } + if mwep.Type != ipamgeneric.MwepTypeERI { + msg := fmt.Sprintf("mwep %s/%s type is %s, not eri", mwep.Namespace, mwep.Name, mwep.Type) + log.Warning(ctx, msg) + return nil, fmt.Errorf(msg) + } + + var oldMwepSpec *v1alpha1.MultiIPWorkloadEndpointSpec + for i := range mwep.Spec 
{ + tmpMwepSpec := mwep.Spec[i] + if tmpMwepSpec.EniID == eniID { + oldMwepSpec = &tmpMwepSpec + break + } + } + // 1. return wep info if mwep contains info of the eniID + if oldMwepSpec != nil { + return &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: mwep.Name, + Namespace: mwep.Namespace, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + ENIID: eniID, + ContainerID: containerID, + IP: oldMwepSpec.IP, + Mac: oldMwepSpec.Mac, + }, + }, nil + } + // 2. allocate ip for the eniID + ipResult, ipErr := ipam.tryAllocateIP(ctx, mwep.Namespace, mwep.Name, eniID) + if ipErr != nil { + msg := fmt.Sprintf("error allocate private IP for pod (%s/%s): %s", mwep.Namespace, mwep.Name, ipErr) + log.Error(ctx, msg) + return nil, goerrors.New(msg) + } + log.Infof(ctx, "eri ipam allocate ip for pod (%s/%s) success, allocate ip result %s ", + mwep.Namespace, mwep.Name, ipResult) + + // append to mwep + newMwepSpec := v1alpha1.MultiIPWorkloadEndpointSpec{ + IP: ipResult, + EniID: eniID, + Mac: mac, + ContainerID: containerID, + UpdateAt: metav1.Time{Time: time.Now()}, + } + + mwep.Spec = append(mwep.Spec, newMwepSpec) + log.Infof(ctx, "new specList count is %d", len(mwep.Spec)) + + // 3. 
update mwep + _, updateErr := ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(mwep.Namespace).Update( + ctx, mwep, metav1.UpdateOptions{}) + if updateErr != nil { + // rollback + log.Errorf(ctx, "update mwep for pod (%s/%s) error: %s", mwep.Namespace, mwep.Name, updateErr) + time.Sleep(minPrivateIPLifeTime) + + if delErr := ipam.tryDeleteIP(ctx, mwep.Namespace, mwep.Name, eniID, ipResult); delErr != nil { + log.Errorf(ctx, "deleted private ip %s for pod (%v/%v) error: %v", + ipResult, mwep.Namespace, mwep.Name, delErr) + return nil, delErr + } + return nil, updateErr + } + + return &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: mwep.Name, + Namespace: mwep.Namespace, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + ENIID: eniID, + ContainerID: containerID, + IP: ipResult, + Mac: mac, + }, + }, nil +} + +func (ipam *IPAM) tryAllocateIP(ctx context.Context, namespace, podName, eniID string) (string, error) { + log.Infof(ctx, "start allocate IP for pod %s/%s, eniID: %s", + namespace, podName, eniID) + + for i := 0; i < cloudMaxTry; i++ { + log.Infof(ctx, "allocate IP max try time is %d, now is %d time", cloudMaxTry, i) + + ipResult, err := ipam.cloud.AddPrivateIP(ctx, "", eniID) + if err == nil { + log.Infof(ctx, "add private IP %s for pod (%s/%s) success", ipResult, namespace, podName) + metric.MultiEniMultiIPEniIPCount.WithLabelValues(metric.MetaInfo.ClusterID, metric.MetaInfo.VPCID, eniID).Inc() + return ipResult, nil + } + + if cloud.IsErrorRateLimit(err) { + // retry + time.Sleep(wait.Jitter(rateLimitErrorSleepPeriod, rateLimitErrorJitterFactor)) + } else { + log.Errorf(ctx, "error add privateIP in eniID %v for pod %v/%v: %v", eniID, namespace, podName, err) + return "", err + } + } + return "", fmt.Errorf("allocate IP failed, retry count exceeded") +} + +func (ipam *IPAM) Release(ctx context.Context, name, namespace, containerID string) (*v1alpha1.WorkloadEndpoint, error) { + log.Infof(ctx, "[Release] releasing IP for eri pod (%v/%v) 
starts", namespace, name) + defer log.Infof(ctx, "[Release] releasing IP for eri pod (%v/%v) ends", namespace, name) + + if !ipam.Ready(ctx) { + log.Warningf(ctx, "release: eri ipamd has not synced cache yet") + return nil, fmt.Errorf("release: eri ipamd has not synced cache yet") + } + + // mwep, avoid data racing + tmpMwep, mwepErr := ipam.crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Lister(). + MultiIPWorkloadEndpoints(namespace).Get(name) + if mwepErr != nil { + log.Errorf(ctx, "release: get mwep of pod (%v/%v) failed: %v", namespace, name, mwepErr) + return nil, mwepErr + } + mwep := tmpMwep.DeepCopy() + + // delete ip, delete mwep crd + releaseErr := ipam.releaseIPByMwep(ctx, mwep) + if releaseErr != nil { + return nil, releaseErr + } + // This API doesn't care about response body + wep := &v1alpha1.WorkloadEndpoint{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: mwep.Name, + Namespace: mwep.Namespace, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + ContainerID: containerID, + Node: mwep.NodeName, + InstanceID: mwep.InstanceID, + }, + } + return wep, nil +} + +// delete ip, delete mwep +func (ipam *IPAM) releaseIPByMwep(ctx context.Context, mwep *v1alpha1.MultiIPWorkloadEndpoint) error { + namespace := mwep.Namespace + name := mwep.Name + for _, spec := range mwep.Spec { + ipErr := ipam.tryDeleteIP(ctx, namespace, name, spec.EniID, spec.IP) + if ipErr != nil { + log.Errorf(ctx, "release: delete private IP %s for pod (%s/%s) failed: %v", spec.IP, namespace, name, ipErr) + } else { + log.Infof(ctx, "release: delete private IP %s for pod (%s/%s) success", spec.IP, namespace, name) + } + } + + deleteErr := ipam.tryDeleteMwep(ctx, mwep) + if deleteErr != nil { + log.Errorf(ctx, "release: delete mwep %s/%s error: %v", namespace, name, deleteErr) + return deleteErr + } + log.Infof(ctx, "release: delete mwep %s/%s success", namespace, name) + return nil +} + +func (ipam *IPAM) tryDeleteIP(ctx context.Context, namespace, podName, 
eniID, privateIP string) error { + log.Infof(ctx, "start delete private IP %s for pod %s/%s, eniID: %s", + privateIP, namespace, podName, eniID) + + for i := 0; i < cloudMaxTry; i++ { + log.Infof(ctx, "delete private IP %s max try time is %d, now is %d time", privateIP, cloudMaxTry, i) + + err := ipam.cloud.DeletePrivateIP(ctx, privateIP, eniID) + if err == nil { + log.Infof(ctx, "delete private IP %s for pod (%s/%s) success", privateIP, namespace, podName) + metric.MultiEniMultiIPEniIPCount.WithLabelValues(metric.MetaInfo.ClusterID, metric.MetaInfo.VPCID, eniID).Dec() + return nil + } + + if cloud.IsErrorRateLimit(err) { + // retry + time.Sleep(wait.Jitter(rateLimitErrorSleepPeriod, rateLimitErrorJitterFactor)) + } else { + log.Errorf(ctx, "delete private IP %s for pod (%s/%s) failed: %s", privateIP, namespace, podName, err) + return err + } + } + return fmt.Errorf("delete IP failed, retry count exceeded") +} + +// Delete workload objects from the k8s cluster +func (ipam *IPAM) tryDeleteMwep(ctx context.Context, mwep *v1alpha1.MultiIPWorkloadEndpoint) (err error) { + // remove finalizers + mwep.Finalizers = nil + _, err = ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(mwep.Namespace).Update(ctx, mwep, metav1.UpdateOptions{}) + if err != nil { + log.Errorf(ctx, "tryDeleteWep failed to update wep for pod (%v %v): %v", mwep.Namespace, mwep.Name, err) + return err + } + // delete mwep + if err := ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(mwep.Namespace). 
+		Delete(ctx, mwep.Name, *metav1.NewDeleteOptions(0)); err != nil {
+		log.Errorf(ctx, "tryDeleteMwep failed to delete wep for orphaned pod (%v %v): %v", mwep.Namespace, mwep.Name, err)
+	} else {
+		log.Infof(ctx, "tryDeleteMwep delete wep for orphaned pod (%v %v) successfully", mwep.Namespace, mwep.Name)
+	}
+	return nil
+}
+
+func (ipam *IPAM) Run(ctx context.Context, stopCh <-chan struct{}) error {
+	defer func() {
+		runtime.HandleCrash()
+	}()
+
+	log.Info(ctx, "Starting cce ipam controller for eri")
+	defer log.Info(ctx, "Shutting down cce ipam controller for eri")
+
+	nodeInformer := ipam.kubeInformer.Core().V1().Nodes().Informer()
+	podInformer := ipam.kubeInformer.Core().V1().Pods().Informer()
+	mwepInformer := ipam.crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Informer()
+
+	ipam.kubeInformer.Start(stopCh)
+	ipam.crdInformer.Start(stopCh)
+
+	if !cache.WaitForNamedCacheSync(
+		"cce-ipam",
+		stopCh,
+		nodeInformer.HasSynced,
+		podInformer.HasSynced,
+		mwepInformer.HasSynced,
+	) {
+		log.Warning(ctx, "failed WaitForCacheSync, timeout")
+		return nil
+	}
+	log.Info(ctx, "WaitForCacheSync done")
+
+	// build node cache
+	nodeErr := ipam.buildNodeCache(ctx)
+	if nodeErr != nil {
+		return nodeErr
+	}
+	ipam.cacheHasSynced = true
+
+	go func() {
+		if err := ipam.syncNode(stopCh); err != nil {
+			log.Errorf(ctx, "failed to sync node info: %v", err)
+		}
+	}()
+
+	go func() {
+		if err := ipam.gc(stopCh); err != nil {
+			log.Errorf(ctx, "failed to start ipam gc: %v", err)
+		}
+	}()
+
+	log.Infof(ctx, "ipam cacheHasSynced is: %v", ipam.cacheHasSynced)
+
+	<-stopCh
+	return nil
+}
+
+// syncNode periodically rebuilds the node cache until stopCh is closed.
+func (ipam *IPAM) syncNode(stopCh <-chan struct{}) error {
+	ctx := log.NewContext()
+
+	err := wait.PollImmediateUntil(syncNodePeriod, func() (bool, error) {
+		return false, ipam.buildNodeCache(ctx)
+	}, stopCh)
+	return err
+}
+
+// buildNodeCache lists BCC/GPU/DCC nodes and caches them by instance ID,
+// using at most 10 concurrent workers guarded by ipam.lock.
+func (ipam *IPAM) buildNodeCache(ctx context.Context) error {
+	var (
+		wg sync.WaitGroup
+		ch = make(chan struct{}, 10)
+	)
+
+	nodeSelector, selErr := nodeListerSelector()
+	if selErr != nil {
+		return selErr
+	}
+	nodes, err := ipam.kubeInformer.Core().V1().Nodes().Lister().List(nodeSelector)
+	if err != nil {
+		log.Errorf(ctx, "failed to list nodes: %v", err)
+		return err
+	}
+
+	for _, node := range nodes {
+		ch <- struct{}{}
+		wg.Add(1)
+
+		go func(node *v1.Node) {
+			defer func() {
+				wg.Done()
+				<-ch
+			}()
+
+			instanceID, err := util.GetInstanceIDFromNode(node)
+			if err != nil {
+				return
+			}
+
+			ipam.lock.Lock()
+			defer ipam.lock.Unlock()
+			//add node instance
+			ipam.nodeCache[instanceID] = node
+		}(node)
+	}
+
+	wg.Wait()
+
+	return nil
+}
+
+// nodeListerSelector matches nodes whose instance type is BCC, GPU or DCC.
+func nodeListerSelector() (labels.Selector, error) {
+	requirement, err := labels.NewRequirement(v1.LabelInstanceType, selection.In, []string{"BCC", "GPU", "DCC"})
+	if err != nil {
+		return nil, err
+	}
+	return labels.NewSelector().Add(*requirement), nil
+}
+
+func mwepListerSelector() (labels.Selector, error) {
+	// for mwep owned by bcc, use selector "node.kubernetes.io/instance-type", to be compatible with old versions.
+ requireInstanceType, insErr := labels.NewRequirement(v1.LabelInstanceType, selection.In, []string{"BCC", "GPU", "DCC"}) + if insErr != nil { + return nil, insErr + } + requireMwepType, typeErr := labels.NewRequirement(ipamgeneric.MwepLabelInstanceTypeKey, selection.Equals, + []string{ipamgeneric.MwepTypeERI}) + if typeErr != nil { + return nil, typeErr + } + return labels.NewSelector().Add(*requireInstanceType, *requireMwepType), nil +} + +func (ipam *IPAM) Ready(_ context.Context) bool { + return ipam.cacheHasSynced +} + +func (ipam *IPAM) getNodeByPodName(ctx context.Context, namespace, podName string) (*corev1.Node, error) { + pod, podErr := ipam.kubeInformer.Core().V1().Pods().Lister().Pods(namespace).Get(podName) + if podErr != nil { + log.Errorf(ctx, "get pod (%s/%s) error: %v", namespace, podName, podErr) + return nil, podErr + } + + return ipam.kubeInformer.Core().V1().Nodes().Lister().Get(pod.Spec.NodeName) +} diff --git a/pkg/eniipam/ipam/eri/ipam_test.go b/pkg/eniipam/ipam/eri/ipam_test.go new file mode 100644 index 0000000..049eff0 --- /dev/null +++ b/pkg/eniipam/ipam/eri/ipam_test.go @@ -0,0 +1,755 @@ +package eri + +import ( + "context" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" + mockcloud "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud/testing" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" + crdfake "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned/fake" + crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" + "github.com/baidubce/bce-sdk-go/services/eni" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + kubefake "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "reflect" + "sync" + "testing" + "time" +) + +type IPAMTest struct { + suite.Suite + ipam *IPAM + wantErr bool + want *v1alpha1.WorkloadEndpoint + ctx context.Context + name string + namespace string + containerID string + podLabel labels.Set + stopChan chan struct{} +} + +// 每次测试前设置上下文 +func (suite *IPAMTest) SetupTest() { + suite.stopChan = make(chan struct{}) + suite.ipam = mockIPAM(suite.T(), suite.stopChan) + suite.ctx = context.TODO() + suite.name = "busybox" + suite.namespace = corev1.NamespaceDefault + suite.podLabel = labels.Set{ + "k8s.io/app": "busybox", + } + + runtime.ReallyCrash = false +} + +// mock a ipam server +func mockIPAM(t *testing.T, stopChan chan struct{}) *IPAM { + ctrl := gomock.NewController(t) + kubeClient, _, crdClient, _, cloudClient := setupEnv(ctrl) + ipam, _ := NewIPAM( + "test-vpcid", + kubeClient, + crdClient, + cloudClient, + 20*time.Second, + 300*time.Second, + ) + ipamServer := ipam.(*IPAM) + ipamServer.cacheHasSynced = true + nodeCache := map[string]*corev1.Node{ + "eni-df8888fs": { + ObjectMeta: metav1.ObjectMeta{ + Name: "", + }, + }, + } + ipamServer.nodeCache = nodeCache + + ipamServer.kubeInformer.Start(stopChan) + ipamServer.crdInformer.Start(stopChan) + return ipam.(*IPAM) +} + +func setupEnv(ctrl *gomock.Controller) ( + kubernetes.Interface, + informers.SharedInformerFactory, + versioned.Interface, + crdinformers.SharedInformerFactory, + *mockcloud.MockInterface, +) { + kubeClient := kubefake.NewSimpleClientset() + kubeInformer := informers.NewSharedInformerFactory(kubeClient, time.Minute) + 
crdClient := crdfake.NewSimpleClientset() + crdInformer := crdinformers.NewSharedInformerFactory(crdClient, time.Minute) + cloudClient := mockcloud.NewMockInterface(ctrl) + return kubeClient, kubeInformer, + crdClient, crdInformer, cloudClient +} + +func (suite *IPAMTest) TearDownTest() { + close(suite.stopChan) +} + +func (suite *IPAMTest) TestIPAMRun() { + mwep := mockMultiWorkloadEndpoint() + _, _ = suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault).Create( + suite.ctx, mwep, metav1.CreateOptions{}) + + mwep1 := mockMultiWorkloadEndpoint() + mwep1.Name = "busybox-1" + _, _ = suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault).Create( + suite.ctx, mwep1, metav1.CreateOptions{}) + + mwep2 := mockMultiWorkloadEndpoint() + mwep2.Name = "busybox-2" + _, _ = suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault).Create( + suite.ctx, mwep2, metav1.CreateOptions{}) + + suite.ipam.nodeCache = make(map[string]*corev1.Node) + go func() { + _ = suite.ipam.Run(suite.ctx, suite.stopChan) + }() + time.Sleep(3 * time.Second) +} + +func mockMultiWorkloadEndpoint() *networkingv1alpha1.MultiIPWorkloadEndpoint { + return &networkingv1alpha1.MultiIPWorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox-0", + Namespace: "default", + Labels: map[string]string{ + "beta.kubernetes.io/instance-type": "BCC", + }, + Finalizers: []string{"cce-cni.cce.io"}, + }, + Spec: []networkingv1alpha1.MultiIPWorkloadEndpointSpec{{ + IP: "192.168.1.199", + EniID: "eni-dfsfs", + UpdateAt: metav1.Time{Time: time.Unix(0, 0)}, + }, + { + IP: "192.168.1.189", + EniID: "eni-df8888fs", + UpdateAt: metav1.Time{Time: time.Unix(0, 0)}, + }, + { + IP: "192.168.1.179", + EniID: "eni-df8888fdfghds", + UpdateAt: metav1.Time{Time: time.Unix(0, 0)}, + }, + }, + } +} + +func TestIPAM(t *testing.T) { + suite.Run(t, new(IPAMTest)) +} + +func TestIPAM_Allocate(t *testing.T) { + type fields struct { + ctrl 
*gomock.Controller + lock sync.RWMutex + nodeCache map[string]*corev1.Node + privateIPNumCache map[string]int + cacheHasSynced bool + + kubeInformer informers.SharedInformerFactory + kubeClient kubernetes.Interface + crdInformer crdinformers.SharedInformerFactory + crdClient versioned.Interface + cloud cloud.Interface + + informerResyncPeriod time.Duration + gcPeriod time.Duration + eventRecorder record.EventRecorder + } + type args struct { + ctx context.Context + name string + namespace string + containerID string + mac string + } + tests := []struct { + name string + fields fields + args args + want *v1alpha1.WorkloadEndpoint + wantErr bool + }{ + { + name: "ipam has not synced cache", + fields: func() fields { + ctrl := gomock.NewController(t) + return fields{ + ctrl: ctrl, + cacheHasSynced: false, + } + }(), + args: args{}, + want: nil, + wantErr: true, + }, + { + name: "node has no pod", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, kubeInformer, crdClient, crdInformer, cloudClient := setupEnv(ctrl) + + waitForCacheSync(kubeInformer, crdInformer) + + return fields{ + ctrl: ctrl, + lock: sync.RWMutex{}, + nodeCache: make(map[string]*corev1.Node), + cacheHasSynced: true, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + cloud: cloudClient, + } + }(), + args: args{ + ctx: context.TODO(), + name: "busybox", + namespace: "default", + }, + want: nil, + wantErr: true, + }, + { + name: "node has no eni", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, kubeInformer, crdClient, crdInformer, cloudClient := setupEnv(ctrl) + + _, podErr := kubeClient.CoreV1().Pods(v1.NamespaceDefault).Create(context.TODO(), &v1.Pod{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + }, + Spec: v1.PodSpec{ + NodeName: "test-node", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, podErr) + + waitForCacheSync(kubeInformer, crdInformer) + + 
return fields{ + ctrl: ctrl, + lock: sync.RWMutex{}, + nodeCache: make(map[string]*corev1.Node), + cacheHasSynced: true, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + cloud: cloudClient, + } + }(), + args: args{ + ctx: context.TODO(), + name: "busybox", + namespace: "default", + }, + want: nil, + wantErr: true, + }, + { + name: "invalid mac", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, kubeInformer, crdClient, crdInformer, cloudClient := setupEnv(ctrl) + + _, podErr := kubeClient.CoreV1().Pods(v1.NamespaceDefault).Create(context.TODO(), &v1.Pod{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Labels: map[string]string{}, + }, + Spec: v1.PodSpec{ + NodeName: "test-node", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, podErr) + + _, nodeErr := kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Spec: corev1.NodeSpec{ + ProviderID: "cce://i-xxxxx", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, nodeErr) + + waitForCacheSync(kubeInformer, crdInformer) + + gomock.InOrder( + cloudClient.EXPECT().ListERIs(gomock.Any(), gomock.Any()).Return([]eni.Eni{ + { + EniId: "eni-test", + MacAddress: "mac-test", + }, + }, nil), + ) + + return fields{ + ctrl: ctrl, + lock: sync.RWMutex{}, + cacheHasSynced: true, + privateIPNumCache: map[string]int{}, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + cloud: cloudClient, + nodeCache: make(map[string]*corev1.Node), + } + }(), + args: args{ + ctx: context.TODO(), + name: "busybox", + namespace: "default", + mac: "invalid", + }, + wantErr: true, + }, + { + name: "allocate first ip", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, kubeInformer, crdClient, crdInformer, cloudClient := setupEnv(ctrl) + + _, podErr := 
kubeClient.CoreV1().Pods(v1.NamespaceDefault).Create(context.TODO(), &v1.Pod{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Labels: map[string]string{}, + }, + Spec: v1.PodSpec{ + NodeName: "test-node", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, podErr) + + _, nodeErr := kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Spec: corev1.NodeSpec{ + ProviderID: "cce://i-xxxxx", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, nodeErr) + + waitForCacheSync(kubeInformer, crdInformer) + + gomock.InOrder( + cloudClient.EXPECT().ListERIs(gomock.Any(), gomock.Any()).Return([]eni.Eni{ + { + EniId: "eni-test", + MacAddress: "mac-test", + }, + }, nil), + cloudClient.EXPECT().AddPrivateIP(gomock.Any(), gomock.Eq(""), gomock.Eq("eni-test")). + Return("10.1.1.1", nil), + ) + + return fields{ + ctrl: ctrl, + lock: sync.RWMutex{}, + cacheHasSynced: true, + privateIPNumCache: map[string]int{}, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + cloud: cloudClient, + nodeCache: make(map[string]*corev1.Node), + } + }(), + args: args{ + ctx: context.TODO(), + name: "busybox", + namespace: "default", + mac: "mac-test", + }, + want: &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Namespace: "default", + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + IP: "10.1.1.1", + ENIID: "eni-test", + Mac: "mac-test", + }, + }, + wantErr: false, + }, + { + name: "already allocated", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, kubeInformer, crdClient, crdInformer, cloudClient := setupEnv(ctrl) + + _, podErr := kubeClient.CoreV1().Pods(v1.NamespaceDefault).Create(context.TODO(), &v1.Pod{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Labels: map[string]string{}, + }, + Spec: v1.PodSpec{ + NodeName: "test-node", + }, + }, 
metav1.CreateOptions{}) + assert.Nil(t, podErr) + + _, nodeErr := kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Spec: corev1.NodeSpec{ + ProviderID: "cce://i-xxxxx", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, nodeErr) + + // 准备 mwep + _, mwepErr := crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(v1.NamespaceDefault).Create( + context.TODO(), &networkingv1alpha1.MultiIPWorkloadEndpoint{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + }, + NodeName: "test-node", + Type: ipamgeneric.MwepTypeERI, + Spec: []networkingv1alpha1.MultiIPWorkloadEndpointSpec{ + { + EniID: "eni-test-1", + IP: "10.1.1.1", + Mac: "mac-test-1", + }, + }, + }, metav1.CreateOptions{}) + assert.Nil(t, mwepErr) + + waitForCacheSync(kubeInformer, crdInformer) + + gomock.InOrder( + cloudClient.EXPECT().ListERIs(gomock.Any(), gomock.Any()).Return([]eni.Eni{ + { + EniId: "eni-test-1", + MacAddress: "mac-test-1", + }, + }, nil), + ) + + return fields{ + ctrl: ctrl, + lock: sync.RWMutex{}, + cacheHasSynced: true, + privateIPNumCache: map[string]int{}, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + cloud: cloudClient, + nodeCache: make(map[string]*corev1.Node), + } + }(), + args: args{ + ctx: context.TODO(), + name: "busybox", + namespace: v1.NamespaceDefault, + mac: "mac-test-1", + }, + want: &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + IP: "10.1.1.1", + ENIID: "eni-test-1", + Mac: "mac-test-1", + }, + }, + wantErr: false, + }, + { + name: "allocate other ip", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, kubeInformer, crdClient, crdInformer, cloudClient := setupEnv(ctrl) + + _, podErr := kubeClient.CoreV1().Pods(v1.NamespaceDefault).Create(context.TODO(), &v1.Pod{ + TypeMeta: 
metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Labels: map[string]string{}, + }, + Spec: v1.PodSpec{ + NodeName: "test-node", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, podErr) + + _, nodeErr := kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Spec: corev1.NodeSpec{ + ProviderID: "cce://i-xxxxx", + }, + }, metav1.CreateOptions{}) + assert.Nil(t, nodeErr) + + // 准备 mwep + _, mwepErr := crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(v1.NamespaceDefault).Create( + context.TODO(), &networkingv1alpha1.MultiIPWorkloadEndpoint{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + }, + NodeName: "test-node", + Type: ipamgeneric.MwepTypeERI, + Spec: []networkingv1alpha1.MultiIPWorkloadEndpointSpec{ + { + EniID: "eni-test-1", + IP: "10.1.1.1", + Mac: "mac-test-1", + }, + }, + }, metav1.CreateOptions{}) + assert.Nil(t, mwepErr) + + waitForCacheSync(kubeInformer, crdInformer) + + gomock.InOrder( + cloudClient.EXPECT().ListERIs(gomock.Any(), gomock.Any()).Return([]eni.Eni{ + { + EniId: "eni-test-1", + MacAddress: "mac-test-1", + }, + { + EniId: "eni-test-2", + MacAddress: "mac-test-2", + }, + }, nil), + cloudClient.EXPECT().AddPrivateIP(gomock.Any(), gomock.Eq(""), gomock.Eq("eni-test-2")). 
+ Return("10.1.1.2", nil), + ) + + return fields{ + ctrl: ctrl, + lock: sync.RWMutex{}, + cacheHasSynced: true, + privateIPNumCache: map[string]int{}, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + cloud: cloudClient, + nodeCache: make(map[string]*corev1.Node), + } + }(), + args: args{ + ctx: context.TODO(), + name: "busybox", + namespace: v1.NamespaceDefault, + mac: "mac-test-2", + }, + want: &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Namespace: v1.NamespaceDefault, + }, + Spec: v1alpha1.WorkloadEndpointSpec{ + IP: "10.1.1.2", + ENIID: "eni-test-2", + Mac: "mac-test-2", + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + ipam := &IPAM{ + lock: tt.fields.lock, + nodeCache: tt.fields.nodeCache, + cacheHasSynced: tt.fields.cacheHasSynced, + eventRecorder: tt.fields.eventRecorder, + kubeInformer: tt.fields.kubeInformer, + kubeClient: tt.fields.kubeClient, + crdInformer: tt.fields.crdInformer, + crdClient: tt.fields.crdClient, + cloud: tt.fields.cloud, + gcPeriod: tt.fields.gcPeriod, + } + got, err := ipam.Allocate(tt.args.ctx, tt.args.name, tt.args.namespace, tt.args.containerID, tt.args.mac) + if (err != nil) != tt.wantErr { + t.Errorf("Allocate() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Allocate() got = %v, want %v", got, tt.want) + } + }) + } +} + +func waitForCacheSync(kubeInformer informers.SharedInformerFactory, crdInformer crdinformers.SharedInformerFactory) { + nodeInformer := kubeInformer.Core().V1().Nodes().Informer() + podInformer := kubeInformer.Core().V1().Pods().Informer() + mwepInformer := crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Informer() + ippoolInformer := crdInformer.Cce().V1alpha1().IPPools().Informer() + subnetInformer := 
crdInformer.Cce().V1alpha1().Subnets().Informer() + + kubeInformer.Start(wait.NeverStop) + crdInformer.Start(wait.NeverStop) + + cache.WaitForNamedCacheSync( + "cce-ipam", + wait.NeverStop, + nodeInformer.HasSynced, + podInformer.HasSynced, + mwepInformer.HasSynced, + ippoolInformer.HasSynced, + subnetInformer.HasSynced, + ) +} + +func TestIPAM_Release(t *testing.T) { + type fields struct { + ctrl *gomock.Controller + lock sync.RWMutex + nodeCache map[string]*corev1.Node + cacheHasSynced bool + kubeInformer informers.SharedInformerFactory + kubeClient kubernetes.Interface + crdInformer crdinformers.SharedInformerFactory + crdClient versioned.Interface + cloud cloud.Interface + gcPeriod time.Duration + } + type args struct { + ctx context.Context + name string + namespace string + containerID string + mac string + } + tests := []struct { + name string + fields fields + args args + want *v1alpha1.WorkloadEndpoint + wantErr bool + }{ + { + name: "delete ip success", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, kubeInformer, crdClient, crdInformer, cloudClient := setupEnv(ctrl) + + _, mwepErr := crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(v1.NamespaceDefault). 
+ Create(context.TODO(), &v1alpha1.MultiIPWorkloadEndpoint{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + }, + Spec: []v1alpha1.MultiIPWorkloadEndpointSpec{ + { + EniID: "eni-1", + IP: "10.1.1.1", + }, + { + EniID: "eni-2", + IP: "10.1.1.2", + }, + }, + }, metav1.CreateOptions{}) + assert.Nil(t, mwepErr) + + waitForCacheSync(kubeInformer, crdInformer) + + gomock.InOrder( + cloudClient.EXPECT().DeletePrivateIP(gomock.Any(), gomock.Eq("10.1.1.1"), gomock.Eq("eni-1")).Return(nil), + cloudClient.EXPECT().DeletePrivateIP(gomock.Any(), gomock.Eq("10.1.1.2"), gomock.Eq("eni-2")).Return(nil), + ) + + return fields{ + ctrl: ctrl, + lock: sync.RWMutex{}, + cacheHasSynced: true, + kubeInformer: kubeInformer, + kubeClient: kubeClient, + crdInformer: crdInformer, + crdClient: crdClient, + cloud: cloudClient, + } + }(), + args: args{ + ctx: context.TODO(), + name: "busybox", + namespace: v1.NamespaceDefault, + }, + want: &v1alpha1.WorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "busybox", + Namespace: v1.NamespaceDefault, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + ipam := &IPAM{ + lock: tt.fields.lock, + cacheHasSynced: tt.fields.cacheHasSynced, + kubeInformer: tt.fields.kubeInformer, + kubeClient: tt.fields.kubeClient, + crdInformer: tt.fields.crdInformer, + crdClient: tt.fields.crdClient, + cloud: tt.fields.cloud, + gcPeriod: tt.fields.gcPeriod, + } + got, err := ipam.Release(tt.args.ctx, tt.args.name, tt.args.namespace, tt.args.containerID) + if (err != nil) != tt.wantErr { + t.Errorf("Release() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Release() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/eniipam/ipam/eri/types.go b/pkg/eniipam/ipam/eri/types.go new file mode 100644 index 0000000..420b528 --- /dev/null +++ 
b/pkg/eniipam/ipam/eri/types.go @@ -0,0 +1,35 @@ +package eri + +import ( + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/cloud" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" + crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/record" + "sync" + "time" +) + +type IPAM struct { + lock sync.RWMutex + + vpcID string + // key is instanceID, value is list of node + nodeCache map[string]*corev1.Node + // ipam will rebuild cache if restarts, should not handle request from cni if cacheHasSynced is false + cacheHasSynced bool + gcPeriod time.Duration + + eventRecorder record.EventRecorder + kubeInformer informers.SharedInformerFactory + kubeClient kubernetes.Interface + crdInformer crdinformers.SharedInformerFactory + crdClient versioned.Interface + + cloud cloud.Interface +} + +var _ ipam.RoceInterface = &IPAM{} diff --git a/pkg/eniipam/ipam/ipcache/cache_map.go b/pkg/eniipam/ipam/ipcache/cache_map.go new file mode 100644 index 0000000..8afdfe9 --- /dev/null +++ b/pkg/eniipam/ipam/ipcache/cache_map.go @@ -0,0 +1,136 @@ +package ipcache + +import ( + "sync" +) + +type CacheMap[T any] struct { + lock sync.RWMutex + pool map[string]T +} + +func NewCacheMap[T any]() *CacheMap[T] { + return &CacheMap[T]{ + pool: make(map[string]T), + } +} + +func (cm *CacheMap[T]) Add(key string, value T) { + cm.lock.Lock() + defer cm.lock.Unlock() + if key == "" { + return + } + cm.pool[key] = value +} + +func (cm *CacheMap[T]) AddIfNotExists(key string, value T) { + if !cm.Exists(key) { + cm.Add(key, value) + } +} + +func (cm *CacheMap[T]) Delete(key string) bool { + cm.lock.Lock() + defer cm.lock.Unlock() + _, ok := cm.pool[key] + delete(cm.pool, key) + return ok +} + +func (cm *CacheMap[T]) Exists(key string) 
bool { + cm.lock.RLock() + defer cm.lock.RUnlock() + _, ok := cm.pool[key] + return ok +} + +func (cm *CacheMap[T]) Get(key string) (T, bool) { + cm.lock.RLock() + defer cm.lock.RUnlock() + v, ok := cm.pool[key] + return v, ok +} + +func (cm *CacheMap[T]) ForEach(fun func(key string, item T) bool) { + cm.lock.RLock() + defer cm.lock.RUnlock() + + for key, arr := range cm.pool { + if !fun(key, arr) { + return + } + } +} + +type CacheMapArray[T any] struct { + lock sync.RWMutex + pool map[string][]T +} + +func NewCacheMapArray[T any]() *CacheMapArray[T] { + return &CacheMapArray[T]{ + pool: make(map[string][]T), + } +} + +func (cm *CacheMapArray[T]) Append(key string, value ...T) { + cm.lock.Lock() + defer cm.lock.Unlock() + if key == "" { + return + } + cm.pool[key] = append(cm.pool[key], value...) +} + +func (cm *CacheMapArray[T]) Delete(key string) bool { + cm.lock.Lock() + defer cm.lock.Unlock() + _, ok := cm.pool[key] + delete(cm.pool, key) + return ok +} + +func (cm *CacheMapArray[T]) Exists(key string) bool { + cm.lock.RLock() + defer cm.lock.RUnlock() + v, ok := cm.pool[key] + if !ok || len(v) == 0 { + return false + } + return true +} + +func (cm *CacheMapArray[T]) Get(key string) ([]T, bool) { + cm.lock.RLock() + defer cm.lock.RUnlock() + v, ok := cm.pool[key] + if !ok || len(v) == 0 { + return v, false + } + return v, ok +} + +func (cm *CacheMapArray[T]) ForEachSubItem(fun func(key string, index int, item T) bool) { + cm.lock.RLock() + defer cm.lock.RUnlock() + + for key, arr := range cm.pool { + for i, v := range arr { + if !fun(key, i, v) { + return + } + } + } +} + +func (cm *CacheMapArray[T]) ForEach(fun func(key string, item []T) bool) { + cm.lock.RLock() + defer cm.lock.RUnlock() + + for key, arr := range cm.pool { + if !fun(key, arr) { + return + } + } +} diff --git a/pkg/eniipam/ipam/ipcache/cache_map_test.go b/pkg/eniipam/ipam/ipcache/cache_map_test.go new file mode 100644 index 0000000..4ea9f16 --- /dev/null +++ 
b/pkg/eniipam/ipam/ipcache/cache_map_test.go @@ -0,0 +1,53 @@ +package ipcache + +import ( + "testing" + + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/test/data" + "github.com/stretchr/testify/suite" +) + +type ipcacheTest struct { + suite.Suite +} + +func (suite *ipcacheTest) TestMap() { + pool := NewReuseIPAndWepPool() + + wep := data.MockFixedWorkloadEndpoint() + wep.Spec.IPv6 = "::1" + pool.OnAdd(wep) + suite.Truef(pool.Exists(wep.Spec.IP), "wep in map") + _, ok := pool.Get(wep.Spec.IPv6) + suite.Truef(ok, "wep 6 in map") + pool.OnUpdate(wep, wep) + pool.OnDelete(wep) + + pool.AddIfNotExists("10.0.0.1", wep) + pool.AddIfNotExists("", wep) + pool.ForEach(func(key string, item *networkingv1alpha1.WorkloadEndpoint) bool { + return false + }) +} + +func (suite *ipcacheTest) TestMapArray() { + pool := NewCacheMapArray[int]() + + pool.Append("a", 0) + pool.Append("", 0) + if suite.Truef(pool.Exists("a"), "wep in map") { + pool.Get("a") + } + pool.ForEach(func(key string, item []int) bool { + return false + }) + pool.ForEachSubItem(func(key string, index, item int) bool { + return false + }) + pool.Delete("0") +} + +func TestCacheMap(t *testing.T) { + suite.Run(t, new(ipcacheTest)) +} diff --git a/pkg/eniipam/ipam/ipcache/reuse_ip_pool.go b/pkg/eniipam/ipam/ipcache/reuse_ip_pool.go new file mode 100644 index 0000000..ef78464 --- /dev/null +++ b/pkg/eniipam/ipam/ipcache/reuse_ip_pool.go @@ -0,0 +1,59 @@ +package ipcache + +import ( + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apimachinery/networking" + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "k8s.io/client-go/tools/cache" +) + +type ReuseIPAndWepPool struct { + *CacheMap[*networkingv1alpha1.WorkloadEndpoint] +} + +func NewReuseIPAndWepPool() *ReuseIPAndWepPool { + return &ReuseIPAndWepPool{ + CacheMap: NewCacheMap[*networkingv1alpha1.WorkloadEndpoint](), 
+ } +} + +func (pool *ReuseIPAndWepPool) OnAdd(obj interface{}) { + wep, ok := obj.(*networkingv1alpha1.WorkloadEndpoint) + if ok { + if networking.ISCustomReuseModeWEP(wep) || networking.IsFixIPStatefulSetPodWep(wep) { + pool.CacheMap.Add(wep.Spec.IP, wep) + if wep.Spec.IPv6 != "" { + pool.CacheMap.Add(wep.Spec.IPv6, wep) + } + } + } +} +func (pool *ReuseIPAndWepPool) OnUpdate(oldObj, newObj interface{}) { + old, ok := oldObj.(*networkingv1alpha1.WorkloadEndpoint) + if ok { + pool.CacheMap.Delete(old.Spec.IP) + if old.Spec.IPv6 != "" { + pool.CacheMap.Delete(old.Spec.IPv6) + } + } + wep, ok := newObj.(*networkingv1alpha1.WorkloadEndpoint) + if ok { + if networking.ISCustomReuseModeWEP(wep) || networking.IsFixIPStatefulSetPodWep(wep) { + pool.CacheMap.Add(wep.Spec.IP, wep) + if wep.Spec.IPv6 != "" { + pool.CacheMap.Add(wep.Spec.IPv6, wep) + } + } + } +} + +func (pool *ReuseIPAndWepPool) OnDelete(obj interface{}) { + old, ok := obj.(*networkingv1alpha1.WorkloadEndpoint) + if ok { + pool.CacheMap.Delete(old.Spec.IP) + if old.Spec.IPv6 != "" { + pool.CacheMap.Delete(old.Spec.IPv6) + } + } +} + +var _ cache.ResourceEventHandler = &ReuseIPAndWepPool{} diff --git a/pkg/eniipam/ipam/roce/gc.go b/pkg/eniipam/ipam/roce/gc.go index 7295a2a..538f920 100644 --- a/pkg/eniipam/ipam/roce/gc.go +++ b/pkg/eniipam/ipam/roce/gc.go @@ -3,9 +3,9 @@ package roce import ( "context" "fmt" - "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/bce/hpc" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -14,7 +14,7 @@ import ( ) func (ipam *IPAM) gc(stopCh <-chan struct{}) error { - log.Infof(context.TODO(), "start gc by roce ipam,gcPeriod is %v", ipam.gcPeriod) + log.Infof(context.TODO(), "start gc by roce ipam, gcPeriod is %v", ipam.gcPeriod) err := 
wait.PollImmediateUntil(wait.Jitter(ipam.gcPeriod, 0.5), func() (bool, error) { ctx := log.NewContext() @@ -42,7 +42,8 @@ func (ipam *IPAM) gc(stopCh <-chan struct{}) error { return false, nil } - err = ipam.gcLeakedIP(ctx, mwepList) + // release ip when ip not in mwep + err = ipam.gcLeakedIP(ctx) if err != nil { return false, nil } @@ -56,80 +57,79 @@ func (ipam *IPAM) gc(stopCh <-chan struct{}) error { return nil } -type multiIPWorkloadInfo struct { - PodName string - EniID string - Namespace string -} - -func (ipam *IPAM) gcLeakedIP(ctx context.Context, mwepList []*v1alpha1.MultiIPWorkloadEndpoint) error { - mwepIPMap := make(map[string]multiIPWorkloadInfo) - if len(mwepList) == 0 { - log.Infof(context.TODO(), "list nodeCache count is %d ", len(ipam.nodeCache)) - for instance, _ := range ipam.nodeCache { - result, err := ipam.cloud.GetHPCEniID(ctx, instance) - if err != nil { - return err - } - if err = ipam.makeHpcEniIP(ctx, result.Result, mwepIPMap); err != nil { - return err - } - } - } - - for _, mwep := range mwepList { - for _, spec := range mwep.Spec { - if _, exist := mwepIPMap[spec.IP]; !exist { - mwepInfo := multiIPWorkloadInfo{ - EniID: spec.EniID, - PodName: mwep.Name, - Namespace: mwep.Namespace, - } - mwepIPMap[spec.IP] = mwepInfo - } - } - } - - for _, mwep := range mwepList { - result, err := ipam.cloud.GetHPCEniID(ctx, mwep.InstanceID) +// release ip when ip not in mwep +func (ipam *IPAM) gcLeakedIP(ctx context.Context) error { + for instanceID := range ipam.nodeCache { + result, err := ipam.cloud.GetHPCEniID(ctx, instanceID) if err != nil { log.Errorf(ctx, "gc getHPCEniID : get hPCEni privateIP has error %v", err) return err } - if err = ipam.makeHpcEniIP(ctx, result.Result, mwepIPMap); err != nil { - log.Errorf(ctx, "gc makeHpcEniIP : make hpcEni privateIP has error %v", err) - return err + // get mwep by instanceID + ipSet, ipErr := ipam.getIPSetForNode(ctx, instanceID) + if ipErr != nil { + log.Infof(ctx, "get ip set for %s failed: %s", 
instanceID, ipErr) + continue } + // delete eri ip when ip not in mwep + ipam.gcOneNodeLeakedIP(ctx, result.Result, ipSet) } + return nil } -func (ipam *IPAM) makeHpcEniIP(ctx context.Context, result []hpc.Result, mwepIPMap map[string]multiIPWorkloadInfo) error { - for _, r := range result { - for _, ips := range r.PrivateIPSet { - if err := ipam.checkIPExist(ctx, mwepIPMap, ips.PrivateIPAddress, ips.Primary, r.EniID); err != nil { - log.Errorf(ctx, "gc checkIPExist : check IP exist has error %v", err) - return err +func (ipam *IPAM) getIPSetForNode(ctx context.Context, instanceID string) (map[string]struct{}, error) { + // list mwep + mwepSelector, selectorErr := mwepListerSelector() + if selectorErr != nil { + log.Errorf(ctx, "make mwep lister selector has error: %v", selectorErr) + return nil, selectorErr + } + mwepList, mwepErr := ipam.crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Lister().List(mwepSelector) + if mwepErr != nil { + log.Errorf(ctx, "gc: error list mwep in cluster: %v", mwepErr) + return nil, mwepErr + } + log.Infof(ctx, "list mwepList count is %d ", len(mwepList)) + + // collect ip for instanceID + ipSet := make(map[string]struct{}) + for _, mwep := range mwepList { + if mwep.Type != ipamgeneric.MwepTypeRoce { + continue + } + if mwep.InstanceID != instanceID { + continue + } + for _, spec := range mwep.Spec { + if _, exist := ipSet[spec.IP]; exist { + continue } + ipSet[spec.IP] = struct{}{} } } - return nil + return ipSet, nil } -func (ipam *IPAM) checkIPExist(ctx context.Context, mwepIPMap map[string]multiIPWorkloadInfo, eniPrivateIP string, primary bool, eniID string) error { - if info, exist := mwepIPMap[eniPrivateIP]; !exist && !primary { - _, err := ipam.kubeClient.CoreV1().Pods(info.Namespace).Get(ctx, info.PodName, metav1.GetOptions{}) - if err != nil { - if err := ipam.deleteRocePrivateIP(ctx, eniID, eniPrivateIP); err != nil { - log.Errorf(ctx, "gc deleteRocePrivateIP : failed to delete private IP %v on %v for leaked pod: 
%v", eniPrivateIP, eniID, err) - return err +func (ipam *IPAM) gcOneNodeLeakedIP(ctx context.Context, resultList []hpc.Result, ipSet map[string]struct{}) { + for _, hpcResult := range resultList { + for _, privateIP := range hpcResult.PrivateIPSet { + if privateIP.Primary { + continue + } + if _, exist := ipSet[privateIP.PrivateIPAddress]; exist { + continue + } + log.Infof(ctx, "gc: privateIP %s not found in mwep, try to delete privateIP", + privateIP.PrivateIPAddress) + + // delete ip + if err := ipam.deleteRocePrivateIP(ctx, hpcResult.EniID, privateIP.PrivateIPAddress); err != nil { + log.Errorf(ctx, "gc: failed to delete privateIP %s on %s for not found in mwep: %s", + privateIP.PrivateIPAddress, hpcResult.EniID, err) } - log.Infof(ctx, "gc deleteRocePrivateIP: delete private IP %v on %v for leaked pod successfully", eniPrivateIP, eniID) - } else { - log.Errorf(ctx, "gc checkIPExist : failed to get %s/%s pod has error %v, eniID is %s", info.Namespace, info.PodName, err, eniID) } } - return nil } func (ipam *IPAM) gcLeakedPod(ctx context.Context, mwepList []*v1alpha1.MultiIPWorkloadEndpoint) error { diff --git a/pkg/eniipam/ipam/roce/gc_test.go b/pkg/eniipam/ipam/roce/gc_test.go index 00732f8..cbf038e 100644 --- a/pkg/eniipam/ipam/roce/gc_test.go +++ b/pkg/eniipam/ipam/roce/gc_test.go @@ -3,6 +3,7 @@ package roce import ( "context" "fmt" + ipamgeneric "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam" "testing" "time" @@ -207,39 +208,87 @@ func (suite *IPAMGC) Test_gc() { } func (suite *IPAMGC) Test__gcLeakedIP() { - suite.ipam.nodeCache = map[string]*corev1.Node{"i-cdcac": &corev1.Node{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: "", - Namespace: "", + // 1. 
empty wep + suite.ipam.nodeCache = map[string]*corev1.Node{ + "i-0": { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-0", + }, }, - }} - mwep := MockMultiworkloadEndpoint() - suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault).Create(suite.ctx, mwep, metav1.CreateOptions{}) + } - mwep1 := MockMultiworkloadEndpoint() - mwep1.Name = "busybox-1" - suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault).Create(suite.ctx, mwep1, metav1.CreateOptions{}) + cloudClient := suite.ipam.cloud.(*mockcloud.MockInterface) + eniList := &hpc.EniList{ + Result: []hpc.Result{ + { + EniID: "eni-0", + PrivateIPSet: []hpc.PrivateIP{ + { + Primary: true, + PrivateIPAddress: "10.1.1.0", + }, + { + Primary: false, + PrivateIPAddress: "10.1.1.1", + }, + { + Primary: false, + PrivateIPAddress: "10.1.1.2", + }, + }, + }, + }, + } + args1 := &hpc.EniBatchDeleteIPArgs{ + EniID: "eni-0", + PrivateIPAddresses: []string{"10.1.1.1"}, + } + args2 := &hpc.EniBatchDeleteIPArgs{ + EniID: "eni-0", + PrivateIPAddresses: []string{"10.1.1.2"}, + } + gomock.InOrder( + cloudClient.EXPECT().GetHPCEniID(gomock.Any(), gomock.Eq("i-0")).Return(eniList, nil), + cloudClient.EXPECT().BatchDeleteHpcEniPrivateIP(gomock.Any(), gomock.Eq(args1)).Return(nil), + cloudClient.EXPECT().BatchDeleteHpcEniPrivateIP(gomock.Any(), gomock.Eq(args2)).Return(nil), + ) - mwep2 := MockMultiworkloadEndpoint() - mwep2.Name = "busybox-2" - suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault).Create(suite.ctx, mwep2, metav1.CreateOptions{}) + gcErr := suite.ipam.gcLeakedIP(suite.ctx) + suite.Assert().Nil(gcErr) - mockInterface := suite.ipam.cloud.(*mockcloud.MockInterface).EXPECT() - mockInterface.GetHPCEniID(gomock.Any(), gomock.Any()).Return(&hpc.EniList{ - Result: []hpc.Result{{ - EniID: "eni-df8888fs", - PrivateIPSet: []hpc.PrivateIP{{ - Primary: false, - PrivateIPAddress: "192.168.1.179", - }, + // 2. 
delete leaked ip 10.1.1.2 + gomock.InOrder( + cloudClient.EXPECT().GetHPCEniID(gomock.Any(), gomock.Eq("i-0")).Return(eniList, nil), + cloudClient.EXPECT().BatchDeleteHpcEniPrivateIP(gomock.Any(), gomock.Eq(args2)).Return(nil), + ) + + mwep0 := &networkingv1alpha1.MultiIPWorkloadEndpoint{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-0", + Namespace: corev1.NamespaceDefault, + Finalizers: []string{"cce-cni.cce.io"}, + Labels: map[string]string{ + corev1.LabelInstanceType: "BBC", + ipamgeneric.MwepLabelInstanceTypeKey: ipamgeneric.MwepTypeRoce, }, }, + NodeName: "", + InstanceID: "i-0", + Type: ipamgeneric.MwepTypeRoce, + Spec: []networkingv1alpha1.MultiIPWorkloadEndpointSpec{ + { + EniID: "eni-0", + IP: "10.1.1.1", + }, }, - }, nil).AnyTimes() - err := suite.ipam.gcLeakedIP(suite.ctx, []*networkingv1alpha1.MultiIPWorkloadEndpoint{mwep, mwep1, mwep2}) - suite.Assert().NoError(err, "get mwep error") + } + _, err0 := suite.ipam.crdClient.CceV1alpha1().MultiIPWorkloadEndpoints(corev1.NamespaceDefault). 
+ Create(suite.ctx, mwep0, metav1.CreateOptions{}) + suite.Assert().Nil(err0) + waitForCacheSync(suite.ipam.kubeInformer, suite.ipam.crdInformer) + gcErr2 := suite.ipam.gcLeakedIP(suite.ctx) + suite.Assert().Nil(gcErr2) } func TestIPAMGC(t *testing.T) { diff --git a/pkg/eniipam/ipam/roce/ipam.go b/pkg/eniipam/ipam/roce/ipam.go index db681b3..93805f9 100644 --- a/pkg/eniipam/ipam/roce/ipam.go +++ b/pkg/eniipam/ipam/roce/ipam.go @@ -43,7 +43,7 @@ func NewIPAM( informerResyncPeriod time.Duration, eniSyncPeriod time.Duration, gcPeriod time.Duration, - debug bool, + _ bool, ) (ipam.RoceInterface, error) { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{ @@ -54,7 +54,7 @@ func NewIPAM( kubeInformer := informers.NewSharedInformerFactory(kubeClient, informerResyncPeriod) crdInformer := crdinformers.NewSharedInformerFactory(crdClient, informerResyncPeriod) - ipam := &IPAM{ + ipamServer := &IPAM{ eventBroadcaster: eventBroadcaster, eventRecorder: recorder, kubeInformer: kubeInformer, @@ -71,11 +71,11 @@ func NewIPAM( allocated: make(map[string]*v1alpha1.MultiIPWorkloadEndpoint), increasePoolEventChan: make(map[string]chan *event), } - return ipam, nil + return ipamServer, nil } const ( - // minPrivateIPLifeTime is the life time of a private ip (from allocation to release), aim to trade off db slave delay + // minPrivateIPLifeTime is the lifetime of a private ip (from allocation to release), aim to trade off db slave delay minPrivateIPLifeTime = 5 * time.Second rateLimitErrorSleepPeriod = time.Millisecond * 200 @@ -116,7 +116,8 @@ func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID str wep := &v1alpha1.WorkloadEndpoint{} roceNode := &corev1.Node{} - if node, exist := ipam.nodeCache[instanceID]; !exist { + node, exist := ipam.nodeCache[instanceID] + if !exist { // get node node, err = ipam.kubeInformer.Core().V1().Nodes().Lister().Get(pod.Spec.NodeName) if err != nil { @@ -125,14 +126,14 @@ func 
(ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID str } ipam.nodeCache[instanceID] = node - roceNode = node.DeepCopy() } + roceNode = node.DeepCopy() ipam.lock.Lock() defer ipam.lock.Unlock() mwep, err := ipam.crdInformer.Cce().V1alpha1().MultiIPWorkloadEndpoints().Lister().MultiIPWorkloadEndpoints(namespace).Get(name) if err == nil { - if mwep.Type != ipamgeneric.MwepType { + if mwep.Type != ipamgeneric.MwepTypeRoce { log.Warningf(ctx, "get mwep in %v/%v type is not roce", namespace, name) return nil, fmt.Errorf("get mwep in %v/%v type is not roce", namespace, name) } @@ -186,7 +187,7 @@ func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID str }, } } - specList := []v1alpha1.MultiIPWorkloadEndpointSpec{} + specList := make([]v1alpha1.MultiIPWorkloadEndpointSpec, 0) for _, spec := range mwepInfo { specList = append(specList, spec) } @@ -227,12 +228,12 @@ func (ipam *IPAM) Allocate(ctx context.Context, name, namespace, containerID str Namespace: pod.Namespace, Finalizers: []string{ipamgeneric.MwepFinalizer}, Labels: map[string]string{ - ipamgeneric.MwepLabelInstanceTypeKey: ipamgeneric.MwepType, + ipamgeneric.MwepLabelInstanceTypeKey: ipamgeneric.MwepTypeRoce, v1.LabelInstanceType: "BBC", }, }, NodeName: roceNode.Name, - Type: ipamgeneric.MwepType, + Type: ipamgeneric.MwepTypeRoce, InstanceID: instanceID, Spec: []v1alpha1.MultiIPWorkloadEndpointSpec{ { @@ -302,7 +303,7 @@ func (ipam *IPAM) tryAllocateIPForRoceIPPod( return ipResult, nil } -func (ipam *IPAM) Release(ctx context.Context, name, namespace, containerID string) (*v1alpha1.WorkloadEndpoint, error) { +func (ipam *IPAM) Release(ctx context.Context, name, namespace, _ string) (*v1alpha1.WorkloadEndpoint, error) { log.Infof(ctx, "[Release] releasing IP for roce pod (%v/%v) starts", namespace, name) defer log.Infof(ctx, "[Release] releasing IP for roce pod (%v/%v) ends", namespace, name) @@ -375,7 +376,7 @@ func (ipam *IPAM) removeAddIPBackoffCache(eniID string, 
lockless bool) bool { return ok } -func (ipam *IPAM) Ready(ctx context.Context) bool { +func (ipam *IPAM) Ready(_ context.Context) bool { return ipam.cacheHasSynced } @@ -432,7 +433,7 @@ func (ipam *IPAM) Run(ctx context.Context, stopCh <-chan struct{}) error { } }() - // k8sr resource and ip cache are synced + // k8s resource and ip cache are synced ipam.cacheHasSynced = true log.Infof(ctx, "ipam cacheHasSynced is: %v", ipam.cacheHasSynced) @@ -441,7 +442,11 @@ func (ipam *IPAM) Run(ctx context.Context, stopCh <-chan struct{}) error { } func (ipam *IPAM) DeleteNodeFromCache(node *v1.Node) error { - if _, exist := ipam.nodeCache[node.Name]; exist { + instanceId, err := util.GetInstanceIDFromNode(node) + if err != nil { + return fmt.Errorf("warning: cannot get instanceID of node %s", node.Name) + } + if _, exist := ipam.nodeCache[instanceId]; exist { delete(ipam.nodeCache, node.Name) } return nil @@ -475,7 +480,7 @@ func (ipam *IPAM) syncHpcENI(stopCh <-chan struct{}) error { log.Infof(ctx, "node cache count is %v", len(ipam.nodeCache)) var hpcEni *hpc.EniList - for instance, _ := range ipam.nodeCache { + for instance := range ipam.nodeCache { // get hpc eni id log.Infof(ctx, "get instanceId is %v in hpcEni interface", instance) hpcEni, err = ipam.cloud.GetHPCEniID(ctx, instance) @@ -606,11 +611,16 @@ func (ipam *IPAM) rebuildNodeInfoCache(ctx context.Context, node *v1.Node, insta func mwepListerSelector() (labels.Selector, error) { // for mwep owned by bcc, use selector "node.kubernetes.io/instance-type", to be compatible with old versions. 
- requirement, err := labels.NewRequirement(v1.LabelInstanceType, selection.In, []string{"BCC", "BBC", "GPU", "DCC"}) - if err != nil { - return nil, err + requireInstanceType, insErr := labels.NewRequirement(v1.LabelInstanceType, selection.In, []string{"BCC", "BBC", "GPU", "DCC"}) + if insErr != nil { + return nil, insErr } - return labels.NewSelector().Add(*requirement), nil + requireMwepType, typeErr := labels.NewRequirement(ipamgeneric.MwepLabelInstanceTypeKey, selection.Equals, + []string{ipamgeneric.MwepTypeRoce}) + if typeErr != nil { + return nil, typeErr + } + return labels.NewSelector().Add(*requireInstanceType, *requireMwepType), nil } func (ipam *IPAM) buildAllocatedCache(ctx context.Context) error { diff --git a/pkg/eniipam/ipam/roce/ipam_test.go b/pkg/eniipam/ipam/roce/ipam_test.go index 73df1c2..399ee51 100644 --- a/pkg/eniipam/ipam/roce/ipam_test.go +++ b/pkg/eniipam/ipam/roce/ipam_test.go @@ -313,7 +313,7 @@ func TestIPAM_Allocate(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "busybox", }, - Type: ipamgeneric.MwepType, + Type: ipamgeneric.MwepTypeRoce, NodeName: "test-node", Spec: []v1alpha1.MultiIPWorkloadEndpointSpec{ { diff --git a/pkg/eniipam/ipam/testing/mock_ipam.go b/pkg/eniipam/ipam/testing/mock_ipam.go index 3551c3d..ecb2042 100644 --- a/pkg/eniipam/ipam/testing/mock_ipam.go +++ b/pkg/eniipam/ipam/testing/mock_ipam.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam (interfaces: Interface,ExclusiveEniInterface) +// Source: types.go // Package testing is a generated GoMock package. package testing @@ -12,150 +12,231 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockInterface is a mock of Interface interface. +// MockInterface is a mock of Interface interface type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface. 
+// MockInterfaceMockRecorder is the mock recorder for MockInterface type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance. +// NewMockInterface creates a new mock instance func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// Allocate mocks base method. -func (m *MockInterface) Allocate(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.WorkloadEndpoint, error) { +// Allocate mocks base method +func (m *MockInterface) Allocate(ctx context.Context, name, namespace, containerID string) (*v1alpha1.WorkloadEndpoint, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Allocate", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Allocate", ctx, name, namespace, containerID) ret0, _ := ret[0].(*v1alpha1.WorkloadEndpoint) ret1, _ := ret[1].(error) return ret0, ret1 } -// Allocate indicates an expected call of Allocate. -func (mr *MockInterfaceMockRecorder) Allocate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// Allocate indicates an expected call of Allocate +func (mr *MockInterfaceMockRecorder) Allocate(ctx, name, namespace, containerID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Allocate", reflect.TypeOf((*MockInterface)(nil).Allocate), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Allocate", reflect.TypeOf((*MockInterface)(nil).Allocate), ctx, name, namespace, containerID) } -// Ready mocks base method. 
-func (m *MockInterface) Ready(arg0 context.Context) bool { +// Release mocks base method +func (m *MockInterface) Release(ctx context.Context, name, namespace, containerID string) (*v1alpha1.WorkloadEndpoint, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Ready", arg0) - ret0, _ := ret[0].(bool) - return ret0 + ret := m.ctrl.Call(m, "Release", ctx, name, namespace, containerID) + ret0, _ := ret[0].(*v1alpha1.WorkloadEndpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// Ready indicates an expected call of Ready. -func (mr *MockInterfaceMockRecorder) Ready(arg0 interface{}) *gomock.Call { +// Release indicates an expected call of Release +func (mr *MockInterfaceMockRecorder) Release(ctx, name, namespace, containerID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ready", reflect.TypeOf((*MockInterface)(nil).Ready), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockInterface)(nil).Release), ctx, name, namespace, containerID) } -// Release mocks base method. -func (m *MockInterface) Release(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.WorkloadEndpoint, error) { +// Ready mocks base method +func (m *MockInterface) Ready(ctx context.Context) bool { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Release", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(*v1alpha1.WorkloadEndpoint) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "Ready", ctx) + ret0, _ := ret[0].(bool) + return ret0 } -// Release indicates an expected call of Release. 
-func (mr *MockInterfaceMockRecorder) Release(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// Ready indicates an expected call of Ready +func (mr *MockInterfaceMockRecorder) Ready(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockInterface)(nil).Release), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ready", reflect.TypeOf((*MockInterface)(nil).Ready), ctx) } -// Run mocks base method. -func (m *MockInterface) Run(arg0 context.Context, arg1 <-chan struct{}) error { +// Run mocks base method +func (m *MockInterface) Run(ctx context.Context, stopCh <-chan struct{}) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Run", arg0, arg1) + ret := m.ctrl.Call(m, "Run", ctx, stopCh) ret0, _ := ret[0].(error) return ret0 } -// Run indicates an expected call of Run. -func (mr *MockInterfaceMockRecorder) Run(arg0, arg1 interface{}) *gomock.Call { +// Run indicates an expected call of Run +func (mr *MockInterfaceMockRecorder) Run(ctx, stopCh interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockInterface)(nil).Run), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockInterface)(nil).Run), ctx, stopCh) } -// MockExclusiveEniInterface is a mock of ExclusiveEniInterface interface. +// MockExclusiveEniInterface is a mock of ExclusiveEniInterface interface type MockExclusiveEniInterface struct { ctrl *gomock.Controller recorder *MockExclusiveEniInterfaceMockRecorder } -// MockExclusiveEniInterfaceMockRecorder is the mock recorder for MockExclusiveEniInterface. +// MockExclusiveEniInterfaceMockRecorder is the mock recorder for MockExclusiveEniInterface type MockExclusiveEniInterfaceMockRecorder struct { mock *MockExclusiveEniInterface } -// NewMockExclusiveEniInterface creates a new mock instance. 
+// NewMockExclusiveEniInterface creates a new mock instance func NewMockExclusiveEniInterface(ctrl *gomock.Controller) *MockExclusiveEniInterface { mock := &MockExclusiveEniInterface{ctrl: ctrl} mock.recorder = &MockExclusiveEniInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use. +// EXPECT returns an object that allows the caller to indicate expected use func (m *MockExclusiveEniInterface) EXPECT() *MockExclusiveEniInterfaceMockRecorder { return m.recorder } -// Allocate mocks base method. -func (m *MockExclusiveEniInterface) Allocate(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.CrossVPCEni, error) { +// Allocate mocks base method +func (m *MockExclusiveEniInterface) Allocate(ctx context.Context, name, namespace, containerID string) (*v1alpha1.CrossVPCEni, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Allocate", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Allocate", ctx, name, namespace, containerID) ret0, _ := ret[0].(*v1alpha1.CrossVPCEni) ret1, _ := ret[1].(error) return ret0, ret1 } -// Allocate indicates an expected call of Allocate. -func (mr *MockExclusiveEniInterfaceMockRecorder) Allocate(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// Allocate indicates an expected call of Allocate +func (mr *MockExclusiveEniInterfaceMockRecorder) Allocate(ctx, name, namespace, containerID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Allocate", reflect.TypeOf((*MockExclusiveEniInterface)(nil).Allocate), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Allocate", reflect.TypeOf((*MockExclusiveEniInterface)(nil).Allocate), ctx, name, namespace, containerID) } -// Release mocks base method. 
-func (m *MockExclusiveEniInterface) Release(arg0 context.Context, arg1, arg2, arg3 string) (*v1alpha1.CrossVPCEni, error) { +// Release mocks base method +func (m *MockExclusiveEniInterface) Release(ctx context.Context, name, namespace, containerID string) (*v1alpha1.CrossVPCEni, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Release", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "Release", ctx, name, namespace, containerID) ret0, _ := ret[0].(*v1alpha1.CrossVPCEni) ret1, _ := ret[1].(error) return ret0, ret1 } -// Release indicates an expected call of Release. -func (mr *MockExclusiveEniInterfaceMockRecorder) Release(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +// Release indicates an expected call of Release +func (mr *MockExclusiveEniInterfaceMockRecorder) Release(ctx, name, namespace, containerID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockExclusiveEniInterface)(nil).Release), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockExclusiveEniInterface)(nil).Release), ctx, name, namespace, containerID) } -// Run mocks base method. -func (m *MockExclusiveEniInterface) Run(arg0 context.Context, arg1 <-chan struct{}) error { +// Run mocks base method +func (m *MockExclusiveEniInterface) Run(ctx context.Context, stopCh <-chan struct{}) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Run", arg0, arg1) + ret := m.ctrl.Call(m, "Run", ctx, stopCh) ret0, _ := ret[0].(error) return ret0 } -// Run indicates an expected call of Run. 
-func (mr *MockExclusiveEniInterfaceMockRecorder) Run(arg0, arg1 interface{}) *gomock.Call { +// Run indicates an expected call of Run +func (mr *MockExclusiveEniInterfaceMockRecorder) Run(ctx, stopCh interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockExclusiveEniInterface)(nil).Run), ctx, stopCh) +} + +// MockRoceInterface is a mock of RoceInterface interface +type MockRoceInterface struct { + ctrl *gomock.Controller + recorder *MockRoceInterfaceMockRecorder +} + +// MockRoceInterfaceMockRecorder is the mock recorder for MockRoceInterface +type MockRoceInterfaceMockRecorder struct { + mock *MockRoceInterface +} + +// NewMockRoceInterface creates a new mock instance +func NewMockRoceInterface(ctrl *gomock.Controller) *MockRoceInterface { + mock := &MockRoceInterface{ctrl: ctrl} + mock.recorder = &MockRoceInterfaceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRoceInterface) EXPECT() *MockRoceInterfaceMockRecorder { + return m.recorder +} + +// Allocate mocks base method +func (m *MockRoceInterface) Allocate(ctx context.Context, name, namespace, containerID, mac string) (*v1alpha1.WorkloadEndpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Allocate", ctx, name, namespace, containerID, mac) + ret0, _ := ret[0].(*v1alpha1.WorkloadEndpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Allocate indicates an expected call of Allocate +func (mr *MockRoceInterfaceMockRecorder) Allocate(ctx, name, namespace, containerID, mac interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Allocate", reflect.TypeOf((*MockRoceInterface)(nil).Allocate), ctx, name, namespace, containerID, mac) +} + +// Release mocks base method +func (m *MockRoceInterface) Release(ctx context.Context, name, namespace, containerID string) 
(*v1alpha1.WorkloadEndpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Release", ctx, name, namespace, containerID) + ret0, _ := ret[0].(*v1alpha1.WorkloadEndpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Release indicates an expected call of Release +func (mr *MockRoceInterfaceMockRecorder) Release(ctx, name, namespace, containerID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockRoceInterface)(nil).Release), ctx, name, namespace, containerID) +} + +// Run mocks base method +func (m *MockRoceInterface) Run(ctx context.Context, stopCh <-chan struct{}) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Run", ctx, stopCh) + ret0, _ := ret[0].(error) + return ret0 +} + +// Run indicates an expected call of Run +func (mr *MockRoceInterfaceMockRecorder) Run(ctx, stopCh interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockRoceInterface)(nil).Run), ctx, stopCh) +} + +// Ready mocks base method +func (m *MockRoceInterface) Ready(ctx context.Context) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Ready", ctx) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Ready indicates an expected call of Ready +func (mr *MockRoceInterfaceMockRecorder) Ready(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*MockExclusiveEniInterface)(nil).Run), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ready", reflect.TypeOf((*MockRoceInterface)(nil).Ready), ctx) } diff --git a/pkg/eniipam/ipam/types.go b/pkg/eniipam/ipam/types.go index a1acbdc..0f4f725 100644 --- a/pkg/eniipam/ipam/types.go +++ b/pkg/eniipam/ipam/types.go @@ -25,8 +25,9 @@ import ( ) const ( - WepTypeSts = "StatefulSet" - WepTypePod = "Pod" + WepTypeSts = "StatefulSet" + WepTypePod = "Pod" + WepTypeReuseIPPod = 
"ReuseIPPod" WepLabelStsOwnerKey = "cce.io/owner" WepLabelSubnetIDKey = "cce.io/subnet-id" @@ -34,7 +35,8 @@ const ( WepFinalizer = "cce-cni.cce.io" IPPoolCreationSourceCNI = "cce-cni" - MwepType = "roce" + MwepTypeRoce = "roce" + MwepTypeERI = "eri" MwepLabelInstanceTypeKey = "cce.io/instance-type" MwepFinalizer = "cce-cni-roce.cce.io" // Ref: https://github.com/kubernetes/kubernetes/pull/71653 diff --git a/pkg/iprange/range_pool.go b/pkg/iprange/range_pool.go new file mode 100644 index 0000000..b662ddc --- /dev/null +++ b/pkg/iprange/range_pool.go @@ -0,0 +1,297 @@ +package iprange + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "net" + "sort" + + k8sutilnet "k8s.io/utils/net" + + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/cidr" +) + +// IPFilter filte unavailable IP addresses +// this interface is used for filtering IP addresses which have been allocated by ipam +type IPFilter interface { + FilterIP(ip net.IP) bool +} + +type subnetUnivaluedIP struct { + cidr *net.IPNet +} + +func (p *subnetUnivaluedIP) FilterIP(ip net.IP) bool { + if !p.cidr.Contains(ip) { + return true + } + return cidr.IsUnicastIP(ip, p.cidr.String()) +} + +// RangePool manager ip pool control by range of ip +type RangePool struct { + sbnRange map[string][]*big.Int + ipFilter []IPFilter +} + +// NewCustomRangePool create new ip range manager +// This object is used by custom mode of ipam +func NewCustomRangePool(psts *networkingv1alpha1.PodSubnetTopologySpread, ipFilter ...IPFilter) (*RangePool, error) { + manager := &RangePool{ + sbnRange: make(map[string][]*big.Int), + ipFilter: ipFilter, + } + for sbnID, sa := range psts.Spec.Subnets { + if sbnStatus, ok := psts.Status.AvailableSubnets[sbnID]; ok { + _, subnetCIDR, err := net.ParseCIDR(sbnStatus.CIDR) + if err != nil { + return nil, err + } + ipRange, err := ListAllCustomIPRangeIndexs(subnetCIDR, sa.Custom) + if 
err != nil { + return nil, fmt.Errorf("sbnnet %s %w", sbnID, err) + } + manager.sbnRange[sbnID] = ipRange + manager.ipFilter = append(manager.ipFilter, &subnetUnivaluedIP{cidr: subnetCIDR}) + + } + } + if len(manager.sbnRange) == 0 { + return nil, errors.New("no index range") + } + return manager, nil +} + +// IPInRange Whether to include the specified IP in the custom IPRange +func (pool *RangePool) IPInRange(ip string) bool { + netIP := net.ParseIP(ip) + if netIP == nil { + return false + } + + ipIndex := k8sutilnet.BigForIP(netIP) + + for _, indexRange := range pool.sbnRange { + if isInRangeIndex(ipIndex, indexRange) { + return true + } + } + + return false +} + +// FirstAvailableIP try to get the first available IP +// Return: IP address can be use for pod +// +// nil if no ip can be used +func (pool *RangePool) FirstAvailableIP(sbnID string) net.IP { + indexRange, ok := pool.sbnRange[sbnID] + if ok { + for i := 0; i+1 < len(indexRange); i = i + 2 { + for ipInt := indexRange[i]; ipInt.Cmp(indexRange[i+1]) <= 0; ipInt = big.NewInt(0).Add(ipInt, big.NewInt(int64(1))) { + ip := BytesToIP(ipInt.Bytes()) + if pool.FilterIP(ip) { + return ip + } + } + + } + } + return nil +} + +// FilteIP return true if the IP can be used +func (pool *RangePool) FilterIP(ip net.IP) bool { + for _, filter := range pool.ipFilter { + if !filter.FilterIP(ip) { + return false + } + } + return true +} + +func isInRangeIndex(origin *big.Int, indexRange []*big.Int) bool { + for i := 0; i+1 < len(indexRange); i = i + 2 { + if origin.Cmp(indexRange[i]) >= 0 && origin.Cmp(indexRange[i+1]) <= 0 { + return true + } + } + return false +} + +func (pool *RangePool) String() string { + pooklMap := make(map[string][]string) + for sbnID, rs := range pool.sbnRange { + for _, bint := range rs { + pooklMap[sbnID] = append(pooklMap[sbnID], BytesToIP(bint.Bytes()).String()) + } + } + b, _ := json.Marshal(pooklMap) + return string(b) +} + +// ListAllCustomIPRangeIndexs The IP address range is listed in a 
list. Every two elements of the returned array are a group +// eg: [1,10, 10,100, 109,107] +// In the example array: +// if v=2, then v is in this element list. +// If v=101, v is not in this list +func ListAllCustomIPRangeIndexs(subnetCIDR *net.IPNet, customs []networkingv1alpha1.CustomAllocation) (indexRanges []*big.Int, err error) { + var ipVersion k8sutilnet.IPFamily = k8sutilnet.IPv4 + if subnetCIDR.IP.To4() == nil { + ipVersion = k8sutilnet.IPv6 + } + + for _, custom := range customs { + if custom.Family == ipVersion { + for _, r := range custom.CustomIPRange { + start, end := customerRangeToBigInt(r) + if start == nil || end == nil { + return nil, fmt.Errorf("") + } + if !subnetCIDR.Contains(BytesToIP(start.Bytes())) || !subnetCIDR.Contains(BytesToIP(end.Bytes())) { + return nil, fmt.Errorf("ip %s %s not in cidr range %s", r.Start, r.End, subnetCIDR.String()) + } + indexRanges = append(indexRanges, start, end) + } + } + } + + if !overlappingCheck(indexRanges) { + return nil, fmt.Errorf("overlapping IP Range") + } + + // fill ip ranges + sort.Slice(indexRanges, func(i, j int) bool { + return indexRanges[i].Cmp(indexRanges[j]) == -1 + }) + + if len(indexRanges) == 0 { + // first ip of cidr + firstIndex := k8sutilnet.BigForIP(subnetCIDR.IP) + lastIndex := big.NewInt(0).Add(firstIndex, big.NewInt(k8sutilnet.RangeSize(subnetCIDR))) + indexRanges = append(indexRanges, firstIndex, lastIndex) + } + + return +} + +func customerRangeToBigInt(r networkingv1alpha1.CustomIPRange) (start, end *big.Int) { + if startIP := net.ParseIP(r.Start); startIP != nil { + start = k8sutilnet.BigForIP(startIP) + } + if endIP := net.ParseIP(r.End); endIP != nil { + end = k8sutilnet.BigForIP(endIP) + } + return +} + +// BytesToIP translate []bytes to net.IP +func BytesToIP(r []byte) net.IP { + r = append(make([]byte, 16), r...) 
+ return net.IP(r[len(r)-16:]) +} + +// NewCIDRRangePool create new ip range manager +// This object is used by fixed IP and manually assigned IP +func NewCIDRRangePool(psts *networkingv1alpha1.PodSubnetTopologySpread, ipFilter ...IPFilter) (*RangePool, error) { + manager := &RangePool{ + sbnRange: make(map[string][]*big.Int), + ipFilter: ipFilter, + } + for sbnID, sa := range psts.Spec.Subnets { + if sbnStatus, ok := psts.Status.AvailableSubnets[sbnID]; ok { + _, subnetCIDR, err := net.ParseCIDR(sbnStatus.CIDR) + if err != nil { + return nil, err + } + ipRange, err := ListAllCIDRIPRangeIndexs(subnetCIDR, sa) + if err != nil { + return nil, err + } + manager.sbnRange[sbnID] = ipRange + manager.ipFilter = append(manager.ipFilter, &subnetUnivaluedIP{cidr: subnetCIDR}) + + } + } + if len(manager.sbnRange) == 0 { + return nil, errors.New("no index range") + } + return manager, nil +} + +// ListAllCIDRIPRangeIndexs The IP address range is listed in a list. Every two elements of the returned array are a group +// eg: [1,10, 10,100, 109,107] +// In the example array: +// if v=2, then v is in this element list. 
+// If v=101, v is not in this list +func ListAllCIDRIPRangeIndexs(subnetCIDR *net.IPNet, sa networkingv1alpha1.SubnetAllocation) (indexRanges []*big.Int, err error) { + ipVersion := 4 + if subnetCIDR.IP.To4() == nil { + ipVersion = 6 + } + + if ipVersion == 4 { + for _, str := range sa.IPv4 { + ip := net.ParseIP(str) + if ip != nil { + ipint := k8sutilnet.BigForIP(ip) + if !subnetCIDR.Contains(ip) { + return nil, fmt.Errorf("ip not in cidr range") + } + indexRanges = append(indexRanges, ipint, ipint) + } + } + + cidrs, err := k8sutilnet.ParseCIDRs(sa.IPv4Range) + if err != nil { + return nil, err + } + for _, cidr := range cidrs { + first := k8sutilnet.BigForIP(cidr.IP) + last := big.NewInt(0).Add(first, big.NewInt(k8sutilnet.RangeSize(cidr)-1)) + if !subnetCIDR.Contains(BytesToIP(first.Bytes())) || !subnetCIDR.Contains(BytesToIP(last.Bytes())) { + return nil, fmt.Errorf("ip not in cidr range") + } + indexRanges = append(indexRanges, first, last) + } + } + + if !overlappingCheck(indexRanges) { + return nil, fmt.Errorf("overlapping IP Range") + } + + // fill ip ranges + sort.Slice(indexRanges, func(i, j int) bool { + return indexRanges[i].Cmp(indexRanges[j]) == -1 + }) + + if len(indexRanges) == 0 { + // first ip of cidr + firstIndex := k8sutilnet.BigForIP(subnetCIDR.IP) + lastIndex := big.NewInt(0).Add(firstIndex, big.NewInt(k8sutilnet.RangeSize(subnetCIDR)-1)) + indexRanges = append(indexRanges, firstIndex, lastIndex) + } + + return +} + +// Overlapping detection range +// Compare each group of ranges. 
The start and end of other groups cannot overlap +// return true if each goup does not overlap +func overlappingCheck(indexRanges []*big.Int) bool { + for i := 0; i+1 < len(indexRanges); i = i + 2 { + for j := i + 2; j+1 < len(indexRanges); j = j + 2 { + a := indexRanges[i].Cmp(indexRanges[j]) + b := indexRanges[i+1].Cmp(indexRanges[j]) + c := indexRanges[i].Cmp(indexRanges[j+1]) + d := indexRanges[i+1].Cmp(indexRanges[j+1]) + if a != b || b != c || c != d { + return false + } + } + } + return true +} diff --git a/pkg/iprange/range_pool_test.go b/pkg/iprange/range_pool_test.go new file mode 100644 index 0000000..af680e3 --- /dev/null +++ b/pkg/iprange/range_pool_test.go @@ -0,0 +1,237 @@ +package iprange + +import ( + "net" + "testing" + + networkingv1alpha1 "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apis/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/test/data" + "github.com/stretchr/testify/suite" + "k8s.io/apimachinery/pkg/labels" + k8sutilnet "k8s.io/utils/net" +) + +type CustomRangePoolTest struct { + suite.Suite +} + +// 每次测试前设置上下文 +func (suite *CustomRangePoolTest) SetupTest() { + +} + +// 模拟全过滤器 +type mockFilter struct{} + +func (*mockFilter) FilterIP(ip net.IP) bool { + return true +} + +func (suite *CustomRangePoolTest) TestSimpleRangePool() { + sbn := data.MockSubnet("default", "sbn-test", "10.7.8.0/24") + l := labels.Set{"app": "demo"} + psts := data.MockPodSubnetTopologySpreadWithSubnet("default", "psts-test", sbn, l) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + TTL: networkingv1alpha1.DefaultReuseIPTTL, + EnableReuseIPAddress: true, + } + + // case1: with start and end + allocation := psts.Spec.Subnets["sbn-test"] + custom := networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + Start: "10.7.8.56", + End: "10.7.8.57", + }, + }, + 
} + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + filter := []IPFilter{&mockFilter{}} + pool, err := NewCustomRangePool(psts, filter...) + if suite.NoErrorf(err, "create pool error") { + if suite.Truef(pool.IPInRange("10.7.8.56"), "ip not in range") { + suite.Equalf("10.7.8.56", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + } + } + + // case2: no start and end + custom = networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{}, + } + allocation.Custom = []networkingv1alpha1.CustomAllocation{custom} + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCustomRangePool(psts, filter...) + if suite.NoErrorf(err, "create pool error") { + if suite.Truef(pool.IPInRange("10.7.8.56"), "ip should in range") { + suite.Equalf("10.7.8.2", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + } + suite.Falsef(pool.IPInRange("10.7.7.56"), "ip should not in range") + } + + // case 3: mutiple custom ip range + allocation = psts.Spec.Subnets["sbn-test"] + custom = networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + Start: "10.7.8.56", + End: "10.7.8.190", + }, + { + Start: "10.7.8.58", + End: "10.7.8.190", + }, + }, + } + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCustomRangePool(psts, filter...) + suite.Errorf(err, "overlapping error") + + allocation = psts.Spec.Subnets["sbn-test"] + custom = networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + End: "10.7.8.190", + }, + }, + } + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCustomRangePool(psts, filter...) 
+ suite.Errorf(err, "start is nil") + + // case 5: no range + allocation.Custom = []networkingv1alpha1.CustomAllocation{} + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCustomRangePool(psts, filter...) + if suite.NoErrorf(err, "create pool error") { + if suite.Truef(pool.IPInRange("10.7.8.56"), "ip should in range") { + suite.Equalf("10.7.8.2", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + } + suite.Falsef(pool.IPInRange("10.7."), "ip should in range") + suite.Falsef(pool.IPInRange("10.7.7.56"), "ip should not in range") + } +} + +func (suite *CustomRangePoolTest) TestSimpleRangePoolNoStartAndEnd() { + sbn := data.MockSubnet("default", "sbn-test", "10.7.8.0/24") + l := labels.Set{"app": "demo"} + psts := data.MockPodSubnetTopologySpreadWithSubnet("default", "psts-test", sbn, l) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + TTL: networkingv1alpha1.DefaultReuseIPTTL, + EnableReuseIPAddress: true, + } + allocation := psts.Spec.Subnets["sbn-test"] + custom := networkingv1alpha1.CustomAllocation{ + Family: k8sutilnet.IPv4, + CustomIPRange: []networkingv1alpha1.CustomIPRange{ + { + Start: "10.7.8.56", + End: "10.7.8.57", + }, + }, + } + allocation.Custom = append(allocation.Custom, custom) + psts.Spec.Subnets["sbn-test"] = allocation + filter := []IPFilter{&mockFilter{}} + pool, err := NewCustomRangePool(psts, filter...) + if suite.NoErrorf(err, "create pool error") { + if suite.Truef(pool.IPInRange("10.7.8.56"), "ip not in range") { + suite.Equalf("10.7.8.56", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + } + } + + // case no subnet + psts.Spec.Subnets = make(map[string]networkingv1alpha1.SubnetAllocation) + pool, err = NewCustomRangePool(psts, filter...) 
+ suite.Errorf(err, "create pool error") +} + +func (suite *CustomRangePoolTest) TestSimpleCIDRPool() { + sbn := data.MockSubnet("default", "sbn-test", "10.7.8.0/24") + l := labels.Set{"app": "demo"} + psts := data.MockPodSubnetTopologySpreadWithSubnet("default", "psts-test", sbn, l) + psts.Spec.Strategy = &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeManual, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + } + allocation := psts.Spec.Subnets["sbn-test"] + + allocation.IPv4Range = []string{"10.7.8.4/30"} + + psts.Spec.Subnets["sbn-test"] = allocation + filter := []IPFilter{&mockFilter{}} + pool, err := NewCIDRRangePool(psts, filter...) + if suite.NoErrorf(err, "create pool error") { + if suite.Truef(pool.IPInRange("10.7.8.4"), "ip not in range") { + suite.Equalf("10.7.8.4", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + } + } + + // case with cidr + allocation.IPv4Range = []string{} + allocation.IPv4 = []string{} + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCIDRRangePool(psts, filter...) + if suite.NoErrorf(err, "create pool error") { + if suite.Truef(pool.IPInRange("10.7.8.2"), "ip not in range") { + suite.Equalf("10.7.8.2", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + } + } + + // case 2: with ip list + allocation.IPv4Range = []string{"10.7.8.4/30"} + allocation.IPv4 = []string{"10.7.8.128", "10.7.8.192"} + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCIDRRangePool(psts, filter...) 
+ if suite.NoErrorf(err, "create pool error") { + _ = pool.String() + if suite.Falsef(pool.IPInRange("10.7.8.10"), "ip not in range") { + suite.Equalf("10.7.8.4", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + suite.Equalf("10.7.8.4", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + suite.Equalf("10.7.8.4", pool.FirstAvailableIP("sbn-test").String(), "first ip not euqal") + } + suite.Falsef(pool.IPInRange("10.7.8.129"), "ip not in range") + suite.Truef(pool.IPInRange("10.7.8.128"), "ip not in range") + } + + // case 3: with ip list + allocation.IPv4Range = []string{"10.7.8.4/30"} + allocation.IPv4 = []string{"10.7.8.7", "10.7.8.19"} + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCIDRRangePool(psts, filter...) + suite.Errorf(err, "create pool error") + + // case 4: not in range + allocation.IPv4Range = []string{"10.7.8.4/30"} + allocation.IPv4 = []string{"10.7.7.7", "10.7.8.19"} + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCIDRRangePool(psts, filter...) + suite.Errorf(err, "create pool error") + + // case 5 : cidr not in range + allocation.IPv4Range = []string{"10.7.7.4/30"} + allocation.IPv4 = []string{"10.7.8.7", "10.7.8.19"} + psts.Spec.Subnets["sbn-test"] = allocation + pool, err = NewCIDRRangePool(psts, filter...) + suite.Errorf(err, "create pool error") + + // case 6 : no subnet + psts.Spec.Subnets = make(map[string]networkingv1alpha1.SubnetAllocation) + pool, err = NewCIDRRangePool(psts, filter...) 
+ suite.Errorf(err, "create pool error") +} + +func TestIPRange(t *testing.T) { + suite.Run(t, new(CustomRangePoolTest)) +} diff --git a/pkg/nodeagent/controller/cniconf/cni_conf_controller.go b/pkg/nodeagent/controller/cniconf/cni_conf_controller.go index 9c0b1c9..01d7d8a 100644 --- a/pkg/nodeagent/controller/cniconf/cni_conf_controller.go +++ b/pkg/nodeagent/controller/cniconf/cni_conf_controller.go @@ -40,8 +40,7 @@ import ( v1alpha1network "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/listers/networking/v1alpha1" utilenv "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/env" utilpool "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/ippool" - roce "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/roce" - + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/roce" fsutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/fs" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/kernel" log "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" @@ -53,6 +52,9 @@ const ( ipvlanKernelModuleName = "ipvlan" vethDefaultMTU = 1500 forceRecreatePeriod = 60 * time.Second + + pluginsConfigKey = "plugins" + eriVifFeatures = "elastic_rdma" ) func New( @@ -76,18 +78,26 @@ func New( } } -func (c *Controller) SyncNode(nodeKey string, nodeLister corelisters.NodeLister) error { +func (c *Controller) SyncNode(nodeKey string, _ corelisters.NodeLister) error { ctx := log.NewContext() return c.syncCNIConfig(ctx, nodeKey) } func (c *Controller) ReconcileCNIConfig() { - wait.PollImmediateInfinite(forceRecreatePeriod, func() (bool, error) { + ctx := log.NewContext() + + waitErr := wait.PollImmediateInfinite(forceRecreatePeriod, func() (bool, error) { ctx := log.NewContext() - c.syncCNIConfig(ctx, c.nodeName) + syncErr := c.syncCNIConfig(ctx, c.nodeName) + if syncErr != nil { + log.Errorf(ctx, "syncCNIConfig error: %s", syncErr) + } return false, nil }) + if waitErr != nil { + log.Errorf(ctx, 
"execute syncCNIConfig error: %s", waitErr) + } } func (c *Controller) syncCNIConfig(ctx context.Context, nodeName string) error { @@ -137,13 +147,13 @@ func (c *Controller) syncCNIConfig(ctx context.Context, nodeName string) error { return nil } -func (c *Controller) getCNIConfigTemplateFilePath(ctx context.Context) (string, error) { +func (c *Controller) getCNIConfigTemplateFilePath(_ context.Context) (string, error) { if c.config.AutoDetectConfigTemplateFile { - filepath, ok := CCETemplateFilePathMap[c.cniMode] + tplFilePath, ok := CCETemplateFilePathMap[c.cniMode] if !ok { return "", fmt.Errorf("cannot find cni template file for cni mode %v", c.cniMode) } - return filepath, nil + return tplFilePath, nil } return c.config.CNIConfigTemplateFile, nil @@ -243,16 +253,16 @@ func (c *Controller) fillCNIConfigData(ctx context.Context) (*CNIConfigData, err configData.IPAMEndPoint = fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, svc.Spec.Ports[0].Port) } - if types.IsCCECNIModeBasedOnSecondaryIP(c.cniMode) || types.IsCrossVPCEniMode(c.cniMode) { - // get instance type from meta - if configData.InstanceType == "" { - insType, err := c.getNodeInstanceTypeEx(ctx) - if err != nil { - log.Errorf(ctx, "failed to get instance type via metadata: %v", err) - } - configData.InstanceType = string(insType) + // get instance type from meta + if configData.InstanceType == "" { + insType, err := c.getNodeInstanceTypeEx(ctx) + if err != nil { + log.Errorf(ctx, "failed to get instance type via metadata: %v", err) } + configData.InstanceType = string(insType) + } + if types.IsCCECNIModeBasedOnSecondaryIP(c.cniMode) || types.IsCrossVPCEniMode(c.cniMode) { if configData.InstanceType == string(metadata.InstanceTypeExBCC) { if c.cniMode == types.CCEModeBBCSecondaryIPIPVlan { c.cniMode = types.CCEModeSecondaryIPIPVlan @@ -367,21 +377,20 @@ func renderTemplate(ctx context.Context, tplContent string, dataObject *CNIConfi } func (c *Controller) patchCNIConfig(ctx context.Context, oriYamlStr string, 
dataObject *CNIConfigData) (string, error) { - var str string - var err error - - str = oriYamlStr + if dataObject.InstanceType == string(metadata.InstanceTypeExBBC) { + return c.patchCNIConfigForMellanox8(ctx, oriYamlStr, dataObject) + } - str, err = c.patchCNIConfigForMellanox8(ctx, str, dataObject) - if err != nil { - return oriYamlStr, err + if dataObject.InstanceType == string(metadata.InstanceTypeExBCC) { + return c.patchCNIConfigForERI(ctx, oriYamlStr, dataObject) } - return str, nil + log.Errorf(ctx, "unknown instance type: %s", dataObject.InstanceType) + return oriYamlStr, nil } func (c *Controller) patchCNIConfigForMellanox8(ctx context.Context, oriYamlStr string, dataObject *CNIConfigData) (string, error) { - var bHasRoCEMellanox8 bool = false + var bHasRoCEMellanox8 = false var b bool var key string var err error @@ -398,20 +407,20 @@ func (c *Controller) patchCNIConfigForMellanox8(ctx context.Context, oriYamlStr //if etc... //exit if no need to update if !bHasRoCEMellanox8 { - log.Warningf(ctx, "No RoCE Mellanox8 Available. bHasRoCEMellanox8:[%t]", bHasRoCEMellanox8) + log.Infof(ctx, "No RoCE Mellanox8 Available. 
bHasRoCEMellanox8:[%t]", bHasRoCEMellanox8) goto unchanged } // log.Infof("arrPlugins:[%+v]", arrPlugins); //do Update - m = make(map[string](interface{})) + m = make(map[string]interface{}) err = json.Unmarshal([]byte(oriYamlStr), &m) if err != nil { log.Errorf(ctx, "oriYamlStr: [%s]", oriYamlStr) return "", err } - key = "plugins" /** optional */ + key = pluginsConfigKey /** optional */ if _, b = m[key]; !b { log.Errorf(ctx, ": ", "") goto unchanged @@ -427,6 +436,7 @@ func (c *Controller) patchCNIConfigForMellanox8(ctx context.Context, oriYamlStr mR["type"] = "rdma" mR["ipam"] = make(map[string]interface{}) mR["ipam"].(map[string]interface{})["endpoint"] = dataObject.IPAMEndPoint + mR["instanceType"] = dataObject.InstanceType //arrPlugins = append(arrPlugins, mR) m[key] = append(m[key].([]interface{}), mR) @@ -439,6 +449,62 @@ unchanged: return oriYamlStr, nil } +func (c *Controller) patchCNIConfigForERI(ctx context.Context, oriYamlStr string, dataObject *CNIConfigData) (string, error) { + // has eri + hasERI, eriErr := c.hasERI(ctx) + if eriErr != nil { + log.Errorf(ctx, "check eri failed: %s", eriErr) + return oriYamlStr, nil + } + + if !hasERI { + return oriYamlStr, nil + } + // patch eri info to cni config + oriConfigMap := make(map[string]interface{}) + jsonErr := json.Unmarshal([]byte(oriYamlStr), &oriConfigMap) + if jsonErr != nil { + log.Errorf(ctx, "invalid format oriYamlStr: [%s]", oriYamlStr) + return "", jsonErr + } + if _, ok := oriConfigMap[pluginsConfigKey]; !ok { + oriConfigMap[pluginsConfigKey] = make([]interface{}, 0) + } + + eriConfigMap := map[string]interface{}{ + "type": "eri", + "ipam": map[string]string{ + "endpoint": dataObject.IPAMEndPoint, + }, + "instanceType": dataObject.InstanceType, + } + oriConfigMap[pluginsConfigKey] = append(oriConfigMap[pluginsConfigKey].([]interface{}), eriConfigMap) + + newConfig, jsonErr := json.MarshalIndent(&oriConfigMap, "", " ") + return string(newConfig), nil +} + +func (c *Controller) hasERI(ctx 
context.Context) (bool, error) { + // list network interface macs + macList, macErr := c.metaClient.ListMacs() + if macErr != nil { + return false, macErr + } + + // check whether there is ERI + for _, macAddress := range macList { + vifFeatures, vifErr := c.metaClient.GetVifFeatures(macAddress) + if vifErr != nil { + log.Errorf(ctx, "get mac %s vif features failed: %s", macAddress, vifErr) + continue + } + if vifFeatures == eriVifFeatures { + return true, nil + } + } + return false, nil +} + func canUseIPVlan(kernelVersion *version.Version, kernelModules []string) bool { if kernelVersion == nil || kernelVersion.LessThan(version.MustParseGeneric(ipvlanRequiredKernelVersion)) { return false diff --git a/pkg/nodeagent/controller/cniconf/cni_conf_controller_test.go b/pkg/nodeagent/controller/cniconf/cni_conf_controller_test.go index 060665b..0fcd15a 100644 --- a/pkg/nodeagent/controller/cniconf/cni_conf_controller_test.go +++ b/pkg/nodeagent/controller/cniconf/cni_conf_controller_test.go @@ -659,7 +659,7 @@ func Test_renderTemplate(t *testing.T) { } } -func Test_patchCNIConfig(t *testing.T) { +func Test_patchCNIConfigForBBC(t *testing.T) { type fields struct { ctrl *gomock.Controller kubeClient kubernetes.Interface @@ -708,12 +708,13 @@ func Test_patchCNIConfig(t *testing.T) { IPAMEndPoint: "10.0.0.2:80", VethMTU: 1500, MasterInterface: "eth0", + InstanceType: string(metadata.InstanceTypeExBBC), }, }, want: `{"cniVersion":"0.3.1","name":"cce-cni","plugins":[{"enableARPProxy":true,"ipam":{"ranges":` + `[[{"subnet":"10.1.3.0/24"}]],"routes":[{"dst":"0.0.0.0/0"}],"type":"host-local"},"mtu":1500,"type":"ptp"` + `,"vethPrefix":"veth"},{"capabilities":{"portMappings":true},"externalSetMarkChain":"KUBE-MARK-MASQ","type"` + - `:"portmap"},{"ipam":{"endpoint":"10.0.0.2:80"},"type":"rdma"}]}`, + `:"portmap"},{"instanceType":"bbc","ipam":{"endpoint":"10.0.0.2:80"},"type":"rdma"}]}`, wantErr: false, }, /** exceptional 1: no roce mallanox8 available */ @@ -741,6 +742,7 @@ func 
Test_patchCNIConfig(t *testing.T) { IPAMEndPoint: "10.0.0.3:80", VethMTU: 1500, MasterInterface: "eth0", + InstanceType: string(metadata.InstanceTypeExBBC), }, }, want: `{"name":"cce-cni","cniVersion":"0.3.1","plugins":[{"type":"ptp","enableARPProxy":true,` + @@ -774,6 +776,7 @@ func Test_patchCNIConfig(t *testing.T) { IPAMEndPoint: "10.0.0.3:80", VethMTU: 1500, MasterInterface: "eth0", + InstanceType: string(metadata.InstanceTypeExBBC), }, }, want: `{"name":"cce-cni","cniVersion":"0.3.1","plugxxx":[{"type":"ptp","enableARPProxy":true,"vethPrefix":` + @@ -806,6 +809,7 @@ func Test_patchCNIConfig(t *testing.T) { IPAMEndPoint: "10.0.0.3:80", VethMTU: 1500, MasterInterface: "eth0", + InstanceType: string(metadata.InstanceTypeExBBC), }, }, want: `{"name":"cce-cni","cniVersion":"0.3.1","plugxxx":{"type":"ptp","enableARPProxy":true,` + @@ -857,3 +861,108 @@ func Test_patchCNIConfig(t *testing.T) { } } + +func Test_patchCNIConfigForBCC(t *testing.T) { + type fields struct { + ctrl *gomock.Controller + kubeClient kubernetes.Interface + cniMode types.ContainerNetworkMode + nodeName string + config *v1alpha1.CNIConfigControllerConfiguration + netutil network.Interface + kernelhandler kernel.Interface + filesystem fs.FileSystem + metaClient metadata.Interface + } + type args struct { + ctx context.Context + tplContent string + dataObject *CNIConfigData + } + tests := []struct { + name string + fields fields + args args + want string + wantErr bool + }{ + /** normal: */ + { + fields: func() fields { + ctrl := gomock.NewController(t) + metaClient := mockmetadata.NewMockInterface(ctrl) + macList := []string{"test-mac-1", "test-mac-2"} + metaClient.EXPECT().ListMacs().Return(macList, nil) + metaClient.EXPECT().GetVifFeatures(gomock.Eq("test-mac-1")).Return("None", nil) + metaClient.EXPECT().GetVifFeatures(gomock.Eq("test-mac-2")).Return("elastic_rdma", nil) + + return fields{ + ctrl: ctrl, + config: &v1alpha1.CNIConfigControllerConfiguration{}, + metaClient: metaClient, + } + 
}(), + args: args{ + ctx: context.TODO(), + tplContent: `{"name":"cce-cni","cniVersion":"0.3.1","plugins":[{"type":"ptp","enableARPProxy":true,` + + `"vethPrefix":"veth","mtu":1500,"ipam":{"type":"host-local","ranges":[[{"subnet":"10.1.3.0/24"}]],"routes"` + + `:[{"dst":"0.0.0.0/0"}]}},{"type":"portmap","capabilities":{"portMappings":true},"externalSetMarkChain":` + + `"KUBE-MARK-MASQ"}]}`, + + dataObject: &CNIConfigData{ + NetworkName: "cce-cni", + IPAMEndPoint: "10.0.0.2:80", + VethMTU: 1500, + MasterInterface: "eth0", + InstanceType: string(metadata.InstanceTypeExBCC), + }, + }, + want: `{"cniVersion":"0.3.1","name":"cce-cni","plugins":[{"enableARPProxy":true,"ipam":{"ranges":` + + `[[{"subnet":"10.1.3.0/24"}]],"routes":[{"dst":"0.0.0.0/0"}],"type":"host-local"},"mtu":1500,"type":"ptp"` + + `,"vethPrefix":"veth"},{"capabilities":{"portMappings":true},"externalSetMarkChain":"KUBE-MARK-MASQ","type"` + + `:"portmap"},{"instanceType":"bcc","ipam":{"endpoint":"10.0.0.2:80"},"type":"eri"}]}`, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var jsonout bytes.Buffer + + if tt.fields.ctrl != nil { + defer tt.fields.ctrl.Finish() + } + c := &Controller{ + kubeClient: tt.fields.kubeClient, + cniMode: tt.fields.cniMode, + nodeName: tt.fields.nodeName, + config: tt.fields.config, + netutil: tt.fields.netutil, + kernelhandler: tt.fields.kernelhandler, + filesystem: tt.fields.filesystem, + metaClient: tt.fields.metaClient, + } + + rawgot, err := c.patchCNIConfig(tt.args.ctx, tt.args.tplContent, tt.args.dataObject) + // t.Errorf("got:[%s] err:[%+v]", got, err) + if (err != nil) != tt.wantErr { + t.Errorf("patchCNIConfigTemplate() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if len(tt.want) > 0 { + err = json.Compact(&jsonout, []byte(rawgot)) + if err != nil { + t.Errorf("patchCNIConfigTemplate() err = %+v", err) + return + } + + got := jsonout.String() + + if got != tt.want { + t.Errorf("patchCNIConfigTemplate() 
got = %v, want %v", got, tt.want) + } + } + }) + } +} diff --git a/pkg/nodeagent/controller/ippool/ip_resource_manager.go b/pkg/nodeagent/controller/ippool/ip_resource_manager.go index 5911259..21336d5 100644 --- a/pkg/nodeagent/controller/ippool/ip_resource_manager.go +++ b/pkg/nodeagent/controller/ippool/ip_resource_manager.go @@ -9,6 +9,7 @@ import ( "github.com/spf13/pflag" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" @@ -16,6 +17,7 @@ import ( "modernc.org/mathutil" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/apimachinery/networking" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/eniipam/ipam/crossvpceni" utileni "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/eni" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/logger" bccapi "github.com/baidubce/bce-sdk-go/services/bcc/api" @@ -257,13 +259,47 @@ func NewCrossVPCEniResourceManager(kubeClient kubernetes.Interface, node *corev1 } func (manager *crossVPCEniResourceManager) SyncCapacity(ctx context.Context) error { - var maxENINum int - maxENINum = utileni.GetMaxENIPerNode(manager.bccInstance.CpuCount) - if maxENINum < customerMaxENINum { - maxENINum = customerMaxENINum + var ( + maxEniNum int + maxEniNumByAnno int + maxEniNumByLabel int + node *v1.Node + ) + + maxEniNum = utileni.GetMaxENIPerNode(manager.bccInstance.CpuCount) + if maxEniNum < customerMaxENINum { + maxEniNum = customerMaxENINum + } + + node = manager.node + + maxEniNumStr, ok := node.Annotations[crossvpceni.NodeAnnotationMaxCrossVPCEni] + if ok { + i, err := strconv.ParseInt(maxEniNumStr, 10, 32) + if err != nil { + return err + } + maxEniNumByAnno = int(i) + + if maxEniNumByAnno < maxEniNum { + maxEniNum = maxEniNumByAnno + } + } + + maxEniNumStr, ok = node.Labels[crossvpceni.NodeLabelMaxCrossVPCEni] + if ok { + i, err := strconv.ParseInt(maxEniNumStr, 10, 32) + if err != nil { + return err + 
} + maxEniNumByLabel = int(i) + + if maxEniNumByLabel < maxEniNum { + maxEniNum = maxEniNumByLabel + } } - return manager.patchCrossVPCEniCapacityInfoToNode(ctx, maxENINum) + return manager.patchCrossVPCEniCapacityInfoToNode(ctx, maxEniNum) } func (manager *crossVPCEniResourceManager) patchCrossVPCEniCapacityInfoToNode(ctx context.Context, maxEniNum int) error { diff --git a/pkg/nodeagent/controller/ippool/ip_resource_manager_test.go b/pkg/nodeagent/controller/ippool/ip_resource_manager_test.go index f09dd3e..a61c439 100644 --- a/pkg/nodeagent/controller/ippool/ip_resource_manager_test.go +++ b/pkg/nodeagent/controller/ippool/ip_resource_manager_test.go @@ -50,6 +50,7 @@ func Test_crossVPCEniResourceManager_SyncCapacity(t *testing.T) { kubeClient, _, _, _, _ := setupEnv(ctrl) return fields{ + ctrl: ctrl, simpleIPResourceManager: &simpleIPResourceManager{ kubeClient: kubeClient, preAttachedENINum: 1, @@ -86,6 +87,7 @@ func Test_crossVPCEniResourceManager_SyncCapacity(t *testing.T) { }, metav1.CreateOptions{}) return fields{ + ctrl: ctrl, simpleIPResourceManager: &simpleIPResourceManager{ kubeClient: kubeClient, preAttachedENINum: 1, @@ -93,6 +95,9 @@ func Test_crossVPCEniResourceManager_SyncCapacity(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "6.0.16.4", }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{}, + }, }, }, bccInstance: &bccapi.InstanceModel{ @@ -121,7 +126,38 @@ func Test_crossVPCEniResourceManager_SyncCapacity(t *testing.T) { }, }, metav1.CreateOptions{}) - kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), &v1.Node{ + return fields{ + ctrl: ctrl, + simpleIPResourceManager: &simpleIPResourceManager{ + kubeClient: kubeClient, + preAttachedENINum: 1, + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "6.0.16.4", + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.Quantity{}, + }, + }, + }, + }, + bccInstance: &bccapi.InstanceModel{ + CpuCount: 8, + }, + } + }(), + args: args{ + ctx: 
context.TODO(), + }, + wantErr: false, + }, + { + name: "正常新增 resource 流程,node anno 自定义最大 eni 数量为 3", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, _, _, _, _ := setupEnv(ctrl) + kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "6.0.16.4", }, @@ -130,15 +166,24 @@ func Test_crossVPCEniResourceManager_SyncCapacity(t *testing.T) { "cross-vpc-eni.cce.io/eni": resource.Quantity{}, }, }, - }, metav1.UpdateOptions{}) + }, metav1.CreateOptions{}) return fields{ + ctrl: ctrl, simpleIPResourceManager: &simpleIPResourceManager{ kubeClient: kubeClient, preAttachedENINum: 1, node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "6.0.16.4", + Annotations: map[string]string{ + "cross-vpc-eni.cce.io/maxEniNumber": "3", + }, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.MustParse("3"), + }, }, }, }, @@ -152,6 +197,141 @@ func Test_crossVPCEniResourceManager_SyncCapacity(t *testing.T) { }, wantErr: false, }, + { + name: "正常新增 resource 流程,node anno 自定义最大 eni 错误", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, _, _, _, _ := setupEnv(ctrl) + kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "6.0.16.4", + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.Quantity{}, + }, + }, + }, metav1.CreateOptions{}) + + return fields{ + ctrl: ctrl, + simpleIPResourceManager: &simpleIPResourceManager{ + kubeClient: kubeClient, + preAttachedENINum: 1, + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "6.0.16.4", + Annotations: map[string]string{ + "cross-vpc-eni.cce.io/maxEniNumber": "xxx", + }, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.MustParse("3"), + }, + }, + }, + }, + bccInstance: &bccapi.InstanceModel{ + CpuCount: 8, + }, + } + }(), + args: args{ + ctx: context.TODO(), + }, + 
wantErr: true, + }, + { + name: "正常新增 resource 流程,node label 自定义最大 eni 数量为 3", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, _, _, _, _ := setupEnv(ctrl) + kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "6.0.16.4", + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.Quantity{}, + }, + }, + }, metav1.CreateOptions{}) + + return fields{ + ctrl: ctrl, + simpleIPResourceManager: &simpleIPResourceManager{ + kubeClient: kubeClient, + preAttachedENINum: 1, + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "6.0.16.4", + Labels: map[string]string{ + "cross-vpc-eni.cce.io/max-eni-number": "3", + }, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.MustParse("3"), + }, + }, + }, + }, + bccInstance: &bccapi.InstanceModel{ + CpuCount: 8, + }, + } + }(), + args: args{ + ctx: context.TODO(), + }, + wantErr: false, + }, + { + name: "正常新增 resource 流程,node label 自定义最大 eni 错误", + fields: func() fields { + ctrl := gomock.NewController(t) + kubeClient, _, _, _, _ := setupEnv(ctrl) + kubeClient.CoreV1().Nodes().Create(context.TODO(), &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "6.0.16.4", + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.Quantity{}, + }, + }, + }, metav1.CreateOptions{}) + + return fields{ + ctrl: ctrl, + simpleIPResourceManager: &simpleIPResourceManager{ + kubeClient: kubeClient, + preAttachedENINum: 1, + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "6.0.16.4", + Labels: map[string]string{ + "cross-vpc-eni.cce.io/max-eni-number": "xxx", + }, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + "cross-vpc-eni.cce.io/eni": resource.MustParse("3"), + }, + }, + }, + }, + bccInstance: &bccapi.InstanceModel{ + CpuCount: 8, + }, + } + }(), + args: args{ + ctx: context.TODO(), + }, + wantErr: true, + }, } for _, tt := 
range tests { if tt.fields.ctrl != nil { diff --git a/pkg/rpc/rpc.pb.go b/pkg/rpc/rpc.pb.go index 8663c7a..f2f379a 100644 --- a/pkg/rpc/rpc.pb.go +++ b/pkg/rpc/rpc.pb.go @@ -31,6 +31,7 @@ const ( IPType_BBCPrimaryENIMultiIPType IPType = 1 IPType_CrossVPCENIIPType IPType = 2 IPType_RoceENIMultiIPType IPType = 3 + IPType_ERIENIMultiIPType IPType = 4 ) var IPType_name = map[int32]string{ @@ -38,6 +39,7 @@ var IPType_name = map[int32]string{ 1: "BBCPrimaryENIMultiIPType", 2: "CrossVPCENIIPType", 3: "RoceENIMultiIPType", + 4: "ERIENIMultiIPType", } var IPType_value = map[string]int32{ @@ -45,6 +47,7 @@ var IPType_value = map[string]int32{ "BBCPrimaryENIMultiIPType": 1, "CrossVPCENIIPType": 2, "RoceENIMultiIPType": 3, + "ERIENIMultiIPType": 4, } func (x IPType) String() string { @@ -1028,53 +1031,54 @@ func init() { func init() { proto.RegisterFile("pkg/rpc/rpc.proto", fileDescriptor_bd1e66095a603e73) } var fileDescriptor_bd1e66095a603e73 = []byte{ - // 734 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xdd, 0x6a, 0xdb, 0x4a, - 0x10, 0x8e, 0xa4, 0xf8, 0x47, 0xe3, 0x73, 0x1c, 0x7b, 0x8e, 0xe3, 0xa3, 0x9a, 0xd0, 0x1a, 0x97, - 0x82, 0x29, 0x25, 0x85, 0x04, 0x42, 0x42, 0x73, 0x53, 0xcb, 0x6e, 0xb2, 0x84, 0x08, 0xa1, 0x94, - 0xdc, 0x2b, 0xf2, 0x3a, 0x15, 0x56, 0x24, 0x45, 0x3f, 0x24, 0x7e, 0x8a, 0xf6, 0x79, 0xfa, 0x20, - 0x7d, 0x86, 0x42, 0x69, 0x9f, 0xa1, 0x68, 0xb5, 0xb1, 0x64, 0x89, 0xb4, 0xd0, 0xde, 0x94, 0xd0, - 0x0b, 0xc3, 0xce, 0x37, 0x33, 0x62, 0xbf, 0x6f, 0xbe, 0x5d, 0x2f, 0xb4, 0xfd, 0xf9, 0xe5, 0xcb, - 0xc0, 0xb7, 0x92, 0xdf, 0xb6, 0x1f, 0x78, 0x91, 0x87, 0x52, 0xe0, 0x5b, 0x83, 0x0f, 0x12, 0xb4, - 0x5f, 0x3b, 0x8e, 0x67, 0x99, 0x11, 0x25, 0xba, 0x41, 0xaf, 0x63, 0x1a, 0x46, 0xf8, 0x18, 0xe0, - 0x64, 0x3f, 0xd4, 0xbd, 0xa9, 0x66, 0x5e, 0x51, 0x45, 0xe8, 0x0b, 0x43, 0xd9, 0xc8, 0x21, 0x38, - 0x84, 0x8d, 0x2c, 0x0a, 0x7d, 0xd3, 0xa2, 0x8a, 0xc8, 0x8a, 0x8a, 0x30, 0xee, 0x41, 0x37, 0x85, - 0x88, 0x3b, 
0x0b, 0x4c, 0xd5, 0x73, 0x23, 0xd3, 0x76, 0x69, 0x40, 0xc6, 0x8a, 0xc4, 0x1a, 0xee, - 0xc9, 0x62, 0x07, 0x2a, 0x1a, 0x8d, 0xdc, 0x50, 0x59, 0x67, 0x65, 0x69, 0x80, 0x5d, 0xa8, 0x92, - 0x19, 0xdb, 0x53, 0x85, 0xc1, 0x3c, 0xc2, 0x3e, 0x34, 0x4e, 0xf6, 0x43, 0xcd, 0x9b, 0x52, 0x96, - 0xac, 0xb2, 0x64, 0x1e, 0xc2, 0xa7, 0x50, 0x25, 0xfa, 0xdb, 0x85, 0x4f, 0x95, 0x5a, 0x5f, 0x18, - 0x36, 0x77, 0x1a, 0xdb, 0x89, 0x10, 0x29, 0x64, 0xf0, 0x14, 0xee, 0x03, 0x4c, 0x34, 0x72, 0x1a, - 0x3b, 0x91, 0x4d, 0x74, 0xa5, 0xde, 0x17, 0x86, 0x8d, 0x9d, 0x2e, 0x2b, 0xcc, 0x60, 0x2e, 0xd1, - 0xf1, 0x9a, 0x91, 0xab, 0xc5, 0x57, 0xd0, 0x50, 0x03, 0x2f, 0x0c, 0xcf, 0x75, 0x75, 0xa2, 0x11, - 0x45, 0x66, 0xad, 0xff, 0xb3, 0xd6, 0x1c, 0x9e, 0xf5, 0xe6, 0xab, 0x47, 0xff, 0x42, 0x43, 0xa3, - 0xd1, 0x8d, 0x17, 0xcc, 0x89, 0x3b, 0xf3, 0x06, 0xdf, 0x04, 0xd8, 0xc8, 0x8f, 0xc4, 0x77, 0x16, - 0xb8, 0x05, 0x32, 0x09, 0xcf, 0x62, 0xcb, 0xa2, 0x61, 0xc8, 0xe6, 0x51, 0x37, 0x32, 0x20, 0x91, - 0x65, 0x12, 0x04, 0xa7, 0xe1, 0x25, 0x9f, 0x02, 0x8f, 0x72, 0xa4, 0xa5, 0xfb, 0x49, 0xef, 0xad, - 0x90, 0x5e, 0x67, 0x3b, 0xef, 0x94, 0x48, 0xfb, 0xce, 0xa2, 0x40, 0xf9, 0x60, 0x95, 0x72, 0x85, - 0x35, 0x6e, 0x96, 0x29, 0xa7, 0x9d, 0x3f, 0x22, 0xfc, 0x5e, 0x82, 0x96, 0x41, 0x1d, 0x6a, 0x86, - 0x7f, 0x2d, 0xf8, 0x67, 0x58, 0xf0, 0xab, 0x00, 0xcd, 0xdc, 0x44, 0x1e, 0xbc, 0x03, 0x3f, 0x8b, - 0xd0, 0x54, 0xdf, 0x51, 0x6b, 0xfe, 0x90, 0xfd, 0xb7, 0x6a, 0xad, 0xda, 0xaf, 0x5b, 0xab, 0xfe, - 0x3b, 0xd6, 0xfa, 0x22, 0xc0, 0x3f, 0x4b, 0xa9, 0x1f, 0xbc, 0xb1, 0x9e, 0x41, 0xbb, 0x24, 0x2e, - 0xb6, 0x40, 0x3a, 0x35, 0x2d, 0xee, 0xa9, 0x64, 0x99, 0x9c, 0xb7, 0x8d, 0xc2, 0x96, 0xb0, 0x09, - 0x22, 0xd1, 0x79, 0x91, 0x48, 0x74, 0x44, 0x58, 0x67, 0x7c, 0x53, 0x1d, 0xd8, 0xfa, 0xee, 0x4b, - 0xd2, 0xf2, 0x4b, 0x49, 0xd7, 0xd1, 0x0d, 0x77, 0x8c, 0x78, 0x74, 0x93, 0x98, 0x68, 0xa2, 0x11, - 0x32, 0xe6, 0x6e, 0x49, 0x83, 0xe4, 0x5b, 0x89, 0x2d, 0xb8, 0x4b, 0xd8, 0x1a, 0x7b, 0x50, 0x3f, - 0x8b, 0x2f, 0x5c, 0x1a, 0x91, 0x31, 0x33, 0x87, 
0x6c, 0x2c, 0xe3, 0xc4, 0x5c, 0x13, 0xd7, 0xbc, - 0x70, 0xe8, 0x1b, 0xfb, 0x96, 0x5f, 0x4b, 0xb2, 0x91, 0x87, 0xf0, 0x05, 0xb4, 0xd9, 0x62, 0x4c, - 0x1d, 0x1a, 0x51, 0xdd, 0x73, 0x6c, 0x6b, 0xc1, 0xee, 0x20, 0xd9, 0x28, 0x27, 0x06, 0x1d, 0xc0, - 0xb2, 0x71, 0x06, 0x9f, 0x04, 0x68, 0x15, 0xf5, 0x2d, 0xc9, 0xc0, 0x29, 0x8b, 0x19, 0x65, 0x05, - 0x6a, 0xe7, 0xba, 0xaa, 0x92, 0xb1, 0xc1, 0x85, 0xb8, 0x0b, 0xf1, 0x18, 0x9e, 0x8c, 0xe9, 0xcc, - 0x8c, 0x9d, 0xc8, 0xf0, 0xe2, 0x88, 0x12, 0x37, 0xa2, 0xc1, 0xcc, 0xb4, 0x68, 0xb2, 0x97, 0x4b, - 0x33, 0xb2, 0x3d, 0x97, 0x2b, 0xf5, 0xb3, 0x32, 0x3c, 0x84, 0x47, 0xf9, 0x92, 0xc9, 0xad, 0xe5, - 0xc4, 0x53, 0x3a, 0x55, 0xed, 0x69, 0x10, 0x2a, 0x95, 0xbe, 0x34, 0x94, 0x8d, 0xfb, 0x0b, 0x9e, - 0x5f, 0xdf, 0x99, 0x15, 0x7b, 0xd0, 0x1d, 0xa9, 0x2a, 0x9b, 0x73, 0x36, 0xef, 0x24, 0xd3, 0x5a, - 0xc3, 0x2d, 0x50, 0x46, 0x23, 0x55, 0x0f, 0xec, 0x2b, 0x33, 0x58, 0x14, 0xb2, 0x02, 0x6e, 0x42, - 0x3b, 0xa7, 0x0d, 0x87, 0x45, 0xec, 0x02, 0x1a, 0x9e, 0x45, 0x0b, 0xe5, 0xd2, 0xce, 0x47, 0x01, - 0x40, 0xd5, 0xc8, 0xc8, 0xb4, 0xe6, 0xd4, 0x9d, 0xe2, 0x21, 0x40, 0xf6, 0xa4, 0xc0, 0xf4, 0xd4, - 0x97, 0x9e, 0x7d, 0xbd, 0x4e, 0x09, 0xf7, 0x9d, 0xc5, 0x60, 0x0d, 0x0f, 0x40, 0x5e, 0xfe, 0x1b, - 0x60, 0x7a, 0x0e, 0x8a, 0xff, 0xd7, 0xbd, 0xff, 0x8a, 0x70, 0xda, 0xba, 0x0b, 0x35, 0x7e, 0xda, - 0x31, 0xad, 0x58, 0xbd, 0x66, 0x7b, 0xed, 0x55, 0x90, 0x35, 0x5d, 0x54, 0xd9, 0x03, 0x75, 0xf7, - 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x09, 0x10, 0x3d, 0xb5, 0x0a, 0x00, 0x00, + // 740 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xef, 0x6a, 0xd3, 0x50, + 0x14, 0x5f, 0x9a, 0xfe, 0xcb, 0xa9, 0x76, 0xed, 0xb1, 0xab, 0xb1, 0x0c, 0x2d, 0x15, 0xa1, 0x88, + 0x4c, 0xd8, 0x60, 0x6c, 0xb8, 0x2f, 0x36, 0xad, 0xdb, 0x65, 0x2c, 0x84, 0x3b, 0xd9, 0xf7, 0x2c, + 0xbd, 0x9d, 0xa5, 0x59, 0x12, 0x93, 0x94, 0xad, 0x6f, 0xe0, 0x37, 0x7d, 0x1e, 0x1f, 0xc4, 0x67, + 0x10, 0x44, 0x9f, 0x41, 0x72, 0x73, 0xd7, 0xa4, 
0x09, 0x53, 0xd0, 0x2f, 0x32, 0xfc, 0x50, 0xb8, + 0xe7, 0x77, 0x7e, 0xe7, 0x72, 0xcf, 0xef, 0xfc, 0x7a, 0x73, 0xa1, 0xe9, 0xcd, 0x2e, 0x5e, 0xfa, + 0x9e, 0x15, 0xfd, 0xb6, 0x3c, 0xdf, 0x0d, 0x5d, 0x94, 0x7d, 0xcf, 0xea, 0x7d, 0x92, 0xa1, 0xf9, + 0xda, 0xb6, 0x5d, 0xcb, 0x0c, 0x19, 0x31, 0x28, 0x7b, 0x3f, 0x67, 0x41, 0x88, 0x8f, 0x01, 0x8e, + 0xf7, 0x02, 0xc3, 0x1d, 0xeb, 0xe6, 0x25, 0x53, 0xa5, 0xae, 0xd4, 0x57, 0x68, 0x0a, 0xc1, 0x3e, + 0xac, 0x27, 0x51, 0xe0, 0x99, 0x16, 0x53, 0x0b, 0x9c, 0x94, 0x85, 0x71, 0x17, 0xda, 0x31, 0x44, + 0x9c, 0x89, 0x6f, 0x6a, 0xae, 0x13, 0x9a, 0x53, 0x87, 0xf9, 0x64, 0xa8, 0xca, 0xbc, 0xe0, 0x96, + 0x2c, 0xb6, 0xa0, 0xa4, 0xb3, 0xd0, 0x09, 0xd4, 0x22, 0xa7, 0xc5, 0x01, 0xb6, 0xa1, 0x4c, 0x26, + 0xfc, 0x4c, 0x25, 0x0e, 0x8b, 0x08, 0xbb, 0x50, 0x3b, 0xde, 0x0b, 0x74, 0x77, 0xcc, 0x78, 0xb2, + 0xcc, 0x93, 0x69, 0x08, 0x9f, 0x42, 0x99, 0x18, 0x6f, 0x17, 0x1e, 0x53, 0x2b, 0x5d, 0xa9, 0x5f, + 0xdf, 0xae, 0x6d, 0x45, 0x42, 0xc4, 0x10, 0x15, 0x29, 0xdc, 0x03, 0x18, 0xe9, 0xe4, 0x64, 0x6e, + 0x87, 0x53, 0x62, 0xa8, 0xd5, 0xae, 0xd4, 0xaf, 0x6d, 0xb7, 0x39, 0x31, 0x81, 0x85, 0x44, 0x47, + 0x6b, 0x34, 0xc5, 0xc5, 0x57, 0x50, 0xd3, 0x7c, 0x37, 0x08, 0xce, 0x0c, 0x6d, 0xa4, 0x13, 0x55, + 0xe1, 0xa5, 0x0f, 0x79, 0x69, 0x0a, 0x4f, 0x6a, 0xd3, 0xec, 0xc1, 0x7d, 0xa8, 0xe9, 0x2c, 0xbc, + 0x72, 0xfd, 0x19, 0x71, 0x26, 0x6e, 0xef, 0x87, 0x04, 0xeb, 0xe9, 0x91, 0x78, 0xf6, 0x02, 0x37, + 0x41, 0x21, 0xc1, 0xe9, 0xdc, 0xb2, 0x58, 0x10, 0xf0, 0x79, 0x54, 0x69, 0x02, 0x44, 0xb2, 0x8c, + 0x7c, 0xff, 0x24, 0xb8, 0x10, 0x53, 0x10, 0x51, 0xaa, 0x69, 0xf9, 0xf6, 0xa6, 0x77, 0x57, 0x9a, + 0x2e, 0xf2, 0x93, 0xb7, 0x72, 0x4d, 0x7b, 0xf6, 0x22, 0xd3, 0xf2, 0xfe, 0x6a, 0xcb, 0x25, 0x5e, + 0xb8, 0x91, 0x6f, 0x39, 0xae, 0xfc, 0x55, 0xc3, 0x1f, 0x65, 0x68, 0x50, 0x66, 0x33, 0x33, 0xf8, + 0x6f, 0xc1, 0x7f, 0xc3, 0x82, 0xdf, 0x25, 0xa8, 0xa7, 0x26, 0x72, 0xe7, 0x1d, 0xf8, 0xb5, 0x00, + 0x75, 0xed, 0x1d, 0xb3, 0x66, 0x77, 0xd9, 0x7f, 0xab, 0xd6, 0xaa, 0xfc, 0xb9, 0xb5, 
0xaa, 0x7f, + 0x63, 0xad, 0x6f, 0x12, 0xdc, 0x5b, 0x4a, 0x7d, 0xe7, 0x8d, 0xf5, 0x0c, 0x9a, 0x39, 0x71, 0xb1, + 0x01, 0xf2, 0x89, 0x69, 0x09, 0x4f, 0x45, 0xcb, 0xe8, 0xff, 0xb6, 0x9e, 0x39, 0x12, 0xd6, 0xa1, + 0x40, 0x0c, 0x41, 0x2a, 0x10, 0x03, 0x11, 0x8a, 0xbc, 0xdf, 0x58, 0x07, 0xbe, 0xbe, 0xd9, 0x49, + 0x5e, 0xee, 0x14, 0x55, 0x1d, 0x5e, 0x09, 0xc7, 0x14, 0x0e, 0xaf, 0x22, 0x13, 0x8d, 0x74, 0x42, + 0x86, 0xc2, 0x2d, 0x71, 0x10, 0xed, 0x15, 0xd9, 0x42, 0xb8, 0x84, 0xaf, 0xb1, 0x03, 0xd5, 0xd3, + 0xf9, 0xb9, 0xc3, 0x42, 0x32, 0xe4, 0xe6, 0x50, 0xe8, 0x32, 0x8e, 0xcc, 0x35, 0x72, 0xcc, 0x73, + 0x9b, 0xbd, 0x99, 0x5e, 0x8b, 0x6b, 0x49, 0xa1, 0x69, 0x08, 0x5f, 0x40, 0x93, 0x2f, 0x86, 0xcc, + 0x66, 0x21, 0x33, 0x5c, 0x7b, 0x6a, 0x2d, 0xf8, 0x1d, 0xa4, 0xd0, 0x7c, 0xa2, 0xd7, 0x02, 0xcc, + 0x1b, 0xa7, 0xf7, 0x45, 0x82, 0x46, 0x56, 0xdf, 0x9c, 0x0c, 0xa2, 0xe5, 0x42, 0xd2, 0xb2, 0x0a, + 0x95, 0x33, 0x43, 0xd3, 0xc8, 0x90, 0x0a, 0x21, 0x6e, 0x42, 0x3c, 0x82, 0x27, 0x43, 0x36, 0x31, + 0xe7, 0x76, 0x48, 0xdd, 0x79, 0xc8, 0x88, 0x13, 0x32, 0x7f, 0x62, 0x5a, 0x2c, 0x3a, 0xcb, 0x85, + 0x19, 0x4e, 0x5d, 0x47, 0x28, 0xf5, 0x3b, 0x1a, 0x1e, 0xc0, 0xa3, 0x34, 0x65, 0x74, 0x6d, 0xd9, + 0xf3, 0x31, 0x1b, 0x6b, 0xd3, 0xb1, 0x1f, 0xa8, 0xa5, 0xae, 0xdc, 0x57, 0xe8, 0xed, 0x84, 0xe7, + 0x1f, 0xa4, 0x1b, 0xb7, 0x62, 0x07, 0xda, 0x03, 0x4d, 0xe3, 0x83, 0x4e, 0x06, 0x1e, 0x65, 0x1a, + 0x6b, 0xb8, 0x09, 0xea, 0x60, 0xa0, 0x19, 0xfe, 0xf4, 0xd2, 0xf4, 0x17, 0x99, 0xac, 0x84, 0x1b, + 0xd0, 0x4c, 0x89, 0x23, 0xe0, 0x02, 0xb6, 0x01, 0xa9, 0x6b, 0xb1, 0x0c, 0x5d, 0x8e, 0xe8, 0x23, + 0x4a, 0x32, 0x70, 0x71, 0xfb, 0xb3, 0x04, 0xa0, 0xe9, 0x64, 0x60, 0x5a, 0x33, 0xe6, 0x8c, 0xf1, + 0x00, 0x20, 0x79, 0x6a, 0x60, 0x7c, 0x1b, 0xe4, 0x9e, 0x83, 0x9d, 0x56, 0x0e, 0xf7, 0xec, 0x45, + 0x6f, 0x0d, 0xf7, 0x41, 0x59, 0x7e, 0x25, 0x30, 0xfe, 0x7f, 0x64, 0xbf, 0xe3, 0x9d, 0x07, 0x59, + 0x38, 0x2e, 0xdd, 0x81, 0x8a, 0xb8, 0x05, 0x30, 0x66, 0xac, 0x5e, 0xbf, 0x9d, 0xe6, 0x2a, 0xc8, + 0x8b, 0xce, 0xcb, 0xfc, 
0xe1, 0xba, 0xf3, 0x33, 0x00, 0x00, 0xff, 0xff, 0x40, 0x1e, 0x3e, 0xa9, + 0xcd, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/pkg/rpc/rpc.proto b/pkg/rpc/rpc.proto index 986c0bb..0717c0a 100644 --- a/pkg/rpc/rpc.proto +++ b/pkg/rpc/rpc.proto @@ -17,6 +17,7 @@ enum IPType { BBCPrimaryENIMultiIPType = 1; CrossVPCENIIPType = 2; RoceENIMultiIPType = 3; + ERIENIMultiIPType = 4; } message AllocateIPRequest { diff --git a/pkg/util/cidr/cidr.go b/pkg/util/cidr/cidr.go index 4259ef0..2d32eba 100644 --- a/pkg/util/cidr/cidr.go +++ b/pkg/util/cidr/cidr.go @@ -3,6 +3,7 @@ package cidr import ( "net" + "github.com/spf13/pflag" k8sutilnet "k8s.io/utils/net" ) @@ -22,8 +23,17 @@ var ( _, IPv6ZeroCIDR, _ = net.ParseCIDR("::/0") _, LinkLocalCIDR, _ = net.ParseCIDR("169.254.0.0/16") + + // subnetAvailableIndex Which index IP address is available in a subnet + // In BCE Cloud, the index is 2 + // In a private cloud, the number may be 4 + subnetAvailableIndex = 2 ) +func RegisterCIDRFlags(pset *pflag.FlagSet) { + pset.IntVar(&subnetAvailableIndex, "subnet-available-index", subnetAvailableIndex, "Which index IP address is available in a subnet") +} + func IsUnicastIP(ip net.IP, subnet string) bool { if !ip.IsGlobalUnicast() { return false @@ -98,19 +108,17 @@ func ListFirtstAndLastIPStringFromCIDR(cidr string) []net.IP { if size == 0 { return []net.IP{cidr.IP} } - ip, err := k8sutilnet.GetIndexedIP(cidr, 0) - if err == nil { - ipList = append(ipList, ip) - } + // Exclude first ip // The first IP address of the subnet is usually the gateway address - if size > 1 { - ip, err := k8sutilnet.GetIndexedIP(cidr, 1) + for i := 0; i < subnetAvailableIndex && i < int(size); i++ { + ip, err := k8sutilnet.GetIndexedIP(cidr, i) if err == nil { ipList = append(ipList, ip) } } - ip, err = k8sutilnet.GetIndexedIP(cidr, int(size-1)) + + ip, err := k8sutilnet.GetIndexedIP(cidr, int(size-1)) if err == nil { ipList = append(ipList, ip) } diff 
--git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go index b01f483..10fc6b1 100644 --- a/pkg/util/logger/logger.go +++ b/pkg/util/logger/logger.go @@ -20,12 +20,14 @@ import ( "flag" "fmt" - "k8s.io/klog" + "k8s.io/klog/v2" ) // ContextKeyType context key type ContextKeyType string +var enabled bool + const ( // TraceID context key name TraceID ContextKeyType = "TraceId" @@ -104,18 +106,20 @@ func EnsureTraceIDInCtx(ctx context.Context) context.Context { type Verbose klog.Verbose func V(level klog.Level) Verbose { - return Verbose(klog.V(level)) + verbose := klog.V(level) + enabled = verbose.Enabled() + return Verbose(verbose) } func (v Verbose) Info(ctx context.Context, args ...interface{}) { - if v { + if enabled { prefix := buildFormat(ctx, "") klog.InfoDepth(1, prefix+fmt.Sprint(args...)) } } func (v Verbose) Infof(ctx context.Context, format string, args ...interface{}) { - if v { + if enabled { format = buildFormat(ctx, format) klog.InfoDepth(1, fmt.Sprintf(format, args...)) } diff --git a/pkg/webhook/pod/mutating/webhook_test.go b/pkg/webhook/pod/mutating/webhook_test.go index 0a00b6b..f4f30a3 100644 --- a/pkg/webhook/pod/mutating/webhook_test.go +++ b/pkg/webhook/pod/mutating/webhook_test.go @@ -62,8 +62,10 @@ func TestMutatingPodHandler_validatePstsSpec(t *testing.T) { obj := MockPSTS("psts-test") obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeFixed, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyNever, + IPAllocationStrategy: networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeFixed, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyNever, + }, IPv4: []string{ "10.178.245.1", "10.178.245.2", diff --git a/pkg/webhook/psts/validating/webhook.go b/pkg/webhook/psts/validating/webhook.go index 8181724..84826d9 100644 --- a/pkg/webhook/psts/validating/webhook.go +++ b/pkg/webhook/psts/validating/webhook.go @@ -23,6 +23,7 @@ import ( 
"github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/clientset/versioned" crdinformers "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions" networkinformer "github.com/baidubce/baiducloud-cce-cni-driver/pkg/generated/informers/externalversions/networking/v1alpha1" + "github.com/baidubce/baiducloud-cce-cni-driver/pkg/iprange" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/util/cidr" "github.com/baidubce/baiducloud-cce-cni-driver/pkg/webhook/injector" ) @@ -135,10 +136,11 @@ func (h *ValidatingPSTSHandler) validatePstsSpec(spec *networkv1alpha1.PodSubnet if len(spec.Subnets) == 0 { allErrs = append(allErrs, field.Invalid(fldPath.Child("subnets"), spec.Subnets, "length of subnets must greater than 0")) } + if len(allErrs) > 0 { return allErrs } - return h.validateSubnet(spec.Subnets, fldPath.Child("subnets")) + return h.validateSubnet(spec.Strategy, spec.Subnets, fldPath.Child("subnets")) } // Verify the availability of the subnet @@ -147,28 +149,60 @@ func (h *ValidatingPSTSHandler) validatePstsSpec(spec *networkv1alpha1.PodSubnet // does not exist, a private subnet will be automatically created. 
// If the corresponding subnet already exists, but it is not a // exclusive subnet, an error will be triggered -func (h *ValidatingPSTSHandler) validateSubnet(sas map[string]networkv1alpha1.SubnetAllocation, fldPath *field.Path) field.ErrorList { +func (h *ValidatingPSTSHandler) validateSubnet( + strategy *networkv1alpha1.IPAllocationStrategy, + sas map[string]networkv1alpha1.SubnetAllocation, + fldPath *field.Path) field.ErrorList { var ( allErrs = field.ErrorList{} lastType networkv1alpha1.IPAllocType ) for sbnID, sbnSpec := range sas { - var sbn *networkv1alpha1.Subnet - sbn, allErrs = h.getORCreateSubnet(sbnID, &sbnSpec, fldPath.Child(sbnID), allErrs) - if len(allErrs) > 0 { - return allErrs + newStrategy := sbnSpec.IPAllocationStrategy + newType := sbnSpec.Type + if strategy != nil { + newType = strategy.Type + newStrategy = *strategy } - switch sbnSpec.Type { + var sbn *networkv1alpha1.Subnet + + switch newType { case networkv1alpha1.IPAllocTypeFixed: - allErrs = h.validateFixedMode(sbn, &sbnSpec, fldPath.Child(sbnID), allErrs) + sbn, allErrs = h.getORCreateSubnet(sbnID, true, fldPath.Child(sbnID), allErrs) + if len(allErrs) > 0 { + return allErrs + } + allErrs = h.validateExclusiveSubnet(sbn, &sbnSpec, fldPath.Child(sbnID), allErrs) case networkv1alpha1.IPAllocTypeManual: - allErrs = h.validateManualMode(sbn, &sbnSpec, fldPath.Child(sbnID), allErrs) + // manaual ip validating + sbn, allErrs = h.getORCreateSubnet(sbnID, true, fldPath.Child(sbnID), allErrs) + if len(allErrs) > 0 { + return allErrs + } + if newStrategy.ReleaseStrategy != networkv1alpha1.ReleaseStrategyTTL && newStrategy.ReleaseStrategy != "" { + allErrs = append(allErrs, + field.Invalid( + fldPath.Child(fmt.Sprintf("%s.%s", sbn.Name, "releaseStrategy")), + sbnSpec, "releaseStrategy is not TTL on Elastic mode"), + ) + } + allErrs = h.validateExclusiveSubnet(sbn, &sbnSpec, fldPath.Child(sbnID), allErrs) + case networkv1alpha1.IPAllocTypeCustom: + sbn, allErrs = h.getORCreateSubnet(sbnID, true, 
fldPath.Child(sbnID), allErrs) + if len(allErrs) > 0 { + return allErrs + } + allErrs = h.validateCustomMode(sbn, &sbnSpec, fldPath.Child(sbnID), allErrs) case "": sbnSpec.Type = networkv1alpha1.IPAllocTypeElastic fallthrough case networkv1alpha1.IPAllocTypeElastic: + sbn, allErrs = h.getORCreateSubnet(sbnID, false, fldPath.Child(sbnID), allErrs) + if len(allErrs) > 0 { + return allErrs + } allErrs = h.validateElasticMode(sbnSpec, allErrs, fldPath, sbnID, sbn) default: return append(allErrs, field.Invalid(fldPath.Child(fmt.Sprintf("%s.%s", sbnID, "type")), sbnSpec, fmt.Sprintf("unknown subnet type %s, the value must be Fixed, Manual or Elastic", sbnSpec.Type))) @@ -209,16 +243,22 @@ func (h *ValidatingPSTSHandler) validateElasticMode(sbnSpec networkv1alpha1.Subn // does not exist, a private subnet will be automatically created. // If the corresponding subnet already exists, but it is not a // exclusive subnet, an error will be triggered -func (h *ValidatingPSTSHandler) getORCreateSubnet(sbnID string, sbnSpec *networkv1alpha1.SubnetAllocation, fldPath *field.Path, allErrs field.ErrorList) (*networkv1alpha1.Subnet, field.ErrorList) { +func (h *ValidatingPSTSHandler) getORCreateSubnet( + sbnID string, + exclusive bool, + fldPath *field.Path, + allErrs field.ErrorList, +) (*networkv1alpha1.Subnet, + field.ErrorList) { sbn, err := h.subnetInformer.Lister().Subnets(corev1.NamespaceDefault).Get(sbnID) if err != nil { if errors.IsNotFound(err) { // 1. create a new subnet cr err = subnet.CreateSubnetCR(context.Background(), h.bceClient, h.crdClient, sbnID) if err != nil { - return nil, append(allErrs, field.Invalid(fldPath, sbnSpec, fmt.Sprintf("create subnet %s error: %v", sbnID, err))) + return nil, append(allErrs, field.Invalid(fldPath, sbnID, fmt.Sprintf("create subnet %s error: %v", sbnID, err))) } - if sbnSpec.Type == networkv1alpha1.IPAllocTypeFixed || sbnSpec.Type == networkv1alpha1.IPAllocTypeManual { + if exclusive { // 2. 
mark subnet as exclusive sbn, err = subnet.MarkExclusiveSubnet(context.Background(), h.crdClient, sbnID, true) if err != nil { @@ -232,7 +272,7 @@ func (h *ValidatingPSTSHandler) getORCreateSubnet(sbnID string, sbnSpec *network } } } else { - return nil, append(allErrs, field.Invalid(fldPath.Child(sbnID), sbnSpec, fmt.Sprintf("get subnet %s error: %v", sbnID, err))) + return nil, append(allErrs, field.Invalid(fldPath.Child(sbnID), sbnID, fmt.Sprintf("get subnet %s error: %v", sbnID, err))) } } @@ -281,17 +321,27 @@ func (h *ValidatingPSTSHandler) validateExclusiveSubnet(sbn *networkv1alpha1.Sub return allErrs } -func (h *ValidatingPSTSHandler) validateFixedMode(sbn *networkv1alpha1.Subnet, sbnSpec *networkv1alpha1.SubnetAllocation, fldPath *field.Path, allErrs field.ErrorList) field.ErrorList { - // fixed ip validating - return h.validateExclusiveSubnet(sbn, sbnSpec, fldPath, allErrs) -} +func (h *ValidatingPSTSHandler) validateCustomMode( + sbn *networkv1alpha1.Subnet, + sbnSpec *networkv1alpha1.SubnetAllocation, + fldPath *field.Path, + allErrs field.ErrorList, +) field.ErrorList { + // mark subnet as exclusive + if !sbn.Spec.Exclusive { + return append(allErrs, field.Invalid(fldPath, sbn, "subnet is not exclusive")) + } -func (h *ValidatingPSTSHandler) validateManualMode(sbn *networkv1alpha1.Subnet, sbnSpec *networkv1alpha1.SubnetAllocation, fldPath *field.Path, allErrs field.ErrorList) field.ErrorList { - // manaual ip validating - if sbnSpec.ReleaseStrategy != networkv1alpha1.ReleaseStrategyTTL && sbnSpec.ReleaseStrategy != "" { - allErrs = append(allErrs, field.Invalid(fldPath.Child(fmt.Sprintf("%s.%s", sbn.Name, "releaseStrategy")), sbnSpec, "releaseStrategy is not TTL on Elastic mode")) + _, cidr, err := net.ParseCIDR(sbn.Spec.CIDR) + if err != nil { + return append(allErrs, field.Invalid(fldPath, sbn, "cidr of subnet is invalide")) + } + _, err = iprange.ListAllCustomIPRangeIndexs(cidr, sbnSpec.Custom) + if err != nil { + return append(allErrs, 
field.Invalid(fldPath, sbnSpec.Custom, err.Error())) } - return h.validateExclusiveSubnet(sbn, sbnSpec, fldPath, allErrs) + + return allErrs } var _ admission.DecoderInjector = &ValidatingPSTSHandler{} diff --git a/pkg/webhook/psts/validating/webhook_test.go b/pkg/webhook/psts/validating/webhook_test.go index b6344a8..ba7f485 100644 --- a/pkg/webhook/psts/validating/webhook_test.go +++ b/pkg/webhook/psts/validating/webhook_test.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/wait" + k8sutilnet "k8s.io/utils/net" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) @@ -62,8 +63,10 @@ func TestValidatingPSTSHandler_validatePstsSpec(t *testing.T) { obj := MockPSTS("psts-test") obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeFixed, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyNever, + IPAllocationStrategy: networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeFixed, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyNever, + }, IPv4: []string{ "10.178.245.1", "10.178.245.2", @@ -79,8 +82,10 @@ func TestValidatingPSTSHandler_validatePstsSpec(t *testing.T) { obj := MockPSTS("psts-test") obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeFixed, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyNever, + IPAllocationStrategy: networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeFixed, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyNever, + }, IPv4Range: []string{ "10.178.245.1/25", }, @@ -94,8 +99,10 @@ func TestValidatingPSTSHandler_validatePstsSpec(t *testing.T) { obj := MockPSTS("psts-test") obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeElastic, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + IPAllocationStrategy: 
networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeElastic, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + }, }, } return obj @@ -106,8 +113,10 @@ func TestValidatingPSTSHandler_validatePstsSpec(t *testing.T) { obj := MockPSTS("psts-test") obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeElastic, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + IPAllocationStrategy: networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeElastic, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + }, IPv4: []string{ "10.178.245.1", "10.178.245.2", @@ -135,8 +144,11 @@ func TestValidatingPSTSHandler_validatePstsSpec(t *testing.T) { obj := MockPSTS("psts-test") obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeManual, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + IPAllocationStrategy: networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeManual, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + }, + IPv4Range: []string{ "10.178.245.1/25", }, @@ -144,6 +156,46 @@ func TestValidatingPSTSHandler_validatePstsSpec(t *testing.T) { } return obj }, + }, { + name: "range in custom ip mode", + psts: func() *networkv1alpha1.PodSubnetTopologySpread { + obj := MockPSTS("psts-test") + obj.Spec.Strategy = &networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + EnableReuseIPAddress: true, + TTL: &metav1.Duration{Duration: time.Hour}, + } + obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ + "sbn-aaaaa": { + Custom: []networkv1alpha1.CustomAllocation{ + {Family: k8sutilnet.IPv4, CustomIPRange: []networkv1alpha1.CustomIPRange{{Start: "10.178.245.2", End: "10.178.245.5"}}}, + }, + }, + } + return obj + }, + }, + { + name: "range not in custom ip mode", + psts: func() 
*networkv1alpha1.PodSubnetTopologySpread { + obj := MockPSTS("psts-test") + obj.Spec.Strategy = &networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeCustom, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + EnableReuseIPAddress: true, + TTL: &metav1.Duration{Duration: time.Hour}, + } + obj.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ + "sbn-aaaaa": { + Custom: []networkv1alpha1.CustomAllocation{ + {Family: k8sutilnet.IPv4, CustomIPRange: []networkv1alpha1.CustomIPRange{{Start: "10.0.245.2", End: "10.178.245.5"}}}, + }, + }, + } + return obj + }, + wantErr: 1, }, } for _, tt := range tests { @@ -223,8 +275,10 @@ func TestValidatingPSTSHandler_Handle(t *testing.T) { psts := MockPSTS("psts-test") psts.Spec.Subnets = map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeManual, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + IPAllocationStrategy: networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeManual, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + }, IPv4Range: []string{ "10.178.245.1/25", }, @@ -253,8 +307,10 @@ func TestValidatingPSTSHandler_Handle(t *testing.T) { Name: "psts-test", Subnets: map[string]networkv1alpha1.SubnetAllocation{ "sbn-aaaaa": { - Type: networkv1alpha1.IPAllocTypeManual, - ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + IPAllocationStrategy: networkv1alpha1.IPAllocationStrategy{ + Type: networkv1alpha1.IPAllocTypeManual, + ReleaseStrategy: networkv1alpha1.ReleaseStrategyTTL, + }, IPv4Range: []string{ "10.178.245.1/25", }, diff --git a/test/data/mock_core.go b/test/data/mock_core.go new file mode 100644 index 0000000..0fe29dd --- /dev/null +++ b/test/data/mock_core.go @@ -0,0 +1,40 @@ +package data + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + eniutil "github.com/baidubce/baiducloud-cce-cni-driver/pkg/nodeagent/util/eni" +) + +func MockNode(name, mtype, id string) 
*corev1.Node { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: make(map[string]string), + Labels: map[string]string{ + corev1.LabelInstanceType: mtype, + }, + }, + Spec: corev1.NodeSpec{ + ProviderID: id, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + }, + }, + }, + } + + if mtype == "BCC" { + node.Annotations[eniutil.NodeAnnotationPreAttachedENINum] = "1" + node.Annotations[eniutil.NodeAnnotationMaxENINum] = "8" + node.Annotations[eniutil.NodeAnnotationMaxIPPerENI] = "8" + node.Annotations[eniutil.NodeAnnotationWarmIPTarget] = "8" + } + return node +} diff --git a/test/data/mock_networking.go b/test/data/mock_networking.go index 49e23a4..1aa3e7a 100644 --- a/test/data/mock_networking.go +++ b/test/data/mock_networking.go @@ -31,6 +31,45 @@ func MockSubnet(namespace, name, cidr string) *networkingv1alpha1.Subnet { } } +func MockPodSubnetTopologySpreadWithSubnet(namespace, name string, subnet *networkingv1alpha1.Subnet, label labels.Set) *networkingv1alpha1.PodSubnetTopologySpread { + return &networkingv1alpha1.PodSubnetTopologySpread{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: networkingv1alpha1.PodSubnetTopologySpreadSpec{ + Name: name, + Subnets: map[string]networkingv1alpha1.SubnetAllocation{ + subnet.Name: { + IPAllocationStrategy: networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeElastic, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + }, + }, + }, + EnablePodTopologySpread: true, + Priority: 1, + Selector: metav1.SetAsLabelSelector(label), + }, + Status: networkingv1alpha1.PodSubnetTopologySpreadStatus{ + AvailableSubnets: map[string]networkingv1alpha1.SubnetPodStatus{ + subnet.Name: { + SubenetDetail: networkingv1alpha1.SubenetDetail{ + Enable: subnet.Status.Enable, + HasNoMoreIP: 
subnet.Status.HasNoMoreIP, + ID: subnet.Spec.ID, + Name: subnet.Spec.Name, + CIDR: subnet.Spec.CIDR, + AvailabilityZone: subnet.Spec.AvailabilityZone, + AvailableIPNum: subnet.Status.AvailableIPNum, + }, + }, + }, + }, + } +} + // mock a PodSubnetTopologySpread func MockPodSubnetTopologySpread(namespace, name, subnet string, label labels.Set) *networkingv1alpha1.PodSubnetTopologySpread { return &networkingv1alpha1.PodSubnetTopologySpread{ @@ -43,10 +82,16 @@ func MockPodSubnetTopologySpread(namespace, name, subnet string, label labels.Se Name: name, Subnets: map[string]networkingv1alpha1.SubnetAllocation{ subnet: { - Type: networkingv1alpha1.IPAllocTypeElastic, - ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + IPAllocationStrategy: networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeElastic, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + }, }, }, + Strategy: &networkingv1alpha1.IPAllocationStrategy{ + Type: networkingv1alpha1.IPAllocTypeElastic, + ReleaseStrategy: networkingv1alpha1.ReleaseStrategyTTL, + }, EnablePodTopologySpread: true, Priority: 1, Selector: metav1.SetAsLabelSelector(label), @@ -81,6 +126,7 @@ func MockFixedWorkloadEndpoint() *networkingv1alpha1.WorkloadEndpoint { EnableFixIP: "True", SubnetTopologyReference: "psts-test", FixIPDeletePolicy: string(networkingv1alpha1.ReleaseStrategyNever), + Phase: networkingv1alpha1.WorkloadEndpointPhasePodRuning, }, } } diff --git a/test/data/psts/custom/custom-psts.yaml b/test/data/psts/custom/custom-psts.yaml new file mode 100644 index 0000000..3343fc4 --- /dev/null +++ b/test/data/psts/custom/custom-psts.yaml @@ -0,0 +1,25 @@ +apiVersion: cce.io/v1alpha1 +kind: PodSubnetTopologySpread +metadata: + name: custom-psts + namespace: default +spec: + priority: 0 + subnets: + # exclude-f-2 172.22.79.0/24 + sbn-gxz5a4f0dtzp: + custom: + - family: "4" + customRanges: + - start: 172.22.79.3 + end: 172.22.79.10 + # exclude-f-2 172.22.79.0/24 + # sbn-gxz5a4f0dtzp: {} + 
maxSkew: 10 + whenUnsatisfiable: ScheduleAnyway + enablePodTopologySpread: true + strategy: + type: Custom + releaseStrategy: TTL + ttl: 3m + enableReuseIPAddress: false diff --git a/test/data/psts/custom/custom-reuse-psts.yaml b/test/data/psts/custom/custom-reuse-psts.yaml new file mode 100644 index 0000000..1faa999 --- /dev/null +++ b/test/data/psts/custom/custom-reuse-psts.yaml @@ -0,0 +1,30 @@ +apiVersion: cce.io/v1alpha1 +kind: PodSubnetTopologySpread +metadata: + name: custom-reuse + namespace: default +spec: + priority: 11 + subnets: + # exclude-f-1 172.22.78.0/24 + sbn-eu0hnb6g7v5z: + custom: + - family: "4" + customIPRange: + - start: 172.22.78.4 + end: 172.22.78.6 + # exclude-f-2 172.22.79.0/24 + # sbn-gxz5a4f0dtzp: {} + maxSkew: 10 + whenUnsatisfiable: ScheduleAnyway + enablePodTopologySpread: true + strategy: + type: Custom + releaseStrategy: TTL + ttl: 3m + enableReuseIPAddress: true + selector: + matchLabels: + workloadType: sts + reuseIP: "true" + app: reuseIPApp diff --git a/test/data/psts/custom/elastic-deploy.yaml b/test/data/psts/custom/elastic-deploy.yaml new file mode 100644 index 0000000..4fe50b1 --- /dev/null +++ b/test/data/psts/custom/elastic-deploy.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: custom-elastic-deploy +spec: + replicas: 30 + selector: + matchLabels: + app: custom-override + template: + metadata: + labels: + app: custom-override + spec: + containers: + - name: myapp + image: registry.baidubce.com/cce-plugin-dev/system-config-override:0.0.4 + command: + - /bin/bash + - -c + - sleep 3600 + ports: + - containerPort: 8068 + name: web \ No newline at end of file diff --git a/test/data/psts/custom/reuse-sts.yaml b/test/data/psts/custom/reuse-sts.yaml new file mode 100644 index 0000000..960c115 --- /dev/null +++ b/test/data/psts/custom/reuse-sts.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: reuseip +spec: + serviceName: reuseip + replicas: 3 + selector: + 
matchLabels: + workloadType: sts + reuseIP: "true" + app: reuseIPApp + template: + metadata: + labels: + workloadType: sts + reuseIP: "true" + app: reuseIPApp + spec: + containers: + - name: myapp + image: registry.baidubce.com/cce-plugin-dev/system-config-override:0.0.4 + command: + - /bin/bash + - -c + - sleep 3600 + ports: + - containerPort: 8068 + name: web \ No newline at end of file diff --git a/test/data/psts/elastic/default_psts.yaml b/test/data/psts/elastic/default_psts.yaml index a6766b8..2d9e72f 100644 --- a/test/data/psts/elastic/default_psts.yaml +++ b/test/data/psts/elastic/default_psts.yaml @@ -6,10 +6,10 @@ metadata: spec: priority: 0 subnets: - # elastic-f 192.168.100.0/24 - sbn-xcee24pjj0p1: {} + # elastic-f sbn-50st4bb33pri + sbn-50st4bb33pri: {} # elastic-c 192.168.101.0/24 - sbn-e8rk4zxn2ys6: {} + # sbn-e8rk4zxn2ys6: {} maxSkew: 10 whenUnsatisfiable: ScheduleAnyway enablePodTopologySpread: true \ No newline at end of file diff --git a/test/data/psts/fixed/fixed-ip-pstt.yaml b/test/data/psts/fixed/fixed-ip-pstt.yaml index c7cab10..db20bf0 100644 --- a/test/data/psts/fixed/fixed-ip-pstt.yaml +++ b/test/data/psts/fixed/fixed-ip-pstt.yaml @@ -5,13 +5,13 @@ metadata: spec: - name: fixed-ip-psts-test subnets: - sbn-mxxvp4gfdjyv: + sbn-gxz5a4f0dtzp: type: Fixed releaseStrategy: Never ipv4: - - 192.168.78.4 - - 192.168.78.5 - - 192.168.78.6 + - 172.22.79.41 + - 172.22.79.42 + - 172.22.79.50 priority: 10 selector: matchLabels: @@ -20,14 +20,14 @@ spec: app: fixedIPApp - name: fixed-ip-psts-ttl subnets: - sbn-mxxvp4gfdjyv: + sbn-gxz5a4f0dtzp: type: Fixed releaseStrategy: TTL ipv4: - - 192.168.78.100 - - 192.168.78.102 - - 192.168.78.104 - - 192.168.78.106 + - 172.22.79.100 + - 172.22.79.102 + - 172.22.79.104 + - 172.22.79.106 priority: 10 selector: matchLabels: diff --git a/test/data/psts/munual/munual-ip-pstt.yaml b/test/data/psts/munual/munual-ip-pstt.yaml index c596da0..f736d93 100644 --- a/test/data/psts/munual/munual-ip-pstt.yaml +++ 
b/test/data/psts/munual/munual-ip-pstt.yaml @@ -5,17 +5,17 @@ metadata: spec: name: fixed-ip-psts-test subnets: - sbn-mxxvp4gfdjyv: + sbn-gxz5a4f0dtzp: type: Manual releaseStrategy: TTL ipv4Range: - - 192.168.78.228/32 - - 192.168.78.229/32 - sbn-s985prieeicc: + - 172.22.79.228/32 + - 172.22.79.229/32 + sbn-eu0hnb6g7v5z: type: Manual releaseStrategy: TTL ipv4Range: - - 192.168.79.228/31 + - 172.22.78.228/31 priority: 10 selector: